linux/kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })
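
/*
 * Usage sketch (not from the original file): these macros evaluate the
 * condition, shut ftrace down via ftrace_kill() on failure, and still
 * yield the condition's value, so they can sit directly in an if():
 *
 *      if (FTRACE_WARN_ON(!ops))
 *              return -EINVAL;
 *
 * The '!ops' check is a hypothetical example condition.
 */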

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop __read_mostly;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
        struct list_head list;
        struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)                 \
        op = rcu_dereference_raw(list);                 \
        do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)                            \
        while (likely(op = rcu_dereference_raw((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))

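/*
 * Usage sketch, mirroring the real call site in ftrace_global_list_func()
 * below; the pair expands to a do/while loop over the RCU-protected list:
 *
 *      do_for_each_ftrace_op(op, ftrace_global_list) {
 *              op->func(ip, parent_ip, op, regs);
 *      } while_for_each_ftrace_op(op);
 */
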
/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
        struct ftrace_ops *ops;
        int cnt = 0;

        mutex_lock(&ftrace_lock);

        for (ops = ftrace_ops_list;
             ops != &ftrace_list_end; ops = ops->next)
                cnt++;

        mutex_unlock(&ftrace_lock);

        return cnt;
}

static void
ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
{
        int bit;

        bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
        if (bit < 0)
                return;

        do_for_each_ftrace_op(op, ftrace_global_list) {
                op->func(ip, parent_ip, op, regs);
        } while_for_each_ftrace_op(op);

        trace_clear_recursion(bit);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
{
        if (!test_tsk_trace_trace(current))
                return;

        ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
        /* do not set ftrace_pid_function to itself! */
        if (func != ftrace_pid_func)
                ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag before the change takes effect.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
        int cpu;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
        int __percpu *disabled;

        disabled = alloc_percpu(int);
        if (!disabled)
                return -ENOMEM;

        ops->disabled = disabled;
        control_ops_disable_all(ops);
        return 0;
}

static void control_ops_free(struct ftrace_ops *ops)
{
        free_percpu(ops->disabled);
}

static void update_global_ops(void)
{
        ftrace_func_t func;

        /*
         * If there's only one function registered, then call that
         * function directly. Otherwise, we need to iterate over the
         * registered callers.
         */
        if (ftrace_global_list == &ftrace_list_end ||
            ftrace_global_list->next == &ftrace_list_end) {
                func = ftrace_global_list->func;
                /*
                 * As we are calling the function directly, if it
                 * does not have recursion protection, the
                 * function_trace_op needs to be updated accordingly.
                 */
                if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
                        global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
                else
                        global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
        } else {
                func = ftrace_global_list_func;
                /* The list has its own recursion protection. */
                global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
        }


        /* If we filter on pids, update to use the pid function */
        if (!list_empty(&ftrace_pids)) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        }

        global_ops.func = func;
}

static void update_ftrace_function(void)
{
        ftrace_func_t func;

        update_global_ops();

        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
         * then have the mcount trampoline call the function directly.
         */
        if (ftrace_ops_list == &ftrace_list_end ||
            (ftrace_ops_list->next == &ftrace_list_end &&
             !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
             (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
             !FTRACE_FORCE_LIST_FUNC)) {
                /* Set the ftrace_ops that the arch callback uses */
                if (ftrace_ops_list == &global_ops)
                        function_trace_op = ftrace_global_list;
                else
                        function_trace_op = ftrace_ops_list;
                func = ftrace_ops_list->func;
        } else {
                /* Just use the default ftrace_ops */
                function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }

        ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        ops->next = *list;
        /*
         * We are entering ops into the list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the list.
         */
        rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (*list == ops && ops->next == &ftrace_list_end) {
                *list = &ftrace_list_end;
                return 0;
        }

        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;
        return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
                                struct ftrace_ops *main_ops,
                                struct ftrace_ops *ops)
{
        int first = *list == &ftrace_list_end;
        add_ftrace_ops(list, ops);
        if (first)
                add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
                                  struct ftrace_ops *main_ops,
                                  struct ftrace_ops *ops)
{
        int ret = remove_ftrace_ops(list, ops);
        if (!ret && *list == &ftrace_list_end)
                ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
        return ret;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        if (unlikely(ftrace_disabled))
                return -ENODEV;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;

        /* We don't support both control and global flags set. */
        if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
                return -EINVAL;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
         * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
         */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
            !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
                ops->flags |= FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}
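
/*
 * Registration sketch (hedged; this mirrors the public ftrace API rather
 * than code in this file). A typical caller fills in an ftrace_ops and
 * registers it through the exported wrapper:
 *
 *      static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                              struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              // count or inspect the call at 'ip' here
 *      }
 *
 *      static struct ftrace_ops my_ops = {
 *              .func  = my_callback,
 *              .flags = FTRACE_OPS_FL_RECURSION_SAFE,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *
 * 'my_callback' and 'my_ops' are hypothetical names.
 */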

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (ftrace_disabled)
                return -ENODEV;

        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                ret = remove_ftrace_list_ops(&ftrace_global_list,
                                             &global_ops, ops);
                if (!ret)
                        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                ret = remove_ftrace_list_ops(&ftrace_control_list,
                                             &control_ops, ops);
                if (!ret) {
                        /*
                         * The ftrace_ops is now removed from the list,
                         * so there'll be no new users. We must ensure
                         * all current users are done before we free
                         * the control data.
                         */
                        synchronize_sched();
                        control_ops_free(ops);
                }
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);

        if (ret < 0)
                return ret;

        if (ftrace_enabled)
                update_ftrace_function();

        /*
         * Dynamic ops may be freed, we must make sure that all
         * callers are done before leaving this function.
         */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
                synchronize_sched();

        return 0;
}

static void ftrace_update_pid_func(void)
{
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
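
/*
 * Back-of-the-envelope numbers (an assumption-laden sketch, not part of
 * the original source): on a 64-bit build with the graph tracer enabled,
 * struct ftrace_profile is 48 bytes (16-byte hlist_node plus four 8-byte
 * fields) and the page header is 16 bytes, so with 4K pages
 * PROFILES_PER_PAGE works out to (4096 - 16) / 48 = 85 records.
 */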

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;
        else
                return 0;
}
#else
/* not function graph; compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;
        else
                return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "  Function                               "
                   "Hit    Time            Avg             s^2\n"
                      "  --------                               "
                   "---    ----            ---             ---\n");
#else
        seq_printf(m, "  Function                               Hit\n"
                      "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "    ");
        avg = rec->time;
        do_div(avg, rec->counter);

        /* Sample variance (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                stddev = rec->time_squared - rec->counter * avg * avg;
                /*
                 * Divide by only 1000 for the ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide 1000 again.
                 */
                do_div(stddev, (rec->counter - 1) * 1000);
        }

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}
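
/*
 * For reference, the stddev computation above is the shortcut form of the
 * sample variance: s^2 = (sum(t^2) - n * avg^2) / (n - 1), where
 * rec->time_squared holds sum(t^2), rec->counter is n and avg is sum(t)/n.
 * The division by 1000 there plus the one inside
 * trace_print_graph_duration() together turn ns^2 into us^2.
 */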

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. With past experience
         * we have around 20K functions. That should be more than enough.
         * It is highly unlikely we will execute every function in
         * the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 0; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}
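
/*
 * A rough sizing example (hedged; exact numbers are config-dependent):
 * using the ~85 records per 4K page estimated earlier, the 20000-function
 * fallback above needs DIV_ROUND_UP(20000, 85) = 236 extra pages, i.e. a
 * bit under 1MB of profile records per CPU.
 */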

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        if (!ftrace_profile_bits) {
                size--;

                for (; size; size >>= 1)
                        ftrace_profile_bits++;
        }

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}
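
/*
 * The bit computation above, traced by hand: size starts at 1024, size--
 * makes it 1023 (0x3ff), and the loop increments ftrace_profile_bits for
 * 1023, 511, ..., 1 (ten iterations), so the result is 10, which is
 * log2(FTRACE_PROFILE_HASH_SIZE), as hash_long() expects.
 */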

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_online_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        unsigned long key;

        key = hash_long(ip, ftrace_profile_bits);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, ftrace_profile_bits);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
                      struct ftrace_ops *ops, struct pt_regs *regs)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        function_profile_call(trace->func, 0, NULL, NULL);
        return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zero'd, ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
                int index;

                index = trace->depth;

                /* Append this call time to the parent time to subtract */
                if (index)
                        current->ret_stack[index - 1].subtime += calltime;

                if (current->ret_stack[index].subtime < calltime)
                        calltime -= current->ret_stack[index].subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}
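
/*
 * Worked example of the subtime handling above (hypothetical numbers):
 * if a function ran for 10us total and its children accounted for 7us in
 * ret_stack[depth].subtime, then with GRAPH_TIME off only 3us of self
 * time is charged to the record, while the full 10us was already added
 * to the parent frame's subtime for the parent to subtract in turn.
 */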

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&profile_graph_return,
                                     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler() calls stop_machine(),
                         * so this acts like a synchronize_sched().
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}
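
/*
 * From user space this is driven through debugfs (a usage sketch; the
 * mount point may differ on a given system):
 *
 *      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *      cat /sys/kernel/debug/tracing/trace_stat/function0
 *      echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 *
 * Writing a non-zero value initializes the per-cpu stats and registers
 * the profiler; writing 0 tears it down, as implemented above.
 */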

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                /* allocate enough for function name + cpu number */
                name = kmalloc(32, GFP_KERNEL);
                if (!name) {
                        /*
                         * The files created are permanent; if something
                         * goes wrong, we still do not free this memory.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = debugfs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, whence);
        else
                file->f_pos = ret = 1;

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;
        struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct rcu_head         rcu;
};

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
};

struct ftrace_hash {
        unsigned long           size_bits;
        struct hlist_head       *buckets;
        unsigned long           count;
        struct rcu_head         rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
        .buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
};

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
        int                     size;
};

static struct ftrace_page *ftrace_new_pgs;

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static bool ftrace_hash_empty(struct ftrace_hash *hash)
{
        return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;

        if (ftrace_hash_empty(hash))
                return NULL;

        if (hash->size_bits > 0)
                key = hash_long(ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];

        hlist_for_each_entry_rcu(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
        return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
                             struct ftrace_func_entry *entry)
{
        struct hlist_head *hhd;
        unsigned long key;

        if (hash->size_bits)
                key = hash_long(entry->ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
        struct ftrace_func_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->ip = ip;
        __add_hash_entry(hash, entry);

        return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        kfree(entry);
        hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
        struct hlist_head *hhd;
        struct hlist_node *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;

        if (!hash->count)
                return;

        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        ftrace_hash_clear(hash);
        kfree(hash->buckets);
        kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
        struct ftrace_hash *hash;

        hash = container_of(rcu, struct ftrace_hash, rcu);
        free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
        free_ftrace_hash(ops->filter_hash);
        free_ftrace_hash(ops->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
        struct ftrace_hash *hash;
        int size;

        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
        if (!hash)
                return NULL;

        size = 1 << size_bits;
        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

        if (!hash->buckets) {
                kfree(hash);
                return NULL;
        }

        hash->size_bits = size_bits;

        return hash;
}
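
/*
 * A small usage sketch for the hash helpers above (illustrative only;
 * the real callers are the filter handling functions later in the file):
 *
 *      struct ftrace_hash *hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *
 *      if (hash && !add_hash_entry(hash, ip))
 *              entry = ftrace_lookup_ip(hash, ip);
 *      free_ftrace_hash(hash);
 *
 * Here 'ip' would be the address of a traced function and 'entry' a
 * struct ftrace_func_entry pointer; both names are placeholders.
 */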

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        int size;
        int ret;
        int i;

        new_hash = alloc_ftrace_hash(size_bits);
        if (!new_hash)
                return NULL;

        /* Empty hash? */
        if (ftrace_hash_empty(hash))
                return new_hash;

        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
                }
        }

        FTRACE_WARN_ON(new_hash->count != hash->count);

        return new_hash;

 free_hash:
        free_ftrace_hash(new_hash);
        return NULL;
}

static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
{
        struct ftrace_func_entry *entry;
        struct hlist_node *tn;
        struct hlist_head *hhd;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *new_hash;
        unsigned long key;
        int size = src->count;
        int bits = 0;
        int ret;
        int i;

        /*
         * Remove the current set, update the hash and add
         * them back.
         */
        ftrace_hash_rec_disable(ops, enable);

        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
                /* still need to update the function records */
                ret = 0;
                goto out;
        }

        /*
         * Make the hash size about 1/2 the # found
         */
        for (size /= 2; size; size >>= 1)
                bits++;

        /* Don't allocate too much */
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;

        ret = -ENOMEM;
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
                goto out;

        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                        if (bits > 0)
                                key = hash_long(entry->ip, bits);
                        else
                                key = 0;
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
        }

        old_hash = *dst;
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);

        ret = 0;
 out:
        /*
         * Enable regardless of ret:
         *  On success, we enable the new hash.
         *  On failure, we re-enable the original hash.
         */
        ftrace_hash_rec_enable(ops, enable);

        return ret;
}
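
/*
 * Sizing example for the shift loop above (hypothetical count): with
 * src->count = 12, size /= 2 gives 6, and the loop runs for 6, 3, 1,
 * leaving bits = 3, i.e. an 8-bucket hash, roughly half as many buckets
 * as entries, matching the "about 1/2 the # found" comment.
 */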

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
        struct ftrace_hash *filter_hash;
        struct ftrace_hash *notrace_hash;
        int ret;

        filter_hash = rcu_dereference_raw(ops->filter_hash);
        notrace_hash = rcu_dereference_raw(ops->notrace_hash);

        if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
            (ftrace_hash_empty(notrace_hash) ||
             !ftrace_lookup_ip(notrace_hash, ip)))
                ret = 1;
        else
                ret = 0;

        return ret;
}

/*
 * This is a double for loop. Do not use 'break' to break out of the
 * loop; you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }
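
/*
 * Usage sketch, as in the __ftrace_hash_rec_update() walk below:
 *
 *      do_for_each_ftrace_rec(pg, rec) {
 *              if (rec->ip == target)
 *                      goto found;
 *      } while_for_each_ftrace_rec();
 *
 * 'target' is a placeholder address; note the goto rather than a break,
 * per the warning above.
 */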


static int ftrace_cmp_recs(const void *a, const void *b)
{
        const struct dyn_ftrace *key = a;
        const struct dyn_ftrace *rec = b;

        if (key->flags < rec->ip)
                return -1;
        if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
                return 1;
        return 0;
}

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        struct dyn_ftrace key;

        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                if (end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
                        continue;
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
                if (rec)
                        return rec->ip;
        }

        return 0;
}

/**
 * ftrace_location - return true if the given ip is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if the given @ip is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
        return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if the range from @start to @end contains an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(void *start, void *end)
{
        unsigned long ret;

        ret = ftrace_location_range((unsigned long)start,
                                    (unsigned long)end);

        return (int)!!ret;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
{
        struct ftrace_hash *hash;
        struct ftrace_hash *other_hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int count = 0;
        int all = 0;

        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
                return;

        /*
         * In the filter_hash case:
         *   If the count is zero, we update all records.
         *   Otherwise we just update the items in the hash.
         *
         * In the notrace_hash case:
         *   We enable the update in the hash.
         *   As disabling notrace means enabling the tracing,
         *   and enabling notrace means disabling, the inc variable
         *   gets inverted.
         */
        if (filter_hash) {
                hash = ops->filter_hash;
                other_hash = ops->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
                hash = ops->notrace_hash;
                other_hash = ops->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
                 */
                if (ftrace_hash_empty(hash))
                        return;
        }

        do_for_each_ftrace_rec(pg, rec) {
                int in_other_hash = 0;
                int in_hash = 0;
                int match = 0;

                if (all) {
                        /*
                         * Only the filter_hash affects all records.
                         * Update if the record is not in the notrace hash.
                         */
                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
                                match = 1;
                } else {
                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

                        /*
                         * If filter_hash is set, match records that are in
                         * this hash and not in the notrace hash.
                         * For a notrace update, match records that are in
                         * the notrace hash and are also being traced (in
                         * the filter hash, or the filter hash is empty).
                         */
                        if (filter_hash && in_hash && !in_other_hash)
                                match = 1;
                        else if (!filter_hash && in_hash &&
                                 (in_other_hash || ftrace_hash_empty(other_hash)))
                                match = 1;
                }
                if (!match)
                        continue;

                if (inc) {
                        rec->flags++;
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
                                return;
                        /*
                         * If any ops wants regs saved for this function
                         * then all ops will get saved regs.
                         */
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                                rec->flags |= FTRACE_FL_REGS;
                } else {
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
                                return;
                        rec->flags--;
                }
                count++;
                /* Shortcut, if we handled all records, we are done. */
                if (!all && count == hash->count)
                        return;
        } while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
                                    int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
                                   int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, unsigned long ip)
{
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on modifying ");
                print_ip_sym(ip);
                break;
        case -EINVAL:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" actual: ", (unsigned char *)ip);
                printk(KERN_CONT "\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on writing ");
                print_ip_sym(ip);
                break;
        default:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
}

static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
        unsigned long flag = 0UL;

        /*
         * If we are updating calls:
         *
         *   If the record has a ref count, then we need to enable it
         *   because someone is using it.
         *   Otherwise we make sure it is disabled.
1682         *
1683         * If we are disabling calls, then disable all records that
1684         * are enabled.
1685         */
1686        if (enable && (rec->flags & ~FTRACE_FL_MASK))
1687                flag = FTRACE_FL_ENABLED;
1688
1689        /*
1690         * If enabling and the REGS flag does not match the REGS_EN, then
1691         * do not ignore this record. Set flags to fail the compare against
1692         * ENABLED.
1693         */
1694        if (flag &&
1695            (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1696                flag |= FTRACE_FL_REGS;
1697
1698        /* If the state of this record hasn't changed, then do nothing */
1699        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1700                return FTRACE_UPDATE_IGNORE;
1701
1702        if (flag) {
1703                /* Save off if rec is being enabled (for return value) */
1704                flag ^= rec->flags & FTRACE_FL_ENABLED;
1705
1706                if (update) {
1707                        rec->flags |= FTRACE_FL_ENABLED;
1708                        if (flag & FTRACE_FL_REGS) {
1709                                if (rec->flags & FTRACE_FL_REGS)
1710                                        rec->flags |= FTRACE_FL_REGS_EN;
1711                                else
1712                                        rec->flags &= ~FTRACE_FL_REGS_EN;
1713                        }
1714                }
1715
1716                /*
1717                 * If this record is being updated from a nop, then
1718                 *   return UPDATE_MAKE_CALL.
1719                 * Otherwise, if the EN flag is set, then return
1720                 *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1721                 *   from the non-save regs, to a save regs function.
1722                 * Otherwise,
1723                 *   return UPDATE_MODIFY_CALL to tell the caller to convert
1724                 *   from the save regs, to a non-save regs function.
1725                 */
1726                if (flag & FTRACE_FL_ENABLED)
1727                        return FTRACE_UPDATE_MAKE_CALL;
1728                else if (rec->flags & FTRACE_FL_REGS_EN)
1729                        return FTRACE_UPDATE_MODIFY_CALL_REGS;
1730                else
1731                        return FTRACE_UPDATE_MODIFY_CALL;
1732        }
1733
1734        if (update) {
1735                /* If there are no more users, clear all flags */
1736                if (!(rec->flags & ~FTRACE_FL_MASK))
1737                        rec->flags = 0;
1738                else
1739                        /* Just disable the record (keep REGS state) */
1740                        rec->flags &= ~FTRACE_FL_ENABLED;
1741        }
1742
1743        return FTRACE_UPDATE_MAKE_NOP;
1744}
1745
1746/**
1747 * ftrace_update_record, update a record to reflect whether it is traced
1748 * @rec: the record to update
1749 * @enable: set to 1 if the record is to be traced, zero to force disable
1750 *
1751 * The records that represent all functions that can be traced need
1752 * to be updated when tracing has been enabled.
1753 */
1754int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1755{
1756        return ftrace_check_record(rec, enable, 1);
1757}
1758
1759/**
1760 * ftrace_test_record, check if the record has been enabled or not
1761 * @rec: the record to test
1762 * @enable: set to 1 to check if enabled, 0 if it is disabled
1763 *
1764 * The arch code may need to test if a record is already set to
1765 * tracing to determine how to modify the function code that it
1766 * represents.
1767 */
1768int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1769{
1770        return ftrace_check_record(rec, enable, 0);
1771}
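
/*
 * Sketch of how an arch might combine the two helpers above
 * (illustrative; the patching steps are placeholders): query the
 * pending transition with ftrace_test_record() while preparing the new
 * instruction, then commit the state change with ftrace_update_record()
 * once the site has actually been modified:
 *
 *	ret = ftrace_test_record(rec, enable);
 *	switch (ret) {
 *	case FTRACE_UPDATE_IGNORE:
 *		return 0;
 *	case FTRACE_UPDATE_MAKE_CALL:
 *	case FTRACE_UPDATE_MODIFY_CALL:
 *	case FTRACE_UPDATE_MODIFY_CALL_REGS:
 *		... prepare the call instruction ...
 *		break;
 *	case FTRACE_UPDATE_MAKE_NOP:
 *		... prepare the nop instruction ...
 *		break;
 *	}
 *	... patch the site, then commit:
 *	ftrace_update_record(rec, enable);
 */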
1772
1773static int
1774__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1775{
1776        unsigned long ftrace_old_addr;
1777        unsigned long ftrace_addr;
1778        int ret;
1779
1780        ret = ftrace_update_record(rec, enable);
1781
1782        if (rec->flags & FTRACE_FL_REGS)
1783                ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1784        else
1785                ftrace_addr = (unsigned long)FTRACE_ADDR;
1786
1787        switch (ret) {
1788        case FTRACE_UPDATE_IGNORE:
1789                return 0;
1790
1791        case FTRACE_UPDATE_MAKE_CALL:
1792                return ftrace_make_call(rec, ftrace_addr);
1793
1794        case FTRACE_UPDATE_MAKE_NOP:
1795                return ftrace_make_nop(NULL, rec, ftrace_addr);
1796
1797        case FTRACE_UPDATE_MODIFY_CALL_REGS:
1798        case FTRACE_UPDATE_MODIFY_CALL:
1799                if (rec->flags & FTRACE_FL_REGS)
1800                        ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1801                else
1802                        ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1803
1804                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1805        }
1806
1807        return -1; /* unknown ftrace bug */
1808}
1809
1810void __weak ftrace_replace_code(int enable)
1811{
1812        struct dyn_ftrace *rec;
1813        struct ftrace_page *pg;
1814        int failed;
1815
1816        if (unlikely(ftrace_disabled))
1817                return;
1818
1819        do_for_each_ftrace_rec(pg, rec) {
1820                failed = __ftrace_replace_code(rec, enable);
1821                if (failed) {
1822                        ftrace_bug(failed, rec->ip);
1823                        /* Stop processing */
1824                        return;
1825                }
1826        } while_for_each_ftrace_rec();
1827}
1828
1829struct ftrace_rec_iter {
1830        struct ftrace_page      *pg;
1831        int                     index;
1832};
1833
1834/**
1835 * ftrace_rec_iter_start, start up iterating over traced functions
1836 *
1837 * Returns an iterator handle that is used to iterate over all
1838 * the records that represent address locations where functions
1839 * are traced.
1840 *
1841 * May return NULL if no records are available.
1842 */
1843struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1844{
1845        /*
1846         * We only use a single iterator.
1847         * Protected by the ftrace_lock mutex.
1848         */
1849        static struct ftrace_rec_iter ftrace_rec_iter;
1850        struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1851
1852        iter->pg = ftrace_pages_start;
1853        iter->index = 0;
1854
1855        /* Could have empty pages */
1856        while (iter->pg && !iter->pg->index)
1857                iter->pg = iter->pg->next;
1858
1859        if (!iter->pg)
1860                return NULL;
1861
1862        return iter;
1863}
1864
1865/**
1866 * ftrace_rec_iter_next, get the next record to process.
1867 * @iter: The handle to the iterator.
1868 *
1869 * Returns the next iterator after the given iterator @iter.
1870 */
1871struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1872{
1873        iter->index++;
1874
1875        if (iter->index >= iter->pg->index) {
1876                iter->pg = iter->pg->next;
1877                iter->index = 0;
1878
1879                /* Could have empty pages */
1880                while (iter->pg && !iter->pg->index)
1881                        iter->pg = iter->pg->next;
1882        }
1883
1884        if (!iter->pg)
1885                return NULL;
1886
1887        return iter;
1888}
1889
1890/**
1891 * ftrace_rec_iter_record, get the record at the iterator location
1892 * @iter: The current iterator location
1893 *
1894 * Returns the record that the current @iter is at.
1895 */
1896struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1897{
1898        return &iter->pg->records[iter->index];
1899}
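
/*
 * Typical use of the iterator API above by arch code (a sketch; the
 * patching step is a placeholder):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... operate on rec->ip here ...
 *	}
 *
 * Since there is only the single static iterator, such a walk must be
 * serialized by the ftrace_lock mutex.
 */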
1900
1901static int
1902ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1903{
1904        unsigned long ip;
1905        int ret;
1906
1907        ip = rec->ip;
1908
1909        if (unlikely(ftrace_disabled))
1910                return 0;
1911
1912        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1913        if (ret) {
1914                ftrace_bug(ret, ip);
1915                return 0;
1916        }
1917        return 1;
1918}
1919
1920/*
1921 * archs can override this function if they must do something
1922 * before the modifying code is performed.
1923 */
1924int __weak ftrace_arch_code_modify_prepare(void)
1925{
1926        return 0;
1927}
1928
1929/*
1930 * archs can override this function if they must do something
1931 * after the modifying code is performed.
1932 */
1933int __weak ftrace_arch_code_modify_post_process(void)
1934{
1935        return 0;
1936}
1937
1938void ftrace_modify_all_code(int command)
1939{
1940        if (command & FTRACE_UPDATE_CALLS)
1941                ftrace_replace_code(1);
1942        else if (command & FTRACE_DISABLE_CALLS)
1943                ftrace_replace_code(0);
1944
1945        if (command & FTRACE_UPDATE_TRACE_FUNC)
1946                ftrace_update_ftrace_func(ftrace_trace_function);
1947
1948        if (command & FTRACE_START_FUNC_RET)
1949                ftrace_enable_ftrace_graph_caller();
1950        else if (command & FTRACE_STOP_FUNC_RET)
1951                ftrace_disable_ftrace_graph_caller();
1952}
1953
1954static int __ftrace_modify_code(void *data)
1955{
1956        int *command = data;
1957
1958        ftrace_modify_all_code(*command);
1959
1960        return 0;
1961}
1962
1963/**
1964 * ftrace_run_stop_machine, go back to the stop machine method
1965 * @command: The command to tell ftrace what to do
1966 *
1967 * If an arch needs to fall back to the stop machine method, it
1968 * can call this function.
1969 */
1970void ftrace_run_stop_machine(int command)
1971{
1972        stop_machine(__ftrace_modify_code, &command, NULL);
1973}
1974
1975/**
1976 * arch_ftrace_update_code, modify the code to trace or not trace
1977 * @command: The command that needs to be done
1978 *
1979 * Archs can override this function if they do not need to
1980 * run stop_machine() to modify code.
1981 */
1982void __weak arch_ftrace_update_code(int command)
1983{
1984        ftrace_run_stop_machine(command);
1985}
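
/*
 * An arch that can patch text safely while other CPUs keep running
 * might override the weak function above roughly like this (a sketch;
 * my_text_poke_sync() is a hypothetical arch-specific helper):
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *		my_text_poke_sync();	// make changes visible to all CPUs
 *	}
 *
 * Falling back is always possible by calling ftrace_run_stop_machine().
 */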
1986
1987static void ftrace_run_update_code(int command)
1988{
1989        int ret;
1990
1991        ret = ftrace_arch_code_modify_prepare();
1992        FTRACE_WARN_ON(ret);
1993        if (ret)
1994                return;
1995        /*
1996         * Do not call function tracer while we update the code.
1997         * We are in stop machine.
1998         */
1999        function_trace_stop++;
2000
2001        /*
2002         * By default we use stop_machine() to modify the code.
2003         * But archs can do whatever they want as long as it
2004         * is safe. stop_machine() is the safest, but also
2005         * produces the most overhead.
2006         */
2007        arch_ftrace_update_code(command);
2008
2009        function_trace_stop--;
2010
2011        ret = ftrace_arch_code_modify_post_process();
2012        FTRACE_WARN_ON(ret);
2013}
2014
2015static ftrace_func_t saved_ftrace_func;
2016static int ftrace_start_up;
2017static int global_start_up;
2018
2019static void ftrace_startup_enable(int command)
2020{
2021        if (saved_ftrace_func != ftrace_trace_function) {
2022                saved_ftrace_func = ftrace_trace_function;
2023                command |= FTRACE_UPDATE_TRACE_FUNC;
2024        }
2025
2026        if (!command || !ftrace_enabled)
2027                return;
2028
2029        ftrace_run_update_code(command);
2030}
2031
2032static int ftrace_startup(struct ftrace_ops *ops, int command)
2033{
2034        bool hash_enable = true;
2035
2036        if (unlikely(ftrace_disabled))
2037                return -ENODEV;
2038
2039        ftrace_start_up++;
2040        command |= FTRACE_UPDATE_CALLS;
2041
2042        /* ops marked global share the filter hashes */
2043        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2044                ops = &global_ops;
2045                /* Don't update hash if global is already set */
2046                if (global_start_up)
2047                        hash_enable = false;
2048                global_start_up++;
2049        }
2050
2051        ops->flags |= FTRACE_OPS_FL_ENABLED;
2052        if (hash_enable)
2053                ftrace_hash_rec_enable(ops, 1);
2054
2055        ftrace_startup_enable(command);
2056
2057        return 0;
2058}
2059
2060static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2061{
2062        bool hash_disable = true;
2063
2064        if (unlikely(ftrace_disabled))
2065                return;
2066
2067        ftrace_start_up--;
2068        /*
2069         * Just warn in case of imbalance; no need to kill ftrace, it's not
2070         * critical, but the ftrace_call callers may never be nopped again after
2071         * further ftrace uses.
2072         */
2073        WARN_ON_ONCE(ftrace_start_up < 0);
2074
2075        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2076                ops = &global_ops;
2077                global_start_up--;
2078                WARN_ON_ONCE(global_start_up < 0);
2079                /* Don't update hash if global still has users */
2080                if (global_start_up) {
2081                        WARN_ON_ONCE(!ftrace_start_up);
2082                        hash_disable = false;
2083                }
2084        }
2085
2086        if (hash_disable)
2087                ftrace_hash_rec_disable(ops, 1);
2088
2089        if (ops != &global_ops || !global_start_up)
2090                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2091
2092        command |= FTRACE_UPDATE_CALLS;
2093
2094        if (saved_ftrace_func != ftrace_trace_function) {
2095                saved_ftrace_func = ftrace_trace_function;
2096                command |= FTRACE_UPDATE_TRACE_FUNC;
2097        }
2098
2099        if (!command || !ftrace_enabled)
2100                return;
2101
2102        ftrace_run_update_code(command);
2103}
2104
2105static void ftrace_startup_sysctl(void)
2106{
2107        if (unlikely(ftrace_disabled))
2108                return;
2109
2110        /* Force update next time */
2111        saved_ftrace_func = NULL;
2112        /* ftrace_start_up is true if we want ftrace running */
2113        if (ftrace_start_up)
2114                ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2115}
2116
2117static void ftrace_shutdown_sysctl(void)
2118{
2119        if (unlikely(ftrace_disabled))
2120                return;
2121
2122        /* ftrace_start_up is true if ftrace is running */
2123        if (ftrace_start_up)
2124                ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2125}
2126
2127static cycle_t          ftrace_update_time;
2128static unsigned long    ftrace_update_cnt;
2129unsigned long           ftrace_update_tot_cnt;
2130
2131static int ops_traces_mod(struct ftrace_ops *ops)
2132{
2133        struct ftrace_hash *hash;
2134
2135        hash = ops->filter_hash;
2136        return ftrace_hash_empty(hash);
2137}
2138
2139static int ftrace_update_code(struct module *mod)
2140{
2141        struct ftrace_page *pg;
2142        struct dyn_ftrace *p;
2143        cycle_t start, stop;
2144        unsigned long ref = 0;
2145        int i;
2146
2147        /*
2148         * When adding a module, we need to check if tracers are
2149         * currently enabled and if they are set to trace all functions.
2150         * If they are, we need to enable the module functions as well
2151         * as update the reference counts for those function records.
2152         */
2153        if (mod) {
2154                struct ftrace_ops *ops;
2155
2156                for (ops = ftrace_ops_list;
2157                     ops != &ftrace_list_end; ops = ops->next) {
2158                        if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2159                            ops_traces_mod(ops))
2160                                ref++;
2161                }
2162        }
2163
2164        start = ftrace_now(raw_smp_processor_id());
2165        ftrace_update_cnt = 0;
2166
2167        for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2168
2169                for (i = 0; i < pg->index; i++) {
2170                        /* If something went wrong, bail without enabling anything */
2171                        if (unlikely(ftrace_disabled))
2172                                return -1;
2173
2174                        p = &pg->records[i];
2175                        p->flags = ref;
2176
2177                        /*
2178                         * Do the initial record conversion from mcount jump
2179                         * to the NOP instructions.
2180                         */
2181                        if (!ftrace_code_disable(mod, p))
2182                                break;
2183
2184                        ftrace_update_cnt++;
2185
2186                        /*
2187                         * If the tracing is enabled, go ahead and enable the record.
2188                         *
2189                         * The reason not to enable the record immediately is the
2190                         * inherent check of ftrace_make_nop/ftrace_make_call for
2191                         * correct previous instructions.  Doing the NOP conversion
2192                         * first puts the module into the correct state, thus
2193                         * passing the ftrace_make_call check.
2194                         */
2195                        if (ftrace_start_up && ref) {
2196                                int failed = __ftrace_replace_code(p, 1);
2197                                if (failed)
2198                                        ftrace_bug(failed, p->ip);
2199                        }
2200                }
2201        }
2202
2203        ftrace_new_pgs = NULL;
2204
2205        stop = ftrace_now(raw_smp_processor_id());
2206        ftrace_update_time = stop - start;
2207        ftrace_update_tot_cnt += ftrace_update_cnt;
2208
2209        return 0;
2210}
2211
2212static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2213{
2214        int order;
2215        int cnt;
2216
2217        if (WARN_ON(!count))
2218                return -EINVAL;
2219
2220        order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2221
2222        /*
2223         * We want to fill as much as possible. No more than a page
2224         * may be empty.
2225         */
2226        while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2227                order--;
2228
2229 again:
2230        pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2231
2232        if (!pg->records) {
2233                /* if we can't allocate this size, try something smaller */
2234                if (!order)
2235                        return -ENOMEM;
2236                order >>= 1;
2237                goto again;
2238        }
2239
2240        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2241        pg->size = cnt;
2242
2243        if (cnt > count)
2244                cnt = count;
2245
2246        return cnt;
2247}
2248
2249static struct ftrace_page *
2250ftrace_allocate_pages(unsigned long num_to_init)
2251{
2252        struct ftrace_page *start_pg;
2253        struct ftrace_page *pg;
2254        int order;
2255        int cnt;
2256
2257        if (!num_to_init)
2258                return NULL;
2259
2260        start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2261        if (!pg)
2262                return NULL;
2263
2264        /*
2265         * Try to allocate as much as possible in one contiguous
2266         * location that fills in all of the space. We want to
2267         * waste as little space as possible.
2268         */
2269        for (;;) {
2270                cnt = ftrace_allocate_records(pg, num_to_init);
2271                if (cnt < 0)
2272                        goto free_pages;
2273
2274                num_to_init -= cnt;
2275                if (!num_to_init)
2276                        break;
2277
2278                pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2279                if (!pg->next)
2280                        goto free_pages;
2281
2282                pg = pg->next;
2283        }
2284
2285        return start_pg;
2286
2287 free_pages:
2288        while (start_pg) {
2289                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2290                free_pages((unsigned long)pg->records, order);
2291                start_pg = pg->next;
2292                kfree(pg);
2293                pg = start_pg;
2294        }
2295        pr_info("ftrace: FAILED to allocate memory for functions\n");
2296        return NULL;
2297}
2298
2299static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2300{
2301        int cnt;
2302
2303        if (!num_to_init) {
2304                pr_info("ftrace: No functions to be traced?\n");
2305                return -1;
2306        }
2307
2308        cnt = num_to_init / ENTRIES_PER_PAGE;
2309        pr_info("ftrace: allocating %ld entries in %d pages\n",
2310                num_to_init, cnt + 1);
2311
2312        return 0;
2313}
2314
2315#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2316
2317struct ftrace_iterator {
2318        loff_t                          pos;
2319        loff_t                          func_pos;
2320        struct ftrace_page              *pg;
2321        struct dyn_ftrace               *func;
2322        struct ftrace_func_probe        *probe;
2323        struct trace_parser             parser;
2324        struct ftrace_hash              *hash;
2325        struct ftrace_ops               *ops;
2326        int                             hidx;
2327        int                             idx;
2328        unsigned                        flags;
2329};
2330
2331static void *
2332t_hash_next(struct seq_file *m, loff_t *pos)
2333{
2334        struct ftrace_iterator *iter = m->private;
2335        struct hlist_node *hnd = NULL;
2336        struct hlist_head *hhd;
2337
2338        (*pos)++;
2339        iter->pos = *pos;
2340
2341        if (iter->probe)
2342                hnd = &iter->probe->node;
2343 retry:
2344        if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2345                return NULL;
2346
2347        hhd = &ftrace_func_hash[iter->hidx];
2348
2349        if (hlist_empty(hhd)) {
2350                iter->hidx++;
2351                hnd = NULL;
2352                goto retry;
2353        }
2354
2355        if (!hnd)
2356                hnd = hhd->first;
2357        else {
2358                hnd = hnd->next;
2359                if (!hnd) {
2360                        iter->hidx++;
2361                        goto retry;
2362                }
2363        }
2364
2365        if (WARN_ON_ONCE(!hnd))
2366                return NULL;
2367
2368        iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2369
2370        return iter;
2371}
2372
2373static void *t_hash_start(struct seq_file *m, loff_t *pos)
2374{
2375        struct ftrace_iterator *iter = m->private;
2376        void *p = NULL;
2377        loff_t l;
2378
2379        if (!(iter->flags & FTRACE_ITER_DO_HASH))
2380                return NULL;
2381
2382        if (iter->func_pos > *pos)
2383                return NULL;
2384
2385        iter->hidx = 0;
2386        for (l = 0; l <= (*pos - iter->func_pos); ) {
2387                p = t_hash_next(m, &l);
2388                if (!p)
2389                        break;
2390        }
2391        if (!p)
2392                return NULL;
2393
2394        /* Only set this if we have an item */
2395        iter->flags |= FTRACE_ITER_HASH;
2396
2397        return iter;
2398}
2399
2400static int
2401t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2402{
2403        struct ftrace_func_probe *rec;
2404
2405        rec = iter->probe;
2406        if (WARN_ON_ONCE(!rec))
2407                return -EIO;
2408
2409        if (rec->ops->print)
2410                return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2411
2412        seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2413
2414        if (rec->data)
2415                seq_printf(m, ":%p", rec->data);
2416        seq_putc(m, '\n');
2417
2418        return 0;
2419}
2420
2421static void *
2422t_next(struct seq_file *m, void *v, loff_t *pos)
2423{
2424        struct ftrace_iterator *iter = m->private;
2425        struct ftrace_ops *ops = iter->ops;
2426        struct dyn_ftrace *rec = NULL;
2427
2428        if (unlikely(ftrace_disabled))
2429                return NULL;
2430
2431        if (iter->flags & FTRACE_ITER_HASH)
2432                return t_hash_next(m, pos);
2433
2434        (*pos)++;
2435        iter->pos = iter->func_pos = *pos;
2436
2437        if (iter->flags & FTRACE_ITER_PRINTALL)
2438                return t_hash_start(m, pos);
2439
2440 retry:
2441        if (iter->idx >= iter->pg->index) {
2442                if (iter->pg->next) {
2443                        iter->pg = iter->pg->next;
2444                        iter->idx = 0;
2445                        goto retry;
2446                }
2447        } else {
2448                rec = &iter->pg->records[iter->idx++];
2449                if (((iter->flags & FTRACE_ITER_FILTER) &&
2450                     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2451
2452                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
2453                     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2454
2455                    ((iter->flags & FTRACE_ITER_ENABLED) &&
2456                     !(rec->flags & ~FTRACE_FL_MASK))) {
2457
2458                        rec = NULL;
2459                        goto retry;
2460                }
2461        }
2462
2463        if (!rec)
2464                return t_hash_start(m, pos);
2465
2466        iter->func = rec;
2467
2468        return iter;
2469}
2470
2471static void reset_iter_read(struct ftrace_iterator *iter)
2472{
2473        iter->pos = 0;
2474        iter->func_pos = 0;
2475        iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2476}
2477
2478static void *t_start(struct seq_file *m, loff_t *pos)
2479{
2480        struct ftrace_iterator *iter = m->private;
2481        struct ftrace_ops *ops = iter->ops;
2482        void *p = NULL;
2483        loff_t l;
2484
2485        mutex_lock(&ftrace_lock);
2486
2487        if (unlikely(ftrace_disabled))
2488                return NULL;
2489
2490        /*
2491         * If an lseek was done, then reset and start from beginning.
2492         */
2493        if (*pos < iter->pos)
2494                reset_iter_read(iter);
2495
2496        /*
2497         * For set_ftrace_filter reading, if we have the filter
2498         * off, we can short cut and just print out that all
2499         * functions are enabled.
2500         */
2501        if (iter->flags & FTRACE_ITER_FILTER &&
2502            ftrace_hash_empty(ops->filter_hash)) {
2503                if (*pos > 0)
2504                        return t_hash_start(m, pos);
2505                iter->flags |= FTRACE_ITER_PRINTALL;
2506                /* reset in case of seek/pread */
2507                iter->flags &= ~FTRACE_ITER_HASH;
2508                return iter;
2509        }
2510
2511        if (iter->flags & FTRACE_ITER_HASH)
2512                return t_hash_start(m, pos);
2513
2514        /*
2515         * Unfortunately, we need to restart at ftrace_pages_start
2516         * every time we let go of the ftrace_lock mutex. This is because
2517         * those pointers can change without the lock.
2518         */
2519        iter->pg = ftrace_pages_start;
2520        iter->idx = 0;
2521        for (l = 0; l <= *pos; ) {
2522                p = t_next(m, p, &l);
2523                if (!p)
2524                        break;
2525        }
2526
2527        if (!p)
2528                return t_hash_start(m, pos);
2529
2530        return iter;
2531}
2532
2533static void t_stop(struct seq_file *m, void *p)
2534{
2535        mutex_unlock(&ftrace_lock);
2536}
2537
2538static int t_show(struct seq_file *m, void *v)
2539{
2540        struct ftrace_iterator *iter = m->private;
2541        struct dyn_ftrace *rec;
2542
2543        if (iter->flags & FTRACE_ITER_HASH)
2544                return t_hash_show(m, iter);
2545
2546        if (iter->flags & FTRACE_ITER_PRINTALL) {
2547                seq_printf(m, "#### all functions enabled ####\n");
2548                return 0;
2549        }
2550
2551        rec = iter->func;
2552
2553        if (!rec)
2554                return 0;
2555
2556        seq_printf(m, "%ps", (void *)rec->ip);
2557        if (iter->flags & FTRACE_ITER_ENABLED)
2558                seq_printf(m, " (%ld)%s",
2559                           rec->flags & ~FTRACE_FL_MASK,
2560                           rec->flags & FTRACE_FL_REGS ? " R" : "");
2561        seq_printf(m, "\n");
2562
2563        return 0;
2564}
2565
2566static const struct seq_operations show_ftrace_seq_ops = {
2567        .start = t_start,
2568        .next = t_next,
2569        .stop = t_stop,
2570        .show = t_show,
2571};
2572
2573static int
2574ftrace_avail_open(struct inode *inode, struct file *file)
2575{
2576        struct ftrace_iterator *iter;
2577
2578        if (unlikely(ftrace_disabled))
2579                return -ENODEV;
2580
2581        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2582        if (iter) {
2583                iter->pg = ftrace_pages_start;
2584                iter->ops = &global_ops;
2585        }
2586
2587        return iter ? 0 : -ENOMEM;
2588}
2589
2590static int
2591ftrace_enabled_open(struct inode *inode, struct file *file)
2592{
2593        struct ftrace_iterator *iter;
2594
2595        if (unlikely(ftrace_disabled))
2596                return -ENODEV;
2597
2598        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2599        if (iter) {
2600                iter->pg = ftrace_pages_start;
2601                iter->flags = FTRACE_ITER_ENABLED;
2602                iter->ops = &global_ops;
2603        }
2604
2605        return iter ? 0 : -ENOMEM;
2606}
2607
2608static void ftrace_filter_reset(struct ftrace_hash *hash)
2609{
2610        mutex_lock(&ftrace_lock);
2611        ftrace_hash_clear(hash);
2612        mutex_unlock(&ftrace_lock);
2613}
2614
2615/**
2616 * ftrace_regex_open - initialize function tracer filter files
2617 * @ops: The ftrace_ops that hold the hash filters
2618 * @flag: The type of filter to process
2619 * @inode: The inode, usually passed in to your open routine
2620 * @file: The file, usually passed in to your open routine
2621 *
2622 * ftrace_regex_open() initializes the filter files for the
2623 * @ops. Depending on @flag it may process the filter hash or
2624 * the notrace hash of @ops. With this called from the open
2625 * routine, you can use ftrace_filter_write() for the write
2626 * routine if @flag has FTRACE_ITER_FILTER set, or
2627 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2628 * ftrace_filter_lseek() should be used as the lseek routine, and
2629 * release must call ftrace_regex_release().
2630 */
2631int
2632ftrace_regex_open(struct ftrace_ops *ops, int flag,
2633                  struct inode *inode, struct file *file)
2634{
2635        struct ftrace_iterator *iter;
2636        struct ftrace_hash *hash;
2637        int ret = 0;
2638
2639        if (unlikely(ftrace_disabled))
2640                return -ENODEV;
2641
2642        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2643        if (!iter)
2644                return -ENOMEM;
2645
2646        if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2647                kfree(iter);
2648                return -ENOMEM;
2649        }
2650
2651        if (flag & FTRACE_ITER_NOTRACE)
2652                hash = ops->notrace_hash;
2653        else
2654                hash = ops->filter_hash;
2655
2656        iter->ops = ops;
2657        iter->flags = flag;
2658
2659        if (file->f_mode & FMODE_WRITE) {
2660                mutex_lock(&ftrace_lock);
2661                iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2662                mutex_unlock(&ftrace_lock);
2663
2664                if (!iter->hash) {
2665                        trace_parser_put(&iter->parser);
2666                        kfree(iter);
2667                        return -ENOMEM;
2668                }
2669        }
2670
2671        mutex_lock(&ftrace_regex_lock);
2672
2673        if ((file->f_mode & FMODE_WRITE) &&
2674            (file->f_flags & O_TRUNC))
2675                ftrace_filter_reset(iter->hash);
2676
2677        if (file->f_mode & FMODE_READ) {
2678                iter->pg = ftrace_pages_start;
2679
2680                ret = seq_open(file, &show_ftrace_seq_ops);
2681                if (!ret) {
2682                        struct seq_file *m = file->private_data;
2683                        m->private = iter;
2684                } else {
2685                        /* Failed */
2686                        free_ftrace_hash(iter->hash);
2687                        trace_parser_put(&iter->parser);
2688                        kfree(iter);
2689                }
2690        } else
2691                file->private_data = iter;
2692        mutex_unlock(&ftrace_regex_lock);
2693
2694        return ret;
2695}
2696
2697static int
2698ftrace_filter_open(struct inode *inode, struct file *file)
2699{
2700        return ftrace_regex_open(&global_ops,
2701                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2702                        inode, file);
2703}
2704
2705static int
2706ftrace_notrace_open(struct inode *inode, struct file *file)
2707{
2708        return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2709                                 inode, file);
2710}
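
/*
 * Wiring the helpers named in the ftrace_regex_open() kernel-doc into a
 * file_operations looks roughly like this (a sketch; this file defines
 * the real filter fops further below):
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = ftrace_filter_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */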
2711
2712static int ftrace_match(char *str, char *regex, int len, int type)
2713{
2714        int matched = 0;
2715        int slen;
2716
2717        switch (type) {
2718        case MATCH_FULL:
2719                if (strcmp(str, regex) == 0)
2720                        matched = 1;
2721                break;
2722        case MATCH_FRONT_ONLY:
2723                if (strncmp(str, regex, len) == 0)
2724                        matched = 1;
2725                break;
2726        case MATCH_MIDDLE_ONLY:
2727                if (strstr(str, regex))
2728                        matched = 1;
2729                break;
2730        case MATCH_END_ONLY:
2731                slen = strlen(str);
2732                if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2733                        matched = 1;
2734                break;
2735        }
2736
2737        return matched;
2738}
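
/*
 * How filter_parse_regex() maps user globs onto the match types handled
 * above (illustrative):
 *
 *	"foo"	-> MATCH_FULL		only the function "foo"
 *	"foo*"	-> MATCH_FRONT_ONLY	"foo", "foobar", ...
 *	"*foo*"	-> MATCH_MIDDLE_ONLY	anything containing "foo"
 *	"*foo"	-> MATCH_END_ONLY	anything ending in "foo"
 */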
2739
2740static int
2741enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2742{
2743        struct ftrace_func_entry *entry;
2744        int ret = 0;
2745
2746        entry = ftrace_lookup_ip(hash, rec->ip);
2747        if (not) {
2748                /* Do nothing if it doesn't exist */
2749                if (!entry)
2750                        return 0;
2751
2752                free_hash_entry(hash, entry);
2753        } else {
2754                /* Do nothing if it exists */
2755                if (entry)
2756                        return 0;
2757
2758                ret = add_hash_entry(hash, rec->ip);
2759        }
2760        return ret;
2761}
2762
2763static int
2764ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2765                    char *regex, int len, int type)
2766{
2767        char str[KSYM_SYMBOL_LEN];
2768        char *modname;
2769
2770        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2771
2772        if (mod) {
2773                /* module lookup requires matching the module */
2774                if (!modname || strcmp(modname, mod))
2775                        return 0;
2776
2777                /* blank search means to match all funcs in the mod */
2778                if (!len)
2779                        return 1;
2780        }
2781
2782        return ftrace_match(str, regex, len, type);
2783}
2784
2785static int
2786match_records(struct ftrace_hash *hash, char *buff,
2787              int len, char *mod, int not)
2788{
2789        unsigned search_len = 0;
2790        struct ftrace_page *pg;
2791        struct dyn_ftrace *rec;
2792        int type = MATCH_FULL;
2793        char *search = buff;
2794        int found = 0;
2795        int ret;
2796
2797        if (len) {
2798                type = filter_parse_regex(buff, len, &search, &not);
2799                search_len = strlen(search);
2800        }
2801
2802        mutex_lock(&ftrace_lock);
2803
2804        if (unlikely(ftrace_disabled))
2805                goto out_unlock;
2806
2807        do_for_each_ftrace_rec(pg, rec) {
2808                if (ftrace_match_record(rec, mod, search, search_len, type)) {
2809                        ret = enter_record(hash, rec, not);
2810                        if (ret < 0) {
2811                                found = ret;
2812                                goto out_unlock;
2813                        }
2814                        found = 1;
2815                }
2816        } while_for_each_ftrace_rec();
2817 out_unlock:
2818        mutex_unlock(&ftrace_lock);
2819
2820        return found;
2821}
2822
2823static int
2824ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2825{
2826        return match_records(hash, buff, len, NULL, 0);
2827}
2828
2829static int
2830ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2831{
2832        int not = 0;
2833
2834        /* blank or '*' mean the same */
2835        if (strcmp(buff, "*") == 0)
2836                buff[0] = 0;
2837
2838        /* handle the case of 'don't filter this module' */
2839        if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2840                buff[0] = 0;
2841                not = 1;
2842        }
2843
2844        return match_records(hash, buff, strlen(buff), mod, not);
2845}
2846
2847/*
2848 * We register the module command as a template to show others how
2849 * to register a command as well.
2850 */
2851
2852static int
2853ftrace_mod_callback(struct ftrace_hash *hash,
2854                    char *func, char *cmd, char *param, int enable)
2855{
2856        char *mod;
2857        int ret = -EINVAL;
2858
2859        /*
2860         * cmd == 'mod' because we only registered this func
2861         * for the 'mod' ftrace_func_command.
2862         * But if you register one func with multiple commands,
2863         * you can tell which command was used by the cmd
2864         * parameter.
2865         */
2866
2867        /* we must have a module name */
2868        if (!param)
2869                return ret;
2870
2871        mod = strsep(&param, ":");
2872        if (!strlen(mod))
2873                return ret;
2874
2875        ret = ftrace_match_module_records(hash, func, mod);
2876        if (!ret)
2877                ret = -EINVAL;
2878        if (ret < 0)
2879                return ret;
2880
2881        return 0;
2882}
2883
2884static struct ftrace_func_command ftrace_mod_cmd = {
2885        .name                   = "mod",
2886        .func                   = ftrace_mod_callback,
2887};
2888
2889static int __init ftrace_mod_cmd_init(void)
2890{
2891        return register_ftrace_command(&ftrace_mod_cmd);
2892}
2893core_initcall(ftrace_mod_cmd_init);
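
/*
 * Following that template, registering your own command is roughly
 * (all names here are hypothetical):
 *
 *	static int my_cmd_func(struct ftrace_hash *hash, char *func,
 *			       char *cmd, char *param, int enable)
 *	{
 *		// called for "<func>:mycmd:<param>" filter writes
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	core_initcall(my_cmd_init);
 */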
2894
2895static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2896                                      struct ftrace_ops *op, struct pt_regs *pt_regs)
2897{
2898        struct ftrace_func_probe *entry;
2899        struct hlist_head *hhd;
2900        unsigned long key;
2901
2902        key = hash_long(ip, FTRACE_HASH_BITS);
2903
2904        hhd = &ftrace_func_hash[key];
2905
2906        if (hlist_empty(hhd))
2907                return;
2908
2909        /*
2910         * Disable preemption for these calls to prevent an RCU grace
2911         * period. This syncs the hash iteration and freeing of items
2912         * on the hash. rcu_read_lock is too dangerous here.
2913         */
2914        preempt_disable_notrace();
2915        hlist_for_each_entry_rcu(entry, hhd, node) {
2916                if (entry->ip == ip)
2917                        entry->ops->func(ip, parent_ip, &entry->data);
2918        }
2919        preempt_enable_notrace();
2920}
2921
2922static struct ftrace_ops trace_probe_ops __read_mostly =
2923{
2924        .func           = function_trace_probe_call,
2925};
2926
2927static int ftrace_probe_registered;
2928
2929static void __enable_ftrace_function_probe(void)
2930{
2931        int ret;
2932        int i;
2933
2934        if (ftrace_probe_registered)
2935                return;
2936
2937        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2938                struct hlist_head *hhd = &ftrace_func_hash[i];
2939                if (hhd->first)
2940                        break;
2941        }
2942        /* Nothing registered? */
2943        if (i == FTRACE_FUNC_HASHSIZE)
2944                return;
2945
2946        ret = __register_ftrace_function(&trace_probe_ops);
2947        if (!ret)
2948                ret = ftrace_startup(&trace_probe_ops, 0);
2949
2950        ftrace_probe_registered = 1;
2951}
2952
2953static void __disable_ftrace_function_probe(void)
2954{
2955        int ret;
2956        int i;
2957
2958        if (!ftrace_probe_registered)
2959                return;
2960
2961        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2962                struct hlist_head *hhd = &ftrace_func_hash[i];
2963                if (hhd->first)
2964                        return;
2965        }
2966
2967        /* no more funcs left */
2968        ret = __unregister_ftrace_function(&trace_probe_ops);
2969        if (!ret)
2970                ftrace_shutdown(&trace_probe_ops, 0);
2971
2972        ftrace_probe_registered = 0;
2973}
2974
2975
2976static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2977{
2978        struct ftrace_func_probe *entry =
2979                container_of(rhp, struct ftrace_func_probe, rcu);
2980
2981        if (entry->ops->free)
2982                entry->ops->free(&entry->data);
2983        kfree(entry);
2984}
2985
2986
2987int
2988register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2989                              void *data)
2990{
2991        struct ftrace_func_probe *entry;
2992        struct ftrace_page *pg;
2993        struct dyn_ftrace *rec;
2994        int type, len, not;
2995        unsigned long key;
2996        int count = 0;
2997        char *search;
2998
2999        type = filter_parse_regex(glob, strlen(glob), &search, &not);
3000        len = strlen(search);
3001
3002        /* we do not support '!' for function probes */
3003        if (WARN_ON(not))
3004                return -EINVAL;
3005
3006        mutex_lock(&ftrace_lock);
3007
3008        if (unlikely(ftrace_disabled))
3009                goto out_unlock;
3010
3011        do_for_each_ftrace_rec(pg, rec) {
3012
3013                if (!ftrace_match_record(rec, NULL, search, len, type))
3014                        continue;
3015
3016                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3017                if (!entry) {
3018                        /* If we did not process any, then return error */
3019                        if (!count)
3020                                count = -ENOMEM;
3021                        goto out_unlock;
3022                }
3023
3024                count++;
3025
3026                entry->data = data;
3027
3028                /*
3029                 * The caller might want to do something special
3030                 * for each function we find. We call the callback
3031                 * to give the caller an opportunity to do so.
3032                 */
3033                if (ops->callback) {
3034                        if (ops->callback(rec->ip, &entry->data) < 0) {
3035                                /* caller does not like this func */
3036                                kfree(entry);
3037                                continue;
3038                        }
3039                }
3040
3041                entry->ops = ops;
3042                entry->ip = rec->ip;
3043
3044                key = hash_long(entry->ip, FTRACE_HASH_BITS);
3045                hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3046
3047        } while_for_each_ftrace_rec();
3048        __enable_ftrace_function_probe();
3049
3050 out_unlock:
3051        mutex_unlock(&ftrace_lock);
3052
3053        return count;
3054}
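
/*
 * Usage sketch for the probe API above (the my_probe_* names are
 * hypothetical).  This arms my_probe_func() on every function matching
 * "vfs_*":
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		// runs from the traced function; keep it notrace-safe
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *	// ret is the number of functions armed, or a negative error
 */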
3055
3056enum {
3057        PROBE_TEST_FUNC         = 1,
3058        PROBE_TEST_DATA         = 2
3059};
3060
3061static void
3062__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3063                                  void *data, int flags)
3064{
3065        struct ftrace_func_probe *entry;
3066        struct hlist_node *tmp;
3067        char str[KSYM_SYMBOL_LEN];
3068        int type = MATCH_FULL;
3069        int i, len = 0;
3070        char *search;
3071
3072        if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3073                glob = NULL;
3074        else if (glob) {
3075                int not;
3076
3077                type = filter_parse_regex(glob, strlen(glob), &search, &not);
3078                len = strlen(search);
3079
3080                /* we do not support '!' for function probes */
3081                if (WARN_ON(not))
3082                        return;
3083        }
3084
3085        mutex_lock(&ftrace_lock);
3086        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3087                struct hlist_head *hhd = &ftrace_func_hash[i];
3088
3089                hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3090
3091                        /* break up if statements for readability */
3092                        if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3093                                continue;
3094
3095                        if ((flags & PROBE_TEST_DATA) && entry->data != data)
3096                                continue;
3097
3098                        /* do this last, since it is the most expensive */
3099                        if (glob) {
3100                                kallsyms_lookup(entry->ip, NULL, NULL,
3101                                                NULL, str);
3102                                if (!ftrace_match(str, glob, len, type))
3103                                        continue;
3104                        }
3105
3106                        hlist_del_rcu(&entry->node);
3107                        call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
3108                }
3109        }
3110        __disable_ftrace_function_probe();
3111        mutex_unlock(&ftrace_lock);
3112}
3113
3114void
3115unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3116                                void *data)
3117{
3118        __unregister_ftrace_function_probe(glob, ops, data,
3119                                          PROBE_TEST_FUNC | PROBE_TEST_DATA);
3120}
3121
3122void
3123unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3124{
3125        __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3126}
3127
3128void unregister_ftrace_function_probe_all(char *glob)
3129{
3130        __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3131}
3132
3133static LIST_HEAD(ftrace_commands);
3134static DEFINE_MUTEX(ftrace_cmd_mutex);
3135
3136int register_ftrace_command(struct ftrace_func_command *cmd)
3137{
3138        struct ftrace_func_command *p;
3139        int ret = 0;
3140
3141        mutex_lock(&ftrace_cmd_mutex);
3142        list_for_each_entry(p, &ftrace_commands, list) {
3143                if (strcmp(cmd->name, p->name) == 0) {
3144                        ret = -EBUSY;
3145                        goto out_unlock;
3146                }
3147        }
3148        list_add(&cmd->list, &ftrace_commands);
3149 out_unlock:
3150        mutex_unlock(&ftrace_cmd_mutex);
3151
3152        return ret;
3153}
3154
3155int unregister_ftrace_command(struct ftrace_func_command *cmd)
3156{
3157        struct ftrace_func_command *p, *n;
3158        int ret = -ENODEV;
3159
3160        mutex_lock(&ftrace_cmd_mutex);
3161        list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3162                if (strcmp(cmd->name, p->name) == 0) {
3163                        ret = 0;
3164                        list_del_init(&p->list);
3165                        goto out_unlock;
3166                }
3167        }
3168 out_unlock:
3169        mutex_unlock(&ftrace_cmd_mutex);
3170
3171        return ret;
3172}
3173
3174static int ftrace_process_regex(struct ftrace_hash *hash,
3175                                char *buff, int len, int enable)
3176{
3177        char *func, *command, *next = buff;
3178        struct ftrace_func_command *p;
3179        int ret = -EINVAL;
3180
3181        func = strsep(&next, ":");
3182
3183        if (!next) {
3184                ret = ftrace_match_records(hash, func, len);
3185                if (!ret)
3186                        ret = -EINVAL;
3187                if (ret < 0)
3188                        return ret;
3189                return 0;
3190        }
3191
3192        /* command found */
3193
3194        command = strsep(&next, ":");
3195
3196        mutex_lock(&ftrace_cmd_mutex);
3197        list_for_each_entry(p, &ftrace_commands, list) {
3198                if (strcmp(p->name, command) == 0) {
3199                        ret = p->func(hash, func, command, next, enable);
3200                        goto out_unlock;
3201                }
3202        }
3203 out_unlock:
3204        mutex_unlock(&ftrace_cmd_mutex);
3205
3206        return ret;
3207}
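
/*
 * Accepted input forms for the parser above (illustrative): a bare glob
 * ("vfs_read" or "vfs_*") selects functions directly, while
 * "func:command[:param]" dispatches to a registered command, e.g.
 *
 *	echo 'write*:mod:ext3' > set_ftrace_filter
 *
 * runs the "mod" command with param "ext3" on functions matching
 * "write*".
 */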
3208
3209static ssize_t
3210ftrace_regex_write(struct file *file, const char __user *ubuf,
3211                   size_t cnt, loff_t *ppos, int enable)
3212{
3213        struct ftrace_iterator *iter;
3214        struct trace_parser *parser;
3215        ssize_t ret, read;
3216
3217        if (!cnt)
3218                return 0;
3219
3220        mutex_lock(&ftrace_regex_lock);
3221
3222        ret = -ENODEV;
3223        if (unlikely(ftrace_disabled))
3224                goto out_unlock;
3225
3226        if (file->f_mode & FMODE_READ) {
3227                struct seq_file *m = file->private_data;
3228                iter = m->private;
3229        } else
3230                iter = file->private_data;
3231
3232        parser = &iter->parser;
3233        read = trace_get_user(parser, ubuf, cnt, ppos);
3234
3235        if (read >= 0 && trace_parser_loaded(parser) &&
3236            !trace_parser_cont(parser)) {
3237                ret = ftrace_process_regex(iter->hash, parser->buffer,
3238                                           parser->idx, enable);
3239                trace_parser_clear(parser);
3240                if (ret)
3241                        goto out_unlock;
3242        }
3243
3244        ret = read;
3245out_unlock:
3246        mutex_unlock(&ftrace_regex_lock);
3247
3248        return ret;
3249}
3250
3251ssize_t
3252ftrace_filter_write(struct file *file, const char __user *ubuf,
3253                    size_t cnt, loff_t *ppos)
3254{
3255        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3256}
3257
3258ssize_t
3259ftrace_notrace_write(struct file *file, const char __user *ubuf,
3260                     size_t cnt, loff_t *ppos)
3261{
3262        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3263}
3264
3265static int
3266ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3267{
3268        struct ftrace_func_entry *entry;
3269
3270        if (!ftrace_location(ip))
3271                return -EINVAL;
3272
3273        if (remove) {
3274                entry = ftrace_lookup_ip(hash, ip);
3275                if (!entry)
3276                        return -ENOENT;
3277                free_hash_entry(hash, entry);
3278                return 0;
3279        }
3280
3281        return add_hash_entry(hash, ip);
3282}
3283
3284static int
3285ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3286                unsigned long ip, int remove, int reset, int enable)
3287{
3288        struct ftrace_hash **orig_hash;
3289        struct ftrace_hash *hash;
3290        int ret;
3291
3292        /* All global ops use the global ops filters */
3293        if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3294                ops = &global_ops;
3295
3296        if (unlikely(ftrace_disabled))
3297                return -ENODEV;
3298
3299        if (enable)
3300                orig_hash = &ops->filter_hash;
3301        else
3302                orig_hash = &ops->notrace_hash;
3303
3304        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3305        if (!hash)
3306                return -ENOMEM;
3307
3308        mutex_lock(&ftrace_regex_lock);
3309        if (reset)
3310                ftrace_filter_reset(hash);
3311        if (buf && !ftrace_match_records(hash, buf, len)) {
3312                ret = -EINVAL;
3313                goto out_regex_unlock;
3314        }
3315        if (ip) {
3316                ret = ftrace_match_addr(hash, ip, remove);
3317                if (ret < 0)
3318                        goto out_regex_unlock;
3319        }
3320
3321        mutex_lock(&ftrace_lock);
3322        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3323        if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3324            && ftrace_enabled)
3325                ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3326
3327        mutex_unlock(&ftrace_lock);
3328
3329 out_regex_unlock:
3330        mutex_unlock(&ftrace_regex_lock);
3331
3332        free_ftrace_hash(hash);
3333        return ret;
3334}
3335
3336static int
3337ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3338                int reset, int enable)
3339{
3340        return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
3341}
3342
3343/**
3344 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3345 * @ops - the ops to set the filter with
3346 * @ip - the address to add to or remove from the filter.
3347 * @remove - non zero to remove the ip from the filter
3348 * @reset - non zero to reset all filters before applying this filter.
3349 *
3350 * Filters denote which functions should be enabled when tracing is enabled.
3351 * If @ip is zero, the filter is not updated.
3352 */
3353int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3354                         int remove, int reset)
3355{
3356        return ftrace_set_addr(ops, ip, remove, reset, 1);
3357}
3358EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
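
/*
 * Usage sketch (my_ops and my_func are hypothetical): trace only
 * my_func with my_ops, discarding any previous filter:
 *
 *	ret = ftrace_set_filter_ip(&my_ops, (unsigned long)my_func, 0, 1);
 *
 * and later remove just that address without resetting:
 *
 *	ret = ftrace_set_filter_ip(&my_ops, (unsigned long)my_func, 1, 0);
 */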
3359
3360static int
3361ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3362                 int reset, int enable)
3363{
3364        return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3365}
3366
3367/**
3368 * ftrace_set_filter - set a function to filter on in ftrace
3369 * @ops - the ops to set the filter with
3370 * @buf - the string that holds the function filter text.
3371 * @len - the length of the string.
3372 * @reset - non zero to reset all filters before applying this filter.
3373 *
3374 * Filters denote which functions should be enabled when tracing is enabled.
3375 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3376 */
3377int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3378                       int len, int reset)
3379{
3380        return ftrace_set_regex(ops, buf, len, reset, 1);
3381}
3382EXPORT_SYMBOL_GPL(ftrace_set_filter);
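
/*
 * Usage sketch (my_ops is hypothetical): restrict my_ops to functions
 * starting with "vfs_", replacing any earlier filter:
 *
 *	ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
 */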
3383
3384/**
3385 * ftrace_set_notrace - set a function to not trace in ftrace
3386 * @ops - the ops to set the notrace filter with
3387 * @buf - the string that holds the function notrace text.
3388 * @len - the length of the string.
3389 * @reset - non zero to reset all filters before applying this filter.
3390 *
3391 * Notrace Filters denote which functions should not be enabled when tracing
3392 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3393 * for tracing.
3394 */
3395int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3396                        int len, int reset)
3397{
3398        return ftrace_set_regex(ops, buf, len, reset, 0);
3399}
3400EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3401/**
3402 * ftrace_set_global_filter - set a function to filter on with the global tracers
3404 * @buf - the string that holds the function filter text.
3405 * @len - the length of the string.
3406 * @reset - non zero to reset all filters before applying this filter.
3407 *
3408 * Filters denote which functions should be enabled when tracing is enabled.
3409 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3410 */
3411void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3412{
3413        ftrace_set_regex(&global_ops, buf, len, reset, 1);
3414}
3415EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3416
3417/**
3418 * ftrace_set_global_notrace - set a function to not trace with global tracers
3420 * @buf - the string that holds the function notrace text.
3421 * @len - the length of the string.
3422 * @reset - non zero to reset all filters before applying this filter.
3423 *
3424 * Notrace Filters denote which functions should not be enabled when tracing
3425 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3426 * for tracing.
3427 */
3428void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3429{
3430        ftrace_set_regex(&global_ops, buf, len, reset, 0);
3431}
3432EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3433
3434/*
3435 * command line interface to allow users to set filters on boot up.
3436 */
3437#define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3438static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3439static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3440
3441static int __init set_ftrace_notrace(char *str)
3442{
3443        strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3444        return 1;
3445}
3446__setup("ftrace_notrace=", set_ftrace_notrace);
3447
3448static int __init set_ftrace_filter(char *str)
3449{
3450        strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3451        return 1;
3452}
3453__setup("ftrace_filter=", set_ftrace_filter);
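
Both parameters take the same comma-separated glob lists later handed to
ftrace_set_early_filter(); an illustrative kernel command line:

        ftrace_filter=sys_nanosleep,hrtimer* ftrace_notrace=rcu*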
3454
3455#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3456static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3457static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3458
3459static int __init set_graph_function(char *str)
3460{
3461        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3462        return 1;
3463}
3464__setup("ftrace_graph_filter=", set_graph_function);
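
This one also accepts a comma-separated list, each entry applied as a single
expression by set_ftrace_early_graph() below; an illustrative command line:

        ftrace_graph_filter=sys_open,sys_close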
3465
3466static void __init set_ftrace_early_graph(char *buf)
3467{
3468        int ret;
3469        char *func;
3470
3471        while (buf) {
3472                func = strsep(&buf, ",");
3473                /* we allow only one expression at a time */
3474                ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3475                                      func);
3476                if (ret)
3477                        printk(KERN_DEBUG "ftrace: function %s not "
3478                                          "traceable\n", func);
3479        }
3480}
3481#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3482
3483void __init
3484ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3485{
3486        char *func;
3487
3488        while (buf) {
3489                func = strsep(&buf, ",");
3490                ftrace_set_regex(ops, func, strlen(func), 0, enable);
3491        }
3492}
3493
3494static void __init set_ftrace_early_filters(void)
3495{
3496        if (ftrace_filter_buf[0])
3497                ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3498        if (ftrace_notrace_buf[0])
3499                ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3500#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3501        if (ftrace_graph_buf[0])
3502                set_ftrace_early_graph(ftrace_graph_buf);
3503#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3504}
3505
3506int ftrace_regex_release(struct inode *inode, struct file *file)
3507{
3508        struct seq_file *m = (struct seq_file *)file->private_data;
3509        struct ftrace_iterator *iter;
3510        struct ftrace_hash **orig_hash;
3511        struct trace_parser *parser;
3512        int filter_hash;
3513        int ret;
3514
3515        mutex_lock(&ftrace_regex_lock);
3516        if (file->f_mode & FMODE_READ) {
3517                iter = m->private;
3518
3519                seq_release(inode, file);
3520        } else
3521                iter = file->private_data;
3522
3523        parser = &iter->parser;
3524        if (trace_parser_loaded(parser)) {
3525                parser->buffer[parser->idx] = 0;
3526                ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3527        }
3528
3529        trace_parser_put(parser);
3530
3531        if (file->f_mode & FMODE_WRITE) {
3532                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3533
3534                if (filter_hash)
3535                        orig_hash = &iter->ops->filter_hash;
3536                else
3537                        orig_hash = &iter->ops->notrace_hash;
3538
3539                mutex_lock(&ftrace_lock);
3540                ret = ftrace_hash_move(iter->ops, filter_hash,
3541                                       orig_hash, iter->hash);
3542                if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3543                    && ftrace_enabled)
3544                        ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3545
3546                mutex_unlock(&ftrace_lock);
3547        }
3548        free_ftrace_hash(iter->hash);
3549        kfree(iter);
3550
3551        mutex_unlock(&ftrace_regex_lock);
3552        return 0;
3553}
3554
3555static const struct file_operations ftrace_avail_fops = {
3556        .open = ftrace_avail_open,
3557        .read = seq_read,
3558        .llseek = seq_lseek,
3559        .release = seq_release_private,
3560};
3561
3562static const struct file_operations ftrace_enabled_fops = {
3563        .open = ftrace_enabled_open,
3564        .read = seq_read,
3565        .llseek = seq_lseek,
3566        .release = seq_release_private,
3567};
3568
3569static const struct file_operations ftrace_filter_fops = {
3570        .open = ftrace_filter_open,
3571        .read = seq_read,
3572        .write = ftrace_filter_write,
3573        .llseek = ftrace_filter_lseek,
3574        .release = ftrace_regex_release,
3575};
3576
3577static const struct file_operations ftrace_notrace_fops = {
3578        .open = ftrace_notrace_open,
3579        .read = seq_read,
3580        .write = ftrace_notrace_write,
3581        .llseek = ftrace_filter_lseek,
3582        .release = ftrace_regex_release,
3583};
3584
3585#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3586
3587static DEFINE_MUTEX(graph_lock);
3588
3589int ftrace_graph_count;
3590int ftrace_graph_filter_enabled;
3591unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3592
3593static void *
3594__g_next(struct seq_file *m, loff_t *pos)
3595{
3596        if (*pos >= ftrace_graph_count)
3597                return NULL;
3598        return &ftrace_graph_funcs[*pos];
3599}
3600
3601static void *
3602g_next(struct seq_file *m, void *v, loff_t *pos)
3603{
3604        (*pos)++;
3605        return __g_next(m, pos);
3606}
3607
3608static void *g_start(struct seq_file *m, loff_t *pos)
3609{
3610        mutex_lock(&graph_lock);
3611
3612        /* Nothing set; tell g_show to print that all functions are enabled */
3613        if (!ftrace_graph_filter_enabled && !*pos)
3614                return (void *)1;
3615
3616        return __g_next(m, pos);
3617}
3618
3619static void g_stop(struct seq_file *m, void *p)
3620{
3621        mutex_unlock(&graph_lock);
3622}
3623
3624static int g_show(struct seq_file *m, void *v)
3625{
3626        unsigned long *ptr = v;
3627
3628        if (!ptr)
3629                return 0;
3630
3631        if (ptr == (unsigned long *)1) {
3632                seq_printf(m, "#### all functions enabled ####\n");
3633                return 0;
3634        }
3635
3636        seq_printf(m, "%ps\n", (void *)*ptr);
3637
3638        return 0;
3639}
3640
3641static const struct seq_operations ftrace_graph_seq_ops = {
3642        .start = g_start,
3643        .next = g_next,
3644        .stop = g_stop,
3645        .show = g_show,
3646};
3647
3648static int
3649ftrace_graph_open(struct inode *inode, struct file *file)
3650{
3651        int ret = 0;
3652
3653        if (unlikely(ftrace_disabled))
3654                return -ENODEV;
3655
3656        mutex_lock(&graph_lock);
3657        if ((file->f_mode & FMODE_WRITE) &&
3658            (file->f_flags & O_TRUNC)) {
3659                ftrace_graph_filter_enabled = 0;
3660                ftrace_graph_count = 0;
3661                memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3662        }
3663        mutex_unlock(&graph_lock);
3664
3665        if (file->f_mode & FMODE_READ)
3666                ret = seq_open(file, &ftrace_graph_seq_ops);
3667
3668        return ret;
3669}
3670
3671static int
3672ftrace_graph_release(struct inode *inode, struct file *file)
3673{
3674        if (file->f_mode & FMODE_READ)
3675                seq_release(inode, file);
3676        return 0;
3677}
3678
3679static int
3680ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3681{
3682        struct dyn_ftrace *rec;
3683        struct ftrace_page *pg;
3684        int search_len;
3685        int fail = 1;
3686        int type, not;
3687        char *search;
3688        bool exists;
3689        int i;
3690
3691        /* decode regex */
3692        type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3693        if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3694                return -EBUSY;
3695
3696        search_len = strlen(search);
3697
3698        mutex_lock(&ftrace_lock);
3699
3700        if (unlikely(ftrace_disabled)) {
3701                mutex_unlock(&ftrace_lock);
3702                return -ENODEV;
3703        }
3704
3705        do_for_each_ftrace_rec(pg, rec) {
3706
3707                if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3708                        /* if it is in the array */
3709                        exists = false;
3710                        for (i = 0; i < *idx; i++) {
3711                                if (array[i] == rec->ip) {
3712                                        exists = true;
3713                                        break;
3714                                }
3715                        }
3716
3717                        if (!not) {
3718                                fail = 0;
3719                                if (!exists) {
3720                                        array[(*idx)++] = rec->ip;
3721                                        if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3722                                                goto out;
3723                                }
3724                        } else {
3725                                if (exists) {
3726                                        array[i] = array[--(*idx)];
3727                                        array[*idx] = 0;
3728                                        fail = 0;
3729                                }
3730                        }
3731                }
3732        } while_for_each_ftrace_rec();
3733out:
3734        mutex_unlock(&ftrace_lock);
3735
3736        if (fail)
3737                return -EINVAL;
3738
3739        ftrace_graph_filter_enabled = 1;
3740        return 0;
3741}
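
Because filter_parse_regex() honors a leading '!', the "not" path above lets
the same file remove entries. An illustrative tracefs session (appending with
'>>' avoids the O_TRUNC reset performed in ftrace_graph_open()):

        echo sys_nanosleep    >> /sys/kernel/debug/tracing/set_graph_function
        echo '!sys_nanosleep' >> /sys/kernel/debug/tracing/set_graph_function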
3742
3743static ssize_t
3744ftrace_graph_write(struct file *file, const char __user *ubuf,
3745                   size_t cnt, loff_t *ppos)
3746{
3747        struct trace_parser parser;
3748        ssize_t read, ret;
3749
3750        if (!cnt)
3751                return 0;
3752
3753        mutex_lock(&graph_lock);
3754
3755        if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3756                ret = -ENOMEM;
3757                goto out_unlock;
3758        }
3759
3760        read = trace_get_user(&parser, ubuf, cnt, ppos);
3761
3762        if (read >= 0 && trace_parser_loaded(&parser)) {
3763                parser.buffer[parser.idx] = 0;
3764
3765                /* we allow only one expression at a time */
3766                ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3767                                        parser.buffer);
3768                if (ret)
3769                        goto out_free;
3770        }
3771
3772        ret = read;
3773
3774out_free:
3775        trace_parser_put(&parser);
3776out_unlock:
3777        mutex_unlock(&graph_lock);
3778
3779        return ret;
3780}
3781
3782static const struct file_operations ftrace_graph_fops = {
3783        .open           = ftrace_graph_open,
3784        .read           = seq_read,
3785        .write          = ftrace_graph_write,
3786        .llseek         = ftrace_filter_lseek,
3787        .release        = ftrace_graph_release,
3788};
3789#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3790
3791static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3792{
3793
3794        trace_create_file("available_filter_functions", 0444,
3795                        d_tracer, NULL, &ftrace_avail_fops);
3796
3797        trace_create_file("enabled_functions", 0444,
3798                        d_tracer, NULL, &ftrace_enabled_fops);
3799
3800        trace_create_file("set_ftrace_filter", 0644, d_tracer,
3801                        NULL, &ftrace_filter_fops);
3802
3803        trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3804                                    NULL, &ftrace_notrace_fops);
3805
3806#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3807        trace_create_file("set_graph_function", 0644, d_tracer,
3808                                    NULL,
3809                                    &ftrace_graph_fops);
3810#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3811
3812        return 0;
3813}
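
Once these files are created, the usual interaction is from user space; an
illustrative session, assuming debugfs is mounted at /sys/kernel/debug:

        cd /sys/kernel/debug/tracing
        echo 'schedule' > set_ftrace_filter       # trace only schedule()
        echo 'rcu*'     > set_ftrace_notrace      # never trace rcu functions
        cat enabled_functions                     # records with active ops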
3814
3815static int ftrace_cmp_ips(const void *a, const void *b)
3816{
3817        const unsigned long *ipa = a;
3818        const unsigned long *ipb = b;
3819
3820        if (*ipa > *ipb)
3821                return 1;
3822        if (*ipa < *ipb)
3823                return -1;
3824        return 0;
3825}
3826
3827static void ftrace_swap_ips(void *a, void *b, int size)
3828{
3829        unsigned long *ipa = a;
3830        unsigned long *ipb = b;
3831        unsigned long t;
3832
3833        t = *ipa;
3834        *ipa = *ipb;
3835        *ipb = t;
3836}
3837
3838static int ftrace_process_locs(struct module *mod,
3839                               unsigned long *start,
3840                               unsigned long *end)
3841{
3842        struct ftrace_page *start_pg;
3843        struct ftrace_page *pg;
3844        struct dyn_ftrace *rec;
3845        unsigned long count;
3846        unsigned long *p;
3847        unsigned long addr;
3848        unsigned long flags = 0; /* Shut up gcc */
3849        int ret = -ENOMEM;
3850
3851        count = end - start;
3852
3853        if (!count)
3854                return 0;
3855
3856        sort(start, count, sizeof(*start),
3857             ftrace_cmp_ips, ftrace_swap_ips);
3858
3859        start_pg = ftrace_allocate_pages(count);
3860        if (!start_pg)
3861                return -ENOMEM;
3862
3863        mutex_lock(&ftrace_lock);
3864
3865        /*
3866         * The core kernel and each module need their own pages, as
3867         * modules will free them when they are removed.
3868         * Force a new page to be allocated for modules.
3869         */
3870        if (!mod) {
3871                WARN_ON(ftrace_pages || ftrace_pages_start);
3872                /* First initialization */
3873                ftrace_pages = ftrace_pages_start = start_pg;
3874        } else {
3875                if (!ftrace_pages)
3876                        goto out;
3877
3878                if (WARN_ON(ftrace_pages->next)) {
3879                        /* Hmm, we have free pages? */
3880                        while (ftrace_pages->next)
3881                                ftrace_pages = ftrace_pages->next;
3882                }
3883
3884                ftrace_pages->next = start_pg;
3885        }
3886
3887        p = start;
3888        pg = start_pg;
3889        while (p < end) {
3890                addr = ftrace_call_adjust(*p++);
3891                /*
3892                 * Some architecture linkers will pad between
3893                 * the different mcount_loc sections of different
3894                 * object files to satisfy alignments.
3895                 * Skip any NULL pointers.
3896                 */
3897                if (!addr)
3898                        continue;
3899
3900                if (pg->index == pg->size) {
3901                        /* We should have allocated enough */
3902                        if (WARN_ON(!pg->next))
3903                                break;
3904                        pg = pg->next;
3905                }
3906
3907                rec = &pg->records[pg->index++];
3908                rec->ip = addr;
3909        }
3910
3911        /* We should have used all pages */
3912        WARN_ON(pg->next);
3913
3914        /* Assign the last page to ftrace_pages */
3915        ftrace_pages = pg;
3916
3917        /* These new locations need to be initialized */
3918        ftrace_new_pgs = start_pg;
3919
3920        /*
3921         * We only need to disable interrupts on start up
3922         * because we are modifying code that an interrupt
3923         * may execute, and the modification is not atomic.
3924         * But for modules, nothing runs the code we modify
3925         * until we are finished with it, and there's no
3926         * reason to cause large interrupt latencies while we do it.
3927         */
3928        if (!mod)
3929                local_irq_save(flags);
3930        ftrace_update_code(mod);
3931        if (!mod)
3932                local_irq_restore(flags);
3933        ret = 0;
3934 out:
3935        mutex_unlock(&ftrace_lock);
3936
3937        return ret;
3938}
3939
3940#ifdef CONFIG_MODULES
3941
3942#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3943
3944void ftrace_release_mod(struct module *mod)
3945{
3946        struct dyn_ftrace *rec;
3947        struct ftrace_page **last_pg;
3948        struct ftrace_page *pg;
3949        int order;
3950
3951        mutex_lock(&ftrace_lock);
3952
3953        if (ftrace_disabled)
3954                goto out_unlock;
3955
3956        /*
3957         * Each module has its own ftrace_pages, remove
3958         * them from the list.
3959         */
3960        last_pg = &ftrace_pages_start;
3961        for (pg = ftrace_pages_start; pg; pg = *last_pg) {
3962                rec = &pg->records[0];
3963                if (within_module_core(rec->ip, mod)) {
3964                        /*
3965                         * As core pages are first, the first
3966                         * page should never be a module page.
3967                         */
3968                        if (WARN_ON(pg == ftrace_pages_start))
3969                                goto out_unlock;
3970
3971                        /* Check if we are deleting the last page */
3972                        if (pg == ftrace_pages)
3973                                ftrace_pages = next_to_ftrace_page(last_pg);
3974
3975                        *last_pg = pg->next;
3976                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3977                        free_pages((unsigned long)pg->records, order);
3978                        kfree(pg);
3979                } else
3980                        last_pg = &pg->next;
3981        }
3982 out_unlock:
3983        mutex_unlock(&ftrace_lock);
3984}
3985
3986static void ftrace_init_module(struct module *mod,
3987                               unsigned long *start, unsigned long *end)
3988{
3989        if (ftrace_disabled || start == end)
3990                return;
3991        ftrace_process_locs(mod, start, end);
3992}
3993
3994static int ftrace_module_notify_enter(struct notifier_block *self,
3995                                      unsigned long val, void *data)
3996{
3997        struct module *mod = data;
3998
3999        if (val == MODULE_STATE_COMING)
4000                ftrace_init_module(mod, mod->ftrace_callsites,
4001                                   mod->ftrace_callsites +
4002                                   mod->num_ftrace_callsites);
4003        return 0;
4004}
4005
4006static int ftrace_module_notify_exit(struct notifier_block *self,
4007                                     unsigned long val, void *data)
4008{
4009        struct module *mod = data;
4010
4011        if (val == MODULE_STATE_GOING)
4012                ftrace_release_mod(mod);
4013
4014        return 0;
4015}
4016#else
4017static int ftrace_module_notify_enter(struct notifier_block *self,
4018                                      unsigned long val, void *data)
4019{
4020        return 0;
4021}
4022static int ftrace_module_notify_exit(struct notifier_block *self,
4023                                     unsigned long val, void *data)
4024{
4025        return 0;
4026}
4027#endif /* CONFIG_MODULES */
4028
4029struct notifier_block ftrace_module_enter_nb = {
4030        .notifier_call = ftrace_module_notify_enter,
4031        .priority = INT_MAX,    /* Run before anything that can use kprobes */
4032};
4033
4034struct notifier_block ftrace_module_exit_nb = {
4035        .notifier_call = ftrace_module_notify_exit,
4036        .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4037};
4038
4039extern unsigned long __start_mcount_loc[];
4040extern unsigned long __stop_mcount_loc[];
4041
4042void __init ftrace_init(void)
4043{
4044        unsigned long count, addr, flags;
4045        int ret;
4046
4047        /* Keep the ftrace pointer to the stub */
4048        addr = (unsigned long)ftrace_stub;
4049
4050        local_irq_save(flags);
4051        ftrace_dyn_arch_init(&addr);
4052        local_irq_restore(flags);
4053
4054        /* ftrace_dyn_arch_init places the return code in addr */
4055        if (addr)
4056                goto failed;
4057
4058        count = __stop_mcount_loc - __start_mcount_loc;
4059
4060        ret = ftrace_dyn_table_alloc(count);
4061        if (ret)
4062                goto failed;
4063
4064        last_ftrace_enabled = ftrace_enabled = 1;
4065
4066        ret = ftrace_process_locs(NULL,
4067                                  __start_mcount_loc,
4068                                  __stop_mcount_loc);
4069
4070        ret = register_module_notifier(&ftrace_module_enter_nb);
4071        if (ret)
4072                pr_warning("Failed to register trace ftrace module enter notifier\n");
4073
4074        ret = register_module_notifier(&ftrace_module_exit_nb);
4075        if (ret)
4076                pr_warning("Failed to register trace ftrace module exit notifier\n");
4077
4078        set_ftrace_early_filters();
4079
4080        return;
4081 failed:
4082        ftrace_disabled = 1;
4083}
4084
4085#else
4086
4087static struct ftrace_ops global_ops = {
4088        .func                   = ftrace_stub,
4089        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
4090};
4091
4092static int __init ftrace_nodyn_init(void)
4093{
4094        ftrace_enabled = 1;
4095        return 0;
4096}
4097core_initcall(ftrace_nodyn_init);
4098
4099static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4100static inline void ftrace_startup_enable(int command) { }
4101/* Keep as macros so we do not need to define the commands */
4102# define ftrace_startup(ops, command)                   \
4103        ({                                              \
4104                (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
4105                0;                                      \
4106        })
4107# define ftrace_shutdown(ops, command)  do { } while (0)
4108# define ftrace_startup_sysctl()        do { } while (0)
4109# define ftrace_shutdown_sysctl()       do { } while (0)
4110
4111static inline int
4112ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
4113{
4114        return 1;
4115}
4116
4117#endif /* CONFIG_DYNAMIC_FTRACE */
4118
4119static void
4120ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4121                        struct ftrace_ops *op, struct pt_regs *regs)
4122{
4123        if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4124                return;
4125
4126        /*
4127         * Some of the ops may be dynamically allocated,
4128         * they must be freed after a synchronize_sched().
4129         */
4130        preempt_disable_notrace();
4131        trace_recursion_set(TRACE_CONTROL_BIT);
4132        do_for_each_ftrace_op(op, ftrace_control_list) {
4133                if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4134                    !ftrace_function_local_disabled(op) &&
4135                    ftrace_ops_test(op, ip))
4136                        op->func(ip, parent_ip, op, regs);
4137        } while_for_each_ftrace_op(op);
4138        trace_recursion_clear(TRACE_CONTROL_BIT);
4139        preempt_enable_notrace();
4140}
4141
4142static struct ftrace_ops control_ops = {
4143        .func = ftrace_ops_control_func,
4144        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
4145};
4146
4147static inline void
4148__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4149                       struct ftrace_ops *ignored, struct pt_regs *regs)
4150{
4151        struct ftrace_ops *op;
4152        int bit;
4153
4154        if (function_trace_stop)
4155                return;
4156
4157        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4158        if (bit < 0)
4159                return;
4160
4161        /*
4162         * Some of the ops may be dynamically allocated,
4163         * they must be freed after a synchronize_sched().
4164         */
4165        preempt_disable_notrace();
4166        do_for_each_ftrace_op(op, ftrace_ops_list) {
4167                if (ftrace_ops_test(op, ip))
4168                        op->func(ip, parent_ip, op, regs);
4169        } while_for_each_ftrace_op(op);
4170        preempt_enable_notrace();
4171        trace_clear_recursion(bit);
4172}
4173
4174/*
4175 * Some archs only support passing ip and parent_ip. Even though
4176 * the list function ignores the op parameter, we do not want any
4177 * C side effects, where a function is called without the caller
4178 * sending a third parameter.
4179 * Archs are expected to support both regs and ftrace_ops at the same time.
4180 * If they support ftrace_ops, it is assumed they support regs.
4181 * If callbacks want to use regs, they must either check for regs
4182 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4183 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4184 * An architecture can pass partial regs with ftrace_ops and still
4185 * set ARCH_SUPPORTS_FTRACE_OPS.
4186 */
4187#if ARCH_SUPPORTS_FTRACE_OPS
4188static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4189                                 struct ftrace_ops *op, struct pt_regs *regs)
4190{
4191        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4192}
4193#else
4194static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4195{
4196        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4197}
4198#endif
4199
4200static void clear_ftrace_swapper(void)
4201{
4202        struct task_struct *p;
4203        int cpu;
4204
4205        get_online_cpus();
4206        for_each_online_cpu(cpu) {
4207                p = idle_task(cpu);
4208                clear_tsk_trace_trace(p);
4209        }
4210        put_online_cpus();
4211}
4212
4213static void set_ftrace_swapper(void)
4214{
4215        struct task_struct *p;
4216        int cpu;
4217
4218        get_online_cpus();
4219        for_each_online_cpu(cpu) {
4220                p = idle_task(cpu);
4221                set_tsk_trace_trace(p);
4222        }
4223        put_online_cpus();
4224}
4225
4226static void clear_ftrace_pid(struct pid *pid)
4227{
4228        struct task_struct *p;
4229
4230        rcu_read_lock();
4231        do_each_pid_task(pid, PIDTYPE_PID, p) {
4232                clear_tsk_trace_trace(p);
4233        } while_each_pid_task(pid, PIDTYPE_PID, p);
4234        rcu_read_unlock();
4235
4236        put_pid(pid);
4237}
4238
4239static void set_ftrace_pid(struct pid *pid)
4240{
4241        struct task_struct *p;
4242
4243        rcu_read_lock();
4244        do_each_pid_task(pid, PIDTYPE_PID, p) {
4245                set_tsk_trace_trace(p);
4246        } while_each_pid_task(pid, PIDTYPE_PID, p);
4247        rcu_read_unlock();
4248}
4249
4250static void clear_ftrace_pid_task(struct pid *pid)
4251{
4252        if (pid == ftrace_swapper_pid)
4253                clear_ftrace_swapper();
4254        else
4255                clear_ftrace_pid(pid);
4256}
4257
4258static void set_ftrace_pid_task(struct pid *pid)
4259{
4260        if (pid == ftrace_swapper_pid)
4261                set_ftrace_swapper();
4262        else
4263                set_ftrace_pid(pid);
4264}
4265
4266static int ftrace_pid_add(int p)
4267{
4268        struct pid *pid;
4269        struct ftrace_pid *fpid;
4270        int ret = -EINVAL;
4271
4272        mutex_lock(&ftrace_lock);
4273
4274        if (!p)
4275                pid = ftrace_swapper_pid;
4276        else
4277                pid = find_get_pid(p);
4278
4279        if (!pid)
4280                goto out;
4281
4282        ret = 0;
4283
4284        list_for_each_entry(fpid, &ftrace_pids, list)
4285                if (fpid->pid == pid)
4286                        goto out_put;
4287
4288        ret = -ENOMEM;
4289
4290        fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4291        if (!fpid)
4292                goto out_put;
4293
4294        list_add(&fpid->list, &ftrace_pids);
4295        fpid->pid = pid;
4296
4297        set_ftrace_pid_task(pid);
4298
4299        ftrace_update_pid_func();
4300        ftrace_startup_enable(0);
4301
4302        mutex_unlock(&ftrace_lock);
4303        return 0;
4304
4305out_put:
4306        if (pid != ftrace_swapper_pid)
4307                put_pid(pid);
4308
4309out:
4310        mutex_unlock(&ftrace_lock);
4311        return ret;
4312}
4313
4314static void ftrace_pid_reset(void)
4315{
4316        struct ftrace_pid *fpid, *safe;
4317
4318        mutex_lock(&ftrace_lock);
4319        list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4320                struct pid *pid = fpid->pid;
4321
4322                clear_ftrace_pid_task(pid);
4323
4324                list_del(&fpid->list);
4325                kfree(fpid);
4326        }
4327
4328        ftrace_update_pid_func();
4329        ftrace_startup_enable(0);
4330
4331        mutex_unlock(&ftrace_lock);
4332}
4333
4334static void *fpid_start(struct seq_file *m, loff_t *pos)
4335{
4336        mutex_lock(&ftrace_lock);
4337
4338        if (list_empty(&ftrace_pids) && (!*pos))
4339                return (void *) 1;
4340
4341        return seq_list_start(&ftrace_pids, *pos);
4342}
4343
4344static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4345{
4346        if (v == (void *)1)
4347                return NULL;
4348
4349        return seq_list_next(v, &ftrace_pids, pos);
4350}
4351
4352static void fpid_stop(struct seq_file *m, void *p)
4353{
4354        mutex_unlock(&ftrace_lock);
4355}
4356
4357static int fpid_show(struct seq_file *m, void *v)
4358{
4359        const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4360
4361        if (v == (void *)1) {
4362                seq_printf(m, "no pid\n");
4363                return 0;
4364        }
4365
4366        if (fpid->pid == ftrace_swapper_pid)
4367                seq_printf(m, "swapper tasks\n");
4368        else
4369                seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4370
4371        return 0;
4372}
4373
4374static const struct seq_operations ftrace_pid_sops = {
4375        .start = fpid_start,
4376        .next = fpid_next,
4377        .stop = fpid_stop,
4378        .show = fpid_show,
4379};
4380
4381static int
4382ftrace_pid_open(struct inode *inode, struct file *file)
4383{
4384        int ret = 0;
4385
4386        if ((file->f_mode & FMODE_WRITE) &&
4387            (file->f_flags & O_TRUNC))
4388                ftrace_pid_reset();
4389
4390        if (file->f_mode & FMODE_READ)
4391                ret = seq_open(file, &ftrace_pid_sops);
4392
4393        return ret;
4394}
4395
4396static ssize_t
4397ftrace_pid_write(struct file *filp, const char __user *ubuf,
4398                   size_t cnt, loff_t *ppos)
4399{
4400        char buf[64], *tmp;
4401        long val;
4402        int ret;
4403
4404        if (cnt >= sizeof(buf))
4405                return -EINVAL;
4406
4407        if (copy_from_user(&buf, ubuf, cnt))
4408                return -EFAULT;
4409
4410        buf[cnt] = 0;
4411
4412        /*
4413         * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4414         * to clean the filter quietly.
4415         */
4416        tmp = strstrip(buf);
4417        if (strlen(tmp) == 0)
4418                return 1;
4419
4420        ret = kstrtol(tmp, 10, &val);
4421        if (ret < 0)
4422                return ret;
4423
4424        ret = ftrace_pid_add(val);
4425
4426        return ret ? ret : cnt;
4427}
4428
4429static int
4430ftrace_pid_release(struct inode *inode, struct file *file)
4431{
4432        if (file->f_mode & FMODE_READ)
4433                seq_release(inode, file);
4434
4435        return 0;
4436}
4437
4438static const struct file_operations ftrace_pid_fops = {
4439        .open           = ftrace_pid_open,
4440        .write          = ftrace_pid_write,
4441        .read           = seq_read,
4442        .llseek         = ftrace_filter_lseek,
4443        .release        = ftrace_pid_release,
4444};
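
An illustrative use of the resulting file (an empty write clears the list
quietly, as noted in ftrace_pid_write() above, and pid 0 maps to the
swapper/idle tasks):

        echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid   # trace pid 1234
        echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid   # idle tasks
        echo      > /sys/kernel/debug/tracing/set_ftrace_pid   # clear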
4445
4446static __init int ftrace_init_debugfs(void)
4447{
4448        struct dentry *d_tracer;
4449
4450        d_tracer = tracing_init_dentry();
4451        if (!d_tracer)
4452                return 0;
4453
4454        ftrace_init_dyn_debugfs(d_tracer);
4455
4456        trace_create_file("set_ftrace_pid", 0644, d_tracer,
4457                            NULL, &ftrace_pid_fops);
4458
4459        ftrace_profile_debugfs(d_tracer);
4460
4461        return 0;
4462}
4463fs_initcall(ftrace_init_debugfs);
4464
4465/**
4466 * ftrace_kill - kill ftrace
4467 *
4468 * This function should be used by panic code. It stops ftrace
4469 * but in a not so nice way: tracing is turned off immediately, with
4470 * no clean shutdown of the registered callbacks.
4471 */
4472void ftrace_kill(void)
4473{
4474        ftrace_disabled = 1;
4475        ftrace_enabled = 0;
4476        clear_ftrace_function();
4477}
4478
4479/**
4480 * ftrace_is_dead - Test if ftrace is dead or not.
4481 */
4482int ftrace_is_dead(void)
4483{
4484        return ftrace_disabled;
4485}
4486
4487/**
4488 * register_ftrace_function - register a function for profiling
4489 * @ops - ops structure that holds the function for profiling.
4490 *
4491 * Register a function to be called by all functions in the
4492 * kernel.
4493 *
4494 * Note: @ops->func and all the functions it calls must be labeled
4495 *       with "notrace", otherwise it will go into a
4496 *       recursive loop.
4497 */
4498int register_ftrace_function(struct ftrace_ops *ops)
4499{
4500        int ret = -1;
4501
4502        mutex_lock(&ftrace_lock);
4503
4504        ret = __register_ftrace_function(ops);
4505        if (!ret)
4506                ret = ftrace_startup(ops, 0);
4507
4508        mutex_unlock(&ftrace_lock);
4509
4510        return ret;
4511}
4512EXPORT_SYMBOL_GPL(register_ftrace_function);
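
A minimal sketch of the pairing described above, matched with
unregister_ftrace_function() below (the callback and ops names are
illustrative; the callback and everything it calls must be notrace):

#include <linux/ftrace.h>

static void notrace count_hits(unsigned long ip, unsigned long parent_ip,
                               struct ftrace_ops *op, struct pt_regs *regs)
{
        /* called for every traced function: do minimal, notrace-safe work */
}

static struct ftrace_ops count_ops = {
        .func  = count_hits,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

/* register_ftrace_function(&count_ops) starts the calls,
 * unregister_ftrace_function(&count_ops) stops them again. */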
4513
4514/**
4515 * unregister_ftrace_function - unregister a function for profiling.
4516 * @ops - ops structure that holds the function to unregister
4517 *
4518 * Unregister a function that was added to be called by ftrace profiling.
4519 */
4520int unregister_ftrace_function(struct ftrace_ops *ops)
4521{
4522        int ret;
4523
4524        mutex_lock(&ftrace_lock);
4525        ret = __unregister_ftrace_function(ops);
4526        if (!ret)
4527                ftrace_shutdown(ops, 0);
4528        mutex_unlock(&ftrace_lock);
4529
4530        return ret;
4531}
4532EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4533
4534int
4535ftrace_enable_sysctl(struct ctl_table *table, int write,
4536                     void __user *buffer, size_t *lenp,
4537                     loff_t *ppos)
4538{
4539        int ret = -ENODEV;
4540
4541        mutex_lock(&ftrace_lock);
4542
4543        if (unlikely(ftrace_disabled))
4544                goto out;
4545
4546        ret = proc_dointvec(table, write, buffer, lenp, ppos);
4547
4548        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4549                goto out;
4550
4551        last_ftrace_enabled = !!ftrace_enabled;
4552
4553        if (ftrace_enabled) {
4554
4555                ftrace_startup_sysctl();
4556
4557                /* we are starting ftrace again */
4558                if (ftrace_ops_list != &ftrace_list_end)
4559                        update_ftrace_function();
4560
4561        } else {
4562                /* stopping ftrace calls (just send to ftrace_stub) */
4563                ftrace_trace_function = ftrace_stub;
4564
4565                ftrace_shutdown_sysctl();
4566        }
4567
4568 out:
4569        mutex_unlock(&ftrace_lock);
4570        return ret;
4571}
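
From user space this handler sits behind the ftrace_enabled sysctl; an
illustrative session:

        echo 0 > /proc/sys/kernel/ftrace_enabled   # point calls at ftrace_stub
        echo 1 > /proc/sys/kernel/ftrace_enabled   # restart via the sysctl path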
4572
4573#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4574
4575static int ftrace_graph_active;
4576static struct notifier_block ftrace_suspend_notifier;
4577
4578int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4579{
4580        return 0;
4581}
4582
4583/* The callbacks that hook a function */
4584trace_func_graph_ret_t ftrace_graph_return =
4585                        (trace_func_graph_ret_t)ftrace_stub;
4586trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4587
4588/* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4589static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4590{
4591        int i;
4592        int ret = 0;
4593        unsigned long flags;
4594        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4595        struct task_struct *g, *t;
4596
4597        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4598                ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4599                                        * sizeof(struct ftrace_ret_stack),
4600                                        GFP_KERNEL);
4601                if (!ret_stack_list[i]) {
4602                        start = 0;
4603                        end = i;
4604                        ret = -ENOMEM;
4605                        goto free;
4606                }
4607        }
4608
4609        read_lock_irqsave(&tasklist_lock, flags);
4610        do_each_thread(g, t) {
4611                if (start == end) {
4612                        ret = -EAGAIN;
4613                        goto unlock;
4614                }
4615
4616                if (t->ret_stack == NULL) {
4617                        atomic_set(&t->tracing_graph_pause, 0);
4618                        atomic_set(&t->trace_overrun, 0);
4619                        t->curr_ret_stack = -1;
4620                        /* Make sure the tasks see the -1 first: */
4621                        smp_wmb();
4622                        t->ret_stack = ret_stack_list[start++];
4623                }
4624        } while_each_thread(g, t);
4625
4626unlock:
4627        read_unlock_irqrestore(&tasklist_lock, flags);
4628free:
4629        for (i = start; i < end; i++)
4630                kfree(ret_stack_list[i]);
4631        return ret;
4632}
4633
4634static void
4635ftrace_graph_probe_sched_switch(void *ignore,
4636                        struct task_struct *prev, struct task_struct *next)
4637{
4638        unsigned long long timestamp;
4639        int index;
4640
4641        /*
4642         * Does the user want to count the time a function was asleep?
4643         * If so, do not update the time stamps.
4644         */
4645        if (trace_flags & TRACE_ITER_SLEEP_TIME)
4646                return;
4647
4648        timestamp = trace_clock_local();
4649
4650        prev->ftrace_timestamp = timestamp;
4651
4652        /* only process tasks that we timestamped */
4653        if (!next->ftrace_timestamp)
4654                return;
4655
4656        /*
4657         * Update all the counters in next to make up for the
4658         * time next was sleeping.
4659         */
4660        timestamp -= next->ftrace_timestamp;
4661
4662        for (index = next->curr_ret_stack; index >= 0; index--)
4663                next->ret_stack[index].calltime += timestamp;
4664}
4665
4666/* Allocate a return stack for each task */
4667static int start_graph_tracing(void)
4668{
4669        struct ftrace_ret_stack **ret_stack_list;
4670        int ret, cpu;
4671
4672        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4673                                sizeof(struct ftrace_ret_stack *),
4674                                GFP_KERNEL);
4675
4676        if (!ret_stack_list)
4677                return -ENOMEM;
4678
4679        /* The cpu_boot init_task->ret_stack will never be freed */
4680        for_each_online_cpu(cpu) {
4681                if (!idle_task(cpu)->ret_stack)
4682                        ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4683        }
4684
4685        do {
4686                ret = alloc_retstack_tasklist(ret_stack_list);
4687        } while (ret == -EAGAIN);
4688
4689        if (!ret) {
4690                ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4691                if (ret)
4692                        pr_info("ftrace_graph: Couldn't activate tracepoint"
4693                                " probe to kernel_sched_switch\n");
4694        }
4695
4696        kfree(ret_stack_list);
4697        return ret;
4698}
4699
4700/*
4701 * Hibernation protection.
4702 * The state of the current task is too unstable during
4703 * suspend/restore to disk. We want to protect against that.
4704 */
4705static int
4706ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4707                                                        void *unused)
4708{
4709        switch (state) {
4710        case PM_HIBERNATION_PREPARE:
4711                pause_graph_tracing();
4712                break;
4713
4714        case PM_POST_HIBERNATION:
4715                unpause_graph_tracing();
4716                break;
4717        }
4718        return NOTIFY_DONE;
4719}
4720
4721int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4722                        trace_func_graph_ent_t entryfunc)
4723{
4724        int ret = 0;
4725
4726        mutex_lock(&ftrace_lock);
4727
4728        /* we currently allow only one tracer registered at a time */
4729        if (ftrace_graph_active) {
4730                ret = -EBUSY;
4731                goto out;
4732        }
4733
4734        ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4735        register_pm_notifier(&ftrace_suspend_notifier);
4736
4737        ftrace_graph_active++;
4738        ret = start_graph_tracing();
4739        if (ret) {
4740                ftrace_graph_active--;
4741                goto out;
4742        }
4743
4744        ftrace_graph_return = retfunc;
4745        ftrace_graph_entry = entryfunc;
4746
4747        ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4748
4749out:
4750        mutex_unlock(&ftrace_lock);
4751        return ret;
4752}
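
A minimal sketch of a graph-tracer client (the callback names are illustrative;
only one tracer may be registered at a time, per the -EBUSY check above):

#include <linux/ftrace.h>

static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
        return 1;       /* nonzero: record this function and its return */
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
        /* trace->rettime - trace->calltime gives the duration */
}

static int __init my_graph_init(void)
{
        return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_graph_exit(void)
{
        unregister_ftrace_graph();
}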
4753
4754void unregister_ftrace_graph(void)
4755{
4756        mutex_lock(&ftrace_lock);
4757
4758        if (unlikely(!ftrace_graph_active))
4759                goto out;
4760
4761        ftrace_graph_active--;
4762        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4763        ftrace_graph_entry = ftrace_graph_entry_stub;
4764        ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4765        unregister_pm_notifier(&ftrace_suspend_notifier);
4766        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4767
4768 out:
4769        mutex_unlock(&ftrace_lock);
4770}
4771
4772static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4773
4774static void
4775graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4776{
4777        atomic_set(&t->tracing_graph_pause, 0);
4778        atomic_set(&t->trace_overrun, 0);
4779        t->ftrace_timestamp = 0;
4780        /* make curr_ret_stack visible before we add the ret_stack */
4781        smp_wmb();
4782        t->ret_stack = ret_stack;
4783}
4784
4785/*
4786 * Allocate a return stack for the idle task. May be the first
4787 * time through, or it may be done by CPU hotplug online.
4788 */
4789void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4790{
4791        t->curr_ret_stack = -1;
4792        /*
4793         * The idle task has no parent, it either has its own
4794         * stack or no stack at all.
4795         */
4796        if (t->ret_stack)
4797                WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4798
4799        if (ftrace_graph_active) {
4800                struct ftrace_ret_stack *ret_stack;
4801
4802                ret_stack = per_cpu(idle_ret_stack, cpu);
4803                if (!ret_stack) {
4804                        ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4805                                            * sizeof(struct ftrace_ret_stack),
4806                                            GFP_KERNEL);
4807                        if (!ret_stack)
4808                                return;
4809                        per_cpu(idle_ret_stack, cpu) = ret_stack;
4810                }
4811                graph_init_task(t, ret_stack);
4812        }
4813}
4814
4815/* Allocate a return stack for newly created task */
4816void ftrace_graph_init_task(struct task_struct *t)
4817{
4818        /* Make sure we do not use the parent ret_stack */
4819        t->ret_stack = NULL;
4820        t->curr_ret_stack = -1;
4821
4822        if (ftrace_graph_active) {
4823                struct ftrace_ret_stack *ret_stack;
4824
4825                ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4826                                * sizeof(struct ftrace_ret_stack),
4827                                GFP_KERNEL);
4828                if (!ret_stack)
4829                        return;
4830                graph_init_task(t, ret_stack);
4831        }
4832}
4833
4834void ftrace_graph_exit_task(struct task_struct *t)
4835{
4836        struct ftrace_ret_stack *ret_stack = t->ret_stack;
4837
4838        t->ret_stack = NULL;
4839        /* NULL must become visible to IRQs before we free it: */
4840        barrier();
4841
4842        kfree(ret_stack);
4843}
4844
4845void ftrace_graph_stop(void)
4846{
4847        ftrace_stop();
4848}
4849#endif
4850