linux/kernel/trace/ftrace.c
   1/*
   2 * Infrastructure for profiling code inserted by 'gcc -pg'.
   3 *
   4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
   6 *
   7 * Originally ported from the -rt patch by:
   8 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
   9 *
  10 * Based on code in the latency_tracer, that is:
  11 *
  12 *  Copyright (C) 2004-2006 Ingo Molnar
  13 *  Copyright (C) 2004 William Lee Irwin III
  14 */
  15
  16#include <linux/stop_machine.h>
  17#include <linux/clocksource.h>
  18#include <linux/kallsyms.h>
  19#include <linux/seq_file.h>
  20#include <linux/suspend.h>
  21#include <linux/debugfs.h>
  22#include <linux/hardirq.h>
  23#include <linux/kthread.h>
  24#include <linux/uaccess.h>
  25#include <linux/bsearch.h>
  26#include <linux/module.h>
  27#include <linux/ftrace.h>
  28#include <linux/sysctl.h>
  29#include <linux/slab.h>
  30#include <linux/ctype.h>
  31#include <linux/sort.h>
  32#include <linux/list.h>
  33#include <linux/hash.h>
  34#include <linux/rcupdate.h>
  35
  36#include <trace/events/sched.h>
  37
  38#include <asm/setup.h>
  39
  40#include "trace_output.h"
  41#include "trace_stat.h"
  42
  43#define FTRACE_WARN_ON(cond)                    \
  44        ({                                      \
  45                int ___r = cond;                \
  46                if (WARN_ON(___r))              \
  47                        ftrace_kill();          \
  48                ___r;                           \
  49        })
  50
  51#define FTRACE_WARN_ON_ONCE(cond)               \
  52        ({                                      \
  53                int ___r = cond;                \
  54                if (WARN_ON_ONCE(___r))         \
  55                        ftrace_kill();          \
  56                ___r;                           \
  57        })
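
/*
 * Note: both macros above evaluate to the value of @cond, so they can
 * be used directly inside an if () condition.  On a failed check they
 * also call ftrace_kill() to shut function tracing down, since a
 * tripped warning here means the ftrace internal state can no longer
 * be trusted.
 */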
  58
  59/* hash bits for specific function selection */
  60#define FTRACE_HASH_BITS 7
  61#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
  62#define FTRACE_HASH_DEFAULT_BITS 10
  63#define FTRACE_HASH_MAX_BITS 12
  64
  65#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
  66
   67/* ftrace_enabled is used to turn ftrace on or off */
  68int ftrace_enabled __read_mostly;
  69static int last_ftrace_enabled;
  70
  71/* Quick disabling of function tracer. */
  72int function_trace_stop;
  73
  74/* List for set_ftrace_pid's pids. */
  75LIST_HEAD(ftrace_pids);
  76struct ftrace_pid {
  77        struct list_head list;
  78        struct pid *pid;
  79};
  80
  81/*
  82 * ftrace_disabled is set when an anomaly is discovered.
  83 * ftrace_disabled is much stronger than ftrace_enabled.
  84 */
  85static int ftrace_disabled __read_mostly;
  86
  87static DEFINE_MUTEX(ftrace_lock);
  88
  89static struct ftrace_ops ftrace_list_end __read_mostly = {
  90        .func           = ftrace_stub,
  91};
  92
  93static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
  94static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
  95static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
  96ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
  97static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
  98ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
  99ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 100static struct ftrace_ops global_ops;
 101static struct ftrace_ops control_ops;
 102
 103static void
 104ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
 105
 106/*
 107 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 108 * can use rcu_dereference_raw() is that elements removed from this list
 109 * are simply leaked, so there is no need to interact with a grace-period
 110 * mechanism.  The rcu_dereference_raw() calls are needed to handle
 111 * concurrent insertions into the ftrace_global_list.
 112 *
 113 * Silly Alpha and silly pointer-speculation compiler optimizations!
 114 */
 115static void ftrace_global_list_func(unsigned long ip,
 116                                    unsigned long parent_ip)
 117{
 118        struct ftrace_ops *op;
 119
 120        if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
 121                return;
 122
 123        trace_recursion_set(TRACE_GLOBAL_BIT);
 124        op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 125        while (op != &ftrace_list_end) {
 126                op->func(ip, parent_ip);
 127                op = rcu_dereference_raw(op->next); /*see above*/
  128        }
 129        trace_recursion_clear(TRACE_GLOBAL_BIT);
 130}
 131
 132static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
 133{
 134        if (!test_tsk_trace_trace(current))
 135                return;
 136
 137        ftrace_pid_function(ip, parent_ip);
 138}
 139
 140static void set_ftrace_pid_function(ftrace_func_t func)
 141{
 142        /* do not set ftrace_pid_function to itself! */
 143        if (func != ftrace_pid_func)
 144                ftrace_pid_function = func;
 145}
 146
 147/**
 148 * clear_ftrace_function - reset the ftrace function
 149 *
 150 * This NULLs the ftrace function and in essence stops
  151 * tracing.  There may be a lag before the update takes effect.
 152 */
 153void clear_ftrace_function(void)
 154{
 155        ftrace_trace_function = ftrace_stub;
 156        __ftrace_trace_function = ftrace_stub;
 157        __ftrace_trace_function_delay = ftrace_stub;
 158        ftrace_pid_function = ftrace_stub;
 159}
 160
 161#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 162/*
  163 * For those archs that do not test function_trace_stop in their
 164 * mcount call site, we need to do it from C.
 165 */
 166static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 167{
 168        if (function_trace_stop)
 169                return;
 170
 171        __ftrace_trace_function(ip, parent_ip);
 172}
 173#endif
 174
 175static void control_ops_disable_all(struct ftrace_ops *ops)
 176{
 177        int cpu;
 178
 179        for_each_possible_cpu(cpu)
 180                *per_cpu_ptr(ops->disabled, cpu) = 1;
 181}
 182
 183static int control_ops_alloc(struct ftrace_ops *ops)
 184{
 185        int __percpu *disabled;
 186
 187        disabled = alloc_percpu(int);
 188        if (!disabled)
 189                return -ENOMEM;
 190
 191        ops->disabled = disabled;
 192        control_ops_disable_all(ops);
 193        return 0;
 194}
 195
 196static void control_ops_free(struct ftrace_ops *ops)
 197{
 198        free_percpu(ops->disabled);
 199}
 200
 201static void update_global_ops(void)
 202{
 203        ftrace_func_t func;
 204
 205        /*
 206         * If there's only one function registered, then call that
 207         * function directly. Otherwise, we need to iterate over the
 208         * registered callers.
 209         */
 210        if (ftrace_global_list == &ftrace_list_end ||
 211            ftrace_global_list->next == &ftrace_list_end)
 212                func = ftrace_global_list->func;
 213        else
 214                func = ftrace_global_list_func;
 215
 216        /* If we filter on pids, update to use the pid function */
 217        if (!list_empty(&ftrace_pids)) {
 218                set_ftrace_pid_function(func);
 219                func = ftrace_pid_func;
 220        }
 221
 222        global_ops.func = func;
 223}
 224
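/*
 * Pick the callback that the mcount trampoline will invoke: if the
 * ops list is empty or holds a single non-dynamic entry, call that
 * ops->func directly; otherwise fall back to ftrace_ops_list_func,
 * which walks the whole list.  On archs without the mcount-test
 * support the chosen function is wrapped by ftrace_test_stop_func.
 */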
 225static void update_ftrace_function(void)
 226{
 227        ftrace_func_t func;
 228
 229        update_global_ops();
 230
 231        /*
  232         * If the list is empty, or has a single entry that is
  233         * not dynamic, then have the mcount trampoline call
  234         * the function directly.
 235         */
 236        if (ftrace_ops_list == &ftrace_list_end ||
 237            (ftrace_ops_list->next == &ftrace_list_end &&
 238             !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
 239                func = ftrace_ops_list->func;
 240        else
 241                func = ftrace_ops_list_func;
 242
 243#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 244        ftrace_trace_function = func;
 245#else
 246#ifdef CONFIG_DYNAMIC_FTRACE
 247        /* do not update till all functions have been modified */
 248        __ftrace_trace_function_delay = func;
 249#else
 250        __ftrace_trace_function = func;
 251#endif
 252        ftrace_trace_function =
 253                (func == ftrace_stub) ? func : ftrace_test_stop_func;
 254#endif
 255}
 256
 257static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 258{
 259        ops->next = *list;
 260        /*
 261         * We are entering ops into the list but another
 262         * CPU might be walking that list. We need to make sure
 263         * the ops->next pointer is valid before another CPU sees
 264         * the ops pointer included into the list.
 265         */
 266        rcu_assign_pointer(*list, ops);
 267}
 268
 269static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 270{
 271        struct ftrace_ops **p;
 272
 273        /*
 274         * If we are removing the last function, then simply point
 275         * to the ftrace_stub.
 276         */
 277        if (*list == ops && ops->next == &ftrace_list_end) {
 278                *list = &ftrace_list_end;
 279                return 0;
 280        }
 281
 282        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
 283                if (*p == ops)
 284                        break;
 285
 286        if (*p != ops)
 287                return -1;
 288
 289        *p = (*p)->next;
 290        return 0;
 291}
 292
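/*
 * The global and control ops live on their own sub-lists.  The two
 * helpers below keep ftrace_ops_list in sync: when a sub-list gains
 * its first entry the corresponding main_ops proxy is inserted into
 * ftrace_ops_list, and when the sub-list empties again the proxy is
 * removed.
 */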
 293static void add_ftrace_list_ops(struct ftrace_ops **list,
 294                                struct ftrace_ops *main_ops,
 295                                struct ftrace_ops *ops)
 296{
 297        int first = *list == &ftrace_list_end;
 298        add_ftrace_ops(list, ops);
 299        if (first)
 300                add_ftrace_ops(&ftrace_ops_list, main_ops);
 301}
 302
 303static int remove_ftrace_list_ops(struct ftrace_ops **list,
 304                                  struct ftrace_ops *main_ops,
 305                                  struct ftrace_ops *ops)
 306{
 307        int ret = remove_ftrace_ops(list, ops);
 308        if (!ret && *list == &ftrace_list_end)
 309                ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
 310        return ret;
 311}
 312
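/*
 * Place @ops on the appropriate list (global, control or the plain
 * ftrace_ops_list) and, if tracing is enabled, re-select the callback
 * used by the trampoline.  Callers are expected to hold ftrace_lock.
 */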
 313static int __register_ftrace_function(struct ftrace_ops *ops)
 314{
 315        if (unlikely(ftrace_disabled))
 316                return -ENODEV;
 317
 318        if (FTRACE_WARN_ON(ops == &global_ops))
 319                return -EINVAL;
 320
 321        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 322                return -EBUSY;
 323
 324        /* We don't support both control and global flags set. */
 325        if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
 326                return -EINVAL;
 327
 328        if (!core_kernel_data((unsigned long)ops))
 329                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 330
 331        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
 332                add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
 333                ops->flags |= FTRACE_OPS_FL_ENABLED;
 334        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 335                if (control_ops_alloc(ops))
 336                        return -ENOMEM;
 337                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
 338        } else
 339                add_ftrace_ops(&ftrace_ops_list, ops);
 340
 341        if (ftrace_enabled)
 342                update_ftrace_function();
 343
 344        return 0;
 345}
 346
 347static int __unregister_ftrace_function(struct ftrace_ops *ops)
 348{
 349        int ret;
 350
 351        if (ftrace_disabled)
 352                return -ENODEV;
 353
 354        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 355                return -EBUSY;
 356
 357        if (FTRACE_WARN_ON(ops == &global_ops))
 358                return -EINVAL;
 359
 360        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
 361                ret = remove_ftrace_list_ops(&ftrace_global_list,
 362                                             &global_ops, ops);
 363                if (!ret)
 364                        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 365        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 366                ret = remove_ftrace_list_ops(&ftrace_control_list,
 367                                             &control_ops, ops);
 368                if (!ret) {
 369                        /*
 370                         * The ftrace_ops is now removed from the list,
 371                         * so there'll be no new users. We must ensure
 372                         * all current users are done before we free
 373                         * the control data.
 374                         */
 375                        synchronize_sched();
 376                        control_ops_free(ops);
 377                }
 378        } else
 379                ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 380
 381        if (ret < 0)
 382                return ret;
 383
 384        if (ftrace_enabled)
 385                update_ftrace_function();
 386
 387        /*
 388         * Dynamic ops may be freed, we must make sure that all
 389         * callers are done before leaving this function.
 390         */
 391        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
 392                synchronize_sched();
 393
 394        return 0;
 395}
 396
 397static void ftrace_update_pid_func(void)
 398{
 399        /* Only do something if we are tracing something */
 400        if (ftrace_trace_function == ftrace_stub)
 401                return;
 402
 403        update_ftrace_function();
 404}
 405
 406#ifdef CONFIG_FUNCTION_PROFILER
 407struct ftrace_profile {
 408        struct hlist_node               node;
 409        unsigned long                   ip;
 410        unsigned long                   counter;
 411#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 412        unsigned long long              time;
 413        unsigned long long              time_squared;
 414#endif
 415};
 416
 417struct ftrace_profile_page {
 418        struct ftrace_profile_page      *next;
 419        unsigned long                   index;
 420        struct ftrace_profile           records[];
 421};
 422
 423struct ftrace_profile_stat {
 424        atomic_t                        disabled;
 425        struct hlist_head               *hash;
 426        struct ftrace_profile_page      *pages;
 427        struct ftrace_profile_page      *start;
 428        struct tracer_stat              stat;
 429};
 430
 431#define PROFILE_RECORDS_SIZE                                            \
 432        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
 433
 434#define PROFILES_PER_PAGE                                       \
 435        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
 436
 437static int ftrace_profile_bits __read_mostly;
 438static int ftrace_profile_enabled __read_mostly;
 439
 440/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
 441static DEFINE_MUTEX(ftrace_profile_lock);
 442
 443static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
 444
 445#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
 446
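/*
 * Iterator callbacks for the stat tracer.  Profile records are packed
 * into page-sized ftrace_profile_page blocks, so the containing page
 * header can be recovered from a record pointer by masking with
 * PAGE_MASK, which is how function_stat_next() steps across pages.
 */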
 447static void *
 448function_stat_next(void *v, int idx)
 449{
 450        struct ftrace_profile *rec = v;
 451        struct ftrace_profile_page *pg;
 452
 453        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 454
 455 again:
 456        if (idx != 0)
 457                rec++;
 458
 459        if ((void *)rec >= (void *)&pg->records[pg->index]) {
 460                pg = pg->next;
 461                if (!pg)
 462                        return NULL;
 463                rec = &pg->records[0];
 464                if (!rec->counter)
 465                        goto again;
 466        }
 467
 468        return rec;
 469}
 470
 471static void *function_stat_start(struct tracer_stat *trace)
 472{
 473        struct ftrace_profile_stat *stat =
 474                container_of(trace, struct ftrace_profile_stat, stat);
 475
 476        if (!stat || !stat->start)
 477                return NULL;
 478
 479        return function_stat_next(&stat->start->records[0], 0);
 480}
 481
 482#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 483/* function graph compares on total time */
 484static int function_stat_cmp(void *p1, void *p2)
 485{
 486        struct ftrace_profile *a = p1;
 487        struct ftrace_profile *b = p2;
 488
 489        if (a->time < b->time)
 490                return -1;
 491        if (a->time > b->time)
 492                return 1;
 493        else
 494                return 0;
 495}
 496#else
  497/* without function graph, compare against hit counts */
 498static int function_stat_cmp(void *p1, void *p2)
 499{
 500        struct ftrace_profile *a = p1;
 501        struct ftrace_profile *b = p2;
 502
 503        if (a->counter < b->counter)
 504                return -1;
 505        if (a->counter > b->counter)
 506                return 1;
 507        else
 508                return 0;
 509}
 510#endif
 511
 512static int function_stat_headers(struct seq_file *m)
 513{
 514#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 515        seq_printf(m, "  Function                               "
 516                   "Hit    Time            Avg             s^2\n"
 517                      "  --------                               "
 518                   "---    ----            ---             ---\n");
 519#else
 520        seq_printf(m, "  Function                               Hit\n"
 521                      "  --------                               ---\n");
 522#endif
 523        return 0;
 524}
 525
 526static int function_stat_show(struct seq_file *m, void *v)
 527{
 528        struct ftrace_profile *rec = v;
 529        char str[KSYM_SYMBOL_LEN];
 530        int ret = 0;
 531#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 532        static struct trace_seq s;
 533        unsigned long long avg;
 534        unsigned long long stddev;
 535#endif
 536        mutex_lock(&ftrace_profile_lock);
 537
 538        /* we raced with function_profile_reset() */
 539        if (unlikely(rec->counter == 0)) {
 540                ret = -EBUSY;
 541                goto out;
 542        }
 543
 544        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 545        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 546
 547#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 548        seq_printf(m, "    ");
 549        avg = rec->time;
 550        do_div(avg, rec->counter);
 551
 552        /* Sample standard deviation (s^2) */
 553        if (rec->counter <= 1)
 554                stddev = 0;
 555        else {
 556                stddev = rec->time_squared - rec->counter * avg * avg;
 557                /*
   558                 * Divide by only 1000 for the ns^2 -> us^2 conversion.
   559                 * trace_print_graph_duration will divide by 1000 again.
 560                 */
 561                do_div(stddev, (rec->counter - 1) * 1000);
 562        }
 563
 564        trace_seq_init(&s);
 565        trace_print_graph_duration(rec->time, &s);
 566        trace_seq_puts(&s, "    ");
 567        trace_print_graph_duration(avg, &s);
 568        trace_seq_puts(&s, "    ");
 569        trace_print_graph_duration(stddev, &s);
 570        trace_print_seq(m, &s);
 571#endif
 572        seq_putc(m, '\n');
 573out:
 574        mutex_unlock(&ftrace_profile_lock);
 575
 576        return ret;
 577}
 578
 579static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
 580{
 581        struct ftrace_profile_page *pg;
 582
 583        pg = stat->pages = stat->start;
 584
 585        while (pg) {
 586                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
 587                pg->index = 0;
 588                pg = pg->next;
 589        }
 590
 591        memset(stat->hash, 0,
 592               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
 593}
 594
 595int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 596{
 597        struct ftrace_profile_page *pg;
 598        int functions;
 599        int pages;
 600        int i;
 601
 602        /* If we already allocated, do nothing */
 603        if (stat->pages)
 604                return 0;
 605
 606        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
 607        if (!stat->pages)
 608                return -ENOMEM;
 609
 610#ifdef CONFIG_DYNAMIC_FTRACE
 611        functions = ftrace_update_tot_cnt;
 612#else
 613        /*
 614         * We do not know the number of functions that exist because
 615         * dynamic tracing is what counts them. With past experience
 616         * we have around 20K functions. That should be more than enough.
 617         * It is highly unlikely we will execute every function in
 618         * the kernel.
 619         */
 620        functions = 20000;
 621#endif
 622
 623        pg = stat->start = stat->pages;
 624
 625        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
 626
 627        for (i = 0; i < pages; i++) {
 628                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
 629                if (!pg->next)
 630                        goto out_free;
 631                pg = pg->next;
 632        }
 633
 634        return 0;
 635
 636 out_free:
 637        pg = stat->start;
 638        while (pg) {
 639                unsigned long tmp = (unsigned long)pg;
 640
 641                pg = pg->next;
 642                free_page(tmp);
 643        }
 644
 645        free_page((unsigned long)stat->pages);
 646        stat->pages = NULL;
 647        stat->start = NULL;
 648
 649        return -ENOMEM;
 650}
 651
 652static int ftrace_profile_init_cpu(int cpu)
 653{
 654        struct ftrace_profile_stat *stat;
 655        int size;
 656
 657        stat = &per_cpu(ftrace_profile_stats, cpu);
 658
 659        if (stat->hash) {
 660                /* If the profile is already created, simply reset it */
 661                ftrace_profile_reset(stat);
 662                return 0;
 663        }
 664
 665        /*
 666         * We are profiling all functions, but usually only a few thousand
 667         * functions are hit. We'll make a hash of 1024 items.
 668         */
 669        size = FTRACE_PROFILE_HASH_SIZE;
 670
 671        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
 672
 673        if (!stat->hash)
 674                return -ENOMEM;
 675
 676        if (!ftrace_profile_bits) {
 677                size--;
 678
 679                for (; size; size >>= 1)
 680                        ftrace_profile_bits++;
 681        }
 682
 683        /* Preallocate the function profiling pages */
 684        if (ftrace_profile_pages_init(stat) < 0) {
 685                kfree(stat->hash);
 686                stat->hash = NULL;
 687                return -ENOMEM;
 688        }
 689
 690        return 0;
 691}
 692
 693static int ftrace_profile_init(void)
 694{
 695        int cpu;
 696        int ret = 0;
 697
 698        for_each_online_cpu(cpu) {
 699                ret = ftrace_profile_init_cpu(cpu);
 700                if (ret)
 701                        break;
 702        }
 703
 704        return ret;
 705}
 706
 707/* interrupts must be disabled */
 708static struct ftrace_profile *
 709ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 710{
 711        struct ftrace_profile *rec;
 712        struct hlist_head *hhd;
 713        struct hlist_node *n;
 714        unsigned long key;
 715
 716        key = hash_long(ip, ftrace_profile_bits);
 717        hhd = &stat->hash[key];
 718
 719        if (hlist_empty(hhd))
 720                return NULL;
 721
 722        hlist_for_each_entry_rcu(rec, n, hhd, node) {
 723                if (rec->ip == ip)
 724                        return rec;
 725        }
 726
 727        return NULL;
 728}
 729
 730static void ftrace_add_profile(struct ftrace_profile_stat *stat,
 731                               struct ftrace_profile *rec)
 732{
 733        unsigned long key;
 734
 735        key = hash_long(rec->ip, ftrace_profile_bits);
 736        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
 737}
 738
 739/*
  740 * The memory is already allocated; this simply finds a new record to use.
 741 */
 742static struct ftrace_profile *
 743ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 744{
 745        struct ftrace_profile *rec = NULL;
 746
 747        /* prevent recursion (from NMIs) */
 748        if (atomic_inc_return(&stat->disabled) != 1)
 749                goto out;
 750
 751        /*
 752         * Try to find the function again since an NMI
 753         * could have added it
 754         */
 755        rec = ftrace_find_profiled_func(stat, ip);
 756        if (rec)
 757                goto out;
 758
 759        if (stat->pages->index == PROFILES_PER_PAGE) {
 760                if (!stat->pages->next)
 761                        goto out;
 762                stat->pages = stat->pages->next;
 763        }
 764
 765        rec = &stat->pages->records[stat->pages->index++];
 766        rec->ip = ip;
 767        ftrace_add_profile(stat, rec);
 768
 769 out:
 770        atomic_dec(&stat->disabled);
 771
 772        return rec;
 773}
 774
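/*
 * Profiling callback invoked for every traced function.  It runs with
 * interrupts disabled, looks up (or lazily allocates) the per-cpu
 * record for @ip and bumps its hit counter; the timing fields are
 * filled in by the graph-tracer hooks below when they are available.
 */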
 775static void
 776function_profile_call(unsigned long ip, unsigned long parent_ip)
 777{
 778        struct ftrace_profile_stat *stat;
 779        struct ftrace_profile *rec;
 780        unsigned long flags;
 781
 782        if (!ftrace_profile_enabled)
 783                return;
 784
 785        local_irq_save(flags);
 786
 787        stat = &__get_cpu_var(ftrace_profile_stats);
 788        if (!stat->hash || !ftrace_profile_enabled)
 789                goto out;
 790
 791        rec = ftrace_find_profiled_func(stat, ip);
 792        if (!rec) {
 793                rec = ftrace_profile_alloc(stat, ip);
 794                if (!rec)
 795                        goto out;
 796        }
 797
 798        rec->counter++;
 799 out:
 800        local_irq_restore(flags);
 801}
 802
 803#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 804static int profile_graph_entry(struct ftrace_graph_ent *trace)
 805{
 806        function_profile_call(trace->func, 0);
 807        return 1;
 808}
 809
 810static void profile_graph_return(struct ftrace_graph_ret *trace)
 811{
 812        struct ftrace_profile_stat *stat;
 813        unsigned long long calltime;
 814        struct ftrace_profile *rec;
 815        unsigned long flags;
 816
 817        local_irq_save(flags);
 818        stat = &__get_cpu_var(ftrace_profile_stats);
 819        if (!stat->hash || !ftrace_profile_enabled)
 820                goto out;
 821
  822        /* If the calltime was zeroed, ignore it */
 823        if (!trace->calltime)
 824                goto out;
 825
 826        calltime = trace->rettime - trace->calltime;
 827
 828        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
 829                int index;
 830
 831                index = trace->depth;
 832
 833                /* Append this call time to the parent time to subtract */
 834                if (index)
 835                        current->ret_stack[index - 1].subtime += calltime;
 836
 837                if (current->ret_stack[index].subtime < calltime)
 838                        calltime -= current->ret_stack[index].subtime;
 839                else
 840                        calltime = 0;
 841        }
 842
 843        rec = ftrace_find_profiled_func(stat, trace->func);
 844        if (rec) {
 845                rec->time += calltime;
 846                rec->time_squared += calltime * calltime;
 847        }
 848
 849 out:
 850        local_irq_restore(flags);
 851}
 852
 853static int register_ftrace_profiler(void)
 854{
 855        return register_ftrace_graph(&profile_graph_return,
 856                                     &profile_graph_entry);
 857}
 858
 859static void unregister_ftrace_profiler(void)
 860{
 861        unregister_ftrace_graph();
 862}
 863#else
 864static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 865        .func           = function_profile_call,
 866};
 867
 868static int register_ftrace_profiler(void)
 869{
 870        return register_ftrace_function(&ftrace_profile_ops);
 871}
 872
 873static void unregister_ftrace_profiler(void)
 874{
 875        unregister_ftrace_function(&ftrace_profile_ops);
 876}
 877#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 878
 879static ssize_t
 880ftrace_profile_write(struct file *filp, const char __user *ubuf,
 881                     size_t cnt, loff_t *ppos)
 882{
 883        unsigned long val;
 884        int ret;
 885
 886        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 887        if (ret)
 888                return ret;
 889
 890        val = !!val;
 891
 892        mutex_lock(&ftrace_profile_lock);
 893        if (ftrace_profile_enabled ^ val) {
 894                if (val) {
 895                        ret = ftrace_profile_init();
 896                        if (ret < 0) {
 897                                cnt = ret;
 898                                goto out;
 899                        }
 900
 901                        ret = register_ftrace_profiler();
 902                        if (ret < 0) {
 903                                cnt = ret;
 904                                goto out;
 905                        }
 906                        ftrace_profile_enabled = 1;
 907                } else {
 908                        ftrace_profile_enabled = 0;
 909                        /*
 910                         * unregister_ftrace_profiler calls stop_machine
  911                         * so this acts like a synchronize_sched().
 912                         */
 913                        unregister_ftrace_profiler();
 914                }
 915        }
 916 out:
 917        mutex_unlock(&ftrace_profile_lock);
 918
 919        *ppos += cnt;
 920
 921        return cnt;
 922}
 923
 924static ssize_t
 925ftrace_profile_read(struct file *filp, char __user *ubuf,
 926                     size_t cnt, loff_t *ppos)
 927{
 928        char buf[64];           /* big enough to hold a number */
 929        int r;
 930
 931        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
 932        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 933}
 934
 935static const struct file_operations ftrace_profile_fops = {
 936        .open           = tracing_open_generic,
 937        .read           = ftrace_profile_read,
 938        .write          = ftrace_profile_write,
 939        .llseek         = default_llseek,
 940};
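
/*
 * The file created from these operations, "function_profile_enabled",
 * lives in the tracing debugfs directory (typically
 * /sys/kernel/debug/tracing).  Writing 1 starts the profiler and
 * writing 0 stops it; the per-cpu results are then readable from the
 * trace_stat/function<N> files registered below.
 */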
 941
 942/* used to initialize the real stat files */
 943static struct tracer_stat function_stats __initdata = {
 944        .name           = "functions",
 945        .stat_start     = function_stat_start,
 946        .stat_next      = function_stat_next,
 947        .stat_cmp       = function_stat_cmp,
 948        .stat_headers   = function_stat_headers,
 949        .stat_show      = function_stat_show
 950};
 951
 952static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 953{
 954        struct ftrace_profile_stat *stat;
 955        struct dentry *entry;
 956        char *name;
 957        int ret;
 958        int cpu;
 959
 960        for_each_possible_cpu(cpu) {
 961                stat = &per_cpu(ftrace_profile_stats, cpu);
 962
 963                /* allocate enough for function name + cpu number */
 964                name = kmalloc(32, GFP_KERNEL);
 965                if (!name) {
 966                        /*
  967                         * The files created are permanent; if something goes
  968                         * wrong here, we still do not free the memory.
 969                         */
 970                        WARN(1,
 971                             "Could not allocate stat file for cpu %d\n",
 972                             cpu);
 973                        return;
 974                }
 975                stat->stat = function_stats;
 976                snprintf(name, 32, "function%d", cpu);
 977                stat->stat.name = name;
 978                ret = register_stat_tracer(&stat->stat);
 979                if (ret) {
 980                        WARN(1,
 981                             "Could not register function stat for cpu %d\n",
 982                             cpu);
 983                        kfree(name);
 984                        return;
 985                }
 986        }
 987
 988        entry = debugfs_create_file("function_profile_enabled", 0644,
 989                                    d_tracer, NULL, &ftrace_profile_fops);
 990        if (!entry)
 991                pr_warning("Could not create debugfs "
 992                           "'function_profile_enabled' entry\n");
 993}
 994
 995#else /* CONFIG_FUNCTION_PROFILER */
 996static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 997{
 998}
 999#endif /* CONFIG_FUNCTION_PROFILER */
1000
1001static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1002
1003#ifdef CONFIG_DYNAMIC_FTRACE
1004
1005#ifndef CONFIG_FTRACE_MCOUNT_RECORD
1006# error Dynamic ftrace depends on MCOUNT_RECORD
1007#endif
1008
1009static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1010
1011struct ftrace_func_probe {
1012        struct hlist_node       node;
1013        struct ftrace_probe_ops *ops;
1014        unsigned long           flags;
1015        unsigned long           ip;
1016        void                    *data;
1017        struct rcu_head         rcu;
1018};
1019
1020struct ftrace_func_entry {
1021        struct hlist_node hlist;
1022        unsigned long ip;
1023};
1024
1025struct ftrace_hash {
1026        unsigned long           size_bits;
1027        struct hlist_head       *buckets;
1028        unsigned long           count;
1029        struct rcu_head         rcu;
1030};
1031
1032/*
1033 * We make these constant because no one should touch them,
1034 * but they are used as the default "empty hash", to avoid allocating
1035 * it all the time. These are in a read only section such that if
1036 * anyone does try to modify it, it will cause an exception.
1037 */
1038static const struct hlist_head empty_buckets[1];
1039static const struct ftrace_hash empty_hash = {
1040        .buckets = (struct hlist_head *)empty_buckets,
1041};
1042#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
1043
1044static struct ftrace_ops global_ops = {
1045        .func                   = ftrace_stub,
1046        .notrace_hash           = EMPTY_HASH,
1047        .filter_hash            = EMPTY_HASH,
1048};
1049
1050static DEFINE_MUTEX(ftrace_regex_lock);
1051
1052struct ftrace_page {
1053        struct ftrace_page      *next;
1054        struct dyn_ftrace       *records;
1055        int                     index;
1056        int                     size;
1057};
1058
1059static struct ftrace_page *ftrace_new_pgs;
1060
1061#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1062#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1063
1064/* estimate from running different kernels */
1065#define NR_TO_INIT              10000
1066
1067static struct ftrace_page       *ftrace_pages_start;
1068static struct ftrace_page       *ftrace_pages;
1069
1070static bool ftrace_hash_empty(struct ftrace_hash *hash)
1071{
1072        return !hash || !hash->count;
1073}
1074
1075static struct ftrace_func_entry *
1076ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1077{
1078        unsigned long key;
1079        struct ftrace_func_entry *entry;
1080        struct hlist_head *hhd;
1081        struct hlist_node *n;
1082
1083        if (ftrace_hash_empty(hash))
1084                return NULL;
1085
1086        if (hash->size_bits > 0)
1087                key = hash_long(ip, hash->size_bits);
1088        else
1089                key = 0;
1090
1091        hhd = &hash->buckets[key];
1092
1093        hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1094                if (entry->ip == ip)
1095                        return entry;
1096        }
1097        return NULL;
1098}
1099
1100static void __add_hash_entry(struct ftrace_hash *hash,
1101                             struct ftrace_func_entry *entry)
1102{
1103        struct hlist_head *hhd;
1104        unsigned long key;
1105
1106        if (hash->size_bits)
1107                key = hash_long(entry->ip, hash->size_bits);
1108        else
1109                key = 0;
1110
1111        hhd = &hash->buckets[key];
1112        hlist_add_head(&entry->hlist, hhd);
1113        hash->count++;
1114}
1115
1116static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1117{
1118        struct ftrace_func_entry *entry;
1119
1120        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1121        if (!entry)
1122                return -ENOMEM;
1123
1124        entry->ip = ip;
1125        __add_hash_entry(hash, entry);
1126
1127        return 0;
1128}
1129
1130static void
1131free_hash_entry(struct ftrace_hash *hash,
1132                  struct ftrace_func_entry *entry)
1133{
1134        hlist_del(&entry->hlist);
1135        kfree(entry);
1136        hash->count--;
1137}
1138
1139static void
1140remove_hash_entry(struct ftrace_hash *hash,
1141                  struct ftrace_func_entry *entry)
1142{
1143        hlist_del(&entry->hlist);
1144        hash->count--;
1145}
1146
1147static void ftrace_hash_clear(struct ftrace_hash *hash)
1148{
1149        struct hlist_head *hhd;
1150        struct hlist_node *tp, *tn;
1151        struct ftrace_func_entry *entry;
1152        int size = 1 << hash->size_bits;
1153        int i;
1154
1155        if (!hash->count)
1156                return;
1157
1158        for (i = 0; i < size; i++) {
1159                hhd = &hash->buckets[i];
1160                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1161                        free_hash_entry(hash, entry);
1162        }
1163        FTRACE_WARN_ON(hash->count);
1164}
1165
1166static void free_ftrace_hash(struct ftrace_hash *hash)
1167{
1168        if (!hash || hash == EMPTY_HASH)
1169                return;
1170        ftrace_hash_clear(hash);
1171        kfree(hash->buckets);
1172        kfree(hash);
1173}
1174
1175static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1176{
1177        struct ftrace_hash *hash;
1178
1179        hash = container_of(rcu, struct ftrace_hash, rcu);
1180        free_ftrace_hash(hash);
1181}
1182
1183static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1184{
1185        if (!hash || hash == EMPTY_HASH)
1186                return;
1187        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1188}
1189
1190void ftrace_free_filter(struct ftrace_ops *ops)
1191{
1192        free_ftrace_hash(ops->filter_hash);
1193        free_ftrace_hash(ops->notrace_hash);
1194}
1195
1196static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1197{
1198        struct ftrace_hash *hash;
1199        int size;
1200
1201        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1202        if (!hash)
1203                return NULL;
1204
1205        size = 1 << size_bits;
1206        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1207
1208        if (!hash->buckets) {
1209                kfree(hash);
1210                return NULL;
1211        }
1212
1213        hash->size_bits = size_bits;
1214
1215        return hash;
1216}
1217
1218static struct ftrace_hash *
1219alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1220{
1221        struct ftrace_func_entry *entry;
1222        struct ftrace_hash *new_hash;
1223        struct hlist_node *tp;
1224        int size;
1225        int ret;
1226        int i;
1227
1228        new_hash = alloc_ftrace_hash(size_bits);
1229        if (!new_hash)
1230                return NULL;
1231
1232        /* Empty hash? */
1233        if (ftrace_hash_empty(hash))
1234                return new_hash;
1235
1236        size = 1 << hash->size_bits;
1237        for (i = 0; i < size; i++) {
1238                hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1239                        ret = add_hash_entry(new_hash, entry->ip);
1240                        if (ret < 0)
1241                                goto free_hash;
1242                }
1243        }
1244
1245        FTRACE_WARN_ON(new_hash->count != hash->count);
1246
1247        return new_hash;
1248
1249 free_hash:
1250        free_ftrace_hash(new_hash);
1251        return NULL;
1252}
1253
1254static void
1255ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1256static void
1257ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1258
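/*
 * Replace *dst with a hash rebuilt from @src.  The new hash is sized
 * to roughly half the number of entries found (capped at
 * FTRACE_HASH_MAX_BITS), the swap is published with
 * rcu_assign_pointer() and the old hash is freed via call_rcu_sched().
 * Record accounting is disabled across the swap and re-enabled
 * whether or not the move succeeds.
 */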
1259static int
1260ftrace_hash_move(struct ftrace_ops *ops, int enable,
1261                 struct ftrace_hash **dst, struct ftrace_hash *src)
1262{
1263        struct ftrace_func_entry *entry;
1264        struct hlist_node *tp, *tn;
1265        struct hlist_head *hhd;
1266        struct ftrace_hash *old_hash;
1267        struct ftrace_hash *new_hash;
1268        unsigned long key;
1269        int size = src->count;
1270        int bits = 0;
1271        int ret;
1272        int i;
1273
1274        /*
1275         * Remove the current set, update the hash and add
1276         * them back.
1277         */
1278        ftrace_hash_rec_disable(ops, enable);
1279
1280        /*
1281         * If the new source is empty, just free dst and assign it
1282         * the empty_hash.
1283         */
1284        if (!src->count) {
1285                free_ftrace_hash_rcu(*dst);
1286                rcu_assign_pointer(*dst, EMPTY_HASH);
1287                /* still need to update the function records */
1288                ret = 0;
1289                goto out;
1290        }
1291
1292        /*
1293         * Make the hash size about 1/2 the # found
1294         */
1295        for (size /= 2; size; size >>= 1)
1296                bits++;
1297
1298        /* Don't allocate too much */
1299        if (bits > FTRACE_HASH_MAX_BITS)
1300                bits = FTRACE_HASH_MAX_BITS;
1301
1302        ret = -ENOMEM;
1303        new_hash = alloc_ftrace_hash(bits);
1304        if (!new_hash)
1305                goto out;
1306
1307        size = 1 << src->size_bits;
1308        for (i = 0; i < size; i++) {
1309                hhd = &src->buckets[i];
1310                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1311                        if (bits > 0)
1312                                key = hash_long(entry->ip, bits);
1313                        else
1314                                key = 0;
1315                        remove_hash_entry(src, entry);
1316                        __add_hash_entry(new_hash, entry);
1317                }
1318        }
1319
1320        old_hash = *dst;
1321        rcu_assign_pointer(*dst, new_hash);
1322        free_ftrace_hash_rcu(old_hash);
1323
1324        ret = 0;
1325 out:
1326        /*
1327         * Enable regardless of ret:
1328         *  On success, we enable the new hash.
1329         *  On failure, we re-enable the original hash.
1330         */
1331        ftrace_hash_rec_enable(ops, enable);
1332
1333        return ret;
1334}
1335
1336/*
1337 * Test the hashes for this ops to see if we want to call
1338 * the ops->func or not.
1339 *
1340 * It's a match if the ip is in the ops->filter_hash or
1341 * the filter_hash does not exist or is empty,
1342 *  AND
1343 * the ip is not in the ops->notrace_hash.
1344 *
1345 * This needs to be called with preemption disabled as
1346 * the hashes are freed with call_rcu_sched().
1347 */
1348static int
1349ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1350{
1351        struct ftrace_hash *filter_hash;
1352        struct ftrace_hash *notrace_hash;
1353        int ret;
1354
1355        filter_hash = rcu_dereference_raw(ops->filter_hash);
1356        notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1357
1358        if ((ftrace_hash_empty(filter_hash) ||
1359             ftrace_lookup_ip(filter_hash, ip)) &&
1360            (ftrace_hash_empty(notrace_hash) ||
1361             !ftrace_lookup_ip(notrace_hash, ip)))
1362                ret = 1;
1363        else
1364                ret = 0;
1365
1366        return ret;
1367}
1368
1369/*
 1370 * This is a nested double for loop. Do not use 'break' to break out of it;
1371 * you must use a goto.
1372 */
1373#define do_for_each_ftrace_rec(pg, rec)                                 \
1374        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1375                int _____i;                                             \
1376                for (_____i = 0; _____i < pg->index; _____i++) {        \
1377                        rec = &pg->records[_____i];
1378
1379#define while_for_each_ftrace_rec()             \
1380                }                               \
1381        }
1382
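/*
 * A minimal usage sketch of the two macros above (the target_ip check
 * is hypothetical):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */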
1383
1384static int ftrace_cmp_recs(const void *a, const void *b)
1385{
1386        const struct dyn_ftrace *key = a;
1387        const struct dyn_ftrace *rec = b;
1388
1389        if (key->flags < rec->ip)
1390                return -1;
1391        if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1392                return 1;
1393        return 0;
1394}
1395
1396static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1397{
1398        struct ftrace_page *pg;
1399        struct dyn_ftrace *rec;
1400        struct dyn_ftrace key;
1401
1402        key.ip = start;
1403        key.flags = end;        /* overload flags, as it is unsigned long */
1404
1405        for (pg = ftrace_pages_start; pg; pg = pg->next) {
1406                if (end < pg->records[0].ip ||
1407                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1408                        continue;
1409                rec = bsearch(&key, pg->records, pg->index,
1410                              sizeof(struct dyn_ftrace),
1411                              ftrace_cmp_recs);
1412                if (rec)
1413                        return rec->ip;
1414        }
1415
1416        return 0;
1417}
1418
1419/**
 1420 * ftrace_location - return true if the ip given is a traced location
1421 * @ip: the instruction pointer to check
1422 *
1423 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1424 * That is, the instruction that is either a NOP or call to
1425 * the function tracer. It checks the ftrace internal tables to
1426 * determine if the address belongs or not.
1427 */
1428unsigned long ftrace_location(unsigned long ip)
1429{
1430        return ftrace_location_range(ip, ip);
1431}
1432
1433/**
1434 * ftrace_text_reserved - return true if range contains an ftrace location
1435 * @start: start of range to search
1436 * @end: end of range to search (inclusive). @end points to the last byte to check.
1437 *
 1438 * Returns 1 if the range from @start to @end contains an ftrace location.
1439 * That is, the instruction that is either a NOP or call to
1440 * the function tracer. It checks the ftrace internal tables to
1441 * determine if the address belongs or not.
1442 */
1443int ftrace_text_reserved(void *start, void *end)
1444{
1445        unsigned long ret;
1446
1447        ret = ftrace_location_range((unsigned long)start,
1448                                    (unsigned long)end);
1449
1450        return (int)!!ret;
1451}
1452
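/*
 * Walk every dyn_ftrace record and adjust its reference count to
 * reflect the filter/notrace hashes of @ops.  The count stored in
 * rec->flags is what later decides whether the call site is patched
 * to call the trampoline or turned back into a NOP.
 */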
1453static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1454                                     int filter_hash,
1455                                     bool inc)
1456{
1457        struct ftrace_hash *hash;
1458        struct ftrace_hash *other_hash;
1459        struct ftrace_page *pg;
1460        struct dyn_ftrace *rec;
1461        int count = 0;
1462        int all = 0;
1463
1464        /* Only update if the ops has been registered */
1465        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1466                return;
1467
1468        /*
1469         * In the filter_hash case:
1470         *   If the count is zero, we update all records.
1471         *   Otherwise we just update the items in the hash.
1472         *
1473         * In the notrace_hash case:
1474         *   We enable the update in the hash.
1475         *   As disabling notrace means enabling the tracing,
1476         *   and enabling notrace means disabling, the inc variable
 1477         *   gets inverted.
1478         */
1479        if (filter_hash) {
1480                hash = ops->filter_hash;
1481                other_hash = ops->notrace_hash;
1482                if (ftrace_hash_empty(hash))
1483                        all = 1;
1484        } else {
1485                inc = !inc;
1486                hash = ops->notrace_hash;
1487                other_hash = ops->filter_hash;
1488                /*
1489                 * If the notrace hash has no items,
1490                 * then there's nothing to do.
1491                 */
1492                if (ftrace_hash_empty(hash))
1493                        return;
1494        }
1495
1496        do_for_each_ftrace_rec(pg, rec) {
1497                int in_other_hash = 0;
1498                int in_hash = 0;
1499                int match = 0;
1500
1501                if (all) {
1502                        /*
1503                         * Only the filter_hash affects all records.
1504                         * Update if the record is not in the notrace hash.
1505                         */
1506                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1507                                match = 1;
1508                } else {
1509                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1510                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1511
 1512                        /*
 1513                         * Match only if this hash selects the record and the other hash allows it.
 1514                         */
1515                        if (filter_hash && in_hash && !in_other_hash)
1516                                match = 1;
1517                        else if (!filter_hash && in_hash &&
1518                                 (in_other_hash || ftrace_hash_empty(other_hash)))
1519                                match = 1;
1520                }
1521                if (!match)
1522                        continue;
1523
1524                if (inc) {
1525                        rec->flags++;
1526                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1527                                return;
1528                } else {
1529                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1530                                return;
1531                        rec->flags--;
1532                }
1533                count++;
 1534                /* Shortcut: if we handled all records, we are done. */
1535                if (!all && count == hash->count)
1536                        return;
1537        } while_for_each_ftrace_rec();
1538}
1539
1540static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1541                                    int filter_hash)
1542{
1543        __ftrace_hash_rec_update(ops, filter_hash, 0);
1544}
1545
1546static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1547                                   int filter_hash)
1548{
1549        __ftrace_hash_rec_update(ops, filter_hash, 1);
1550}
1551
1552static void print_ip_ins(const char *fmt, unsigned char *p)
1553{
1554        int i;
1555
1556        printk(KERN_CONT "%s", fmt);
1557
1558        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1559                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1560}
1561
1562/**
1563 * ftrace_bug - report and shutdown function tracer
1564 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1565 * @ip: The address that failed
1566 *
1567 * The arch code that enables or disables the function tracing
1568 * can call ftrace_bug() when it has detected a problem in
1569 * modifying the code. @failed should be one of either:
1570 * EFAULT - if the problem happens on reading the @ip address
1571 * EINVAL - if what is read at @ip is not what was expected
 1572 * EPERM - if the problem happens on writing to the @ip address
1573 */
1574void ftrace_bug(int failed, unsigned long ip)
1575{
1576        switch (failed) {
1577        case -EFAULT:
1578                FTRACE_WARN_ON_ONCE(1);
1579                pr_info("ftrace faulted on modifying ");
1580                print_ip_sym(ip);
1581                break;
1582        case -EINVAL:
1583                FTRACE_WARN_ON_ONCE(1);
1584                pr_info("ftrace failed to modify ");
1585                print_ip_sym(ip);
1586                print_ip_ins(" actual: ", (unsigned char *)ip);
1587                printk(KERN_CONT "\n");
1588                break;
1589        case -EPERM:
1590                FTRACE_WARN_ON_ONCE(1);
1591                pr_info("ftrace faulted on writing ");
1592                print_ip_sym(ip);
1593                break;
1594        default:
1595                FTRACE_WARN_ON_ONCE(1);
1596                pr_info("ftrace faulted on unknown error ");
1597                print_ip_sym(ip);
1598        }
1599}
1600
1601static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1602{
1603        unsigned long flag = 0UL;
1604
1605        /*
1606         * If we are updating calls:
1607         *
1608         *   If the record has a ref count, then we need to enable it
1609         *   because someone is using it.
1610         *
1611         *   Otherwise we make sure its disabled.
1612         *
1613         * If we are disabling calls, then disable all records that
1614         * are enabled.
1615         */
1616        if (enable && (rec->flags & ~FTRACE_FL_MASK))
1617                flag = FTRACE_FL_ENABLED;
1618
1619        /* If the state of this record hasn't changed, then do nothing */
1620        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1621                return FTRACE_UPDATE_IGNORE;
1622
1623        if (flag) {
1624                if (update)
1625                        rec->flags |= FTRACE_FL_ENABLED;
1626                return FTRACE_UPDATE_MAKE_CALL;
1627        }
1628
1629        if (update)
1630                rec->flags &= ~FTRACE_FL_ENABLED;
1631
1632        return FTRACE_UPDATE_MAKE_NOP;
1633}
1634
1635/**
1636 * ftrace_update_record, set a record that now is tracing or not
1637 * @rec: the record to update
1638 * @enable: set to 1 if the record is tracing, zero to force disable
1639 *
1640 * The records that represent all functions that can be traced need
1641 * to be updated when tracing has been enabled.
1642 */
1643int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1644{
1645        return ftrace_check_record(rec, enable, 1);
1646}
1647
1648/**
1649 * ftrace_test_record, check if the record has been enabled or not
1650 * @rec: the record to test
1651 * @enable: set to 1 to check if enabled, 0 if it is disabled
1652 *
1653 * The arch code may need to test if a record is already set to
1654 * tracing to determine how to modify the function code that it
1655 * represents.
1656 */
1657int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1658{
1659        return ftrace_check_record(rec, enable, 0);
1660}
1661
1662static int
1663__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1664{
1665        unsigned long ftrace_addr;
1666        int ret;
1667
1668        ftrace_addr = (unsigned long)FTRACE_ADDR;
1669
1670        ret = ftrace_update_record(rec, enable);
1671
1672        switch (ret) {
1673        case FTRACE_UPDATE_IGNORE:
1674                return 0;
1675
1676        case FTRACE_UPDATE_MAKE_CALL:
1677                return ftrace_make_call(rec, ftrace_addr);
1678
1679        case FTRACE_UPDATE_MAKE_NOP:
1680                return ftrace_make_nop(NULL, rec, ftrace_addr);
1681        }
1682
 1683        return -1; /* unknown ftrace bug */
1684}
1685
1686void __weak ftrace_replace_code(int enable)
1687{
1688        struct dyn_ftrace *rec;
1689        struct ftrace_page *pg;
1690        int failed;
1691
1692        if (unlikely(ftrace_disabled))
1693                return;
1694
1695        do_for_each_ftrace_rec(pg, rec) {
1696                failed = __ftrace_replace_code(rec, enable);
1697                if (failed) {
1698                        ftrace_bug(failed, rec->ip);
1699                        /* Stop processing */
1700                        return;
1701                }
1702        } while_for_each_ftrace_rec();
1703}
1704
1705struct ftrace_rec_iter {
1706        struct ftrace_page      *pg;
1707        int                     index;
1708};
1709
1710/**
1711 * ftrace_rec_iter_start, start up iterating over traced functions
1712 *
1713 * Returns an iterator handle that is used to iterate over all
1714 * the records that represent address locations where functions
1715 * are traced.
1716 *
1717 * May return NULL if no records are available.
1718 */
1719struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1720{
1721        /*
1722         * We only use a single iterator.
1723         * Protected by the ftrace_lock mutex.
1724         */
1725        static struct ftrace_rec_iter ftrace_rec_iter;
1726        struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1727
1728        iter->pg = ftrace_pages_start;
1729        iter->index = 0;
1730
1731        /* Could have empty pages */
1732        while (iter->pg && !iter->pg->index)
1733                iter->pg = iter->pg->next;
1734
1735        if (!iter->pg)
1736                return NULL;
1737
1738        return iter;
1739}
1740
1741/**
1742 * ftrace_rec_iter_next, get the next record to process.
1743 * @iter: The handle to the iterator.
1744 *
1745 * Returns the next iterator after the given iterator @iter.
1746 */
1747struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1748{
1749        iter->index++;
1750
1751        if (iter->index >= iter->pg->index) {
1752                iter->pg = iter->pg->next;
1753                iter->index = 0;
1754
1755                /* Could have empty pages */
1756                while (iter->pg && !iter->pg->index)
1757                        iter->pg = iter->pg->next;
1758        }
1759
1760        if (!iter->pg)
1761                return NULL;
1762
1763        return iter;
1764}
1765
1766/**
1767 * ftrace_rec_iter_record, get the record at the iterator location
1768 * @iter: The current iterator location
1769 *
1770 * Returns the record that the current @iter is at.
1771 */
1772struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1773{
1774        return &iter->pg->records[iter->index];
1775}
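/*
 * A minimal usage sketch for the three iterator helpers above, assuming
 * the caller holds ftrace_lock (the iterator is a single static object):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch the call site at rec->ip ...
 *	}
 */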
1776
1777static int
1778ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1779{
1780        unsigned long ip;
1781        int ret;
1782
1783        ip = rec->ip;
1784
1785        if (unlikely(ftrace_disabled))
1786                return 0;
1787
1788        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1789        if (ret) {
1790                ftrace_bug(ret, ip);
1791                return 0;
1792        }
1793        return 1;
1794}
1795
1796/*
1797 * archs can override this function if they must do something
1798 * before the code modification is performed.
1799 */
1800int __weak ftrace_arch_code_modify_prepare(void)
1801{
1802        return 0;
1803}
1804
1805/*
1806 * archs can override this function if they must do something
1807 * after the code modification is performed.
1808 */
1809int __weak ftrace_arch_code_modify_post_process(void)
1810{
1811        return 0;
1812}
1813
1814void ftrace_modify_all_code(int command)
1815{
1816        if (command & FTRACE_UPDATE_CALLS)
1817                ftrace_replace_code(1);
1818        else if (command & FTRACE_DISABLE_CALLS)
1819                ftrace_replace_code(0);
1820
1821        if (command & FTRACE_UPDATE_TRACE_FUNC)
1822                ftrace_update_ftrace_func(ftrace_trace_function);
1823
1824        if (command & FTRACE_START_FUNC_RET)
1825                ftrace_enable_ftrace_graph_caller();
1826        else if (command & FTRACE_STOP_FUNC_RET)
1827                ftrace_disable_ftrace_graph_caller();
1828}
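/*
 * As a sketch of how the command bits above combine: a caller that wants
 * to update the call sites and also switch the active trace callback
 * could pass both flags at once:
 *
 *	int command = FTRACE_UPDATE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
 *
 *	ftrace_modify_all_code(command);
 */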
1829
1830static int __ftrace_modify_code(void *data)
1831{
1832        int *command = data;
1833
1834        ftrace_modify_all_code(*command);
1835
1836        return 0;
1837}
1838
1839/**
1840 * ftrace_run_stop_machine, go back to the stop machine method
1841 * @command: The command to tell ftrace what to do
1842 *
1843 * If an arch needs to fall back to the stop machine method, then
1844 * it can call this function.
1845 */
1846void ftrace_run_stop_machine(int command)
1847{
1848        stop_machine(__ftrace_modify_code, &command, NULL);
1849}
1850
1851/**
1852 * arch_ftrace_update_code, modify the code to trace or not trace
1853 * @command: The command that needs to be done
1854 *
1855 * Archs can override this function if they do not need to
1856 * run stop_machine() to modify code.
1857 */
1858void __weak arch_ftrace_update_code(int command)
1859{
1860        ftrace_run_stop_machine(command);
1861}
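/*
 * A sketch of the override described above (hypothetical, not taken from
 * any particular arch): an architecture that can patch its text safely
 * while other CPUs run could avoid stop_machine() entirely with
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 */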
1862
1863static void ftrace_run_update_code(int command)
1864{
1865        int ret;
1866
1867        ret = ftrace_arch_code_modify_prepare();
1868        FTRACE_WARN_ON(ret);
1869        if (ret)
1870                return;
1871        /*
1872         * Do not call function tracer while we update the code.
1873         * We are in stop machine.
1874         */
1875        function_trace_stop++;
1876
1877        /*
1878         * By default we use stop_machine() to modify the code.
1879         * But archs can do whatever they want as long as it
1880         * is safe. The stop_machine() is the safest, but also
1881         * produces the most overhead.
1882         */
1883        arch_ftrace_update_code(command);
1884
1885#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1886        /*
1887         * For archs that call ftrace_test_stop_func(), we must
1888         * wait till after we update all the function callers
1889         * before we update the callback. This keeps different
1890         * ops that record different functions from corrupting
1891         * each other.
1892         */
1893        __ftrace_trace_function = __ftrace_trace_function_delay;
1894#endif
1895        function_trace_stop--;
1896
1897        ret = ftrace_arch_code_modify_post_process();
1898        FTRACE_WARN_ON(ret);
1899}
1900
1901static ftrace_func_t saved_ftrace_func;
1902static int ftrace_start_up;
1903static int global_start_up;
1904
1905static void ftrace_startup_enable(int command)
1906{
1907        if (saved_ftrace_func != ftrace_trace_function) {
1908                saved_ftrace_func = ftrace_trace_function;
1909                command |= FTRACE_UPDATE_TRACE_FUNC;
1910        }
1911
1912        if (!command || !ftrace_enabled)
1913                return;
1914
1915        ftrace_run_update_code(command);
1916}
1917
1918static int ftrace_startup(struct ftrace_ops *ops, int command)
1919{
1920        bool hash_enable = true;
1921
1922        if (unlikely(ftrace_disabled))
1923                return -ENODEV;
1924
1925        ftrace_start_up++;
1926        command |= FTRACE_UPDATE_CALLS;
1927
1928        /* ops marked global share the filter hashes */
1929        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1930                ops = &global_ops;
1931                /* Don't update hash if global is already set */
1932                if (global_start_up)
1933                        hash_enable = false;
1934                global_start_up++;
1935        }
1936
1937        ops->flags |= FTRACE_OPS_FL_ENABLED;
1938        if (hash_enable)
1939                ftrace_hash_rec_enable(ops, 1);
1940
1941        ftrace_startup_enable(command);
1942
1943        return 0;
1944}
1945
1946static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1947{
1948        bool hash_disable = true;
1949
1950        if (unlikely(ftrace_disabled))
1951                return;
1952
1953        ftrace_start_up--;
1954        /*
1955         * Just warn in case of an imbalance; no need to kill ftrace, it's not
1956         * critical, but the ftrace_call callers may never be nopped again after
1957         * further ftrace uses.
1958         */
1959        WARN_ON_ONCE(ftrace_start_up < 0);
1960
1961        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1962                ops = &global_ops;
1963                global_start_up--;
1964                WARN_ON_ONCE(global_start_up < 0);
1965                /* Don't update hash if global still has users */
1966                if (global_start_up) {
1967                        WARN_ON_ONCE(!ftrace_start_up);
1968                        hash_disable = false;
1969                }
1970        }
1971
1972        if (hash_disable)
1973                ftrace_hash_rec_disable(ops, 1);
1974
1975        if (ops != &global_ops || !global_start_up)
1976                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1977
1978        command |= FTRACE_UPDATE_CALLS;
1979
1980        if (saved_ftrace_func != ftrace_trace_function) {
1981                saved_ftrace_func = ftrace_trace_function;
1982                command |= FTRACE_UPDATE_TRACE_FUNC;
1983        }
1984
1985        if (!command || !ftrace_enabled)
1986                return;
1987
1988        ftrace_run_update_code(command);
1989}
1990
1991static void ftrace_startup_sysctl(void)
1992{
1993        if (unlikely(ftrace_disabled))
1994                return;
1995
1996        /* Force update next time */
1997        saved_ftrace_func = NULL;
1998        /* ftrace_start_up is true if we want ftrace running */
1999        if (ftrace_start_up)
2000                ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2001}
2002
2003static void ftrace_shutdown_sysctl(void)
2004{
2005        if (unlikely(ftrace_disabled))
2006                return;
2007
2008        /* ftrace_start_up is true if ftrace is running */
2009        if (ftrace_start_up)
2010                ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2011}
2012
2013static cycle_t          ftrace_update_time;
2014static unsigned long    ftrace_update_cnt;
2015unsigned long           ftrace_update_tot_cnt;
2016
2017static int ops_traces_mod(struct ftrace_ops *ops)
2018{
2019        struct ftrace_hash *hash;
2020
2021        hash = ops->filter_hash;
2022        return ftrace_hash_empty(hash);
2023}
2024
2025static int ftrace_update_code(struct module *mod)
2026{
2027        struct ftrace_page *pg;
2028        struct dyn_ftrace *p;
2029        cycle_t start, stop;
2030        unsigned long ref = 0;
2031        int i;
2032
2033        /*
2034         * When adding a module, we need to check if tracers are
2035         * currently enabled and if they are set to trace all functions.
2036         * If they are, we need to enable the module functions as well
2037         * as update the reference counts for those function records.
2038         */
2039        if (mod) {
2040                struct ftrace_ops *ops;
2041
2042                for (ops = ftrace_ops_list;
2043                     ops != &ftrace_list_end; ops = ops->next) {
2044                        if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2045                            ops_traces_mod(ops))
2046                                ref++;
2047                }
2048        }
2049
2050        start = ftrace_now(raw_smp_processor_id());
2051        ftrace_update_cnt = 0;
2052
2053        for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2054
2055                for (i = 0; i < pg->index; i++) {
2056                        /* If something went wrong, bail without enabling anything */
2057                        if (unlikely(ftrace_disabled))
2058                                return -1;
2059
2060                        p = &pg->records[i];
2061                        p->flags = ref;
2062
2063                        /*
2064                         * Do the initial record conversion from mcount jump
2065                         * to the NOP instructions.
2066                         */
2067                        if (!ftrace_code_disable(mod, p))
2068                                break;
2069
2070                        ftrace_update_cnt++;
2071
2072                        /*
2073                         * If the tracing is enabled, go ahead and enable the record.
2074                         *
2075                         * The reason not to enable the record immediately is the
2076                         * inherent check of ftrace_make_nop/ftrace_make_call for
2077                         * correct previous instructions.  Doing the NOP conversion
2078                         * first puts the module into the correct state, thus
2079                         * passing the ftrace_make_call check.
2080                         */
2081                        if (ftrace_start_up && ref) {
2082                                int failed = __ftrace_replace_code(p, 1);
2083                                if (failed)
2084                                        ftrace_bug(failed, p->ip);
2085                        }
2086                }
2087        }
2088
2089        ftrace_new_pgs = NULL;
2090
2091        stop = ftrace_now(raw_smp_processor_id());
2092        ftrace_update_time = stop - start;
2093        ftrace_update_tot_cnt += ftrace_update_cnt;
2094
2095        return 0;
2096}
2097
2098static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2099{
2100        int order;
2101        int cnt;
2102
2103        if (WARN_ON(!count))
2104                return -EINVAL;
2105
2106        order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2107
2108        /*
2109         * We want to fill the allocation as much as possible. No more
2110         * than one page's worth of entries may be left empty.
2111         */
2112        while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2113                order--;
2114
2115 again:
2116        pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2117
2118        if (!pg->records) {
2119                /* if we can't allocate this size, try something smaller */
2120                if (!order)
2121                        return -ENOMEM;
2122                order >>= 1;
2123                goto again;
2124        }
2125
2126        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2127        pg->size = cnt;
2128
2129        if (cnt > count)
2130                cnt = count;
2131
2132        return cnt;
2133}
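/*
 * A worked example of the sizing above, assuming 4 KiB pages and 16-byte
 * dyn_ftrace records (so ENTRIES_PER_PAGE == 256):
 *
 *	count = 1000
 *	DIV_ROUND_UP(1000, 256) = 4  ->  order = get_count_order(4) = 2
 *	(PAGE_SIZE << 2) / ENTRY_SIZE = 1024 entries
 *	1024 < 1000 + 256, so the order is not reduced further
 *
 * The function then sets pg->size = 1024 and returns 1000, leaving less
 * than one page's worth of unused entries.
 */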
2134
2135static struct ftrace_page *
2136ftrace_allocate_pages(unsigned long num_to_init)
2137{
2138        struct ftrace_page *start_pg;
2139        struct ftrace_page *pg;
2140        int order;
2141        int cnt;
2142
2143        if (!num_to_init)
2144                return 0;
2145
2146        start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2147        if (!pg)
2148                return NULL;
2149
2150        /*
2151         * Try to allocate as much as possible in one contiguous
2152         * location that fills in all of the space. We want to
2153         * waste as little space as possible.
2154         */
2155        for (;;) {
2156                cnt = ftrace_allocate_records(pg, num_to_init);
2157                if (cnt < 0)
2158                        goto free_pages;
2159
2160                num_to_init -= cnt;
2161                if (!num_to_init)
2162                        break;
2163
2164                pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2165                if (!pg->next)
2166                        goto free_pages;
2167
2168                pg = pg->next;
2169        }
2170
2171        return start_pg;
2172
2173 free_pages:
2174        while (start_pg) {
2175                order = get_count_order(start_pg->size / ENTRIES_PER_PAGE);
2176                free_pages((unsigned long)start_pg->records, order);
2177                pg = start_pg->next;
2178                kfree(start_pg);
2179                start_pg = pg;
2180        }
2181        pr_info("ftrace: FAILED to allocate memory for functions\n");
2182        return NULL;
2183}
2184
2185static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2186{
2187        int cnt;
2188
2189        if (!num_to_init) {
2190                pr_info("ftrace: No functions to be traced?\n");
2191                return -1;
2192        }
2193
2194        cnt = num_to_init / ENTRIES_PER_PAGE;
2195        pr_info("ftrace: allocating %ld entries in %d pages\n",
2196                num_to_init, cnt + 1);
2197
2198        return 0;
2199}
2200
2201#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2202
2203struct ftrace_iterator {
2204        loff_t                          pos;
2205        loff_t                          func_pos;
2206        struct ftrace_page              *pg;
2207        struct dyn_ftrace               *func;
2208        struct ftrace_func_probe        *probe;
2209        struct trace_parser             parser;
2210        struct ftrace_hash              *hash;
2211        struct ftrace_ops               *ops;
2212        int                             hidx;
2213        int                             idx;
2214        unsigned                        flags;
2215};
2216
2217static void *
2218t_hash_next(struct seq_file *m, loff_t *pos)
2219{
2220        struct ftrace_iterator *iter = m->private;
2221        struct hlist_node *hnd = NULL;
2222        struct hlist_head *hhd;
2223
2224        (*pos)++;
2225        iter->pos = *pos;
2226
2227        if (iter->probe)
2228                hnd = &iter->probe->node;
2229 retry:
2230        if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2231                return NULL;
2232
2233        hhd = &ftrace_func_hash[iter->hidx];
2234
2235        if (hlist_empty(hhd)) {
2236                iter->hidx++;
2237                hnd = NULL;
2238                goto retry;
2239        }
2240
2241        if (!hnd)
2242                hnd = hhd->first;
2243        else {
2244                hnd = hnd->next;
2245                if (!hnd) {
2246                        iter->hidx++;
2247                        goto retry;
2248                }
2249        }
2250
2251        if (WARN_ON_ONCE(!hnd))
2252                return NULL;
2253
2254        iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2255
2256        return iter;
2257}
2258
2259static void *t_hash_start(struct seq_file *m, loff_t *pos)
2260{
2261        struct ftrace_iterator *iter = m->private;
2262        void *p = NULL;
2263        loff_t l;
2264
2265        if (!(iter->flags & FTRACE_ITER_DO_HASH))
2266                return NULL;
2267
2268        if (iter->func_pos > *pos)
2269                return NULL;
2270
2271        iter->hidx = 0;
2272        for (l = 0; l <= (*pos - iter->func_pos); ) {
2273                p = t_hash_next(m, &l);
2274                if (!p)
2275                        break;
2276        }
2277        if (!p)
2278                return NULL;
2279
2280        /* Only set this if we have an item */
2281        iter->flags |= FTRACE_ITER_HASH;
2282
2283        return iter;
2284}
2285
2286static int
2287t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2288{
2289        struct ftrace_func_probe *rec;
2290
2291        rec = iter->probe;
2292        if (WARN_ON_ONCE(!rec))
2293                return -EIO;
2294
2295        if (rec->ops->print)
2296                return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2297
2298        seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2299
2300        if (rec->data)
2301                seq_printf(m, ":%p", rec->data);
2302        seq_putc(m, '\n');
2303
2304        return 0;
2305}
2306
2307static void *
2308t_next(struct seq_file *m, void *v, loff_t *pos)
2309{
2310        struct ftrace_iterator *iter = m->private;
2311        struct ftrace_ops *ops = iter->ops;
2312        struct dyn_ftrace *rec = NULL;
2313
2314        if (unlikely(ftrace_disabled))
2315                return NULL;
2316
2317        if (iter->flags & FTRACE_ITER_HASH)
2318                return t_hash_next(m, pos);
2319
2320        (*pos)++;
2321        iter->pos = iter->func_pos = *pos;
2322
2323        if (iter->flags & FTRACE_ITER_PRINTALL)
2324                return t_hash_start(m, pos);
2325
2326 retry:
2327        if (iter->idx >= iter->pg->index) {
2328                if (iter->pg->next) {
2329                        iter->pg = iter->pg->next;
2330                        iter->idx = 0;
2331                        goto retry;
2332                }
2333        } else {
2334                rec = &iter->pg->records[iter->idx++];
2335                if (((iter->flags & FTRACE_ITER_FILTER) &&
2336                     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2337
2338                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
2339                     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2340
2341                    ((iter->flags & FTRACE_ITER_ENABLED) &&
2342                     !(rec->flags & ~FTRACE_FL_MASK))) {
2343
2344                        rec = NULL;
2345                        goto retry;
2346                }
2347        }
2348
2349        if (!rec)
2350                return t_hash_start(m, pos);
2351
2352        iter->func = rec;
2353
2354        return iter;
2355}
2356
2357static void reset_iter_read(struct ftrace_iterator *iter)
2358{
2359        iter->pos = 0;
2360        iter->func_pos = 0;
2361        iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2362}
2363
2364static void *t_start(struct seq_file *m, loff_t *pos)
2365{
2366        struct ftrace_iterator *iter = m->private;
2367        struct ftrace_ops *ops = iter->ops;
2368        void *p = NULL;
2369        loff_t l;
2370
2371        mutex_lock(&ftrace_lock);
2372
2373        if (unlikely(ftrace_disabled))
2374                return NULL;
2375
2376        /*
2377         * If an lseek was done, then reset and start from beginning.
2378         */
2379        if (*pos < iter->pos)
2380                reset_iter_read(iter);
2381
2382        /*
2383         * For set_ftrace_filter reading, if we have the filter
2384         * off, we can short cut and just print out that all
2385         * functions are enabled.
2386         */
2387        if (iter->flags & FTRACE_ITER_FILTER &&
2388            ftrace_hash_empty(ops->filter_hash)) {
2389                if (*pos > 0)
2390                        return t_hash_start(m, pos);
2391                iter->flags |= FTRACE_ITER_PRINTALL;
2392                /* reset in case of seek/pread */
2393                iter->flags &= ~FTRACE_ITER_HASH;
2394                return iter;
2395        }
2396
2397        if (iter->flags & FTRACE_ITER_HASH)
2398                return t_hash_start(m, pos);
2399
2400        /*
2401         * Unfortunately, we need to restart at ftrace_pages_start
2402         * every time we let go of the ftrace_lock mutex. This is because
2403         * those pointers can change without the lock.
2404         */
2405        iter->pg = ftrace_pages_start;
2406        iter->idx = 0;
2407        for (l = 0; l <= *pos; ) {
2408                p = t_next(m, p, &l);
2409                if (!p)
2410                        break;
2411        }
2412
2413        if (!p)
2414                return t_hash_start(m, pos);
2415
2416        return iter;
2417}
2418
2419static void t_stop(struct seq_file *m, void *p)
2420{
2421        mutex_unlock(&ftrace_lock);
2422}
2423
2424static int t_show(struct seq_file *m, void *v)
2425{
2426        struct ftrace_iterator *iter = m->private;
2427        struct dyn_ftrace *rec;
2428
2429        if (iter->flags & FTRACE_ITER_HASH)
2430                return t_hash_show(m, iter);
2431
2432        if (iter->flags & FTRACE_ITER_PRINTALL) {
2433                seq_printf(m, "#### all functions enabled ####\n");
2434                return 0;
2435        }
2436
2437        rec = iter->func;
2438
2439        if (!rec)
2440                return 0;
2441
2442        seq_printf(m, "%ps", (void *)rec->ip);
2443        if (iter->flags & FTRACE_ITER_ENABLED)
2444                seq_printf(m, " (%ld)",
2445                           rec->flags & ~FTRACE_FL_MASK);
2446        seq_printf(m, "\n");
2447
2448        return 0;
2449}
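/*
 * For reference, a line emitted by t_show() looks roughly like this
 * (the function name is only illustrative):
 *
 *	schedule
 *
 * and, with FTRACE_ITER_ENABLED set (the enabled_functions file), the
 * record's reference count is appended:
 *
 *	schedule (1)
 */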
2450
2451static const struct seq_operations show_ftrace_seq_ops = {
2452        .start = t_start,
2453        .next = t_next,
2454        .stop = t_stop,
2455        .show = t_show,
2456};
2457
2458static int
2459ftrace_avail_open(struct inode *inode, struct file *file)
2460{
2461        struct ftrace_iterator *iter;
2462
2463        if (unlikely(ftrace_disabled))
2464                return -ENODEV;
2465
2466        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2467        if (iter) {
2468                iter->pg = ftrace_pages_start;
2469                iter->ops = &global_ops;
2470        }
2471
2472        return iter ? 0 : -ENOMEM;
2473}
2474
2475static int
2476ftrace_enabled_open(struct inode *inode, struct file *file)
2477{
2478        struct ftrace_iterator *iter;
2479
2480        if (unlikely(ftrace_disabled))
2481                return -ENODEV;
2482
2483        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2484        if (iter) {
2485                iter->pg = ftrace_pages_start;
2486                iter->flags = FTRACE_ITER_ENABLED;
2487                iter->ops = &global_ops;
2488        }
2489
2490        return iter ? 0 : -ENOMEM;
2491}
2492
2493static void ftrace_filter_reset(struct ftrace_hash *hash)
2494{
2495        mutex_lock(&ftrace_lock);
2496        ftrace_hash_clear(hash);
2497        mutex_unlock(&ftrace_lock);
2498}
2499
2500/**
2501 * ftrace_regex_open - initialize function tracer filter files
2502 * @ops: The ftrace_ops that hold the hash filters
2503 * @flag: The type of filter to process
2504 * @inode: The inode, usually passed in to your open routine
2505 * @file: The file, usually passed in to your open routine
2506 *
2507 * ftrace_regex_open() initializes the filter files for the
2508 * @ops. Depending on @flag it may process the filter hash or
2509 * the notrace hash of @ops. With this called from the open
2510 * routine, you can use ftrace_filter_write() for the write
2511 * routine if @flag has FTRACE_ITER_FILTER set, or
2512 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2513 * ftrace_regex_lseek() should be used as the lseek routine, and
2514 * release must call ftrace_regex_release().
2515 */
2516int
2517ftrace_regex_open(struct ftrace_ops *ops, int flag,
2518                  struct inode *inode, struct file *file)
2519{
2520        struct ftrace_iterator *iter;
2521        struct ftrace_hash *hash;
2522        int ret = 0;
2523
2524        if (unlikely(ftrace_disabled))
2525                return -ENODEV;
2526
2527        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2528        if (!iter)
2529                return -ENOMEM;
2530
2531        if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2532                kfree(iter);
2533                return -ENOMEM;
2534        }
2535
2536        if (flag & FTRACE_ITER_NOTRACE)
2537                hash = ops->notrace_hash;
2538        else
2539                hash = ops->filter_hash;
2540
2541        iter->ops = ops;
2542        iter->flags = flag;
2543
2544        if (file->f_mode & FMODE_WRITE) {
2545                mutex_lock(&ftrace_lock);
2546                iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2547                mutex_unlock(&ftrace_lock);
2548
2549                if (!iter->hash) {
2550                        trace_parser_put(&iter->parser);
2551                        kfree(iter);
2552                        return -ENOMEM;
2553                }
2554        }
2555
2556        mutex_lock(&ftrace_regex_lock);
2557
2558        if ((file->f_mode & FMODE_WRITE) &&
2559            (file->f_flags & O_TRUNC))
2560                ftrace_filter_reset(iter->hash);
2561
2562        if (file->f_mode & FMODE_READ) {
2563                iter->pg = ftrace_pages_start;
2564
2565                ret = seq_open(file, &show_ftrace_seq_ops);
2566                if (!ret) {
2567                        struct seq_file *m = file->private_data;
2568                        m->private = iter;
2569                } else {
2570                        /* Failed */
2571                        free_ftrace_hash(iter->hash);
2572                        trace_parser_put(&iter->parser);
2573                        kfree(iter);
2574                }
2575        } else
2576                file->private_data = iter;
2577        mutex_unlock(&ftrace_regex_lock);
2578
2579        return ret;
2580}
2581
2582static int
2583ftrace_filter_open(struct inode *inode, struct file *file)
2584{
2585        return ftrace_regex_open(&global_ops,
2586                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2587                        inode, file);
2588}
2589
2590static int
2591ftrace_notrace_open(struct inode *inode, struct file *file)
2592{
2593        return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2594                                 inode, file);
2595}
2596
2597loff_t
2598ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2599{
2600        loff_t ret;
2601
2602        if (file->f_mode & FMODE_READ)
2603                ret = seq_lseek(file, offset, origin);
2604        else
2605                file->f_pos = ret = 1;
2606
2607        return ret;
2608}
2609
2610static int ftrace_match(char *str, char *regex, int len, int type)
2611{
2612        int matched = 0;
2613        int slen;
2614
2615        switch (type) {
2616        case MATCH_FULL:
2617                if (strcmp(str, regex) == 0)
2618                        matched = 1;
2619                break;
2620        case MATCH_FRONT_ONLY:
2621                if (strncmp(str, regex, len) == 0)
2622                        matched = 1;
2623                break;
2624        case MATCH_MIDDLE_ONLY:
2625                if (strstr(str, regex))
2626                        matched = 1;
2627                break;
2628        case MATCH_END_ONLY:
2629                slen = strlen(str);
2630                if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2631                        matched = 1;
2632                break;
2633        }
2634
2635        return matched;
2636}
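/*
 * To illustrate how the match types above line up with the globs a user
 * writes (a sketch; the glob is decoded by filter_parse_regex()):
 *
 *	"schedule"   -> MATCH_FULL         strcmp() of the whole name
 *	"sched_*"    -> MATCH_FRONT_ONLY   strncmp() of the first len chars
 *	"*switch*"   -> MATCH_MIDDLE_ONLY  strstr() anywhere in the name
 *	"*_idle"     -> MATCH_END_ONLY     compare the trailing len chars
 */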
2637
2638static int
2639enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2640{
2641        struct ftrace_func_entry *entry;
2642        int ret = 0;
2643
2644        entry = ftrace_lookup_ip(hash, rec->ip);
2645        if (not) {
2646                /* Do nothing if it doesn't exist */
2647                if (!entry)
2648                        return 0;
2649
2650                free_hash_entry(hash, entry);
2651        } else {
2652                /* Do nothing if it exists */
2653                if (entry)
2654                        return 0;
2655
2656                ret = add_hash_entry(hash, rec->ip);
2657        }
2658        return ret;
2659}
2660
2661static int
2662ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2663                    char *regex, int len, int type)
2664{
2665        char str[KSYM_SYMBOL_LEN];
2666        char *modname;
2667
2668        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2669
2670        if (mod) {
2671                /* module lookup requires matching the module */
2672                if (!modname || strcmp(modname, mod))
2673                        return 0;
2674
2675                /* blank search means to match all funcs in the mod */
2676                if (!len)
2677                        return 1;
2678        }
2679
2680        return ftrace_match(str, regex, len, type);
2681}
2682
2683static int
2684match_records(struct ftrace_hash *hash, char *buff,
2685              int len, char *mod, int not)
2686{
2687        unsigned search_len = 0;
2688        struct ftrace_page *pg;
2689        struct dyn_ftrace *rec;
2690        int type = MATCH_FULL;
2691        char *search = buff;
2692        int found = 0;
2693        int ret;
2694
2695        if (len) {
2696                type = filter_parse_regex(buff, len, &search, &not);
2697                search_len = strlen(search);
2698        }
2699
2700        mutex_lock(&ftrace_lock);
2701
2702        if (unlikely(ftrace_disabled))
2703                goto out_unlock;
2704
2705        do_for_each_ftrace_rec(pg, rec) {
2706                if (ftrace_match_record(rec, mod, search, search_len, type)) {
2707                        ret = enter_record(hash, rec, not);
2708                        if (ret < 0) {
2709                                found = ret;
2710                                goto out_unlock;
2711                        }
2712                        found = 1;
2713                }
2714        } while_for_each_ftrace_rec();
2715 out_unlock:
2716        mutex_unlock(&ftrace_lock);
2717
2718        return found;
2719}
2720
2721static int
2722ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2723{
2724        return match_records(hash, buff, len, NULL, 0);
2725}
2726
2727static int
2728ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2729{
2730        int not = 0;
2731
2732        /* blank or '*' mean the same */
2733        if (strcmp(buff, "*") == 0)
2734                buff[0] = 0;
2735
2736        /* handle the case of 'don't filter this module' */
2737        if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2738                buff[0] = 0;
2739                not = 1;
2740        }
2741
2742        return match_records(hash, buff, strlen(buff), mod, not);
2743}
2744
2745/*
2746 * We register the module command as a template to show others how
2747 * to register a command as well.
2748 */
2749
2750static int
2751ftrace_mod_callback(struct ftrace_hash *hash,
2752                    char *func, char *cmd, char *param, int enable)
2753{
2754        char *mod;
2755        int ret = -EINVAL;
2756
2757        /*
2758         * cmd == 'mod' because we only registered this func
2759         * for the 'mod' ftrace_func_command.
2760         * But if you register one func with multiple commands,
2761         * you can tell which command was used by the cmd
2762         * parameter.
2763         */
2764
2765        /* we must have a module name */
2766        if (!param)
2767                return ret;
2768
2769        mod = strsep(&param, ":");
2770        if (!strlen(mod))
2771                return ret;
2772
2773        ret = ftrace_match_module_records(hash, func, mod);
2774        if (!ret)
2775                ret = -EINVAL;
2776        if (ret < 0)
2777                return ret;
2778
2779        return 0;
2780}
2781
2782static struct ftrace_func_command ftrace_mod_cmd = {
2783        .name                   = "mod",
2784        .func                   = ftrace_mod_callback,
2785};
2786
2787static int __init ftrace_mod_cmd_init(void)
2788{
2789        return register_ftrace_command(&ftrace_mod_cmd);
2790}
2791device_initcall(ftrace_mod_cmd_init);
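/*
 * With the command registered, filter strings of the form
 * "<function>:mod:<module>" are accepted, for example (the module name
 * is only illustrative):
 *
 *	# echo '*:mod:ext4' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * which reaches ftrace_mod_callback() with func == "*", cmd == "mod" and
 * param == "ext4", selecting every traceable function in that module.
 */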
2792
2793static void
2794function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2795{
2796        struct ftrace_func_probe *entry;
2797        struct hlist_head *hhd;
2798        struct hlist_node *n;
2799        unsigned long key;
2800
2801        key = hash_long(ip, FTRACE_HASH_BITS);
2802
2803        hhd = &ftrace_func_hash[key];
2804
2805        if (hlist_empty(hhd))
2806                return;
2807
2808        /*
2809         * Disable preemption for these calls to prevent an RCU grace
2810         * period. This syncs the hash iteration and freeing of items
2811         * on the hash. rcu_read_lock is too dangerous here.
2812         */
2813        preempt_disable_notrace();
2814        hlist_for_each_entry_rcu(entry, n, hhd, node) {
2815                if (entry->ip == ip)
2816                        entry->ops->func(ip, parent_ip, &entry->data);
2817        }
2818        preempt_enable_notrace();
2819}
2820
2821static struct ftrace_ops trace_probe_ops __read_mostly =
2822{
2823        .func           = function_trace_probe_call,
2824};
2825
2826static int ftrace_probe_registered;
2827
2828static void __enable_ftrace_function_probe(void)
2829{
2830        int ret;
2831        int i;
2832
2833        if (ftrace_probe_registered)
2834                return;
2835
2836        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2837                struct hlist_head *hhd = &ftrace_func_hash[i];
2838                if (hhd->first)
2839                        break;
2840        }
2841        /* Nothing registered? */
2842        if (i == FTRACE_FUNC_HASHSIZE)
2843                return;
2844
2845        ret = __register_ftrace_function(&trace_probe_ops);
2846        if (!ret)
2847                ret = ftrace_startup(&trace_probe_ops, 0);
2848
2849        ftrace_probe_registered = 1;
2850}
2851
2852static void __disable_ftrace_function_probe(void)
2853{
2854        int ret;
2855        int i;
2856
2857        if (!ftrace_probe_registered)
2858                return;
2859
2860        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2861                struct hlist_head *hhd = &ftrace_func_hash[i];
2862                if (hhd->first)
2863                        return;
2864        }
2865
2866        /* no more funcs left */
2867        ret = __unregister_ftrace_function(&trace_probe_ops);
2868        if (!ret)
2869                ftrace_shutdown(&trace_probe_ops, 0);
2870
2871        ftrace_probe_registered = 0;
2872}
2873
2874
2875static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2876{
2877        struct ftrace_func_probe *entry =
2878                container_of(rhp, struct ftrace_func_probe, rcu);
2879
2880        if (entry->ops->free)
2881                entry->ops->free(&entry->data);
2882        kfree(entry);
2883}
2884
2885
2886int
2887register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2888                              void *data)
2889{
2890        struct ftrace_func_probe *entry;
2891        struct ftrace_page *pg;
2892        struct dyn_ftrace *rec;
2893        int type, len, not;
2894        unsigned long key;
2895        int count = 0;
2896        char *search;
2897
2898        type = filter_parse_regex(glob, strlen(glob), &search, &not);
2899        len = strlen(search);
2900
2901        /* we do not support '!' for function probes */
2902        if (WARN_ON(not))
2903                return -EINVAL;
2904
2905        mutex_lock(&ftrace_lock);
2906
2907        if (unlikely(ftrace_disabled))
2908                goto out_unlock;
2909
2910        do_for_each_ftrace_rec(pg, rec) {
2911
2912                if (!ftrace_match_record(rec, NULL, search, len, type))
2913                        continue;
2914
2915                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2916                if (!entry) {
2917                        /* If we did not process any, then return error */
2918                        if (!count)
2919                                count = -ENOMEM;
2920                        goto out_unlock;
2921                }
2922
2923                count++;
2924
2925                entry->data = data;
2926
2927                /*
2928                 * The caller might want to do something special
2929                 * for each function we find. We call the callback
2930                 * to give the caller an opportunity to do so.
2931                 */
2932                if (ops->callback) {
2933                        if (ops->callback(rec->ip, &entry->data) < 0) {
2934                                /* caller does not like this func */
2935                                kfree(entry);
2936                                continue;
2937                        }
2938                }
2939
2940                entry->ops = ops;
2941                entry->ip = rec->ip;
2942
2943                key = hash_long(entry->ip, FTRACE_HASH_BITS);
2944                hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2945
2946        } while_for_each_ftrace_rec();
2947        __enable_ftrace_function_probe();
2948
2949 out_unlock:
2950        mutex_unlock(&ftrace_lock);
2951
2952        return count;
2953}
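/*
 * A condensed sketch of how a probe is typically wired up (the names
 * my_probe_func and my_probe_ops are hypothetical):
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		... called every time a matched function is hit ...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *
 * The return value is the number of functions that matched the glob, or
 * a negative errno on failure.
 */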
2954
2955enum {
2956        PROBE_TEST_FUNC         = 1,
2957        PROBE_TEST_DATA         = 2
2958};
2959
2960static void
2961__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2962                                  void *data, int flags)
2963{
2964        struct ftrace_func_probe *entry;
2965        struct hlist_node *n, *tmp;
2966        char str[KSYM_SYMBOL_LEN];
2967        int type = MATCH_FULL;
2968        int i, len = 0;
2969        char *search;
2970
2971        if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2972                glob = NULL;
2973        else if (glob) {
2974                int not;
2975
2976                type = filter_parse_regex(glob, strlen(glob), &search, &not);
2977                len = strlen(search);
2978
2979                /* we do not support '!' for function probes */
2980                if (WARN_ON(not))
2981                        return;
2982        }
2983
2984        mutex_lock(&ftrace_lock);
2985        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2986                struct hlist_head *hhd = &ftrace_func_hash[i];
2987
2988                hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2989
2990                        /* break up if statements for readability */
2991                        if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2992                                continue;
2993
2994                        if ((flags & PROBE_TEST_DATA) && entry->data != data)
2995                                continue;
2996
2997                        /* do this last, since it is the most expensive */
2998                        if (glob) {
2999                                kallsyms_lookup(entry->ip, NULL, NULL,
3000                                                NULL, str);
3001                                if (!ftrace_match(str, glob, len, type))
3002                                        continue;
3003                        }
3004
3005                        hlist_del(&entry->node);
3006                        call_rcu(&entry->rcu, ftrace_free_entry_rcu);
3007                }
3008        }
3009        __disable_ftrace_function_probe();
3010        mutex_unlock(&ftrace_lock);
3011}
3012
3013void
3014unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3015                                void *data)
3016{
3017        __unregister_ftrace_function_probe(glob, ops, data,
3018                                          PROBE_TEST_FUNC | PROBE_TEST_DATA);
3019}
3020
3021void
3022unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3023{
3024        __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3025}
3026
3027void unregister_ftrace_function_probe_all(char *glob)
3028{
3029        __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3030}
3031
3032static LIST_HEAD(ftrace_commands);
3033static DEFINE_MUTEX(ftrace_cmd_mutex);
3034
3035int register_ftrace_command(struct ftrace_func_command *cmd)
3036{
3037        struct ftrace_func_command *p;
3038        int ret = 0;
3039
3040        mutex_lock(&ftrace_cmd_mutex);
3041        list_for_each_entry(p, &ftrace_commands, list) {
3042                if (strcmp(cmd->name, p->name) == 0) {
3043                        ret = -EBUSY;
3044                        goto out_unlock;
3045                }
3046        }
3047        list_add(&cmd->list, &ftrace_commands);
3048 out_unlock:
3049        mutex_unlock(&ftrace_cmd_mutex);
3050
3051        return ret;
3052}
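/*
 * Following the "mod" template above, a new command is added by filling
 * in an ftrace_func_command and registering it (the names my_cmd and
 * my_cmd_callback are hypothetical):
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 *
 * A write of "<function>:mycmd:<param>" to set_ftrace_filter is then
 * routed to my_cmd_callback() by ftrace_process_regex() below.
 */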
3053
3054int unregister_ftrace_command(struct ftrace_func_command *cmd)
3055{
3056        struct ftrace_func_command *p, *n;
3057        int ret = -ENODEV;
3058
3059        mutex_lock(&ftrace_cmd_mutex);
3060        list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3061                if (strcmp(cmd->name, p->name) == 0) {
3062                        ret = 0;
3063                        list_del_init(&p->list);
3064                        goto out_unlock;
3065                }
3066        }
3067 out_unlock:
3068        mutex_unlock(&ftrace_cmd_mutex);
3069
3070        return ret;
3071}
3072
3073static int ftrace_process_regex(struct ftrace_hash *hash,
3074                                char *buff, int len, int enable)
3075{
3076        char *func, *command, *next = buff;
3077        struct ftrace_func_command *p;
3078        int ret = -EINVAL;
3079
3080        func = strsep(&next, ":");
3081
3082        if (!next) {
3083                ret = ftrace_match_records(hash, func, len);
3084                if (!ret)
3085                        ret = -EINVAL;
3086                if (ret < 0)
3087                        return ret;
3088                return 0;
3089        }
3090
3091        /* command found */
3092
3093        command = strsep(&next, ":");
3094
3095        mutex_lock(&ftrace_cmd_mutex);
3096        list_for_each_entry(p, &ftrace_commands, list) {
3097                if (strcmp(p->name, command) == 0) {
3098                        ret = p->func(hash, func, command, next, enable);
3099                        goto out_unlock;
3100                }
3101        }
3102 out_unlock:
3103        mutex_unlock(&ftrace_cmd_mutex);
3104
3105        return ret;
3106}
3107
3108static ssize_t
3109ftrace_regex_write(struct file *file, const char __user *ubuf,
3110                   size_t cnt, loff_t *ppos, int enable)
3111{
3112        struct ftrace_iterator *iter;
3113        struct trace_parser *parser;
3114        ssize_t ret, read;
3115
3116        if (!cnt)
3117                return 0;
3118
3119        mutex_lock(&ftrace_regex_lock);
3120
3121        ret = -ENODEV;
3122        if (unlikely(ftrace_disabled))
3123                goto out_unlock;
3124
3125        if (file->f_mode & FMODE_READ) {
3126                struct seq_file *m = file->private_data;
3127                iter = m->private;
3128        } else
3129                iter = file->private_data;
3130
3131        parser = &iter->parser;
3132        read = trace_get_user(parser, ubuf, cnt, ppos);
3133
3134        if (read >= 0 && trace_parser_loaded(parser) &&
3135            !trace_parser_cont(parser)) {
3136                ret = ftrace_process_regex(iter->hash, parser->buffer,
3137                                           parser->idx, enable);
3138                trace_parser_clear(parser);
3139                if (ret)
3140                        goto out_unlock;
3141        }
3142
3143        ret = read;
3144out_unlock:
3145        mutex_unlock(&ftrace_regex_lock);
3146
3147        return ret;
3148}
3149
3150ssize_t
3151ftrace_filter_write(struct file *file, const char __user *ubuf,
3152                    size_t cnt, loff_t *ppos)
3153{
3154        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3155}
3156
3157ssize_t
3158ftrace_notrace_write(struct file *file, const char __user *ubuf,
3159                     size_t cnt, loff_t *ppos)
3160{
3161        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3162}
3163
3164static int
3165ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3166                 int reset, int enable)
3167{
3168        struct ftrace_hash **orig_hash;
3169        struct ftrace_hash *hash;
3170        int ret;
3171
3172        /* All global ops use the global ops filters */
3173        if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3174                ops = &global_ops;
3175
3176        if (unlikely(ftrace_disabled))
3177                return -ENODEV;
3178
3179        if (enable)
3180                orig_hash = &ops->filter_hash;
3181        else
3182                orig_hash = &ops->notrace_hash;
3183
3184        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3185        if (!hash)
3186                return -ENOMEM;
3187
3188        mutex_lock(&ftrace_regex_lock);
3189        if (reset)
3190                ftrace_filter_reset(hash);
3191        if (buf && !ftrace_match_records(hash, buf, len)) {
3192                ret = -EINVAL;
3193                goto out_regex_unlock;
3194        }
3195
3196        mutex_lock(&ftrace_lock);
3197        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3198        if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3199            && ftrace_enabled)
3200                ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3201
3202        mutex_unlock(&ftrace_lock);
3203
3204 out_regex_unlock:
3205        mutex_unlock(&ftrace_regex_lock);
3206
3207        free_ftrace_hash(hash);
3208        return ret;
3209}
3210
3211/**
3212 * ftrace_set_filter - set a function to filter on in ftrace
3213 * @ops - the ops to set the filter with
3214 * @buf - the string that holds the function filter text.
3215 * @len - the length of the string.
3216 * @reset - non zero to reset all filters before applying this filter.
3217 *
3218 * Filters denote which functions should be enabled when tracing is enabled.
3219 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3220 */
3221int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3222                       int len, int reset)
3223{
3224        return ftrace_set_regex(ops, buf, len, reset, 1);
3225}
3226EXPORT_SYMBOL_GPL(ftrace_set_filter);
3227
3228/**
3229 * ftrace_set_notrace - set a function to not trace in ftrace
3230 * @ops - the ops to set the notrace filter with
3231 * @buf - the string that holds the function notrace text.
3232 * @len - the length of the string.
3233 * @reset - non zero to reset all filters before applying this filter.
3234 *
3235 * Notrace Filters denote which functions should not be enabled when tracing
3236 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3237 * for tracing.
3238 */
3239int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3240                        int len, int reset)
3241{
3242        return ftrace_set_regex(ops, buf, len, reset, 0);
3243}
3244EXPORT_SYMBOL_GPL(ftrace_set_notrace);
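/*
 * A brief sketch of how a subsystem with its own ftrace_ops might use the
 * two setters above before registering the ops (the ops and callback
 * names are hypothetical):
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_callback,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "kmalloc", strlen("kmalloc"), 1);
 *	ftrace_set_notrace(&my_ops, "kfree", strlen("kfree"), 0);
 */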
3245/**
3246 * ftrace_set_global_filter - set a function to filter on with global tracers
3247 * @buf - the string that holds the function filter text.
3248 * @len - the length of the string.
3249 * @reset - non zero to reset all filters before applying this filter.
3250 *
3251 * Filters denote which functions should be enabled when tracing is enabled.
3252 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3253 * This variant applies the filter to the global ops used by the tracers.
3254 */
3255void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3256{
3257        ftrace_set_regex(&global_ops, buf, len, reset, 1);
3258}
3259EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3260
3261/**
3262 * ftrace_set_global_notrace - set a function to not trace with global tracers
3263 * @buf - the string that holds the function notrace text.
3264 * @len - the length of the string.
3265 * @reset - non zero to reset all filters before applying this filter.
3266 *
3267 * Notrace Filters denote which functions should not be enabled when tracing
3268 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3269 * for tracing.
3270 * This variant applies the notrace filter to the global ops used by the tracers.
3271 */
3272void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3273{
3274        ftrace_set_regex(&global_ops, buf, len, reset, 0);
3275}
3276EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3277
3278/*
3279 * command line interface to allow users to set filters on boot up.
3280 */
3281#define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3282static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3283static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3284
3285static int __init set_ftrace_notrace(char *str)
3286{
3287        strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3288        return 1;
3289}
3290__setup("ftrace_notrace=", set_ftrace_notrace);
3291
3292static int __init set_ftrace_filter(char *str)
3293{
3294        strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3295        return 1;
3296}
3297__setup("ftrace_filter=", set_ftrace_filter);
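/*
 * Both boot parameters take a comma separated list of globs, for example
 * (function names purely illustrative):
 *
 *	ftrace_filter=sched*,vfs_read
 *	ftrace_notrace=*spin_lock*
 *
 * The saved buffers are applied later by set_ftrace_early_filters() below.
 */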
3298
3299#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3300static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3301static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3302
3303static int __init set_graph_function(char *str)
3304{
3305        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3306        return 1;
3307}
3308__setup("ftrace_graph_filter=", set_graph_function);
3309
3310static void __init set_ftrace_early_graph(char *buf)
3311{
3312        int ret;
3313        char *func;
3314
3315        while (buf) {
3316                func = strsep(&buf, ",");
3317                /* we allow only one expression at a time */
3318                ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3319                                      func);
3320                if (ret)
3321                        printk(KERN_DEBUG "ftrace: function %s not "
3322                                          "traceable\n", func);
3323        }
3324}
3325#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3326
3327void __init
3328ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3329{
3330        char *func;
3331
3332        while (buf) {
3333                func = strsep(&buf, ",");
3334                ftrace_set_regex(ops, func, strlen(func), 0, enable);
3335        }
3336}
3337
3338static void __init set_ftrace_early_filters(void)
3339{
3340        if (ftrace_filter_buf[0])
3341                ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3342        if (ftrace_notrace_buf[0])
3343                ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3344#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3345        if (ftrace_graph_buf[0])
3346                set_ftrace_early_graph(ftrace_graph_buf);
3347#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3348}
3349
3350int ftrace_regex_release(struct inode *inode, struct file *file)
3351{
3352        struct seq_file *m = (struct seq_file *)file->private_data;
3353        struct ftrace_iterator *iter;
3354        struct ftrace_hash **orig_hash;
3355        struct trace_parser *parser;
3356        int filter_hash;
3357        int ret;
3358
3359        mutex_lock(&ftrace_regex_lock);
3360        if (file->f_mode & FMODE_READ) {
3361                iter = m->private;
3362
3363                seq_release(inode, file);
3364        } else
3365                iter = file->private_data;
3366
3367        parser = &iter->parser;
3368        if (trace_parser_loaded(parser)) {
3369                parser->buffer[parser->idx] = 0;
3370                ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3371        }
3372
3373        trace_parser_put(parser);
3374
3375        if (file->f_mode & FMODE_WRITE) {
3376                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3377
3378                if (filter_hash)
3379                        orig_hash = &iter->ops->filter_hash;
3380                else
3381                        orig_hash = &iter->ops->notrace_hash;
3382
3383                mutex_lock(&ftrace_lock);
3384                ret = ftrace_hash_move(iter->ops, filter_hash,
3385                                       orig_hash, iter->hash);
3386                if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3387                    && ftrace_enabled)
3388                        ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3389
3390                mutex_unlock(&ftrace_lock);
3391        }
3392        free_ftrace_hash(iter->hash);
3393        kfree(iter);
3394
3395        mutex_unlock(&ftrace_regex_lock);
3396        return 0;
3397}
3398
3399static const struct file_operations ftrace_avail_fops = {
3400        .open = ftrace_avail_open,
3401        .read = seq_read,
3402        .llseek = seq_lseek,
3403        .release = seq_release_private,
3404};
3405
3406static const struct file_operations ftrace_enabled_fops = {
3407        .open = ftrace_enabled_open,
3408        .read = seq_read,
3409        .llseek = seq_lseek,
3410        .release = seq_release_private,
3411};
3412
3413static const struct file_operations ftrace_filter_fops = {
3414        .open = ftrace_filter_open,
3415        .read = seq_read,
3416        .write = ftrace_filter_write,
3417        .llseek = ftrace_regex_lseek,
3418        .release = ftrace_regex_release,
3419};
3420
3421static const struct file_operations ftrace_notrace_fops = {
3422        .open = ftrace_notrace_open,
3423        .read = seq_read,
3424        .write = ftrace_notrace_write,
3425        .llseek = ftrace_regex_lseek,
3426        .release = ftrace_regex_release,
3427};
3428
3429#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3430
3431static DEFINE_MUTEX(graph_lock);
3432
3433int ftrace_graph_count;
3434int ftrace_graph_filter_enabled;
3435unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3436
3437static void *
3438__g_next(struct seq_file *m, loff_t *pos)
3439{
3440        if (*pos >= ftrace_graph_count)
3441                return NULL;
3442        return &ftrace_graph_funcs[*pos];
3443}
3444
3445static void *
3446g_next(struct seq_file *m, void *v, loff_t *pos)
3447{
3448        (*pos)++;
3449        return __g_next(m, pos);
3450}
3451
3452static void *g_start(struct seq_file *m, loff_t *pos)
3453{
3454        mutex_lock(&graph_lock);
3455
3456        /* Nothing in the filter; tell g_show to print that all functions are enabled */
3457        if (!ftrace_graph_filter_enabled && !*pos)
3458                return (void *)1;
3459
3460        return __g_next(m, pos);
3461}
3462
3463static void g_stop(struct seq_file *m, void *p)
3464{
3465        mutex_unlock(&graph_lock);
3466}
3467
3468static int g_show(struct seq_file *m, void *v)
3469{
3470        unsigned long *ptr = v;
3471
3472        if (!ptr)
3473                return 0;
3474
3475        if (ptr == (unsigned long *)1) {
3476                seq_printf(m, "#### all functions enabled ####\n");
3477                return 0;
3478        }
3479
3480        seq_printf(m, "%ps\n", (void *)*ptr);
3481
3482        return 0;
3483}
3484
3485static const struct seq_operations ftrace_graph_seq_ops = {
3486        .start = g_start,
3487        .next = g_next,
3488        .stop = g_stop,
3489        .show = g_show,
3490};
3491
3492static int
3493ftrace_graph_open(struct inode *inode, struct file *file)
3494{
3495        int ret = 0;
3496
3497        if (unlikely(ftrace_disabled))
3498                return -ENODEV;
3499
3500        mutex_lock(&graph_lock);
3501        if ((file->f_mode & FMODE_WRITE) &&
3502            (file->f_flags & O_TRUNC)) {
3503                ftrace_graph_filter_enabled = 0;
3504                ftrace_graph_count = 0;
3505                memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3506        }
3507        mutex_unlock(&graph_lock);
3508
3509        if (file->f_mode & FMODE_READ)
3510                ret = seq_open(file, &ftrace_graph_seq_ops);
3511
3512        return ret;
3513}
3514
3515static int
3516ftrace_graph_release(struct inode *inode, struct file *file)
3517{
3518        if (file->f_mode & FMODE_READ)
3519                seq_release(inode, file);
3520        return 0;
3521}
3522
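/*
 * ftrace_set_func - apply one filter expression to the graph filter array
 *
 * Decodes the glob in @buffer, walks every dyn_ftrace record and adds
 * the ip of each match to @array (or, for a negated pattern, removes
 * matching entries already present).  Returns -EBUSY when the array is
 * full, -ENODEV if ftrace is disabled and -EINVAL if nothing matched.
 */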
3523static int
3524ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3525{
3526        struct dyn_ftrace *rec;
3527        struct ftrace_page *pg;
3528        int search_len;
3529        int fail = 1;
3530        int type, not;
3531        char *search;
3532        bool exists;
3533        int i;
3534
3535        /* decode regex */
3536        type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3537        if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3538                return -EBUSY;
3539
3540        search_len = strlen(search);
3541
3542        mutex_lock(&ftrace_lock);
3543
3544        if (unlikely(ftrace_disabled)) {
3545                mutex_unlock(&ftrace_lock);
3546                return -ENODEV;
3547        }
3548
3549        do_for_each_ftrace_rec(pg, rec) {
3550
3551                if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3552                        /* if it is in the array */
3553                        exists = false;
3554                        for (i = 0; i < *idx; i++) {
3555                                if (array[i] == rec->ip) {
3556                                        exists = true;
3557                                        break;
3558                                }
3559                        }
3560
3561                        if (!not) {
3562                                fail = 0;
3563                                if (!exists) {
3564                                        array[(*idx)++] = rec->ip;
3565                                        if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3566                                                goto out;
3567                                }
3568                        } else {
3569                                if (exists) {
3570                                        array[i] = array[--(*idx)];
3571                                        array[*idx] = 0;
3572                                        fail = 0;
3573                                }
3574                        }
3575                }
3576        } while_for_each_ftrace_rec();
3577out:
3578        mutex_unlock(&ftrace_lock);
3579
3580        if (fail)
3581                return -EINVAL;
3582
3583        ftrace_graph_filter_enabled = 1;
3584        return 0;
3585}
3586
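/*
 * Write handler for set_graph_function.  Only one expression is parsed
 * per write; it is handed to ftrace_set_func() to update
 * ftrace_graph_funcs[] and ftrace_graph_count.  For example (path is the
 * conventional debugfs mount, not taken from this file):
 *
 *   echo 'do_page_fault' > /sys/kernel/debug/tracing/set_graph_function
 */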
3587static ssize_t
3588ftrace_graph_write(struct file *file, const char __user *ubuf,
3589                   size_t cnt, loff_t *ppos)
3590{
3591        struct trace_parser parser;
3592        ssize_t read, ret;
3593
3594        if (!cnt)
3595                return 0;
3596
3597        mutex_lock(&graph_lock);
3598
3599        if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3600                ret = -ENOMEM;
3601                goto out_unlock;
3602        }
3603
3604        read = trace_get_user(&parser, ubuf, cnt, ppos);
3605
3606        if (read >= 0 && trace_parser_loaded(&parser)) {
3607                parser.buffer[parser.idx] = 0;
3608
3609                /* we allow only one expression at a time */
3610                ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3611                                        parser.buffer);
3612                if (ret)
3613                        goto out_free;
3614        }
3615
3616        ret = read;
3617
3618out_free:
3619        trace_parser_put(&parser);
3620out_unlock:
3621        mutex_unlock(&graph_lock);
3622
3623        return ret;
3624}
3625
3626static const struct file_operations ftrace_graph_fops = {
3627        .open           = ftrace_graph_open,
3628        .read           = seq_read,
3629        .write          = ftrace_graph_write,
3630        .release        = ftrace_graph_release,
3631        .llseek         = seq_lseek,
3632};
3633#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3634
3635static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3636{
3637
3638        trace_create_file("available_filter_functions", 0444,
3639                        d_tracer, NULL, &ftrace_avail_fops);
3640
3641        trace_create_file("enabled_functions", 0444,
3642                        d_tracer, NULL, &ftrace_enabled_fops);
3643
3644        trace_create_file("set_ftrace_filter", 0644, d_tracer,
3645                        NULL, &ftrace_filter_fops);
3646
3647        trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3648                                    NULL, &ftrace_notrace_fops);
3649
3650#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3651        trace_create_file("set_graph_function", 0644, d_tracer,
3652                                    NULL,
3653                                    &ftrace_graph_fops);
3654#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3655
3656        return 0;
3657}
3658
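/*
 * Comparison and swap callbacks handed to sort() by ftrace_process_locs()
 * so the recorded mcount call-site addresses end up in ascending order.
 */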
3659static int ftrace_cmp_ips(const void *a, const void *b)
3660{
3661        const unsigned long *ipa = a;
3662        const unsigned long *ipb = b;
3663
3664        if (*ipa > *ipb)
3665                return 1;
3666        if (*ipa < *ipb)
3667                return -1;
3668        return 0;
3669}
3670
3671static void ftrace_swap_ips(void *a, void *b, int size)
3672{
3673        unsigned long *ipa = a;
3674        unsigned long *ipb = b;
3675        unsigned long t;
3676
3677        t = *ipa;
3678        *ipa = *ipb;
3679        *ipb = t;
3680}
3681
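/*
 * ftrace_process_locs - record the mcount call sites in [start, end)
 *
 * Sorts the raw addresses, allocates enough ftrace pages to hold them,
 * stores each adjusted, non-NULL address in a dyn_ftrace record and then
 * hands the new pages to ftrace_update_code().  For built-in code
 * (mod == NULL) the update runs with interrupts disabled; module call
 * sites are chained onto the end of the existing page list instead.
 */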
3682static int ftrace_process_locs(struct module *mod,
3683                               unsigned long *start,
3684                               unsigned long *end)
3685{
3686        struct ftrace_page *start_pg;
3687        struct ftrace_page *pg;
3688        struct dyn_ftrace *rec;
3689        unsigned long count;
3690        unsigned long *p;
3691        unsigned long addr;
3692        unsigned long flags = 0; /* Shut up gcc */
3693        int ret = -ENOMEM;
3694
3695        count = end - start;
3696
3697        if (!count)
3698                return 0;
3699
3700        sort(start, count, sizeof(*start),
3701             ftrace_cmp_ips, ftrace_swap_ips);
3702
3703        start_pg = ftrace_allocate_pages(count);
3704        if (!start_pg)
3705                return -ENOMEM;
3706
3707        mutex_lock(&ftrace_lock);
3708
3709        /*
3710         * The core kernel and each module need their own pages, as
3711         * modules will free theirs when they are removed.
3712         * Force a new page to be allocated for modules.
3713         */
3714        if (!mod) {
3715                WARN_ON(ftrace_pages || ftrace_pages_start);
3716                /* First initialization */
3717                ftrace_pages = ftrace_pages_start = start_pg;
3718        } else {
3719                if (!ftrace_pages)
3720                        goto out;
3721
3722                if (WARN_ON(ftrace_pages->next)) {
3723                        /* Hmm, we have free pages? */
3724                        while (ftrace_pages->next)
3725                                ftrace_pages = ftrace_pages->next;
3726                }
3727
3728                ftrace_pages->next = start_pg;
3729        }
3730
3731        p = start;
3732        pg = start_pg;
3733        while (p < end) {
3734                addr = ftrace_call_adjust(*p++);
3735                /*
3736                 * Some architecture linkers will pad between
3737                 * the different mcount_loc sections of different
3738                 * object files to satisfy alignments.
3739                 * Skip any NULL pointers.
3740                 */
3741                if (!addr)
3742                        continue;
3743
3744                if (pg->index == pg->size) {
3745                        /* We should have allocated enough */
3746                        if (WARN_ON(!pg->next))
3747                                break;
3748                        pg = pg->next;
3749                }
3750
3751                rec = &pg->records[pg->index++];
3752                rec->ip = addr;
3753        }
3754
3755        /* We should have used all pages */
3756        WARN_ON(pg->next);
3757
3758        /* Assign the last page to ftrace_pages */
3759        ftrace_pages = pg;
3760
3761        /* These new locations need to be initialized */
3762        ftrace_new_pgs = start_pg;
3763
3764        /*
3765         * We only need to disable interrupts on start up
3766         * because we are modifying code that an interrupt
3767         * may execute, and the modification is not atomic.
3768         * But for modules, nothing runs the code we modify
3769         * until we are finished with it, and there's no
3770         * reason to cause large interrupt latencies while we do it.
3771         */
3772        if (!mod)
3773                local_irq_save(flags);
3774        ftrace_update_code(mod);
3775        if (!mod)
3776                local_irq_restore(flags);
3777        ret = 0;
3778 out:
3779        mutex_unlock(&ftrace_lock);
3780
3781        return ret;
3782}
3783
3784#ifdef CONFIG_MODULES
3785
3786#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3787
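/*
 * ftrace_release_mod - remove a module's ftrace records
 *
 * Walks the ftrace page list and frees every page whose records fall
 * within the module's core text, unlinking it from ftrace_pages_start
 * and moving ftrace_pages back when the last page is being deleted.
 */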
3788void ftrace_release_mod(struct module *mod)
3789{
3790        struct dyn_ftrace *rec;
3791        struct ftrace_page **last_pg;
3792        struct ftrace_page *pg;
3793        int order;
3794
3795        mutex_lock(&ftrace_lock);
3796
3797        if (ftrace_disabled)
3798                goto out_unlock;
3799
3800        /*
3801         * Each module has its own ftrace_pages; remove
3802         * them from the list.
3803         */
3804        last_pg = &ftrace_pages_start;
3805        for (pg = ftrace_pages_start; pg; pg = *last_pg) {
3806                rec = &pg->records[0];
3807                if (within_module_core(rec->ip, mod)) {
3808                        /*
3809                         * As core pages are first, the first
3810                         * page should never be a module page.
3811                         */
3812                        if (WARN_ON(pg == ftrace_pages_start))
3813                                goto out_unlock;
3814
3815                        /* Check if we are deleting the last page */
3816                        if (pg == ftrace_pages)
3817                                ftrace_pages = next_to_ftrace_page(last_pg);
3818
3819                        *last_pg = pg->next;
3820                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3821                        free_pages((unsigned long)pg->records, order);
3822                        kfree(pg);
3823                } else
3824                        last_pg = &pg->next;
3825        }
3826 out_unlock:
3827        mutex_unlock(&ftrace_lock);
3828}
3829
3830static void ftrace_init_module(struct module *mod,
3831                               unsigned long *start, unsigned long *end)
3832{
3833        if (ftrace_disabled || start == end)
3834                return;
3835        ftrace_process_locs(mod, start, end);
3836}
3837
3838static int ftrace_module_notify(struct notifier_block *self,
3839                                unsigned long val, void *data)
3840{
3841        struct module *mod = data;
3842
3843        switch (val) {
3844        case MODULE_STATE_COMING:
3845                ftrace_init_module(mod, mod->ftrace_callsites,
3846                                   mod->ftrace_callsites +
3847                                   mod->num_ftrace_callsites);
3848                break;
3849        case MODULE_STATE_GOING:
3850                ftrace_release_mod(mod);
3851                break;
3852        }
3853
3854        return 0;
3855}
3856#else
3857static int ftrace_module_notify(struct notifier_block *self,
3858                                unsigned long val, void *data)
3859{
3860        return 0;
3861}
3862#endif /* CONFIG_MODULES */
3863
3864struct notifier_block ftrace_module_nb = {
3865        .notifier_call = ftrace_module_notify,
3866        .priority = 0,
3867};
3868
3869extern unsigned long __start_mcount_loc[];
3870extern unsigned long __stop_mcount_loc[];
3871
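/*
 * Boot-time setup for dynamic ftrace: run the architecture init hook,
 * size the dyn_ftrace table from the __mcount_loc section, record all
 * built-in call sites through ftrace_process_locs() and register the
 * module notifier so loaded modules receive the same treatment.  An
 * early failure marks ftrace as disabled.
 */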
3872void __init ftrace_init(void)
3873{
3874        unsigned long count, addr, flags;
3875        int ret;
3876
3877        /* Keep the ftrace pointer to the stub */
3878        addr = (unsigned long)ftrace_stub;
3879
3880        local_irq_save(flags);
3881        ftrace_dyn_arch_init(&addr);
3882        local_irq_restore(flags);
3883
3884        /* ftrace_dyn_arch_init places the return code in addr */
3885        if (addr)
3886                goto failed;
3887
3888        count = __stop_mcount_loc - __start_mcount_loc;
3889
3890        ret = ftrace_dyn_table_alloc(count);
3891        if (ret)
3892                goto failed;
3893
3894        last_ftrace_enabled = ftrace_enabled = 1;
3895
3896        ret = ftrace_process_locs(NULL,
3897                                  __start_mcount_loc,
3898                                  __stop_mcount_loc);
3899
3900        ret = register_module_notifier(&ftrace_module_nb);
3901        if (ret)
3902                pr_warning("Failed to register ftrace module notifier\n");
3903
3904        set_ftrace_early_filters();
3905
3906        return;
3907 failed:
3908        ftrace_disabled = 1;
3909}
3910
3911#else
3912
3913static struct ftrace_ops global_ops = {
3914        .func                   = ftrace_stub,
3915};
3916
3917static int __init ftrace_nodyn_init(void)
3918{
3919        ftrace_enabled = 1;
3920        return 0;
3921}
3922device_initcall(ftrace_nodyn_init);
3923
3924static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3925static inline void ftrace_startup_enable(int command) { }
3926/* Keep as macros so we do not need to define the commands */
3927# define ftrace_startup(ops, command)                   \
3928        ({                                              \
3929                (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
3930                0;                                      \
3931        })
3932# define ftrace_shutdown(ops, command)  do { } while (0)
3933# define ftrace_startup_sysctl()        do { } while (0)
3934# define ftrace_shutdown_sysctl()       do { } while (0)
3935
3936static inline int
3937ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3938{
3939        return 1;
3940}
3941
3942#endif /* CONFIG_DYNAMIC_FTRACE */
3943
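/*
 * Dispatcher for "control" ops: walks ftrace_control_list with
 * preemption disabled and calls every op whose per-cpu disable flag is
 * clear and whose filter accepts the traced ip, using TRACE_CONTROL_BIT
 * to guard against recursion.
 */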
3944static void
3945ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
3946{
3947        struct ftrace_ops *op;
3948
3949        if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
3950                return;
3951
3952        /*
3953         * Some of the ops may be dynamically allocated;
3954         * they must be freed after a synchronize_sched().
3955         */
3956        preempt_disable_notrace();
3957        trace_recursion_set(TRACE_CONTROL_BIT);
3958        op = rcu_dereference_raw(ftrace_control_list);
3959        while (op != &ftrace_list_end) {
3960                if (!ftrace_function_local_disabled(op) &&
3961                    ftrace_ops_test(op, ip))
3962                        op->func(ip, parent_ip);
3963
3964                op = rcu_dereference_raw(op->next);
3965        }
3966        trace_recursion_clear(TRACE_CONTROL_BIT);
3967        preempt_enable_notrace();
3968}
3969
3970static struct ftrace_ops control_ops = {
3971        .func = ftrace_ops_control_func,
3972};
3973
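/*
 * Generic dispatcher installed as ftrace_trace_function when more than
 * one ftrace_ops is registered: walks ftrace_ops_list and calls every op
 * that ftrace_ops_test() accepts for the traced ip, using
 * TRACE_INTERNAL_BIT to avoid recursion.
 */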
3974static void
3975ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3976{
3977        struct ftrace_ops *op;
3978
3979        if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3980                return;
3981
3982        trace_recursion_set(TRACE_INTERNAL_BIT);
3983        /*
3984         * Some of the ops may be dynamically allocated;
3985         * they must be freed after a synchronize_sched().
3986         */
3987        preempt_disable_notrace();
3988        op = rcu_dereference_raw(ftrace_ops_list);
3989        while (op != &ftrace_list_end) {
3990                if (ftrace_ops_test(op, ip))
3991                        op->func(ip, parent_ip);
3992                op = rcu_dereference_raw(op->next);
3993        }
3994        preempt_enable_notrace();
3995        trace_recursion_clear(TRACE_INTERNAL_BIT);
3996}
3997
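/*
 * Helpers for the set_ftrace_pid filter: mark or unmark every task of a
 * given pid for tracing via set/clear_tsk_trace_trace(), with the idle
 * task of each online CPU standing in for the special ftrace_swapper_pid
 * entry.
 */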
3998static void clear_ftrace_swapper(void)
3999{
4000        struct task_struct *p;
4001        int cpu;
4002
4003        get_online_cpus();
4004        for_each_online_cpu(cpu) {
4005                p = idle_task(cpu);
4006                clear_tsk_trace_trace(p);
4007        }
4008        put_online_cpus();
4009}
4010
4011static void set_ftrace_swapper(void)
4012{
4013        struct task_struct *p;
4014        int cpu;
4015
4016        get_online_cpus();
4017        for_each_online_cpu(cpu) {
4018                p = idle_task(cpu);
4019                set_tsk_trace_trace(p);
4020        }
4021        put_online_cpus();
4022}
4023
4024static void clear_ftrace_pid(struct pid *pid)
4025{
4026        struct task_struct *p;
4027
4028        rcu_read_lock();
4029        do_each_pid_task(pid, PIDTYPE_PID, p) {
4030                clear_tsk_trace_trace(p);
4031        } while_each_pid_task(pid, PIDTYPE_PID, p);
4032        rcu_read_unlock();
4033
4034        put_pid(pid);
4035}
4036
4037static void set_ftrace_pid(struct pid *pid)
4038{
4039        struct task_struct *p;
4040
4041        rcu_read_lock();
4042        do_each_pid_task(pid, PIDTYPE_PID, p) {
4043                set_tsk_trace_trace(p);
4044        } while_each_pid_task(pid, PIDTYPE_PID, p);
4045        rcu_read_unlock();
4046}
4047
4048static void clear_ftrace_pid_task(struct pid *pid)
4049{
4050        if (pid == ftrace_swapper_pid)
4051                clear_ftrace_swapper();
4052        else
4053                clear_ftrace_pid(pid);
4054}
4055
4056static void set_ftrace_pid_task(struct pid *pid)
4057{
4058        if (pid == ftrace_swapper_pid)
4059                set_ftrace_swapper();
4060        else
4061                set_ftrace_pid(pid);
4062}
4063
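/*
 * Add one pid to the ftrace_pids list.  A value of 0 selects the idle
 * (swapper) tasks; any other value must resolve via find_get_pid().
 * Duplicates are ignored.  On success the matching tasks are marked for
 * tracing and the pid-aware trace function is installed.
 */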
4064static int ftrace_pid_add(int p)
4065{
4066        struct pid *pid;
4067        struct ftrace_pid *fpid;
4068        int ret = -EINVAL;
4069
4070        mutex_lock(&ftrace_lock);
4071
4072        if (!p)
4073                pid = ftrace_swapper_pid;
4074        else
4075                pid = find_get_pid(p);
4076
4077        if (!pid)
4078                goto out;
4079
4080        ret = 0;
4081
4082        list_for_each_entry(fpid, &ftrace_pids, list)
4083                if (fpid->pid == pid)
4084                        goto out_put;
4085
4086        ret = -ENOMEM;
4087
4088        fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4089        if (!fpid)
4090                goto out_put;
4091
4092        list_add(&fpid->list, &ftrace_pids);
4093        fpid->pid = pid;
4094
4095        set_ftrace_pid_task(pid);
4096
4097        ftrace_update_pid_func();
4098        ftrace_startup_enable(0);
4099
4100        mutex_unlock(&ftrace_lock);
4101        return 0;
4102
4103out_put:
4104        if (pid != ftrace_swapper_pid)
4105                put_pid(pid);
4106
4107out:
4108        mutex_unlock(&ftrace_lock);
4109        return ret;
4110}
4111
4112static void ftrace_pid_reset(void)
4113{
4114        struct ftrace_pid *fpid, *safe;
4115
4116        mutex_lock(&ftrace_lock);
4117        list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4118                struct pid *pid = fpid->pid;
4119
4120                clear_ftrace_pid_task(pid);
4121
4122                list_del(&fpid->list);
4123                kfree(fpid);
4124        }
4125
4126        ftrace_update_pid_func();
4127        ftrace_startup_enable(0);
4128
4129        mutex_unlock(&ftrace_lock);
4130}
4131
4132static void *fpid_start(struct seq_file *m, loff_t *pos)
4133{
4134        mutex_lock(&ftrace_lock);
4135
4136        if (list_empty(&ftrace_pids) && (!*pos))
4137                return (void *) 1;
4138
4139        return seq_list_start(&ftrace_pids, *pos);
4140}
4141
4142static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4143{
4144        if (v == (void *)1)
4145                return NULL;
4146
4147        return seq_list_next(v, &ftrace_pids, pos);
4148}
4149
4150static void fpid_stop(struct seq_file *m, void *p)
4151{
4152        mutex_unlock(&ftrace_lock);
4153}
4154
4155static int fpid_show(struct seq_file *m, void *v)
4156{
4157        const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4158
4159        if (v == (void *)1) {
4160                seq_printf(m, "no pid\n");
4161                return 0;
4162        }
4163
4164        if (fpid->pid == ftrace_swapper_pid)
4165                seq_printf(m, "swapper tasks\n");
4166        else
4167                seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4168
4169        return 0;
4170}
4171
4172static const struct seq_operations ftrace_pid_sops = {
4173        .start = fpid_start,
4174        .next = fpid_next,
4175        .stop = fpid_stop,
4176        .show = fpid_show,
4177};
4178
4179static int
4180ftrace_pid_open(struct inode *inode, struct file *file)
4181{
4182        int ret = 0;
4183
4184        if ((file->f_mode & FMODE_WRITE) &&
4185            (file->f_flags & O_TRUNC))
4186                ftrace_pid_reset();
4187
4188        if (file->f_mode & FMODE_READ)
4189                ret = seq_open(file, &ftrace_pid_sops);
4190
4191        return ret;
4192}
4193
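/*
 * Write handler for set_ftrace_pid.  Typical usage from the tracing
 * debugfs directory (paths are the conventional mount point, not taken
 * from this file):
 *
 *   echo 1234 > set_ftrace_pid    # limit tracing to pid 1234
 *   echo 0 > set_ftrace_pid       # trace the idle/swapper tasks
 *   echo > set_ftrace_pid         # clear the list (O_TRUNC resets it,
 *                                 # the empty write is accepted quietly)
 */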
4194static ssize_t
4195ftrace_pid_write(struct file *filp, const char __user *ubuf,
4196                   size_t cnt, loff_t *ppos)
4197{
4198        char buf[64], *tmp;
4199        long val;
4200        int ret;
4201
4202        if (cnt >= sizeof(buf))
4203                return -EINVAL;
4204
4205        if (copy_from_user(&buf, ubuf, cnt))
4206                return -EFAULT;
4207
4208        buf[cnt] = 0;
4209
4210        /*
4211         * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4212         * to clean the filter quietly.
4213         * to clear the filter quietly.
4214        tmp = strstrip(buf);
4215        if (strlen(tmp) == 0)
4216                return 1;
4217
4218        ret = strict_strtol(tmp, 10, &val);
4219        if (ret < 0)
4220                return ret;
4221
4222        ret = ftrace_pid_add(val);
4223
4224        return ret ? ret : cnt;
4225}
4226
4227static int
4228ftrace_pid_release(struct inode *inode, struct file *file)
4229{
4230        if (file->f_mode & FMODE_READ)
4231                seq_release(inode, file);
4232
4233        return 0;
4234}
4235
4236static const struct file_operations ftrace_pid_fops = {
4237        .open           = ftrace_pid_open,
4238        .write          = ftrace_pid_write,
4239        .read           = seq_read,
4240        .llseek         = seq_lseek,
4241        .release        = ftrace_pid_release,
4242};
4243
4244static __init int ftrace_init_debugfs(void)
4245{
4246        struct dentry *d_tracer;
4247
4248        d_tracer = tracing_init_dentry();
4249        if (!d_tracer)
4250                return 0;
4251
4252        ftrace_init_dyn_debugfs(d_tracer);
4253
4254        trace_create_file("set_ftrace_pid", 0644, d_tracer,
4255                            NULL, &ftrace_pid_fops);
4256
4257        ftrace_profile_debugfs(d_tracer);
4258
4259        return 0;
4260}
4261fs_initcall(ftrace_init_debugfs);
4262
4263/**
4264 * ftrace_kill - kill ftrace
4265 *
4266 * This function should be used by panic code. It stops ftrace
4267 * but in a not so nice way. If you need to simply kill ftrace
4268 * from a non-atomic section, this function will do that as well.
4269 */
4270void ftrace_kill(void)
4271{
4272        ftrace_disabled = 1;
4273        ftrace_enabled = 0;
4274        clear_ftrace_function();
4275}
4276
4277/**
4278 * ftrace_is_dead - Test if ftrace is dead or not.
4279 */
4280int ftrace_is_dead(void)
4281{
4282        return ftrace_disabled;
4283}
4284
4285/**
4286 * register_ftrace_function - register a function for profiling
4287 * @ops: ops structure that holds the function for profiling.
4288 *
4289 * Register a function to be called by all functions in the
4290 * kernel.
4291 *
4292 * Note: @ops->func and all the functions it calls must be labeled
4293 *       with "notrace", otherwise it will go into a
4294 *       recursive loop.
4295 */
4296int register_ftrace_function(struct ftrace_ops *ops)
4297{
4298        int ret = -1;
4299
4300        mutex_lock(&ftrace_lock);
4301
4302        ret = __register_ftrace_function(ops);
4303        if (!ret)
4304                ret = ftrace_startup(ops, 0);
4305
4306        mutex_unlock(&ftrace_lock);
4307
4308        return ret;
4309}
4310EXPORT_SYMBOL_GPL(register_ftrace_function);
4311
4312/**
4313 * unregister_ftrace_function - unregister a function for profiling.
4314 * @ops: ops structure that holds the function to unregister
4315 *
4316 * Unregister a function that was added to be called by ftrace profiling.
4317 */
4318int unregister_ftrace_function(struct ftrace_ops *ops)
4319{
4320        int ret;
4321
4322        mutex_lock(&ftrace_lock);
4323        ret = __unregister_ftrace_function(ops);
4324        if (!ret)
4325                ftrace_shutdown(ops, 0);
4326        mutex_unlock(&ftrace_lock);
4327
4328        return ret;
4329}
4330EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4331
4332int
4333ftrace_enable_sysctl(struct ctl_table *table, int write,
4334                     void __user *buffer, size_t *lenp,
4335                     loff_t *ppos)
4336{
4337        int ret = -ENODEV;
4338
4339        mutex_lock(&ftrace_lock);
4340
4341        if (unlikely(ftrace_disabled))
4342                goto out;
4343
4344        ret = proc_dointvec(table, write, buffer, lenp, ppos);
4345
4346        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4347                goto out;
4348
4349        last_ftrace_enabled = !!ftrace_enabled;
4350
4351        if (ftrace_enabled) {
4352
4353                ftrace_startup_sysctl();
4354
4355                /* we are starting ftrace again */
4356                if (ftrace_ops_list != &ftrace_list_end) {
4357                        if (ftrace_ops_list->next == &ftrace_list_end)
4358                                ftrace_trace_function = ftrace_ops_list->func;
4359                        else
4360                                ftrace_trace_function = ftrace_ops_list_func;
4361                }
4362
4363        } else {
4364                /* stopping ftrace calls (just send to ftrace_stub) */
4365                ftrace_trace_function = ftrace_stub;
4366
4367                ftrace_shutdown_sysctl();
4368        }
4369
4370 out:
4371        mutex_unlock(&ftrace_lock);
4372        return ret;
4373}
4374
4375#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4376
4377static int ftrace_graph_active;
4378static struct notifier_block ftrace_suspend_notifier;
4379
4380int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4381{
4382        return 0;
4383}
4384
4385/* The callbacks that hook a function */
4386trace_func_graph_ret_t ftrace_graph_return =
4387                        (trace_func_graph_ret_t)ftrace_stub;
4388trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4389
4390/* Try to assign a return stack to each of up to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4391static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4392{
4393        int i;
4394        int ret = 0;
4395        unsigned long flags;
4396        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4397        struct task_struct *g, *t;
4398
4399        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4400                ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4401                                        * sizeof(struct ftrace_ret_stack),
4402                                        GFP_KERNEL);
4403                if (!ret_stack_list[i]) {
4404                        start = 0;
4405                        end = i;
4406                        ret = -ENOMEM;
4407                        goto free;
4408                }
4409        }
4410
4411        read_lock_irqsave(&tasklist_lock, flags);
4412        do_each_thread(g, t) {
4413                if (start == end) {
4414                        ret = -EAGAIN;
4415                        goto unlock;
4416                }
4417
4418                if (t->ret_stack == NULL) {
4419                        atomic_set(&t->tracing_graph_pause, 0);
4420                        atomic_set(&t->trace_overrun, 0);
4421                        t->curr_ret_stack = -1;
4422                        /* Make sure the tasks see the -1 first: */
4423                        smp_wmb();
4424                        t->ret_stack = ret_stack_list[start++];
4425                }
4426        } while_each_thread(g, t);
4427
4428unlock:
4429        read_unlock_irqrestore(&tasklist_lock, flags);
4430free:
4431        for (i = start; i < end; i++)
4432                kfree(ret_stack_list[i]);
4433        return ret;
4434}
4435
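/*
 * sched_switch tracepoint probe used by the graph tracer.  It does
 * nothing when TRACE_ITER_SLEEP_TIME is set (the user wants sleep time
 * counted); otherwise it timestamps the task being scheduled out and,
 * when a timestamped task is scheduled back in, adds the time it spent
 * off the CPU to the calltime of every entry on its return stack so
 * that sleep time is not charged to the traced functions.
 */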
4436static void
4437ftrace_graph_probe_sched_switch(void *ignore,
4438                        struct task_struct *prev, struct task_struct *next)
4439{
4440        unsigned long long timestamp;
4441        int index;
4442
4443        /*
4444         * Does the user want to count the time a function was asleep?
4445         * If so, do not update the time stamps.
4446         */
4447        if (trace_flags & TRACE_ITER_SLEEP_TIME)
4448                return;
4449
4450        timestamp = trace_clock_local();
4451
4452        prev->ftrace_timestamp = timestamp;
4453
4454        /* only process tasks that we timestamped */
4455        if (!next->ftrace_timestamp)
4456                return;
4457
4458        /*
4459         * Update all the counters in next to make up for the
4460         * time next was sleeping.
4461         */
4462        timestamp -= next->ftrace_timestamp;
4463
4464        for (index = next->curr_ret_stack; index >= 0; index--)
4465                next->ret_stack[index].calltime += timestamp;
4466}
4467
4468/* Allocate a return stack for each task */
4469static int start_graph_tracing(void)
4470{
4471        struct ftrace_ret_stack **ret_stack_list;
4472        int ret, cpu;
4473
4474        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4475                                sizeof(struct ftrace_ret_stack *),
4476                                GFP_KERNEL);
4477
4478        if (!ret_stack_list)
4479                return -ENOMEM;
4480
4481        /* The cpu_boot init_task->ret_stack will never be freed */
4482        for_each_online_cpu(cpu) {
4483                if (!idle_task(cpu)->ret_stack)
4484                        ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4485        }
4486
4487        do {
4488                ret = alloc_retstack_tasklist(ret_stack_list);
4489        } while (ret == -EAGAIN);
4490
4491        if (!ret) {
4492                ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4493                if (ret)
4494                        pr_info("ftrace_graph: Couldn't activate tracepoint"
4495                                " probe to kernel_sched_switch\n");
4496        }
4497
4498        kfree(ret_stack_list);
4499        return ret;
4500}
4501
4502/*
4503 * Hibernation protection.
4504 * The state of the current task is too unstable during
4505 * suspend/restore to disk. We want to protect against that.
4506 */
4507static int
4508ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4509                                                        void *unused)
4510{
4511        switch (state) {
4512        case PM_HIBERNATION_PREPARE:
4513                pause_graph_tracing();
4514                break;
4515
4516        case PM_POST_HIBERNATION:
4517                unpause_graph_tracing();
4518                break;
4519        }
4520        return NOTIFY_DONE;
4521}
4522
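/**
 * register_ftrace_graph - enable the function graph tracer
 * @retfunc: callback invoked on function return
 * @entryfunc: callback invoked on function entry
 *
 * Only one graph tracer may be registered at a time.  Registration
 * hooks the hibernation notifier, allocates return stacks for all
 * tasks, attaches the sched_switch probe and finally calls
 * ftrace_startup() with FTRACE_START_FUNC_RET to activate the hooks.
 */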
4523int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4524                        trace_func_graph_ent_t entryfunc)
4525{
4526        int ret = 0;
4527
4528        mutex_lock(&ftrace_lock);
4529
4530        /* we currently allow only one tracer registered at a time */
4531        if (ftrace_graph_active) {
4532                ret = -EBUSY;
4533                goto out;
4534        }
4535
4536        ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4537        register_pm_notifier(&ftrace_suspend_notifier);
4538
4539        ftrace_graph_active++;
4540        ret = start_graph_tracing();
4541        if (ret) {
4542                ftrace_graph_active--;
4543                goto out;
4544        }
4545
4546        ftrace_graph_return = retfunc;
4547        ftrace_graph_entry = entryfunc;
4548
4549        ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4550
4551out:
4552        mutex_unlock(&ftrace_lock);
4553        return ret;
4554}
4555
4556void unregister_ftrace_graph(void)
4557{
4558        mutex_lock(&ftrace_lock);
4559
4560        if (unlikely(!ftrace_graph_active))
4561                goto out;
4562
4563        ftrace_graph_active--;
4564        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4565        ftrace_graph_entry = ftrace_graph_entry_stub;
4566        ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4567        unregister_pm_notifier(&ftrace_suspend_notifier);
4568        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4569
4570 out:
4571        mutex_unlock(&ftrace_lock);
4572}
4573
4574static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4575
4576static void
4577graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4578{
4579        atomic_set(&t->tracing_graph_pause, 0);
4580        atomic_set(&t->trace_overrun, 0);
4581        t->ftrace_timestamp = 0;
4582        /* make curr_ret_stack visible before we add the ret_stack */
4583        smp_wmb();
4584        t->ret_stack = ret_stack;
4585}
4586
4587/*
4588 * Allocate a return stack for the idle task. This may be the first
4589 * time through, or a CPU may be coming back online via hotplug.
4590 */
4591void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4592{
4593        t->curr_ret_stack = -1;
4594        /*
4595         * The idle task has no parent; it either has its own
4596         * stack or no stack at all.
4597         */
4598        if (t->ret_stack)
4599                WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4600
4601        if (ftrace_graph_active) {
4602                struct ftrace_ret_stack *ret_stack;
4603
4604                ret_stack = per_cpu(idle_ret_stack, cpu);
4605                if (!ret_stack) {
4606                        ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4607                                            * sizeof(struct ftrace_ret_stack),
4608                                            GFP_KERNEL);
4609                        if (!ret_stack)
4610                                return;
4611                        per_cpu(idle_ret_stack, cpu) = ret_stack;
4612                }
4613                graph_init_task(t, ret_stack);
4614        }
4615}
4616
4617/* Allocate a return stack for a newly created task */
4618void ftrace_graph_init_task(struct task_struct *t)
4619{
4620        /* Make sure we do not use the parent ret_stack */
4621        t->ret_stack = NULL;
4622        t->curr_ret_stack = -1;
4623
4624        if (ftrace_graph_active) {
4625                struct ftrace_ret_stack *ret_stack;
4626
4627                ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4628                                * sizeof(struct ftrace_ret_stack),
4629                                GFP_KERNEL);
4630                if (!ret_stack)
4631                        return;
4632                graph_init_task(t, ret_stack);
4633        }
4634}
4635
4636void ftrace_graph_exit_task(struct task_struct *t)
4637{
4638        struct ftrace_ret_stack *ret_stack = t->ret_stack;
4639
4640        t->ret_stack = NULL;
4641        /* NULL must become visible to IRQs before we free it: */
4642        barrier();
4643
4644        kfree(ret_stack);
4645}
4646
4647void ftrace_graph_stop(void)
4648{
4649        ftrace_stop();
4650}
4651#endif
4652