linux/kernel/kprobes.c
<<
>>
Prefs
   1/*
   2 *  Kernel Probes (KProbes)
   3 *  kernel/kprobes.c
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; either version 2 of the License, or
   8 * (at your option) any later version.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  18 *
  19 * Copyright (C) IBM Corporation, 2002, 2004
  20 *
  21 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
  22 *              Probes initial implementation (includes suggestions from
  23 *              Rusty Russell).
  24 * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
  25 *              hlists and exceptions notifier as suggested by Andi Kleen.
  26 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  27 *              interface to access function arguments.
  28 * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  29 *              exceptions notifier to be first on the priority list.
  30 * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
  31 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
  32 *              <prasanna@in.ibm.com> added function-return probes.
  33 */
  34#include <linux/kprobes.h>
  35#include <linux/hash.h>
  36#include <linux/init.h>
  37#include <linux/slab.h>
  38#include <linux/stddef.h>
  39#include <linux/export.h>
  40#include <linux/moduleloader.h>
  41#include <linux/kallsyms.h>
  42#include <linux/freezer.h>
  43#include <linux/seq_file.h>
  44#include <linux/debugfs.h>
  45#include <linux/sysctl.h>
  46#include <linux/kdebug.h>
  47#include <linux/memory.h>
  48#include <linux/ftrace.h>
  49#include <linux/cpu.h>
  50#include <linux/jump_label.h>
  51
  52#include <asm-generic/sections.h>
  53#include <asm/cacheflush.h>
  54#include <asm/errno.h>
  55#include <asm/uaccess.h>
  56
  57#define KPROBE_HASH_BITS 6
  58#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
  59
  60
  61/*
  62 * Some oddball architectures like 64bit powerpc have function descriptors
  63 * so this must be overridable.
  64 */
  65#ifndef kprobe_lookup_name
  66#define kprobe_lookup_name(name, addr) \
  67        addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
  68#endif
  69
  70static int kprobes_initialized;
  71static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
  72static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
  73
  74/* NOTE: change this value only with kprobe_mutex held */
  75static bool kprobes_all_disarmed;
  76
  77/* This protects kprobe_table and optimizing_list */
  78static DEFINE_MUTEX(kprobe_mutex);
  79static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
  80static struct {
  81        raw_spinlock_t lock ____cacheline_aligned_in_smp;
  82} kretprobe_table_locks[KPROBE_TABLE_SIZE];
  83
  84static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
  85{
  86        return &(kretprobe_table_locks[hash].lock);
  87}
  88
  89/*
  90 * Normally, functions that we'd want to prohibit kprobes in, are marked
  91 * __kprobes. But, there are cases where such functions already belong to
  92 * a different section (__sched for preempt_schedule)
  93 *
  94 * For such cases, we now have a blacklist
  95 */
  96static struct kprobe_blackpoint kprobe_blacklist[] = {
  97        {"preempt_schedule",},
  98        {"native_get_debugreg",},
  99        {"irq_entries_start",},
 100        {"common_interrupt",},
 101        {"mcount",},    /* mcount can be called from everywhere */
 102        {NULL}    /* Terminator */
 103};
 104
 105#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 106/*
 107 * kprobe->ainsn.insn points to the copy of the instruction to be
 108 * single-stepped. x86_64, POWER4 and above have no-exec support and
 109 * stepping on the instruction on a vmalloced/kmalloced/data page
 110 * is a recipe for disaster
 111 */
 112struct kprobe_insn_page {
 113        struct list_head list;
 114        kprobe_opcode_t *insns;         /* Page of instruction slots */
 115        struct kprobe_insn_cache *cache;
 116        int nused;
 117        int ngarbage;
 118        char slot_used[];
 119};
 120
 121#define KPROBE_INSN_PAGE_SIZE(slots)                    \
 122        (offsetof(struct kprobe_insn_page, slot_used) + \
 123         (sizeof(char) * (slots)))
 124
 125static int slots_per_page(struct kprobe_insn_cache *c)
 126{
 127        return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
 128}
 129
 130enum kprobe_slot_state {
 131        SLOT_CLEAN = 0,
 132        SLOT_DIRTY = 1,
 133        SLOT_USED = 2,
 134};
 135
 136static void *alloc_insn_page(void)
 137{
 138        return module_alloc(PAGE_SIZE);
 139}
 140
 141static void free_insn_page(void *page)
 142{
 143        module_free(NULL, page);
 144}
 145
 146struct kprobe_insn_cache kprobe_insn_slots = {
 147        .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
 148        .alloc = alloc_insn_page,
 149        .free = free_insn_page,
 150        .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
 151        .insn_size = MAX_INSN_SIZE,
 152        .nr_garbage = 0,
 153};
 154static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
 155
 156/**
 157 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 158 * We allocate an executable page if there's no room on existing ones.
 159 */
 160kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 161{
 162        struct kprobe_insn_page *kip;
 163        kprobe_opcode_t *slot = NULL;
 164
 165        mutex_lock(&c->mutex);
 166 retry:
 167        list_for_each_entry(kip, &c->pages, list) {
 168                if (kip->nused < slots_per_page(c)) {
 169                        int i;
 170                        for (i = 0; i < slots_per_page(c); i++) {
 171                                if (kip->slot_used[i] == SLOT_CLEAN) {
 172                                        kip->slot_used[i] = SLOT_USED;
 173                                        kip->nused++;
 174                                        slot = kip->insns + (i * c->insn_size);
 175                                        goto out;
 176                                }
 177                        }
 178                        /* kip->nused is broken. Fix it. */
 179                        kip->nused = slots_per_page(c);
 180                        WARN_ON(1);
 181                }
 182        }
 183
 184        /* If there are any garbage slots, collect it and try again. */
 185        if (c->nr_garbage && collect_garbage_slots(c) == 0)
 186                goto retry;
 187
 188        /* All out of space.  Need to allocate a new page. */
 189        kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 190        if (!kip)
 191                goto out;
 192
 193        /*
 194         * Use module_alloc so this page is within +/- 2GB of where the
 195         * kernel image and loaded module images reside. This is required
 196         * so x86_64 can correctly handle the %rip-relative fixups.
 197         */
 198        kip->insns = c->alloc();
 199        if (!kip->insns) {
 200                kfree(kip);
 201                goto out;
 202        }
 203        INIT_LIST_HEAD(&kip->list);
 204        memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
 205        kip->slot_used[0] = SLOT_USED;
 206        kip->nused = 1;
 207        kip->ngarbage = 0;
 208        kip->cache = c;
 209        list_add(&kip->list, &c->pages);
 210        slot = kip->insns;
 211out:
 212        mutex_unlock(&c->mutex);
 213        return slot;
 214}
 215
 216/* Return 1 if all garbages are collected, otherwise 0. */
 217static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 218{
 219        kip->slot_used[idx] = SLOT_CLEAN;
 220        kip->nused--;
 221        if (kip->nused == 0) {
 222                /*
 223                 * Page is no longer in use.  Free it unless
 224                 * it's the last one.  We keep the last one
 225                 * so as not to have to set it up again the
 226                 * next time somebody inserts a probe.
 227                 */
 228                if (!list_is_singular(&kip->list)) {
 229                        list_del(&kip->list);
 230                        kip->cache->free(kip->insns);
 231                        kfree(kip);
 232                }
 233                return 1;
 234        }
 235        return 0;
 236}
 237
 238static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
 239{
 240        struct kprobe_insn_page *kip, *next;
 241
 242        /* Ensure no-one is interrupted on the garbages */
 243        synchronize_sched();
 244
 245        list_for_each_entry_safe(kip, next, &c->pages, list) {
 246                int i;
 247                if (kip->ngarbage == 0)
 248                        continue;
 249                kip->ngarbage = 0;      /* we will collect all garbages */
 250                for (i = 0; i < slots_per_page(c); i++) {
 251                        if (kip->slot_used[i] == SLOT_DIRTY &&
 252                            collect_one_slot(kip, i))
 253                                break;
 254                }
 255        }
 256        c->nr_garbage = 0;
 257        return 0;
 258}
 259
 260void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
 261                                kprobe_opcode_t *slot, int dirty)
 262{
 263        struct kprobe_insn_page *kip;
 264
 265        mutex_lock(&c->mutex);
 266        list_for_each_entry(kip, &c->pages, list) {
 267                long idx = ((long)slot - (long)kip->insns) /
 268                                (c->insn_size * sizeof(kprobe_opcode_t));
 269                if (idx >= 0 && idx < slots_per_page(c)) {
 270                        WARN_ON(kip->slot_used[idx] != SLOT_USED);
 271                        if (dirty) {
 272                                kip->slot_used[idx] = SLOT_DIRTY;
 273                                kip->ngarbage++;
 274                                if (++c->nr_garbage > slots_per_page(c))
 275                                        collect_garbage_slots(c);
 276                        } else
 277                                collect_one_slot(kip, idx);
 278                        goto out;
 279                }
 280        }
 281        /* Could not free this slot. */
 282        WARN_ON(1);
 283out:
 284        mutex_unlock(&c->mutex);
 285}
 286
 287#ifdef CONFIG_OPTPROBES
 288/* For optimized_kprobe buffer */
 289struct kprobe_insn_cache kprobe_optinsn_slots = {
 290        .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
 291        .alloc = alloc_insn_page,
 292        .free = free_insn_page,
 293        .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
 294        /* .insn_size is initialized later */
 295        .nr_garbage = 0,
 296};
 297#endif
 298#endif
 299
 300/* We have preemption disabled.. so it is safe to use __ versions */
 301static inline void set_kprobe_instance(struct kprobe *kp)
 302{
 303        __this_cpu_write(kprobe_instance, kp);
 304}
 305
 306static inline void reset_kprobe_instance(void)
 307{
 308        __this_cpu_write(kprobe_instance, NULL);
 309}
 310
 311/*
 312 * This routine is called either:
 313 *      - under the kprobe_mutex - during kprobe_[un]register()
 314 *                              OR
 315 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 316 */
 317struct kprobe __kprobes *get_kprobe(void *addr)
 318{
 319        struct hlist_head *head;
 320        struct kprobe *p;
 321
 322        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
 323        hlist_for_each_entry_rcu(p, head, hlist) {
 324                if (p->addr == addr)
 325                        return p;
 326        }
 327
 328        return NULL;
 329}
 330
 331static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
 332
 333/* Return true if the kprobe is an aggregator */
 334static inline int kprobe_aggrprobe(struct kprobe *p)
 335{
 336        return p->pre_handler == aggr_pre_handler;
 337}
 338
 339/* Return true(!0) if the kprobe is unused */
 340static inline int kprobe_unused(struct kprobe *p)
 341{
 342        return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
 343               list_empty(&p->list);
 344}
 345
 346/*
 347 * Keep all fields in the kprobe consistent
 348 */
 349static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
 350{
 351        memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
 352        memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
 353}
 354
 355#ifdef CONFIG_OPTPROBES
 356/* NOTE: change this value only with kprobe_mutex held */
 357static bool kprobes_allow_optimization;
 358
 359/*
 360 * Call all pre_handler on the list, but ignores its return value.
 361 * This must be called from arch-dep optimized caller.
 362 */
 363void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
 364{
 365        struct kprobe *kp;
 366
 367        list_for_each_entry_rcu(kp, &p->list, list) {
 368                if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
 369                        set_kprobe_instance(kp);
 370                        kp->pre_handler(kp, regs);
 371                }
 372                reset_kprobe_instance();
 373        }
 374}
 375
 376/* Free optimized instructions and optimized_kprobe */
 377static __kprobes void free_aggr_kprobe(struct kprobe *p)
 378{
 379        struct optimized_kprobe *op;
 380
 381        op = container_of(p, struct optimized_kprobe, kp);
 382        arch_remove_optimized_kprobe(op);
 383        arch_remove_kprobe(p);
 384        kfree(op);
 385}
 386
 387/* Return true(!0) if the kprobe is ready for optimization. */
 388static inline int kprobe_optready(struct kprobe *p)
 389{
 390        struct optimized_kprobe *op;
 391
 392        if (kprobe_aggrprobe(p)) {
 393                op = container_of(p, struct optimized_kprobe, kp);
 394                return arch_prepared_optinsn(&op->optinsn);
 395        }
 396
 397        return 0;
 398}
 399
 400/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
 401static inline int kprobe_disarmed(struct kprobe *p)
 402{
 403        struct optimized_kprobe *op;
 404
 405        /* If kprobe is not aggr/opt probe, just return kprobe is disabled */
 406        if (!kprobe_aggrprobe(p))
 407                return kprobe_disabled(p);
 408
 409        op = container_of(p, struct optimized_kprobe, kp);
 410
 411        return kprobe_disabled(p) && list_empty(&op->list);
 412}
 413
 414/* Return true(!0) if the probe is queued on (un)optimizing lists */
 415static int __kprobes kprobe_queued(struct kprobe *p)
 416{
 417        struct optimized_kprobe *op;
 418
 419        if (kprobe_aggrprobe(p)) {
 420                op = container_of(p, struct optimized_kprobe, kp);
 421                if (!list_empty(&op->list))
 422                        return 1;
 423        }
 424        return 0;
 425}
 426
 427/*
 428 * Return an optimized kprobe whose optimizing code replaces
 429 * instructions including addr (exclude breakpoint).
 430 */
 431static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 432{
 433        int i;
 434        struct kprobe *p = NULL;
 435        struct optimized_kprobe *op;
 436
 437        /* Don't check i == 0, since that is a breakpoint case. */
 438        for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
 439                p = get_kprobe((void *)(addr - i));
 440
 441        if (p && kprobe_optready(p)) {
 442                op = container_of(p, struct optimized_kprobe, kp);
 443                if (arch_within_optimized_kprobe(op, addr))
 444                        return p;
 445        }
 446
 447        return NULL;
 448}
 449
 450/* Optimization staging list, protected by kprobe_mutex */
 451static LIST_HEAD(optimizing_list);
 452static LIST_HEAD(unoptimizing_list);
 453static LIST_HEAD(freeing_list);
 454
 455static void kprobe_optimizer(struct work_struct *work);
 456static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
 457#define OPTIMIZE_DELAY 5
 458
 459/*
 460 * Optimize (replace a breakpoint with a jump) kprobes listed on
 461 * optimizing_list.
 462 */
 463static __kprobes void do_optimize_kprobes(void)
 464{
 465        /* Optimization never be done when disarmed */
 466        if (kprobes_all_disarmed || !kprobes_allow_optimization ||
 467            list_empty(&optimizing_list))
 468                return;
 469
 470        /*
 471         * The optimization/unoptimization refers online_cpus via
 472         * stop_machine() and cpu-hotplug modifies online_cpus.
 473         * And same time, text_mutex will be held in cpu-hotplug and here.
 474         * This combination can cause a deadlock (cpu-hotplug try to lock
 475         * text_mutex but stop_machine can not be done because online_cpus
 476         * has been changed)
 477         * To avoid this deadlock, we need to call get_online_cpus()
 478         * for preventing cpu-hotplug outside of text_mutex locking.
 479         */
 480        get_online_cpus();
 481        mutex_lock(&text_mutex);
 482        arch_optimize_kprobes(&optimizing_list);
 483        mutex_unlock(&text_mutex);
 484        put_online_cpus();
 485}
 486
 487/*
 488 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 489 * if need) kprobes listed on unoptimizing_list.
 490 */
 491static __kprobes void do_unoptimize_kprobes(void)
 492{
 493        struct optimized_kprobe *op, *tmp;
 494
 495        /* Unoptimization must be done anytime */
 496        if (list_empty(&unoptimizing_list))
 497                return;
 498
 499        /* Ditto to do_optimize_kprobes */
 500        get_online_cpus();
 501        mutex_lock(&text_mutex);
 502        arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 503        /* Loop free_list for disarming */
 504        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 505                /* Disarm probes if marked disabled */
 506                if (kprobe_disabled(&op->kp))
 507                        arch_disarm_kprobe(&op->kp);
 508                if (kprobe_unused(&op->kp)) {
 509                        /*
 510                         * Remove unused probes from hash list. After waiting
 511                         * for synchronization, these probes are reclaimed.
 512                         * (reclaiming is done by do_free_cleaned_kprobes.)
 513                         */
 514                        hlist_del_rcu(&op->kp.hlist);
 515                } else
 516                        list_del_init(&op->list);
 517        }
 518        mutex_unlock(&text_mutex);
 519        put_online_cpus();
 520}
 521
 522/* Reclaim all kprobes on the free_list */
 523static __kprobes void do_free_cleaned_kprobes(void)
 524{
 525        struct optimized_kprobe *op, *tmp;
 526
 527        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 528                BUG_ON(!kprobe_unused(&op->kp));
 529                list_del_init(&op->list);
 530                free_aggr_kprobe(&op->kp);
 531        }
 532}
 533
 534/* Start optimizer after OPTIMIZE_DELAY passed */
 535static __kprobes void kick_kprobe_optimizer(void)
 536{
 537        schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 538}
 539
 540/* Kprobe jump optimizer */
 541static __kprobes void kprobe_optimizer(struct work_struct *work)
 542{
 543        mutex_lock(&kprobe_mutex);
 544        /* Lock modules while optimizing kprobes */
 545        mutex_lock(&module_mutex);
 546
 547        /*
 548         * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
 549         * kprobes before waiting for quiesence period.
 550         */
 551        do_unoptimize_kprobes();
 552
 553        /*
 554         * Step 2: Wait for quiesence period to ensure all running interrupts
 555         * are done. Because optprobe may modify multiple instructions
 556         * there is a chance that Nth instruction is interrupted. In that
 557         * case, running interrupt can return to 2nd-Nth byte of jump
 558         * instruction. This wait is for avoiding it.
 559         */
 560        synchronize_sched();
 561
 562        /* Step 3: Optimize kprobes after quiesence period */
 563        do_optimize_kprobes();
 564
 565        /* Step 4: Free cleaned kprobes after quiesence period */
 566        do_free_cleaned_kprobes();
 567
 568        mutex_unlock(&module_mutex);
 569        mutex_unlock(&kprobe_mutex);
 570
 571        /* Step 5: Kick optimizer again if needed */
 572        if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 573                kick_kprobe_optimizer();
 574}
 575
 576/* Wait for completing optimization and unoptimization */
 577static __kprobes void wait_for_kprobe_optimizer(void)
 578{
 579        mutex_lock(&kprobe_mutex);
 580
 581        while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
 582                mutex_unlock(&kprobe_mutex);
 583
 584                /* this will also make optimizing_work execute immmediately */
 585                flush_delayed_work(&optimizing_work);
 586                /* @optimizing_work might not have been queued yet, relax */
 587                cpu_relax();
 588
 589                mutex_lock(&kprobe_mutex);
 590        }
 591
 592        mutex_unlock(&kprobe_mutex);
 593}
 594
 595/* Optimize kprobe if p is ready to be optimized */
 596static __kprobes void optimize_kprobe(struct kprobe *p)
 597{
 598        struct optimized_kprobe *op;
 599
 600        /* Check if the kprobe is disabled or not ready for optimization. */
 601        if (!kprobe_optready(p) || !kprobes_allow_optimization ||
 602            (kprobe_disabled(p) || kprobes_all_disarmed))
 603                return;
 604
 605        /* Both of break_handler and post_handler are not supported. */
 606        if (p->break_handler || p->post_handler)
 607                return;
 608
 609        op = container_of(p, struct optimized_kprobe, kp);
 610
 611        /* Check there is no other kprobes at the optimized instructions */
 612        if (arch_check_optimized_kprobe(op) < 0)
 613                return;
 614
 615        /* Check if it is already optimized. */
 616        if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
 617                return;
 618        op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 619
 620        if (!list_empty(&op->list))
 621                /* This is under unoptimizing. Just dequeue the probe */
 622                list_del_init(&op->list);
 623        else {
 624                list_add(&op->list, &optimizing_list);
 625                kick_kprobe_optimizer();
 626        }
 627}
 628
 629/* Short cut to direct unoptimizing */
 630static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
 631{
 632        get_online_cpus();
 633        arch_unoptimize_kprobe(op);
 634        put_online_cpus();
 635        if (kprobe_disabled(&op->kp))
 636                arch_disarm_kprobe(&op->kp);
 637}
 638
 639/* Unoptimize a kprobe if p is optimized */
 640static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
 641{
 642        struct optimized_kprobe *op;
 643
 644        if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
 645                return; /* This is not an optprobe nor optimized */
 646
 647        op = container_of(p, struct optimized_kprobe, kp);
 648        if (!kprobe_optimized(p)) {
 649                /* Unoptimized or unoptimizing case */
 650                if (force && !list_empty(&op->list)) {
 651                        /*
 652                         * Only if this is unoptimizing kprobe and forced,
 653                         * forcibly unoptimize it. (No need to unoptimize
 654                         * unoptimized kprobe again :)
 655                         */
 656                        list_del_init(&op->list);
 657                        force_unoptimize_kprobe(op);
 658                }
 659                return;
 660        }
 661
 662        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 663        if (!list_empty(&op->list)) {
 664                /* Dequeue from the optimization queue */
 665                list_del_init(&op->list);
 666                return;
 667        }
 668        /* Optimized kprobe case */
 669        if (force)
 670                /* Forcibly update the code: this is a special case */
 671                force_unoptimize_kprobe(op);
 672        else {
 673                list_add(&op->list, &unoptimizing_list);
 674                kick_kprobe_optimizer();
 675        }
 676}
 677
 678/* Cancel unoptimizing for reusing */
 679static void reuse_unused_kprobe(struct kprobe *ap)
 680{
 681        struct optimized_kprobe *op;
 682
 683        BUG_ON(!kprobe_unused(ap));
 684        /*
 685         * Unused kprobe MUST be on the way of delayed unoptimizing (means
 686         * there is still a relative jump) and disabled.
 687         */
 688        op = container_of(ap, struct optimized_kprobe, kp);
 689        if (unlikely(list_empty(&op->list)))
 690                printk(KERN_WARNING "Warning: found a stray unused "
 691                        "aggrprobe@%p\n", ap->addr);
 692        /* Enable the probe again */
 693        ap->flags &= ~KPROBE_FLAG_DISABLED;
 694        /* Optimize it again (remove from op->list) */
 695        BUG_ON(!kprobe_optready(ap));
 696        optimize_kprobe(ap);
 697}
 698
 699/* Remove optimized instructions */
 700static void __kprobes kill_optimized_kprobe(struct kprobe *p)
 701{
 702        struct optimized_kprobe *op;
 703
 704        op = container_of(p, struct optimized_kprobe, kp);
 705        if (!list_empty(&op->list))
 706                /* Dequeue from the (un)optimization queue */
 707                list_del_init(&op->list);
 708        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 709
 710        if (kprobe_unused(p)) {
 711                /* Enqueue if it is unused */
 712                list_add(&op->list, &freeing_list);
 713                /*
 714                 * Remove unused probes from the hash list. After waiting
 715                 * for synchronization, this probe is reclaimed.
 716                 * (reclaiming is done by do_free_cleaned_kprobes().)
 717                 */
 718                hlist_del_rcu(&op->kp.hlist);
 719        }
 720
 721        /* Don't touch the code, because it is already freed. */
 722        arch_remove_optimized_kprobe(op);
 723}
 724
 725/* Try to prepare optimized instructions */
 726static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
 727{
 728        struct optimized_kprobe *op;
 729
 730        op = container_of(p, struct optimized_kprobe, kp);
 731        arch_prepare_optimized_kprobe(op);
 732}
 733
 734/* Allocate new optimized_kprobe and try to prepare optimized instructions */
 735static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 736{
 737        struct optimized_kprobe *op;
 738
 739        op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
 740        if (!op)
 741                return NULL;
 742
 743        INIT_LIST_HEAD(&op->list);
 744        op->kp.addr = p->addr;
 745        arch_prepare_optimized_kprobe(op);
 746
 747        return &op->kp;
 748}
 749
 750static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
 751
 752/*
 753 * Prepare an optimized_kprobe and optimize it
 754 * NOTE: p must be a normal registered kprobe
 755 */
 756static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 757{
 758        struct kprobe *ap;
 759        struct optimized_kprobe *op;
 760
 761        /* Impossible to optimize ftrace-based kprobe */
 762        if (kprobe_ftrace(p))
 763                return;
 764
 765        /* For preparing optimization, jump_label_text_reserved() is called */
 766        jump_label_lock();
 767        mutex_lock(&text_mutex);
 768
 769        ap = alloc_aggr_kprobe(p);
 770        if (!ap)
 771                goto out;
 772
 773        op = container_of(ap, struct optimized_kprobe, kp);
 774        if (!arch_prepared_optinsn(&op->optinsn)) {
 775                /* If failed to setup optimizing, fallback to kprobe */
 776                arch_remove_optimized_kprobe(op);
 777                kfree(op);
 778                goto out;
 779        }
 780
 781        init_aggr_kprobe(ap, p);
 782        optimize_kprobe(ap);    /* This just kicks optimizer thread */
 783
 784out:
 785        mutex_unlock(&text_mutex);
 786        jump_label_unlock();
 787}
 788
 789#ifdef CONFIG_SYSCTL
 790static void __kprobes optimize_all_kprobes(void)
 791{
 792        struct hlist_head *head;
 793        struct kprobe *p;
 794        unsigned int i;
 795
 796        mutex_lock(&kprobe_mutex);
 797        /* If optimization is already allowed, just return */
 798        if (kprobes_allow_optimization)
 799                goto out;
 800
 801        kprobes_allow_optimization = true;
 802        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 803                head = &kprobe_table[i];
 804                hlist_for_each_entry_rcu(p, head, hlist)
 805                        if (!kprobe_disabled(p))
 806                                optimize_kprobe(p);
 807        }
 808        printk(KERN_INFO "Kprobes globally optimized\n");
 809out:
 810        mutex_unlock(&kprobe_mutex);
 811}
 812
 813static void __kprobes unoptimize_all_kprobes(void)
 814{
 815        struct hlist_head *head;
 816        struct kprobe *p;
 817        unsigned int i;
 818
 819        mutex_lock(&kprobe_mutex);
 820        /* If optimization is already prohibited, just return */
 821        if (!kprobes_allow_optimization) {
 822                mutex_unlock(&kprobe_mutex);
 823                return;
 824        }
 825
 826        kprobes_allow_optimization = false;
 827        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 828                head = &kprobe_table[i];
 829                hlist_for_each_entry_rcu(p, head, hlist) {
 830                        if (!kprobe_disabled(p))
 831                                unoptimize_kprobe(p, false);
 832                }
 833        }
 834        mutex_unlock(&kprobe_mutex);
 835
 836        /* Wait for unoptimizing completion */
 837        wait_for_kprobe_optimizer();
 838        printk(KERN_INFO "Kprobes globally unoptimized\n");
 839}
 840
 841static DEFINE_MUTEX(kprobe_sysctl_mutex);
 842int sysctl_kprobes_optimization;
 843int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 844                                      void __user *buffer, size_t *length,
 845                                      loff_t *ppos)
 846{
 847        int ret;
 848
 849        mutex_lock(&kprobe_sysctl_mutex);
 850        sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
 851        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
 852
 853        if (sysctl_kprobes_optimization)
 854                optimize_all_kprobes();
 855        else
 856                unoptimize_all_kprobes();
 857        mutex_unlock(&kprobe_sysctl_mutex);
 858
 859        return ret;
 860}
 861#endif /* CONFIG_SYSCTL */
 862
 863/* Put a breakpoint for a probe. Must be called with text_mutex locked */
 864static void __kprobes __arm_kprobe(struct kprobe *p)
 865{
 866        struct kprobe *_p;
 867
 868        /* Check collision with other optimized kprobes */
 869        _p = get_optimized_kprobe((unsigned long)p->addr);
 870        if (unlikely(_p))
 871                /* Fallback to unoptimized kprobe */
 872                unoptimize_kprobe(_p, true);
 873
 874        arch_arm_kprobe(p);
 875        optimize_kprobe(p);     /* Try to optimize (add kprobe to a list) */
 876}
 877
 878/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
 879static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
 880{
 881        struct kprobe *_p;
 882
 883        unoptimize_kprobe(p, false);    /* Try to unoptimize */
 884
 885        if (!kprobe_queued(p)) {
 886                arch_disarm_kprobe(p);
 887                /* If another kprobe was blocked, optimize it. */
 888                _p = get_optimized_kprobe((unsigned long)p->addr);
 889                if (unlikely(_p) && reopt)
 890                        optimize_kprobe(_p);
 891        }
 892        /* TODO: reoptimize others after unoptimized this probe */
 893}
 894
 895#else /* !CONFIG_OPTPROBES */
 896
 897#define optimize_kprobe(p)                      do {} while (0)
 898#define unoptimize_kprobe(p, f)                 do {} while (0)
 899#define kill_optimized_kprobe(p)                do {} while (0)
 900#define prepare_optimized_kprobe(p)             do {} while (0)
 901#define try_to_optimize_kprobe(p)               do {} while (0)
 902#define __arm_kprobe(p)                         arch_arm_kprobe(p)
 903#define __disarm_kprobe(p, o)                   arch_disarm_kprobe(p)
 904#define kprobe_disarmed(p)                      kprobe_disabled(p)
 905#define wait_for_kprobe_optimizer()             do {} while (0)
 906
 907/* There should be no unused kprobes can be reused without optimization */
 908static void reuse_unused_kprobe(struct kprobe *ap)
 909{
 910        printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
 911        BUG_ON(kprobe_unused(ap));
 912}
 913
 914static __kprobes void free_aggr_kprobe(struct kprobe *p)
 915{
 916        arch_remove_kprobe(p);
 917        kfree(p);
 918}
 919
 920static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 921{
 922        return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
 923}
 924#endif /* CONFIG_OPTPROBES */
 925
 926#ifdef CONFIG_KPROBES_ON_FTRACE
 927static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
 928        .func = kprobe_ftrace_handler,
 929        .flags = FTRACE_OPS_FL_SAVE_REGS,
 930};
 931static int kprobe_ftrace_enabled;
 932
 933/* Must ensure p->addr is really on ftrace */
 934static int __kprobes prepare_kprobe(struct kprobe *p)
 935{
 936        if (!kprobe_ftrace(p))
 937                return arch_prepare_kprobe(p);
 938
 939        return arch_prepare_kprobe_ftrace(p);
 940}
 941
 942/* Caller must lock kprobe_mutex */
 943static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
 944{
 945        int ret;
 946
 947        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 948                                   (unsigned long)p->addr, 0, 0);
 949        WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
 950        kprobe_ftrace_enabled++;
 951        if (kprobe_ftrace_enabled == 1) {
 952                ret = register_ftrace_function(&kprobe_ftrace_ops);
 953                WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
 954        }
 955}
 956
 957/* Caller must lock kprobe_mutex */
 958static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
 959{
 960        int ret;
 961
 962        kprobe_ftrace_enabled--;
 963        if (kprobe_ftrace_enabled == 0) {
 964                ret = unregister_ftrace_function(&kprobe_ftrace_ops);
 965                WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
 966        }
 967        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 968                           (unsigned long)p->addr, 1, 0);
 969        WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
 970}
 971#else   /* !CONFIG_KPROBES_ON_FTRACE */
 972#define prepare_kprobe(p)       arch_prepare_kprobe(p)
 973#define arm_kprobe_ftrace(p)    do {} while (0)
 974#define disarm_kprobe_ftrace(p) do {} while (0)
 975#endif
 976
 977/* Arm a kprobe with text_mutex */
 978static void __kprobes arm_kprobe(struct kprobe *kp)
 979{
 980        if (unlikely(kprobe_ftrace(kp))) {
 981                arm_kprobe_ftrace(kp);
 982                return;
 983        }
 984        /*
 985         * Here, since __arm_kprobe() doesn't use stop_machine(),
 986         * this doesn't cause deadlock on text_mutex. So, we don't
 987         * need get_online_cpus().
 988         */
 989        mutex_lock(&text_mutex);
 990        __arm_kprobe(kp);
 991        mutex_unlock(&text_mutex);
 992}
 993
 994/* Disarm a kprobe with text_mutex */
 995static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
 996{
 997        if (unlikely(kprobe_ftrace(kp))) {
 998                disarm_kprobe_ftrace(kp);
 999                return;
1000        }
1001        /* Ditto */
1002        mutex_lock(&text_mutex);
1003        __disarm_kprobe(kp, reopt);
1004        mutex_unlock(&text_mutex);
1005}
1006
1007/*
1008 * Aggregate handlers for multiple kprobes support - these handlers
1009 * take care of invoking the individual kprobe handlers on p->list
1010 */
1011static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1012{
1013        struct kprobe *kp;
1014
1015        list_for_each_entry_rcu(kp, &p->list, list) {
1016                if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1017                        set_kprobe_instance(kp);
1018                        if (kp->pre_handler(kp, regs))
1019                                return 1;
1020                }
1021                reset_kprobe_instance();
1022        }
1023        return 0;
1024}
1025
1026static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1027                                        unsigned long flags)
1028{
1029        struct kprobe *kp;
1030
1031        list_for_each_entry_rcu(kp, &p->list, list) {
1032                if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1033                        set_kprobe_instance(kp);
1034                        kp->post_handler(kp, regs, flags);
1035                        reset_kprobe_instance();
1036                }
1037        }
1038}
1039
1040static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1041                                        int trapnr)
1042{
1043        struct kprobe *cur = __this_cpu_read(kprobe_instance);
1044
1045        /*
1046         * if we faulted "during" the execution of a user specified
1047         * probe handler, invoke just that probe's fault handler
1048         */
1049        if (cur && cur->fault_handler) {
1050                if (cur->fault_handler(cur, regs, trapnr))
1051                        return 1;
1052        }
1053        return 0;
1054}
1055
1056static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
1057{
1058        struct kprobe *cur = __this_cpu_read(kprobe_instance);
1059        int ret = 0;
1060
1061        if (cur && cur->break_handler) {
1062                if (cur->break_handler(cur, regs))
1063                        ret = 1;
1064        }
1065        reset_kprobe_instance();
1066        return ret;
1067}
1068
1069/* Walks the list and increments nmissed count for multiprobe case */
1070void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
1071{
1072        struct kprobe *kp;
1073        if (!kprobe_aggrprobe(p)) {
1074                p->nmissed++;
1075        } else {
1076                list_for_each_entry_rcu(kp, &p->list, list)
1077                        kp->nmissed++;
1078        }
1079        return;
1080}
1081
1082void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
1083                                struct hlist_head *head)
1084{
1085        struct kretprobe *rp = ri->rp;
1086
1087        /* remove rp inst off the rprobe_inst_table */
1088        hlist_del(&ri->hlist);
1089        INIT_HLIST_NODE(&ri->hlist);
1090        if (likely(rp)) {
1091                raw_spin_lock(&rp->lock);
1092                hlist_add_head(&ri->hlist, &rp->free_instances);
1093                raw_spin_unlock(&rp->lock);
1094        } else
1095                /* Unregistering */
1096                hlist_add_head(&ri->hlist, head);
1097}
1098
1099void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
1100                         struct hlist_head **head, unsigned long *flags)
1101__acquires(hlist_lock)
1102{
1103        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1104        raw_spinlock_t *hlist_lock;
1105
1106        *head = &kretprobe_inst_table[hash];
1107        hlist_lock = kretprobe_table_lock_ptr(hash);
1108        raw_spin_lock_irqsave(hlist_lock, *flags);
1109}
1110
1111static void __kprobes kretprobe_table_lock(unsigned long hash,
1112        unsigned long *flags)
1113__acquires(hlist_lock)
1114{
1115        raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1116        raw_spin_lock_irqsave(hlist_lock, *flags);
1117}
1118
1119void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
1120        unsigned long *flags)
1121__releases(hlist_lock)
1122{
1123        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1124        raw_spinlock_t *hlist_lock;
1125
1126        hlist_lock = kretprobe_table_lock_ptr(hash);
1127        raw_spin_unlock_irqrestore(hlist_lock, *flags);
1128}
1129
1130static void __kprobes kretprobe_table_unlock(unsigned long hash,
1131       unsigned long *flags)
1132__releases(hlist_lock)
1133{
1134        raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1135        raw_spin_unlock_irqrestore(hlist_lock, *flags);
1136}
1137
1138/*
1139 * This function is called from finish_task_switch when task tk becomes dead,
1140 * so that we can recycle any function-return probe instances associated
1141 * with this task. These left over instances represent probed functions
1142 * that have been called but will never return.
1143 */
1144void __kprobes kprobe_flush_task(struct task_struct *tk)
1145{
1146        struct kretprobe_instance *ri;
1147        struct hlist_head *head, empty_rp;
1148        struct hlist_node *tmp;
1149        unsigned long hash, flags = 0;
1150
1151        if (unlikely(!kprobes_initialized))
1152                /* Early boot.  kretprobe_table_locks not yet initialized. */
1153                return;
1154
1155        INIT_HLIST_HEAD(&empty_rp);
1156        hash = hash_ptr(tk, KPROBE_HASH_BITS);
1157        head = &kretprobe_inst_table[hash];
1158        kretprobe_table_lock(hash, &flags);
1159        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1160                if (ri->task == tk)
1161                        recycle_rp_inst(ri, &empty_rp);
1162        }
1163        kretprobe_table_unlock(hash, &flags);
1164        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
1165                hlist_del(&ri->hlist);
1166                kfree(ri);
1167        }
1168}
1169
1170static inline void free_rp_inst(struct kretprobe *rp)
1171{
1172        struct kretprobe_instance *ri;
1173        struct hlist_node *next;
1174
1175        hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1176                hlist_del(&ri->hlist);
1177                kfree(ri);
1178        }
1179}
1180
1181static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
1182{
1183        unsigned long flags, hash;
1184        struct kretprobe_instance *ri;
1185        struct hlist_node *next;
1186        struct hlist_head *head;
1187
1188        /* No race here */
1189        for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1190                kretprobe_table_lock(hash, &flags);
1191                head = &kretprobe_inst_table[hash];
1192                hlist_for_each_entry_safe(ri, next, head, hlist) {
1193                        if (ri->rp == rp)
1194                                ri->rp = NULL;
1195                }
1196                kretprobe_table_unlock(hash, &flags);
1197        }
1198        free_rp_inst(rp);
1199}
1200
1201/*
1202* Add the new probe to ap->list. Fail if this is the
1203* second jprobe at the address - two jprobes can't coexist
1204*/
1205static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1206{
1207        BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1208
1209        if (p->break_handler || p->post_handler)
1210                unoptimize_kprobe(ap, true);    /* Fall back to normal kprobe */
1211
1212        if (p->break_handler) {
1213                if (ap->break_handler)
1214                        return -EEXIST;
1215                list_add_tail_rcu(&p->list, &ap->list);
1216                ap->break_handler = aggr_break_handler;
1217        } else
1218                list_add_rcu(&p->list, &ap->list);
1219        if (p->post_handler && !ap->post_handler)
1220                ap->post_handler = aggr_post_handler;
1221
1222        return 0;
1223}
1224
1225/*
1226 * Fill in the required fields of the "manager kprobe". Replace the
1227 * earlier kprobe in the hlist with the manager kprobe
1228 */
1229static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1230{
1231        /* Copy p's insn slot to ap */
1232        copy_kprobe(p, ap);
1233        flush_insn_slot(ap);
1234        ap->addr = p->addr;
1235        ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1236        ap->pre_handler = aggr_pre_handler;
1237        ap->fault_handler = aggr_fault_handler;
1238        /* We don't care the kprobe which has gone. */
1239        if (p->post_handler && !kprobe_gone(p))
1240                ap->post_handler = aggr_post_handler;
1241        if (p->break_handler && !kprobe_gone(p))
1242                ap->break_handler = aggr_break_handler;
1243
1244        INIT_LIST_HEAD(&ap->list);
1245        INIT_HLIST_NODE(&ap->hlist);
1246
1247        list_add_rcu(&p->list, &ap->list);
1248        hlist_replace_rcu(&p->hlist, &ap->hlist);
1249}
1250
1251/*
1252 * This is the second or subsequent kprobe at the address - handle
1253 * the intricacies
1254 */
1255static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
1256                                          struct kprobe *p)
1257{
1258        int ret = 0;
1259        struct kprobe *ap = orig_p;
1260
1261        /* For preparing optimization, jump_label_text_reserved() is called */
1262        jump_label_lock();
1263        /*
1264         * Get online CPUs to avoid text_mutex deadlock.with stop machine,
1265         * which is invoked by unoptimize_kprobe() in add_new_kprobe()
1266         */
1267        get_online_cpus();
1268        mutex_lock(&text_mutex);
1269
1270        if (!kprobe_aggrprobe(orig_p)) {
1271                /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1272                ap = alloc_aggr_kprobe(orig_p);
1273                if (!ap) {
1274                        ret = -ENOMEM;
1275                        goto out;
1276                }
1277                init_aggr_kprobe(ap, orig_p);
1278        } else if (kprobe_unused(ap))
1279                /* This probe is going to die. Rescue it */
1280                reuse_unused_kprobe(ap);
1281
1282        if (kprobe_gone(ap)) {
1283                /*
1284                 * Attempting to insert new probe at the same location that
1285                 * had a probe in the module vaddr area which already
1286                 * freed. So, the instruction slot has already been
1287                 * released. We need a new slot for the new probe.
1288                 */
1289                ret = arch_prepare_kprobe(ap);
1290                if (ret)
1291                        /*
1292                         * Even if fail to allocate new slot, don't need to
1293                         * free aggr_probe. It will be used next time, or
1294                         * freed by unregister_kprobe.
1295                         */
1296                        goto out;
1297
1298                /* Prepare optimized instructions if possible. */
1299                prepare_optimized_kprobe(ap);
1300
1301                /*
1302                 * Clear gone flag to prevent allocating new slot again, and
1303                 * set disabled flag because it is not armed yet.
1304                 */
1305                ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1306                            | KPROBE_FLAG_DISABLED;
1307        }
1308
1309        /* Copy ap's insn slot to p */
1310        copy_kprobe(ap, p);
1311        ret = add_new_kprobe(ap, p);
1312
1313out:
1314        mutex_unlock(&text_mutex);
1315        put_online_cpus();
1316        jump_label_unlock();
1317
1318        if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1319                ap->flags &= ~KPROBE_FLAG_DISABLED;
1320                if (!kprobes_all_disarmed)
1321                        /* Arm the breakpoint again. */
1322                        arm_kprobe(ap);
1323        }
1324        return ret;
1325}
1326
1327static int __kprobes in_kprobes_functions(unsigned long addr)
1328{
1329        struct kprobe_blackpoint *kb;
1330
1331        if (addr >= (unsigned long)__kprobes_text_start &&
1332            addr < (unsigned long)__kprobes_text_end)
1333                return -EINVAL;
1334        /*
1335         * If there exists a kprobe_blacklist, verify and
1336         * fail any probe registration in the prohibited area
1337         */
1338        for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
1339                if (kb->start_addr) {
1340                        if (addr >= kb->start_addr &&
1341                            addr < (kb->start_addr + kb->range))
1342                                return -EINVAL;
1343                }
1344        }
1345        return 0;
1346}
1347
1348/*
1349 * If we have a symbol_name argument, look it up and add the offset field
1350 * to it. This way, we can specify a relative address to a symbol.
1351 * This returns encoded errors if it fails to look up symbol or invalid
1352 * combination of parameters.
1353 */
1354static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
1355{
1356        kprobe_opcode_t *addr = p->addr;
1357
1358        if ((p->symbol_name && p->addr) ||
1359            (!p->symbol_name && !p->addr))
1360                goto invalid;
1361
1362        if (p->symbol_name) {
1363                kprobe_lookup_name(p->symbol_name, addr);
1364                if (!addr)
1365                        return ERR_PTR(-ENOENT);
1366        }
1367
1368        addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
1369        if (addr)
1370                return addr;
1371
1372invalid:
1373        return ERR_PTR(-EINVAL);
1374}
1375
1376/* Check passed kprobe is valid and return kprobe in kprobe_table. */
1377static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
1378{
1379        struct kprobe *ap, *list_p;
1380
1381        ap = get_kprobe(p->addr);
1382        if (unlikely(!ap))
1383                return NULL;
1384
1385        if (p != ap) {
1386                list_for_each_entry_rcu(list_p, &ap->list, list)
1387                        if (list_p == p)
1388                        /* kprobe p is a valid probe */
1389                                goto valid;
1390                return NULL;
1391        }
1392valid:
1393        return ap;
1394}
1395
1396/* Return error if the kprobe is being re-registered */
1397static inline int check_kprobe_rereg(struct kprobe *p)
1398{
1399        int ret = 0;
1400
1401        mutex_lock(&kprobe_mutex);
1402        if (__get_valid_kprobe(p))
1403                ret = -EINVAL;
1404        mutex_unlock(&kprobe_mutex);
1405
1406        return ret;
1407}
1408
1409static __kprobes int check_kprobe_address_safe(struct kprobe *p,
1410                                               struct module **probed_mod)
1411{
1412        int ret = 0;
1413        unsigned long ftrace_addr;
1414
1415        /*
1416         * If the address is located on a ftrace nop, set the
1417         * breakpoint to the following instruction.
1418         */
1419        ftrace_addr = ftrace_location((unsigned long)p->addr);
1420        if (ftrace_addr) {
1421#ifdef CONFIG_KPROBES_ON_FTRACE
1422                /* Given address is not on the instruction boundary */
1423                if ((unsigned long)p->addr != ftrace_addr)
1424                        return -EILSEQ;
1425                p->flags |= KPROBE_FLAG_FTRACE;
1426#else   /* !CONFIG_KPROBES_ON_FTRACE */
1427                return -EINVAL;
1428#endif
1429        }
1430
1431        jump_label_lock();
1432        preempt_disable();
1433
1434        /* Ensure it is not in reserved area nor out of text */
1435        if (!kernel_text_address((unsigned long) p->addr) ||
1436            in_kprobes_functions((unsigned long) p->addr) ||
1437            jump_label_text_reserved(p->addr, p->addr)) {
1438                ret = -EINVAL;
1439                goto out;
1440        }
1441
1442        /* Check if are we probing a module */
1443        *probed_mod = __module_text_address((unsigned long) p->addr);
1444        if (*probed_mod) {
1445                /*
1446                 * We must hold a refcount of the probed module while updating
1447                 * its code to prohibit unexpected unloading.
1448                 */
1449                if (unlikely(!try_module_get(*probed_mod))) {
1450                        ret = -ENOENT;
1451                        goto out;
1452                }
1453
1454                /*
1455                 * If the module freed .init.text, we couldn't insert
1456                 * kprobes in there.
1457                 */
1458                if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1459                    (*probed_mod)->state != MODULE_STATE_COMING) {
1460                        module_put(*probed_mod);
1461                        *probed_mod = NULL;
1462                        ret = -ENOENT;
1463                }
1464        }
1465out:
1466        preempt_enable();
1467        jump_label_unlock();
1468
1469        return ret;
1470}
1471
1472int __kprobes register_kprobe(struct kprobe *p)
1473{
1474        int ret;
1475        struct kprobe *old_p;
1476        struct module *probed_mod;
1477        kprobe_opcode_t *addr;
1478
1479        /* Adjust probe address from symbol */
1480        addr = kprobe_addr(p);
1481        if (IS_ERR(addr))
1482                return PTR_ERR(addr);
1483        p->addr = addr;
1484
1485        ret = check_kprobe_rereg(p);
1486        if (ret)
1487                return ret;
1488
1489        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1490        p->flags &= KPROBE_FLAG_DISABLED;
1491        p->nmissed = 0;
1492        INIT_LIST_HEAD(&p->list);
1493
1494        ret = check_kprobe_address_safe(p, &probed_mod);
1495        if (ret)
1496                return ret;
1497
1498        mutex_lock(&kprobe_mutex);
1499
1500        old_p = get_kprobe(p->addr);
1501        if (old_p) {
1502                /* Since this may unoptimize old_p, locking text_mutex. */
1503                ret = register_aggr_kprobe(old_p, p);
1504                goto out;
1505        }
1506
1507        mutex_lock(&text_mutex);        /* Avoiding text modification */
1508        ret = prepare_kprobe(p);
1509        mutex_unlock(&text_mutex);
1510        if (ret)
1511                goto out;
1512
1513        INIT_HLIST_NODE(&p->hlist);
1514        hlist_add_head_rcu(&p->hlist,
1515                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1516
1517        if (!kprobes_all_disarmed && !kprobe_disabled(p))
1518                arm_kprobe(p);
1519
1520        /* Try to optimize kprobe */
1521        try_to_optimize_kprobe(p);
1522
1523out:
1524        mutex_unlock(&kprobe_mutex);
1525
1526        if (probed_mod)
1527                module_put(probed_mod);
1528
1529        return ret;
1530}
1531EXPORT_SYMBOL_GPL(register_kprobe);
1532
1533/* Check if all probes on the aggrprobe are disabled */
1534static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
1535{
1536        struct kprobe *kp;
1537
1538        list_for_each_entry_rcu(kp, &ap->list, list)
1539                if (!kprobe_disabled(kp))
1540                        /*
1541                         * There is an active probe on the list.
1542                         * We can't disable this ap.
1543                         */
1544                        return 0;
1545
1546        return 1;
1547}
1548
1549/* Disable one kprobe: Make sure called under kprobe_mutex is locked */
1550static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
1551{
1552        struct kprobe *orig_p;
1553
1554        /* Get an original kprobe for return */
1555        orig_p = __get_valid_kprobe(p);
1556        if (unlikely(orig_p == NULL))
1557                return NULL;
1558
1559        if (!kprobe_disabled(p)) {
1560                /* Disable probe if it is a child probe */
1561                if (p != orig_p)
1562                        p->flags |= KPROBE_FLAG_DISABLED;
1563
1564                /* Try to disarm and disable this/parent probe */
1565                if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1566                        disarm_kprobe(orig_p, true);
1567                        orig_p->flags |= KPROBE_FLAG_DISABLED;
1568                }
1569        }
1570
1571        return orig_p;
1572}
1573
1574/*
1575 * Unregister a kprobe without a scheduler synchronization.
1576 */
1577static int __kprobes __unregister_kprobe_top(struct kprobe *p)
1578{
1579        struct kprobe *ap, *list_p;
1580
1581        /* Disable kprobe. This will disarm it if needed. */
1582        ap = __disable_kprobe(p);
1583        if (ap == NULL)
1584                return -EINVAL;
1585
1586        if (ap == p)
1587                /*
1588                 * This probe is an independent(and non-optimized) kprobe
1589                 * (not an aggrprobe). Remove from the hash list.
1590                 */
1591                goto disarmed;
1592
1593        /* Following process expects this probe is an aggrprobe */
1594        WARN_ON(!kprobe_aggrprobe(ap));
1595
1596        if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1597                /*
1598                 * !disarmed could be happen if the probe is under delayed
1599                 * unoptimizing.
1600                 */
1601                goto disarmed;
1602        else {
1603                /* If disabling probe has special handlers, update aggrprobe */
1604                if (p->break_handler && !kprobe_gone(p))
1605                        ap->break_handler = NULL;
1606                if (p->post_handler && !kprobe_gone(p)) {
1607                        list_for_each_entry_rcu(list_p, &ap->list, list) {
1608                                if ((list_p != p) && (list_p->post_handler))
1609                                        goto noclean;
1610                        }
1611                        ap->post_handler = NULL;
1612                }
1613noclean:
1614                /*
1615                 * Remove from the aggrprobe: this path will do nothing in
1616                 * __unregister_kprobe_bottom().
1617                 */
1618                list_del_rcu(&p->list);
1619                if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1620                        /*
1621                         * Try to optimize this probe again, because post
1622                         * handler may have been changed.
1623                         */
1624                        optimize_kprobe(ap);
1625        }
1626        return 0;
1627
1628disarmed:
1629        BUG_ON(!kprobe_disarmed(ap));
1630        hlist_del_rcu(&ap->hlist);
1631        return 0;
1632}
1633
1634static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
1635{
1636        struct kprobe *ap;
1637
1638        if (list_empty(&p->list))
1639                /* This is an independent kprobe */
1640                arch_remove_kprobe(p);
1641        else if (list_is_singular(&p->list)) {
1642                /* This is the last child of an aggrprobe */
1643                ap = list_entry(p->list.next, struct kprobe, list);
1644                list_del(&p->list);
1645                free_aggr_kprobe(ap);
1646        }
1647        /* Otherwise, do nothing. */
1648}
1649
1650int __kprobes register_kprobes(struct kprobe **kps, int num)
1651{
1652        int i, ret = 0;
1653
1654        if (num <= 0)
1655                return -EINVAL;
1656        for (i = 0; i < num; i++) {
1657                ret = register_kprobe(kps[i]);
1658                if (ret < 0) {
1659                        if (i > 0)
1660                                unregister_kprobes(kps, i);
1661                        break;
1662                }
1663        }
1664        return ret;
1665}
1666EXPORT_SYMBOL_GPL(register_kprobes);
1667
1668void __kprobes unregister_kprobe(struct kprobe *p)
1669{
1670        unregister_kprobes(&p, 1);
1671}
1672EXPORT_SYMBOL_GPL(unregister_kprobe);
1673
1674void __kprobes unregister_kprobes(struct kprobe **kps, int num)
1675{
1676        int i;
1677
1678        if (num <= 0)
1679                return;
1680        mutex_lock(&kprobe_mutex);
1681        for (i = 0; i < num; i++)
1682                if (__unregister_kprobe_top(kps[i]) < 0)
1683                        kps[i]->addr = NULL;
1684        mutex_unlock(&kprobe_mutex);
1685
1686        synchronize_sched();
1687        for (i = 0; i < num; i++)
1688                if (kps[i]->addr)
1689                        __unregister_kprobe_bottom(kps[i]);
1690}
1691EXPORT_SYMBOL_GPL(unregister_kprobes);
1692
1693static struct notifier_block kprobe_exceptions_nb = {
1694        .notifier_call = kprobe_exceptions_notify,
1695        .priority = 0x7fffffff /* we need to be notified first */
1696};
1697
1698unsigned long __weak arch_deref_entry_point(void *entry)
1699{
1700        return (unsigned long)entry;
1701}
1702
1703int __kprobes register_jprobes(struct jprobe **jps, int num)
1704{
1705        struct jprobe *jp;
1706        int ret = 0, i;
1707
1708        if (num <= 0)
1709                return -EINVAL;
1710        for (i = 0; i < num; i++) {
1711                unsigned long addr, offset;
1712                jp = jps[i];
1713                addr = arch_deref_entry_point(jp->entry);
1714
1715                /* Verify probepoint is a function entry point */
1716                if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
1717                    offset == 0) {
1718                        jp->kp.pre_handler = setjmp_pre_handler;
1719                        jp->kp.break_handler = longjmp_break_handler;
1720                        ret = register_kprobe(&jp->kp);
1721                } else
1722                        ret = -EINVAL;
1723
1724                if (ret < 0) {
1725                        if (i > 0)
1726                                unregister_jprobes(jps, i);
1727                        break;
1728                }
1729        }
1730        return ret;
1731}
1732EXPORT_SYMBOL_GPL(register_jprobes);
1733
1734int __kprobes register_jprobe(struct jprobe *jp)
1735{
1736        return register_jprobes(&jp, 1);
1737}
1738EXPORT_SYMBOL_GPL(register_jprobe);
1739
1740void __kprobes unregister_jprobe(struct jprobe *jp)
1741{
1742        unregister_jprobes(&jp, 1);
1743}
1744EXPORT_SYMBOL_GPL(unregister_jprobe);
1745
1746void __kprobes unregister_jprobes(struct jprobe **jps, int num)
1747{
1748        int i;
1749
1750        if (num <= 0)
1751                return;
1752        mutex_lock(&kprobe_mutex);
1753        for (i = 0; i < num; i++)
1754                if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1755                        jps[i]->kp.addr = NULL;
1756        mutex_unlock(&kprobe_mutex);
1757
1758        synchronize_sched();
1759        for (i = 0; i < num; i++) {
1760                if (jps[i]->kp.addr)
1761                        __unregister_kprobe_bottom(&jps[i]->kp);
1762        }
1763}
1764EXPORT_SYMBOL_GPL(unregister_jprobes);
1765
1766#ifdef CONFIG_KRETPROBES
1767/*
1768 * This kprobe pre_handler is registered with every kretprobe. When probe
1769 * hits it will set up the return probe.
1770 */
1771static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1772                                           struct pt_regs *regs)
1773{
1774        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1775        unsigned long hash, flags = 0;
1776        struct kretprobe_instance *ri;
1777
1778        /*TODO: consider to only swap the RA after the last pre_handler fired */
1779        hash = hash_ptr(current, KPROBE_HASH_BITS);
1780        raw_spin_lock_irqsave(&rp->lock, flags);
1781        if (!hlist_empty(&rp->free_instances)) {
1782                ri = hlist_entry(rp->free_instances.first,
1783                                struct kretprobe_instance, hlist);
1784                hlist_del(&ri->hlist);
1785                raw_spin_unlock_irqrestore(&rp->lock, flags);
1786
1787                ri->rp = rp;
1788                ri->task = current;
1789
1790                if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1791                        raw_spin_lock_irqsave(&rp->lock, flags);
1792                        hlist_add_head(&ri->hlist, &rp->free_instances);
1793                        raw_spin_unlock_irqrestore(&rp->lock, flags);
1794                        return 0;
1795                }
1796
1797                arch_prepare_kretprobe(ri, regs);
1798
1799                /* XXX(hch): why is there no hlist_move_head? */
1800                INIT_HLIST_NODE(&ri->hlist);
1801                kretprobe_table_lock(hash, &flags);
1802                hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1803                kretprobe_table_unlock(hash, &flags);
1804        } else {
1805                rp->nmissed++;
1806                raw_spin_unlock_irqrestore(&rp->lock, flags);
1807        }
1808        return 0;
1809}
1810
1811int __kprobes register_kretprobe(struct kretprobe *rp)
1812{
1813        int ret = 0;
1814        struct kretprobe_instance *inst;
1815        int i;
1816        void *addr;
1817
1818        if (kretprobe_blacklist_size) {
1819                addr = kprobe_addr(&rp->kp);
1820                if (IS_ERR(addr))
1821                        return PTR_ERR(addr);
1822
1823                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1824                        if (kretprobe_blacklist[i].addr == addr)
1825                                return -EINVAL;
1826                }
1827        }
1828
1829        rp->kp.pre_handler = pre_handler_kretprobe;
1830        rp->kp.post_handler = NULL;
1831        rp->kp.fault_handler = NULL;
1832        rp->kp.break_handler = NULL;
1833
1834        /* Pre-allocate memory for max kretprobe instances */
1835        if (rp->maxactive <= 0) {
1836#ifdef CONFIG_PREEMPT
1837                rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1838#else
1839                rp->maxactive = num_possible_cpus();
1840#endif
1841        }
1842        raw_spin_lock_init(&rp->lock);
1843        INIT_HLIST_HEAD(&rp->free_instances);
1844        for (i = 0; i < rp->maxactive; i++) {
1845                inst = kmalloc(sizeof(struct kretprobe_instance) +
1846                               rp->data_size, GFP_KERNEL);
1847                if (inst == NULL) {
1848                        free_rp_inst(rp);
1849                        return -ENOMEM;
1850                }
1851                INIT_HLIST_NODE(&inst->hlist);
1852                hlist_add_head(&inst->hlist, &rp->free_instances);
1853        }
1854
1855        rp->nmissed = 0;
1856        /* Establish function entry probe point */
1857        ret = register_kprobe(&rp->kp);
1858        if (ret != 0)
1859                free_rp_inst(rp);
1860        return ret;
1861}
1862EXPORT_SYMBOL_GPL(register_kretprobe);
1863
1864int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1865{
1866        int ret = 0, i;
1867
1868        if (num <= 0)
1869                return -EINVAL;
1870        for (i = 0; i < num; i++) {
1871                ret = register_kretprobe(rps[i]);
1872                if (ret < 0) {
1873                        if (i > 0)
1874                                unregister_kretprobes(rps, i);
1875                        break;
1876                }
1877        }
1878        return ret;
1879}
1880EXPORT_SYMBOL_GPL(register_kretprobes);
1881
1882void __kprobes unregister_kretprobe(struct kretprobe *rp)
1883{
1884        unregister_kretprobes(&rp, 1);
1885}
1886EXPORT_SYMBOL_GPL(unregister_kretprobe);
1887
1888void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1889{
1890        int i;
1891
1892        if (num <= 0)
1893                return;
1894        mutex_lock(&kprobe_mutex);
1895        for (i = 0; i < num; i++)
1896                if (__unregister_kprobe_top(&rps[i]->kp) < 0)
1897                        rps[i]->kp.addr = NULL;
1898        mutex_unlock(&kprobe_mutex);
1899
1900        synchronize_sched();
1901        for (i = 0; i < num; i++) {
1902                if (rps[i]->kp.addr) {
1903                        __unregister_kprobe_bottom(&rps[i]->kp);
1904                        cleanup_rp_inst(rps[i]);
1905                }
1906        }
1907}
1908EXPORT_SYMBOL_GPL(unregister_kretprobes);
1909
1910#else /* CONFIG_KRETPROBES */
1911int __kprobes register_kretprobe(struct kretprobe *rp)
1912{
1913        return -ENOSYS;
1914}
1915EXPORT_SYMBOL_GPL(register_kretprobe);
1916
1917int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1918{
1919        return -ENOSYS;
1920}
1921EXPORT_SYMBOL_GPL(register_kretprobes);
1922
1923void __kprobes unregister_kretprobe(struct kretprobe *rp)
1924{
1925}
1926EXPORT_SYMBOL_GPL(unregister_kretprobe);
1927
1928void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1929{
1930}
1931EXPORT_SYMBOL_GPL(unregister_kretprobes);
1932
1933static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1934                                           struct pt_regs *regs)
1935{
1936        return 0;
1937}
1938
1939#endif /* CONFIG_KRETPROBES */
1940
1941/* Set the kprobe gone and remove its instruction buffer. */
1942static void __kprobes kill_kprobe(struct kprobe *p)
1943{
1944        struct kprobe *kp;
1945
1946        p->flags |= KPROBE_FLAG_GONE;
1947        if (kprobe_aggrprobe(p)) {
1948                /*
1949                 * If this is an aggr_kprobe, we have to list all the
1950                 * chained probes and mark them GONE.
1951                 */
1952                list_for_each_entry_rcu(kp, &p->list, list)
1953                        kp->flags |= KPROBE_FLAG_GONE;
1954                p->post_handler = NULL;
1955                p->break_handler = NULL;
1956                kill_optimized_kprobe(p);
1957        }
1958        /*
1959         * Here, we can remove insn_slot safely, because no thread calls
1960         * the original probed function (which will be freed soon) any more.
1961         */
1962        arch_remove_kprobe(p);
1963}
1964
1965/* Disable one kprobe */
1966int __kprobes disable_kprobe(struct kprobe *kp)
1967{
1968        int ret = 0;
1969
1970        mutex_lock(&kprobe_mutex);
1971
1972        /* Disable this kprobe */
1973        if (__disable_kprobe(kp) == NULL)
1974                ret = -EINVAL;
1975
1976        mutex_unlock(&kprobe_mutex);
1977        return ret;
1978}
1979EXPORT_SYMBOL_GPL(disable_kprobe);
1980
1981/* Enable one kprobe */
1982int __kprobes enable_kprobe(struct kprobe *kp)
1983{
1984        int ret = 0;
1985        struct kprobe *p;
1986
1987        mutex_lock(&kprobe_mutex);
1988
1989        /* Check whether specified probe is valid. */
1990        p = __get_valid_kprobe(kp);
1991        if (unlikely(p == NULL)) {
1992                ret = -EINVAL;
1993                goto out;
1994        }
1995
1996        if (kprobe_gone(kp)) {
1997                /* This kprobe has gone, we couldn't enable it. */
1998                ret = -EINVAL;
1999                goto out;
2000        }
2001
2002        if (p != kp)
2003                kp->flags &= ~KPROBE_FLAG_DISABLED;
2004
2005        if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2006                p->flags &= ~KPROBE_FLAG_DISABLED;
2007                arm_kprobe(p);
2008        }
2009out:
2010        mutex_unlock(&kprobe_mutex);
2011        return ret;
2012}
2013EXPORT_SYMBOL_GPL(enable_kprobe);
2014
2015void __kprobes dump_kprobe(struct kprobe *kp)
2016{
2017        printk(KERN_WARNING "Dumping kprobe:\n");
2018        printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2019               kp->symbol_name, kp->addr, kp->offset);
2020}
2021
2022/* Module notifier call back, checking kprobes on the module */
2023static int __kprobes kprobes_module_callback(struct notifier_block *nb,
2024                                             unsigned long val, void *data)
2025{
2026        struct module *mod = data;
2027        struct hlist_head *head;
2028        struct kprobe *p;
2029        unsigned int i;
2030        int checkcore = (val == MODULE_STATE_GOING);
2031
2032        if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2033                return NOTIFY_DONE;
2034
2035        /*
2036         * When MODULE_STATE_GOING was notified, both of module .text and
2037         * .init.text sections would be freed. When MODULE_STATE_LIVE was
2038         * notified, only .init.text section would be freed. We need to
2039         * disable kprobes which have been inserted in the sections.
2040         */
2041        mutex_lock(&kprobe_mutex);
2042        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2043                head = &kprobe_table[i];
2044                hlist_for_each_entry_rcu(p, head, hlist)
2045                        if (within_module_init((unsigned long)p->addr, mod) ||
2046                            (checkcore &&
2047                             within_module_core((unsigned long)p->addr, mod))) {
2048                                /*
2049                                 * The vaddr this probe is installed will soon
2050                                 * be vfreed buy not synced to disk. Hence,
2051                                 * disarming the breakpoint isn't needed.
2052                                 */
2053                                kill_kprobe(p);
2054                        }
2055        }
2056        mutex_unlock(&kprobe_mutex);
2057        return NOTIFY_DONE;
2058}
2059
2060static struct notifier_block kprobe_module_nb = {
2061        .notifier_call = kprobes_module_callback,
2062        .priority = 0
2063};
2064
2065static int __init init_kprobes(void)
2066{
2067        int i, err = 0;
2068        unsigned long offset = 0, size = 0;
2069        char *modname, namebuf[128];
2070        const char *symbol_name;
2071        void *addr;
2072        struct kprobe_blackpoint *kb;
2073
2074        /* FIXME allocate the probe table, currently defined statically */
2075        /* initialize all list heads */
2076        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2077                INIT_HLIST_HEAD(&kprobe_table[i]);
2078                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2079                raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2080        }
2081
2082        /*
2083         * Lookup and populate the kprobe_blacklist.
2084         *
2085         * Unlike the kretprobe blacklist, we'll need to determine
2086         * the range of addresses that belong to the said functions,
2087         * since a kprobe need not necessarily be at the beginning
2088         * of a function.
2089         */
2090        for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
2091                kprobe_lookup_name(kb->name, addr);
2092                if (!addr)
2093                        continue;
2094
2095                kb->start_addr = (unsigned long)addr;
2096                symbol_name = kallsyms_lookup(kb->start_addr,
2097                                &size, &offset, &modname, namebuf);
2098                if (!symbol_name)
2099                        kb->range = 0;
2100                else
2101                        kb->range = size;
2102        }
2103
2104        if (kretprobe_blacklist_size) {
2105                /* lookup the function address from its name */
2106                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2107                        kprobe_lookup_name(kretprobe_blacklist[i].name,
2108                                           kretprobe_blacklist[i].addr);
2109                        if (!kretprobe_blacklist[i].addr)
2110                                printk("kretprobe: lookup failed: %s\n",
2111                                       kretprobe_blacklist[i].name);
2112                }
2113        }
2114
2115#if defined(CONFIG_OPTPROBES)
2116#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2117        /* Init kprobe_optinsn_slots */
2118        kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2119#endif
2120        /* By default, kprobes can be optimized */
2121        kprobes_allow_optimization = true;
2122#endif
2123
2124        /* By default, kprobes are armed */
2125        kprobes_all_disarmed = false;
2126
2127        err = arch_init_kprobes();
2128        if (!err)
2129                err = register_die_notifier(&kprobe_exceptions_nb);
2130        if (!err)
2131                err = register_module_notifier(&kprobe_module_nb);
2132
2133        kprobes_initialized = (err == 0);
2134
2135        if (!err)
2136                init_test_probes();
2137        return err;
2138}
2139
2140#ifdef CONFIG_DEBUG_FS
2141static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
2142                const char *sym, int offset, char *modname, struct kprobe *pp)
2143{
2144        char *kprobe_type;
2145
2146        if (p->pre_handler == pre_handler_kretprobe)
2147                kprobe_type = "r";
2148        else if (p->pre_handler == setjmp_pre_handler)
2149                kprobe_type = "j";
2150        else
2151                kprobe_type = "k";
2152
2153        if (sym)
2154                seq_printf(pi, "%p  %s  %s+0x%x  %s ",
2155                        p->addr, kprobe_type, sym, offset,
2156                        (modname ? modname : " "));
2157        else
2158                seq_printf(pi, "%p  %s  %p ",
2159                        p->addr, kprobe_type, p->addr);
2160
2161        if (!pp)
2162                pp = p;
2163        seq_printf(pi, "%s%s%s%s\n",
2164                (kprobe_gone(p) ? "[GONE]" : ""),
2165                ((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2166                (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2167                (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2168}
2169
2170static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2171{
2172        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2173}
2174
2175static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2176{
2177        (*pos)++;
2178        if (*pos >= KPROBE_TABLE_SIZE)
2179                return NULL;
2180        return pos;
2181}
2182
2183static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
2184{
2185        /* Nothing to do */
2186}
2187
2188static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
2189{
2190        struct hlist_head *head;
2191        struct kprobe *p, *kp;
2192        const char *sym = NULL;
2193        unsigned int i = *(loff_t *) v;
2194        unsigned long offset = 0;
2195        char *modname, namebuf[128];
2196
2197        head = &kprobe_table[i];
2198        preempt_disable();
2199        hlist_for_each_entry_rcu(p, head, hlist) {
2200                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2201                                        &offset, &modname, namebuf);
2202                if (kprobe_aggrprobe(p)) {
2203                        list_for_each_entry_rcu(kp, &p->list, list)
2204                                report_probe(pi, kp, sym, offset, modname, p);
2205                } else
2206                        report_probe(pi, p, sym, offset, modname, NULL);
2207        }
2208        preempt_enable();
2209        return 0;
2210}
2211
2212static const struct seq_operations kprobes_seq_ops = {
2213        .start = kprobe_seq_start,
2214        .next  = kprobe_seq_next,
2215        .stop  = kprobe_seq_stop,
2216        .show  = show_kprobe_addr
2217};
2218
2219static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
2220{
2221        return seq_open(filp, &kprobes_seq_ops);
2222}
2223
2224static const struct file_operations debugfs_kprobes_operations = {
2225        .open           = kprobes_open,
2226        .read           = seq_read,
2227        .llseek         = seq_lseek,
2228        .release        = seq_release,
2229};
2230
2231static void __kprobes arm_all_kprobes(void)
2232{
2233        struct hlist_head *head;
2234        struct kprobe *p;
2235        unsigned int i;
2236
2237        mutex_lock(&kprobe_mutex);
2238
2239        /* If kprobes are armed, just return */
2240        if (!kprobes_all_disarmed)
2241                goto already_enabled;
2242
2243        /* Arming kprobes doesn't optimize kprobe itself */
2244        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2245                head = &kprobe_table[i];
2246                hlist_for_each_entry_rcu(p, head, hlist)
2247                        if (!kprobe_disabled(p))
2248                                arm_kprobe(p);
2249        }
2250
2251        kprobes_all_disarmed = false;
2252        printk(KERN_INFO "Kprobes globally enabled\n");
2253
2254already_enabled:
2255        mutex_unlock(&kprobe_mutex);
2256        return;
2257}
2258
2259static void __kprobes disarm_all_kprobes(void)
2260{
2261        struct hlist_head *head;
2262        struct kprobe *p;
2263        unsigned int i;
2264
2265        mutex_lock(&kprobe_mutex);
2266
2267        /* If kprobes are already disarmed, just return */
2268        if (kprobes_all_disarmed) {
2269                mutex_unlock(&kprobe_mutex);
2270                return;
2271        }
2272
2273        kprobes_all_disarmed = true;
2274        printk(KERN_INFO "Kprobes globally disabled\n");
2275
2276        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2277                head = &kprobe_table[i];
2278                hlist_for_each_entry_rcu(p, head, hlist) {
2279                        if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
2280                                disarm_kprobe(p, false);
2281                }
2282        }
2283        mutex_unlock(&kprobe_mutex);
2284
2285        /* Wait for disarming all kprobes by optimizer */
2286        wait_for_kprobe_optimizer();
2287}
2288
2289/*
2290 * XXX: The debugfs bool file interface doesn't allow for callbacks
2291 * when the bool state is switched. We can reuse that facility when
2292 * available
2293 */
2294static ssize_t read_enabled_file_bool(struct file *file,
2295               char __user *user_buf, size_t count, loff_t *ppos)
2296{
2297        char buf[3];
2298
2299        if (!kprobes_all_disarmed)
2300                buf[0] = '1';
2301        else
2302                buf[0] = '0';
2303        buf[1] = '\n';
2304        buf[2] = 0x00;
2305        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2306}
2307
2308static ssize_t write_enabled_file_bool(struct file *file,
2309               const char __user *user_buf, size_t count, loff_t *ppos)
2310{
2311        char buf[32];
2312        size_t buf_size;
2313
2314        buf_size = min(count, (sizeof(buf)-1));
2315        if (copy_from_user(buf, user_buf, buf_size))
2316                return -EFAULT;
2317
2318        buf[buf_size] = '\0';
2319        switch (buf[0]) {
2320        case 'y':
2321        case 'Y':
2322        case '1':
2323                arm_all_kprobes();
2324                break;
2325        case 'n':
2326        case 'N':
2327        case '0':
2328                disarm_all_kprobes();
2329                break;
2330        default:
2331                return -EINVAL;
2332        }
2333
2334        return count;
2335}
2336
2337static const struct file_operations fops_kp = {
2338        .read =         read_enabled_file_bool,
2339        .write =        write_enabled_file_bool,
2340        .llseek =       default_llseek,
2341};
2342
2343static int __kprobes debugfs_kprobe_init(void)
2344{
2345        struct dentry *dir, *file;
2346        unsigned int value = 1;
2347
2348        dir = debugfs_create_dir("kprobes", NULL);
2349        if (!dir)
2350                return -ENOMEM;
2351
2352        file = debugfs_create_file("list", 0444, dir, NULL,
2353                                &debugfs_kprobes_operations);
2354        if (!file) {
2355                debugfs_remove(dir);
2356                return -ENOMEM;
2357        }
2358
2359        file = debugfs_create_file("enabled", 0600, dir,
2360                                        &value, &fops_kp);
2361        if (!file) {
2362                debugfs_remove(dir);
2363                return -ENOMEM;
2364        }
2365
2366        return 0;
2367}
2368
2369late_initcall(debugfs_kprobe_init);
2370#endif /* CONFIG_DEBUG_FS */
2371
2372module_init(init_kprobes);
2373
2374/* defined in arch/.../kernel/kprobes.c */
2375EXPORT_SYMBOL_GPL(jprobe_return);
2376
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.