   1/*
   2 * mm/kmemleak.c
   3 *
   4 * Copyright (C) 2008 ARM Limited
   5 * Written by Catalin Marinas <catalin.marinas@arm.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19 *
  20 *
  21 * For more information on the algorithm and kmemleak usage, please see
  22 * Documentation/kmemleak.txt.
  23 *
  24 * Notes on locking
  25 * ----------------
  26 *
  27 * The following locks and mutexes are used by kmemleak:
  28 *
  29 * - kmemleak_lock (rwlock): protects the object_list modifications and
  30 *   accesses to the object_tree_root. The object_list is the main list
  31 *   holding the metadata (struct kmemleak_object) for the allocated memory
  32 *   blocks. The object_tree_root is a red black tree used to look-up
  33 *   metadata based on a pointer to the corresponding memory block.  The
  34 *   kmemleak_object structures are added to the object_list and
  35 *   object_tree_root in the create_object() function called from the
  36 *   kmemleak_alloc() callback and removed in delete_object() called from the
  37 *   kmemleak_free() callback
  38 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
  39 *   the metadata (e.g. count) are protected by this lock. Note that some
  40 *   members of this structure may be protected by other means (atomic or
  41 *   kmemleak_lock). This lock is also held when scanning the corresponding
  42 *   memory block to avoid the kernel freeing it via the kmemleak_free()
  43 *   callback. This is less heavyweight than holding a global lock like
  44 *   kmemleak_lock during scanning
  45 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
  46 *   unreferenced objects at a time. The gray_list contains the objects which
  47 *   are already referenced or marked as false positives and need to be
  48 *   scanned. This list is only modified during a scanning episode when the
  49 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
  50 *   Note that the kmemleak_object.use_count is incremented when an object is
  51 *   added to the gray_list and therefore cannot be freed. This mutex also
   52 *   prevents concurrent users of the "kmemleak" debugfs file as well as
   53 *   concurrent modifications of the memory scanning parameters, including
   54 *   the scan_thread pointer
  55 *
  56 * The kmemleak_object structures have a use_count incremented or decremented
  57 * using the get_object()/put_object() functions. When the use_count becomes
  58 * 0, this count can no longer be incremented and put_object() schedules the
  59 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
  60 * function must be protected by rcu_read_lock() to avoid accessing a freed
  61 * structure.
  62 */
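/*
 * Editorial sketch: the canonical access pattern implied by the rules above,
 * as used throughout this file (see e.g. paint_ptr() or object_no_scan()):
 *
 *	object = find_and_get_object(ptr, 0);
 *	if (object) {
 *		spin_lock_irqsave(&object->lock, flags);
 *		... inspect or modify the metadata ...
 *		spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);
 *	}
 *
 * find_and_get_object() takes kmemleak_lock and increments use_count under
 * rcu_read_lock(); put_object() drops the reference and, once use_count
 * reaches 0, schedules the RCU freeing of the metadata.
 */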
  63
  64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  65
  66#include <linux/init.h>
  67#include <linux/kernel.h>
  68#include <linux/list.h>
  69#include <linux/sched.h>
  70#include <linux/jiffies.h>
  71#include <linux/delay.h>
  72#include <linux/export.h>
  73#include <linux/kthread.h>
  74#include <linux/rbtree.h>
  75#include <linux/fs.h>
  76#include <linux/debugfs.h>
  77#include <linux/seq_file.h>
  78#include <linux/cpumask.h>
  79#include <linux/spinlock.h>
  80#include <linux/mutex.h>
  81#include <linux/rcupdate.h>
  82#include <linux/stacktrace.h>
  83#include <linux/cache.h>
  84#include <linux/percpu.h>
  85#include <linux/hardirq.h>
  86#include <linux/mmzone.h>
  87#include <linux/slab.h>
  88#include <linux/thread_info.h>
  89#include <linux/err.h>
  90#include <linux/uaccess.h>
  91#include <linux/string.h>
  92#include <linux/nodemask.h>
  93#include <linux/mm.h>
  94#include <linux/workqueue.h>
  95#include <linux/crc32.h>
  96
  97#include <asm/sections.h>
  98#include <asm/processor.h>
  99#include <linux/atomic.h>
 100
 101#include <linux/kmemcheck.h>
 102#include <linux/kmemleak.h>
 103#include <linux/memory_hotplug.h>
 104
 105/*
 106 * Kmemleak configuration and common defines.
 107 */
 108#define MAX_TRACE               16      /* stack trace length */
 109#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
 110#define SECS_FIRST_SCAN         60      /* delay before the first scan */
 111#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
 112#define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */
 113
 114#define BYTES_PER_POINTER       sizeof(void *)
 115
 116/* GFP bitmask for kmemleak internal allocations */
 117#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 118                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
 119                                 __GFP_NOWARN)
 120
 121/* scanning area inside a memory block */
 122struct kmemleak_scan_area {
 123        struct hlist_node node;
 124        unsigned long start;
 125        size_t size;
 126};
 127
 128#define KMEMLEAK_GREY   0
 129#define KMEMLEAK_BLACK  -1
 130
 131/*
 132 * Structure holding the metadata for each allocated memory block.
 133 * Modifications to such objects should be made while holding the
 134 * object->lock. Insertions or deletions from object_list, gray_list or
 135 * rb_node are already protected by the corresponding locks or mutex (see
 136 * the notes on locking above). These objects are reference-counted
 137 * (use_count) and freed using the RCU mechanism.
 138 */
 139struct kmemleak_object {
 140        spinlock_t lock;
 141        unsigned long flags;            /* object status flags */
 142        struct list_head object_list;
 143        struct list_head gray_list;
 144        struct rb_node rb_node;
 145        struct rcu_head rcu;            /* object_list lockless traversal */
 146        /* object usage count; object freed when use_count == 0 */
 147        atomic_t use_count;
 148        unsigned long pointer;
 149        size_t size;
  150        /* minimum number of pointers found before it is considered a leak */
 151        int min_count;
 152        /* the total number of pointers found pointing to this object */
 153        int count;
 154        /* checksum for detecting modified objects */
 155        u32 checksum;
 156        /* memory ranges to be scanned inside an object (empty for all) */
 157        struct hlist_head area_list;
 158        unsigned long trace[MAX_TRACE];
 159        unsigned int trace_len;
 160        unsigned long jiffies;          /* creation timestamp */
 161        pid_t pid;                      /* pid of the current task */
 162        char comm[TASK_COMM_LEN];       /* executable name */
 163};
 164
 165/* flag representing the memory block allocation status */
 166#define OBJECT_ALLOCATED        (1 << 0)
  167/* flag set after the first reporting of an unreferenced object */
 168#define OBJECT_REPORTED         (1 << 1)
 169/* flag set to not scan the object */
 170#define OBJECT_NO_SCAN          (1 << 2)
 171
 172/* number of bytes to print per line; must be 16 or 32 */
 173#define HEX_ROW_SIZE            16
 174/* number of bytes to print at a time (1, 2, 4, 8) */
 175#define HEX_GROUP_SIZE          1
 176/* include ASCII after the hex output */
 177#define HEX_ASCII               1
 178/* max number of lines to be printed */
 179#define HEX_MAX_LINES           2
 180
 181/* the list of all allocated objects */
 182static LIST_HEAD(object_list);
 183/* the list of gray-colored objects (see color_gray comment below) */
 184static LIST_HEAD(gray_list);
 185/* search tree for object boundaries */
 186static struct rb_root object_tree_root = RB_ROOT;
 187/* rw_lock protecting the access to object_list and object_tree_root */
 188static DEFINE_RWLOCK(kmemleak_lock);
 189
 190/* allocation caches for kmemleak internal data */
 191static struct kmem_cache *object_cache;
 192static struct kmem_cache *scan_area_cache;
 193
 194/* set if tracing memory operations is enabled */
 195static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
 196/* set in the late_initcall if there were no errors */
 197static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
 198/* enables or disables early logging of the memory operations */
 199static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
 200/* set if a kmemleak warning was issued */
 201static atomic_t kmemleak_warning = ATOMIC_INIT(0);
 202/* set if a fatal kmemleak error has occurred */
 203static atomic_t kmemleak_error = ATOMIC_INIT(0);
 204
 205/* minimum and maximum address that may be valid pointers */
 206static unsigned long min_addr = ULONG_MAX;
 207static unsigned long max_addr;
 208
 209static struct task_struct *scan_thread;
 210/* used to avoid reporting of recently allocated objects */
 211static unsigned long jiffies_min_age;
 212static unsigned long jiffies_last_scan;
  213/* delay between automatic memory scans */
 214static signed long jiffies_scan_wait;
 215/* enables or disables the task stacks scanning */
 216static int kmemleak_stack_scan = 1;
 217/* protects the memory scanning, parameters and debug/kmemleak file access */
 218static DEFINE_MUTEX(scan_mutex);
  219/* setting kmemleak=on will set this var, skipping the disable */
 220static int kmemleak_skip_disable;
 221
 222
 223/*
 224 * Early object allocation/freeing logging. Kmemleak is initialized after the
 225 * kernel allocator. However, both the kernel allocator and kmemleak may
 226 * allocate memory blocks which need to be tracked. Kmemleak defines an
 227 * arbitrary buffer to hold the allocation/freeing information before it is
 228 * fully initialized.
 229 */
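/*
 * Editorial sketch (the replay loop itself lives in kmemleak_init(), outside
 * this excerpt): once the slab allocator is up, the recorded entries are
 * replayed roughly as follows:
 *
 *	for (i = 0; i < crt_early_log; i++) {
 *		struct early_log *log = &early_log[i];
 *
 *		switch (log->op_type) {
 *		case KMEMLEAK_ALLOC:
 *			early_alloc(log);
 *			break;
 *		case KMEMLEAK_FREE:
 *			kmemleak_free(log->ptr);
 *			break;
 *		...
 *		}
 *	}
 */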
 230
 231/* kmemleak operation type for early logging */
 232enum {
 233        KMEMLEAK_ALLOC,
 234        KMEMLEAK_ALLOC_PERCPU,
 235        KMEMLEAK_FREE,
 236        KMEMLEAK_FREE_PART,
 237        KMEMLEAK_FREE_PERCPU,
 238        KMEMLEAK_NOT_LEAK,
 239        KMEMLEAK_IGNORE,
 240        KMEMLEAK_SCAN_AREA,
 241        KMEMLEAK_NO_SCAN
 242};
 243
 244/*
 245 * Structure holding the information passed to kmemleak callbacks during the
 246 * early logging.
 247 */
 248struct early_log {
 249        int op_type;                    /* kmemleak operation type */
 250        const void *ptr;                /* allocated/freed memory block */
 251        size_t size;                    /* memory block size */
 252        int min_count;                  /* minimum reference count */
 253        unsigned long trace[MAX_TRACE]; /* stack trace */
 254        unsigned int trace_len;         /* stack trace length */
 255};
 256
 257/* early logging buffer and current position */
 258static struct early_log
 259        early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
 260static int crt_early_log __initdata;
 261
 262static void kmemleak_disable(void);
 263
 264/*
 265 * Print a warning and dump the stack trace.
 266 */
 267#define kmemleak_warn(x...)     do {            \
 268        pr_warning(x);                          \
 269        dump_stack();                           \
 270        atomic_set(&kmemleak_warning, 1);       \
 271} while (0)
 272
 273/*
  274 * Macro invoked when a serious kmemleak condition has occurred and cannot be
  275 * recovered from. Kmemleak will be disabled and further allocation/freeing
  276 * tracing will no longer be available.
 277 */
 278#define kmemleak_stop(x...)     do {    \
 279        kmemleak_warn(x);               \
 280        kmemleak_disable();             \
 281} while (0)
 282
 283/*
  284 * Print the object's hex dump to the seq file. The number of lines to be
 285 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 286 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 287 * with the object->lock held.
 288 */
 289static void hex_dump_object(struct seq_file *seq,
 290                            struct kmemleak_object *object)
 291{
 292        const u8 *ptr = (const u8 *)object->pointer;
 293        int i, len, remaining;
 294        unsigned char linebuf[HEX_ROW_SIZE * 5];
 295
 296        /* limit the number of lines to HEX_MAX_LINES */
 297        remaining = len =
 298                min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
 299
 300        seq_printf(seq, "  hex dump (first %d bytes):\n", len);
 301        for (i = 0; i < len; i += HEX_ROW_SIZE) {
 302                int linelen = min(remaining, HEX_ROW_SIZE);
 303
 304                remaining -= HEX_ROW_SIZE;
 305                hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
 306                                   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
 307                                   HEX_ASCII);
 308                seq_printf(seq, "    %s\n", linebuf);
 309        }
 310}
 311
 312/*
 313 * Object colors, encoded with count and min_count:
 314 * - white - orphan object, not enough references to it (count < min_count)
 315 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 316 *              sufficient references to it (count >= min_count)
 317 * - black - ignore, it doesn't contain references (e.g. text section)
 318 *              (min_count == -1). No function defined for this color.
  319 * Newly created objects are white initially (object->count == 0) and remain
  320 * so until references to them are found by the next memory scan.
 321 */
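/*
 * Worked example of the encoding above (illustrative values):
 *
 *	count == 0, min_count == 1	white (reported once old enough)
 *	count == 2, min_count == 1	gray  (sufficiently referenced)
 *	count == 0, min_count == 0	gray  (marked as false positive)
 *	min_count == -1			black (never scanned or reported)
 */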
 322static bool color_white(const struct kmemleak_object *object)
 323{
 324        return object->count != KMEMLEAK_BLACK &&
 325                object->count < object->min_count;
 326}
 327
 328static bool color_gray(const struct kmemleak_object *object)
 329{
 330        return object->min_count != KMEMLEAK_BLACK &&
 331                object->count >= object->min_count;
 332}
 333
 334/*
 335 * Objects are considered unreferenced only if their color is white, they have
  336 * not been deleted and have a minimum age to avoid false positives caused by
 337 * pointers temporarily stored in CPU registers.
 338 */
 339static bool unreferenced_object(struct kmemleak_object *object)
 340{
 341        return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 342                time_before_eq(object->jiffies + jiffies_min_age,
 343                               jiffies_last_scan);
 344}
 345
 346/*
  347 * Print the unreferenced object's information to the seq file. The
 348 * print_unreferenced function must be called with the object->lock held.
 349 */
 350static void print_unreferenced(struct seq_file *seq,
 351                               struct kmemleak_object *object)
 352{
 353        int i;
 354        unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 355
 356        seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 357                   object->pointer, object->size);
 358        seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
 359                   object->comm, object->pid, object->jiffies,
 360                   msecs_age / 1000, msecs_age % 1000);
 361        hex_dump_object(seq, object);
 362        seq_printf(seq, "  backtrace:\n");
 363
 364        for (i = 0; i < object->trace_len; i++) {
 365                void *ptr = (void *)object->trace[i];
 366                seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
 367        }
 368}
 369
 370/*
 371 * Print the kmemleak_object information. This function is used mainly for
  372 * debugging special cases of kmemleak operations. It must be called with
 373 * the object->lock held.
 374 */
 375static void dump_object_info(struct kmemleak_object *object)
 376{
 377        struct stack_trace trace;
 378
 379        trace.nr_entries = object->trace_len;
 380        trace.entries = object->trace;
 381
 382        pr_notice("Object 0x%08lx (size %zu):\n",
 383                  object->pointer, object->size);
 384        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
 385                  object->comm, object->pid, object->jiffies);
 386        pr_notice("  min_count = %d\n", object->min_count);
 387        pr_notice("  count = %d\n", object->count);
 388        pr_notice("  flags = 0x%lx\n", object->flags);
  389        pr_notice("  checksum = %u\n", object->checksum);
 390        pr_notice("  backtrace:\n");
 391        print_stack_trace(&trace, 4);
 392}
 393
 394/*
 395 * Look-up a memory block metadata (kmemleak_object) in the object search
 396 * tree based on a pointer value. If alias is 0, only values pointing to the
 397 * beginning of the memory block are allowed. The kmemleak_lock must be held
 398 * when calling this function.
 399 */
 400static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 401{
 402        struct rb_node *rb = object_tree_root.rb_node;
 403
 404        while (rb) {
 405                struct kmemleak_object *object =
 406                        rb_entry(rb, struct kmemleak_object, rb_node);
 407                if (ptr < object->pointer)
 408                        rb = object->rb_node.rb_left;
 409                else if (object->pointer + object->size <= ptr)
 410                        rb = object->rb_node.rb_right;
 411                else if (object->pointer == ptr || alias)
 412                        return object;
 413                else {
 414                        kmemleak_warn("Found object by alias at 0x%08lx\n",
 415                                      ptr);
 416                        dump_object_info(object);
 417                        break;
 418                }
 419        }
 420        return NULL;
 421}
 422
 423/*
 424 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
  425 * that once an object's use_count has reached 0, the RCU freeing has already
  426 * been registered and the object should no longer be used. This function must be
 427 * called under the protection of rcu_read_lock().
 428 */
 429static int get_object(struct kmemleak_object *object)
 430{
 431        return atomic_inc_not_zero(&object->use_count);
 432}
 433
 434/*
 435 * RCU callback to free a kmemleak_object.
 436 */
 437static void free_object_rcu(struct rcu_head *rcu)
 438{
 439        struct hlist_node *elem, *tmp;
 440        struct kmemleak_scan_area *area;
 441        struct kmemleak_object *object =
 442                container_of(rcu, struct kmemleak_object, rcu);
 443
 444        /*
 445         * Once use_count is 0 (guaranteed by put_object), there is no other
 446         * code accessing this object, hence no need for locking.
 447         */
 448        hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
 449                hlist_del(elem);
 450                kmem_cache_free(scan_area_cache, area);
 451        }
 452        kmem_cache_free(object_cache, object);
 453}
 454
 455/*
 456 * Decrement the object use_count. Once the count is 0, free the object using
 457 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 458 * delete_object() path, the delayed RCU freeing ensures that there is no
 459 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 460 * is also possible.
 461 */
 462static void put_object(struct kmemleak_object *object)
 463{
 464        if (!atomic_dec_and_test(&object->use_count))
 465                return;
 466
 467        /* should only get here after delete_object was called */
 468        WARN_ON(object->flags & OBJECT_ALLOCATED);
 469
 470        call_rcu(&object->rcu, free_object_rcu);
 471}
 472
 473/*
 474 * Look up an object in the object search tree and increase its use_count.
 475 */
 476static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 477{
 478        unsigned long flags;
 479        struct kmemleak_object *object = NULL;
 480
 481        rcu_read_lock();
 482        read_lock_irqsave(&kmemleak_lock, flags);
 483        if (ptr >= min_addr && ptr < max_addr)
 484                object = lookup_object(ptr, alias);
 485        read_unlock_irqrestore(&kmemleak_lock, flags);
 486
 487        /* check whether the object is still available */
 488        if (object && !get_object(object))
 489                object = NULL;
 490        rcu_read_unlock();
 491
 492        return object;
 493}
 494
 495/*
 496 * Save stack trace to the given array of MAX_TRACE size.
 497 */
 498static int __save_stack_trace(unsigned long *trace)
 499{
 500        struct stack_trace stack_trace;
 501
 502        stack_trace.max_entries = MAX_TRACE;
 503        stack_trace.nr_entries = 0;
 504        stack_trace.entries = trace;
 505        stack_trace.skip = 2;
 506        save_stack_trace(&stack_trace);
 507
 508        return stack_trace.nr_entries;
 509}
 510
 511/*
 512 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 513 * memory block and add it to the object_list and object_tree_root.
 514 */
 515static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 516                                             int min_count, gfp_t gfp)
 517{
 518        unsigned long flags;
 519        struct kmemleak_object *object, *parent;
 520        struct rb_node **link, *rb_parent;
 521
 522        object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 523        if (!object) {
 524                pr_warning("Cannot allocate a kmemleak_object structure\n");
 525                kmemleak_disable();
 526                return NULL;
 527        }
 528
 529        INIT_LIST_HEAD(&object->object_list);
 530        INIT_LIST_HEAD(&object->gray_list);
 531        INIT_HLIST_HEAD(&object->area_list);
 532        spin_lock_init(&object->lock);
 533        atomic_set(&object->use_count, 1);
 534        object->flags = OBJECT_ALLOCATED;
 535        object->pointer = ptr;
 536        object->size = size;
 537        object->min_count = min_count;
 538        object->count = 0;                      /* white color initially */
 539        object->jiffies = jiffies;
 540        object->checksum = 0;
 541
 542        /* task information */
 543        if (in_irq()) {
 544                object->pid = 0;
 545                strncpy(object->comm, "hardirq", sizeof(object->comm));
 546        } else if (in_softirq()) {
 547                object->pid = 0;
 548                strncpy(object->comm, "softirq", sizeof(object->comm));
 549        } else {
 550                object->pid = current->pid;
 551                /*
 552                 * There is a small chance of a race with set_task_comm(),
 553                 * however using get_task_comm() here may cause locking
 554                 * dependency issues with current->alloc_lock. In the worst
  555                 * case, the recorded comm string may be incorrect.
 556                 */
 557                strncpy(object->comm, current->comm, sizeof(object->comm));
 558        }
 559
 560        /* kernel backtrace */
 561        object->trace_len = __save_stack_trace(object->trace);
 562
 563        write_lock_irqsave(&kmemleak_lock, flags);
 564
 565        min_addr = min(min_addr, ptr);
 566        max_addr = max(max_addr, ptr + size);
 567        link = &object_tree_root.rb_node;
 568        rb_parent = NULL;
 569        while (*link) {
 570                rb_parent = *link;
 571                parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
 572                if (ptr + size <= parent->pointer)
 573                        link = &parent->rb_node.rb_left;
 574                else if (parent->pointer + parent->size <= ptr)
 575                        link = &parent->rb_node.rb_right;
 576                else {
 577                        kmemleak_stop("Cannot insert 0x%lx into the object "
 578                                      "search tree (overlaps existing)\n",
 579                                      ptr);
 580                        kmem_cache_free(object_cache, object);
 581                        object = parent;
 582                        spin_lock(&object->lock);
 583                        dump_object_info(object);
 584                        spin_unlock(&object->lock);
 585                        goto out;
 586                }
 587        }
 588        rb_link_node(&object->rb_node, rb_parent, link);
 589        rb_insert_color(&object->rb_node, &object_tree_root);
 590
 591        list_add_tail_rcu(&object->object_list, &object_list);
 592out:
 593        write_unlock_irqrestore(&kmemleak_lock, flags);
 594        return object;
 595}
 596
 597/*
 598 * Remove the metadata (struct kmemleak_object) for a memory block from the
 599 * object_list and object_tree_root and decrement its use_count.
 600 */
 601static void __delete_object(struct kmemleak_object *object)
 602{
 603        unsigned long flags;
 604
 605        write_lock_irqsave(&kmemleak_lock, flags);
 606        rb_erase(&object->rb_node, &object_tree_root);
 607        list_del_rcu(&object->object_list);
 608        write_unlock_irqrestore(&kmemleak_lock, flags);
 609
 610        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
 611        WARN_ON(atomic_read(&object->use_count) < 2);
 612
 613        /*
 614         * Locking here also ensures that the corresponding memory block
 615         * cannot be freed when it is being scanned.
 616         */
 617        spin_lock_irqsave(&object->lock, flags);
 618        object->flags &= ~OBJECT_ALLOCATED;
 619        spin_unlock_irqrestore(&object->lock, flags);
 620        put_object(object);
 621}
 622
 623/*
 624 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 625 * delete it.
 626 */
 627static void delete_object_full(unsigned long ptr)
 628{
 629        struct kmemleak_object *object;
 630
 631        object = find_and_get_object(ptr, 0);
 632        if (!object) {
 633#ifdef DEBUG
 634                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 635                              ptr);
 636#endif
 637                return;
 638        }
 639        __delete_object(object);
 640        put_object(object);
 641}
 642
 643/*
 644 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 645 * delete it. If the memory block is partially freed, the function may create
 646 * additional metadata for the remaining parts of the block.
 647 */
 648static void delete_object_part(unsigned long ptr, size_t size)
 649{
 650        struct kmemleak_object *object;
 651        unsigned long start, end;
 652
 653        object = find_and_get_object(ptr, 1);
 654        if (!object) {
 655#ifdef DEBUG
 656                kmemleak_warn("Partially freeing unknown object at 0x%08lx "
 657                              "(size %zu)\n", ptr, size);
 658#endif
 659                return;
 660        }
 661        __delete_object(object);
 662
 663        /*
 664         * Create one or two objects that may result from the memory block
 665         * split. Note that partial freeing is only done by free_bootmem() and
 666         * this happens before kmemleak_init() is called. The path below is
 667         * only executed during early log recording in kmemleak_init(), so
 668         * GFP_KERNEL is enough.
 669         */
 670        start = object->pointer;
 671        end = object->pointer + object->size;
 672        if (ptr > start)
 673                create_object(start, ptr - start, object->min_count,
 674                              GFP_KERNEL);
 675        if (ptr + size < end)
 676                create_object(ptr + size, end - ptr - size, object->min_count,
 677                              GFP_KERNEL);
 678
 679        put_object(object);
 680}
 681
 682static void __paint_it(struct kmemleak_object *object, int color)
 683{
 684        object->min_count = color;
 685        if (color == KMEMLEAK_BLACK)
 686                object->flags |= OBJECT_NO_SCAN;
 687}
 688
 689static void paint_it(struct kmemleak_object *object, int color)
 690{
 691        unsigned long flags;
 692
 693        spin_lock_irqsave(&object->lock, flags);
 694        __paint_it(object, color);
 695        spin_unlock_irqrestore(&object->lock, flags);
 696}
 697
 698static void paint_ptr(unsigned long ptr, int color)
 699{
 700        struct kmemleak_object *object;
 701
 702        object = find_and_get_object(ptr, 0);
 703        if (!object) {
 704                kmemleak_warn("Trying to color unknown object "
 705                              "at 0x%08lx as %s\n", ptr,
 706                              (color == KMEMLEAK_GREY) ? "Grey" :
 707                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 708                return;
 709        }
 710        paint_it(object, color);
 711        put_object(object);
 712}
 713
 714/*
 715 * Mark an object permanently as gray-colored so that it can no longer be
 716 * reported as a leak. This is used in general to mark a false positive.
 717 */
 718static void make_gray_object(unsigned long ptr)
 719{
 720        paint_ptr(ptr, KMEMLEAK_GREY);
 721}
 722
 723/*
 724 * Mark the object as black-colored so that it is ignored from scans and
 725 * reporting.
 726 */
 727static void make_black_object(unsigned long ptr)
 728{
 729        paint_ptr(ptr, KMEMLEAK_BLACK);
 730}
 731
 732/*
 733 * Add a scanning area to the object. If at least one such area is added,
 734 * kmemleak will only scan these ranges rather than the whole memory block.
 735 */
 736static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 737{
 738        unsigned long flags;
 739        struct kmemleak_object *object;
 740        struct kmemleak_scan_area *area;
 741
 742        object = find_and_get_object(ptr, 1);
 743        if (!object) {
 744                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 745                              ptr);
 746                return;
 747        }
 748
 749        area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 750        if (!area) {
 751                pr_warning("Cannot allocate a scan area\n");
 752                goto out;
 753        }
 754
 755        spin_lock_irqsave(&object->lock, flags);
 756        if (ptr + size > object->pointer + object->size) {
 757                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 758                dump_object_info(object);
 759                kmem_cache_free(scan_area_cache, area);
 760                goto out_unlock;
 761        }
 762
 763        INIT_HLIST_NODE(&area->node);
 764        area->start = ptr;
 765        area->size = size;
 766
 767        hlist_add_head(&area->node, &object->area_list);
 768out_unlock:
 769        spin_unlock_irqrestore(&object->lock, flags);
 770out:
 771        put_object(object);
 772}
 773
 774/*
  775 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
  776 * pointer. Such an object will not be scanned by kmemleak, but references
  777 * to it are still searched for.
 778 */
 779static void object_no_scan(unsigned long ptr)
 780{
 781        unsigned long flags;
 782        struct kmemleak_object *object;
 783
 784        object = find_and_get_object(ptr, 0);
 785        if (!object) {
 786                kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
 787                return;
 788        }
 789
 790        spin_lock_irqsave(&object->lock, flags);
 791        object->flags |= OBJECT_NO_SCAN;
 792        spin_unlock_irqrestore(&object->lock, flags);
 793        put_object(object);
 794}
 795
 796/*
 797 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 798 * processed later once kmemleak is fully initialized.
 799 */
 800static void __init log_early(int op_type, const void *ptr, size_t size,
 801                             int min_count)
 802{
 803        unsigned long flags;
 804        struct early_log *log;
 805
 806        if (atomic_read(&kmemleak_error)) {
 807                /* kmemleak stopped recording, just count the requests */
 808                crt_early_log++;
 809                return;
 810        }
 811
 812        if (crt_early_log >= ARRAY_SIZE(early_log)) {
 813                kmemleak_disable();
 814                return;
 815        }
 816
 817        /*
 818         * There is no need for locking since the kernel is still in UP mode
 819         * at this stage. Disabling the IRQs is enough.
 820         */
 821        local_irq_save(flags);
 822        log = &early_log[crt_early_log];
 823        log->op_type = op_type;
 824        log->ptr = ptr;
 825        log->size = size;
 826        log->min_count = min_count;
 827        log->trace_len = __save_stack_trace(log->trace);
 828        crt_early_log++;
 829        local_irq_restore(flags);
 830}
 831
 832/*
  833 * Create the metadata for an early allocated block and populate its stack trace.
 834 */
 835static void early_alloc(struct early_log *log)
 836{
 837        struct kmemleak_object *object;
 838        unsigned long flags;
 839        int i;
 840
 841        if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
 842                return;
 843
 844        /*
 845         * RCU locking needed to ensure object is not freed via put_object().
 846         */
 847        rcu_read_lock();
 848        object = create_object((unsigned long)log->ptr, log->size,
 849                               log->min_count, GFP_ATOMIC);
 850        if (!object)
 851                goto out;
 852        spin_lock_irqsave(&object->lock, flags);
 853        for (i = 0; i < log->trace_len; i++)
 854                object->trace[i] = log->trace[i];
 855        object->trace_len = log->trace_len;
 856        spin_unlock_irqrestore(&object->lock, flags);
 857out:
 858        rcu_read_unlock();
 859}
 860
 861/*
  862 * Create the per-CPU metadata for an early allocated __percpu block.
 863 */
 864static void early_alloc_percpu(struct early_log *log)
 865{
 866        unsigned int cpu;
 867        const void __percpu *ptr = log->ptr;
 868
 869        for_each_possible_cpu(cpu) {
 870                log->ptr = per_cpu_ptr(ptr, cpu);
 871                early_alloc(log);
 872        }
 873}
 874
 875/**
 876 * kmemleak_alloc - register a newly allocated object
 877 * @ptr:        pointer to beginning of the object
 878 * @size:       size of the object
 879 * @min_count:  minimum number of references to this object. If during memory
 880 *              scanning a number of references less than @min_count is found,
 881 *              the object is reported as a memory leak. If @min_count is 0,
 882 *              the object is never reported as a leak. If @min_count is -1,
 883 *              the object is ignored (not scanned and not reported as a leak)
 884 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 885 *
 886 * This function is called from the kernel allocators when a new object
 887 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 888 */
 889void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 890                          gfp_t gfp)
 891{
 892        pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 893
 894        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 895                create_object((unsigned long)ptr, size, min_count, gfp);
 896        else if (atomic_read(&kmemleak_early_log))
 897                log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 898}
 899EXPORT_SYMBOL_GPL(kmemleak_alloc);
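/*
 * Editorial example (hypothetical caller, not part of this file): an
 * allocator whose blocks are invisible to the slab hooks would pair the
 * kmemleak callbacks explicitly. my_low_level_alloc() is assumed:
 *
 *	void *my_alloc(size_t size)
 *	{
 *		void *ptr = my_low_level_alloc(size);
 *
 *		if (ptr)
 *			kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *		return ptr;
 *	}
 *
 * min_count == 1 means the block is reported as a leak when the scan finds
 * no references to it.
 */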
 900
 901/**
 902 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 903 * @ptr:        __percpu pointer to beginning of the object
 904 * @size:       size of the object
 905 *
 906 * This function is called from the kernel percpu allocator when a new object
 907 * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
 908 * allocation.
 909 */
 910void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
 911{
 912        unsigned int cpu;
 913
 914        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
 915
 916        /*
 917         * Percpu allocations are only scanned and not reported as leaks
 918         * (min_count is set to 0).
 919         */
 920        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 921                for_each_possible_cpu(cpu)
 922                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
 923                                      size, 0, GFP_KERNEL);
 924        else if (atomic_read(&kmemleak_early_log))
 925                log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
 926}
 927EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
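/*
 * Editorial sketch: a percpu allocation is tracked as one object per
 * possible CPU, all with min_count == 0. For a hypothetical caller:
 *
 *	struct stats __percpu *s = alloc_percpu(struct stats);
 *
 * the percpu allocator ends up calling kmemleak_alloc_percpu(s, size), so
 * each per-CPU area is scanned for references but never reported as a leak.
 */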
 928
 929/**
 930 * kmemleak_free - unregister a previously registered object
 931 * @ptr:        pointer to beginning of the object
 932 *
 933 * This function is called from the kernel allocators when an object (memory
 934 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 935 */
 936void __ref kmemleak_free(const void *ptr)
 937{
 938        pr_debug("%s(0x%p)\n", __func__, ptr);
 939
 940        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 941                delete_object_full((unsigned long)ptr);
 942        else if (atomic_read(&kmemleak_early_log))
 943                log_early(KMEMLEAK_FREE, ptr, 0, 0);
 944}
 945EXPORT_SYMBOL_GPL(kmemleak_free);
 946
 947/**
 948 * kmemleak_free_part - partially unregister a previously registered object
  949 * @ptr:        pointer to the beginning of or inside the object. This also
 950 *              represents the start of the range to be freed
 951 * @size:       size to be unregistered
 952 *
 953 * This function is called when only a part of a memory block is freed
 954 * (usually from the bootmem allocator).
 955 */
 956void __ref kmemleak_free_part(const void *ptr, size_t size)
 957{
 958        pr_debug("%s(0x%p)\n", __func__, ptr);
 959
 960        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 961                delete_object_part((unsigned long)ptr, size);
 962        else if (atomic_read(&kmemleak_early_log))
 963                log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 964}
 965EXPORT_SYMBOL_GPL(kmemleak_free_part);
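/*
 * Editorial example (hypothetical values): partially freeing the middle of
 * a tracked region splits the metadata via delete_object_part():
 *
 *	kmemleak_alloc(ptr, 0x1000, 1, GFP_KERNEL);
 *	kmemleak_free_part(ptr + 0x400, 0x200);
 *
 * The original object is deleted and two new ones are created for
 * [ptr, ptr + 0x400) and [ptr + 0x600, ptr + 0x1000).
 */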
 966
 967/**
 968 * kmemleak_free_percpu - unregister a previously registered __percpu object
 969 * @ptr:        __percpu pointer to beginning of the object
 970 *
 971 * This function is called from the kernel percpu allocator when an object
 972 * (memory block) is freed (free_percpu).
 973 */
 974void __ref kmemleak_free_percpu(const void __percpu *ptr)
 975{
 976        unsigned int cpu;
 977
 978        pr_debug("%s(0x%p)\n", __func__, ptr);
 979
 980        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 981                for_each_possible_cpu(cpu)
 982                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
 983                                                                      cpu));
 984        else if (atomic_read(&kmemleak_early_log))
 985                log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
 986}
 987EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 988
 989/**
 990 * kmemleak_not_leak - mark an allocated object as false positive
 991 * @ptr:        pointer to beginning of the object
 992 *
 993 * Calling this function on an object will cause the memory block to no longer
  994 * be reported as a leak and to always be scanned.
 995 */
 996void __ref kmemleak_not_leak(const void *ptr)
 997{
 998        pr_debug("%s(0x%p)\n", __func__, ptr);
 999
1000        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
1001                make_gray_object((unsigned long)ptr);
1002        else if (atomic_read(&kmemleak_early_log))
1003                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
1004}
1005EXPORT_SYMBOL(kmemleak_not_leak);
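/*
 * Editorial example (hypothetical caller; dev_base and REG_BUF are assumed
 * names): a buffer whose only reference is written to a device register
 * cannot be found by the scan, so it is grayed to silence the report:
 *
 *	buf = kmalloc(size, GFP_KERNEL);
 *	writel(virt_to_phys(buf), dev_base + REG_BUF);
 *	kmemleak_not_leak(buf);
 */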
1006
1007/**
1008 * kmemleak_ignore - ignore an allocated object
1009 * @ptr:        pointer to beginning of the object
1010 *
1011 * Calling this function on an object will cause the memory block to be
1012 * ignored (not scanned and not reported as a leak). This is usually done when
1013 * it is known that the corresponding block is not a leak and does not contain
1014 * any references to other allocated memory blocks.
1015 */
1016void __ref kmemleak_ignore(const void *ptr)
1017{
1018        pr_debug("%s(0x%p)\n", __func__, ptr);
1019
1020        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
1021                make_black_object((unsigned long)ptr);
1022        else if (atomic_read(&kmemleak_early_log))
1023                log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
1024}
1025EXPORT_SYMBOL(kmemleak_ignore);
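/*
 * Editorial example (hypothetical caller): a block that is known not to
 * leak and whose contents, e.g. firmware data, contain no references to
 * other allocations can be blackened:
 *
 *	fw_buf = kmalloc(fw_size, GFP_KERNEL);
 *	kmemleak_ignore(fw_buf);
 */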
1026
1027/**
1028 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 1029 * @ptr:        pointer to the beginning of or inside the object. This also
1030 *              represents the start of the scan area
1031 * @size:       size of the scan area
1032 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
1033 *
1034 * This function is used when it is known that only certain parts of an object
 1035 * contain references to other objects. Kmemleak will only scan these areas,
 1036 * reducing the number of false negatives.
1037 */
1038void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1039{
1040        pr_debug("%s(0x%p)\n", __func__, ptr);
1041
1042        if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
1043                add_scan_area((unsigned long)ptr, size, gfp);
1044        else if (atomic_read(&kmemleak_early_log))
1045                log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
1046}
1047EXPORT_SYMBOL(kmemleak_scan_area);
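/*
 * Editorial example (hypothetical structure): only the pointer-bearing tail
 * of a large, mostly opaque object needs scanning:
 *
 *	struct big {
 *		char blob[8192];
 *		struct list_head list;
 *	};
 *	struct big *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *
 *	kmemleak_scan_area(&b->list, sizeof(b->list), GFP_KERNEL);
 *
 * kmemleak then scans only the list_head instead of all of *b.
 */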
1048
1049/**
1050 * kmemleak_no_scan - do not scan an allocated object
1051 * @ptr:        pointer to beginning of the object
1052 *
1053 * This function notifies kmemleak not to scan the given memory block. Useful
1054 * in situations where it is known that the given object does not contain any
 1055 * references to other objects. Kmemleak will not scan such objects, reducing
1056 * the number of false negatives.
1057 */
1058void __ref kmemleak_no_scan(const void *ptr)
1059{
1060        pr_debug("%s(0x%p)\n", __func__, ptr);
1061
1062        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
1063                object_no_scan((unsigned long)ptr);
1064        else if (atomic_read(&kmemleak_early_log))
1065                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
1066}
1067EXPORT_SYMBOL(kmemleak_no_scan);
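/*
 * Editorial example (hypothetical caller): a buffer filled with raw device
 * data contains no pointers; scanning it could only manufacture false
 * references that hide real leaks:
 *
 *	buf = kmalloc(buf_size, GFP_KERNEL);
 *	kmemleak_no_scan(buf);
 */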
1068
1069/*
1070 * Update an object's checksum and return true if it was modified.
1071 */
1072static bool update_checksum(struct kmemleak_object *object)
1073{
1074        u32 old_csum = object->checksum;
1075
1076        if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
1077                return false;
1078
1079        object->checksum = crc32(0, (void *)object->pointer, object->size);
1080        return object->checksum != old_csum;
1081}
1082
1083/*
 1084 * Memory scanning is a long process and it needs to be interruptible. This
1085 * function checks whether such interrupt condition occurred.
1086 */
1087static int scan_should_stop(void)
1088{
1089        if (!atomic_read(&kmemleak_enabled))
1090                return 1;
1091
1092        /*
1093         * This function may be called from either process or kthread context,
1094         * hence the need to check for both stop conditions.
1095         */
1096        if (current->mm)
1097                return signal_pending(current);
1098        else
1099                return kthread_should_stop();
1102}
1103
1104/*
1105 * Scan a memory block (exclusive range) for valid pointers and add those
1106 * found to the gray list.
1107 */
1108static void scan_block(void *_start, void *_end,
1109                       struct kmemleak_object *scanned, int allow_resched)
1110{
1111        unsigned long *ptr;
1112        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1113        unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1114
1115        for (ptr = start; ptr < end; ptr++) {
1116                struct kmemleak_object *object;
1117                unsigned long flags;
1118                unsigned long pointer;
1119
1120                if (allow_resched)
1121                        cond_resched();
1122                if (scan_should_stop())
1123                        break;
1124
1125                /* don't scan uninitialized memory */
1126                if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
1127                                                  BYTES_PER_POINTER))
1128                        continue;
1129
1130                pointer = *ptr;
1131
1132                object = find_and_get_object(pointer, 1);
1133                if (!object)
1134                        continue;
1135                if (object == scanned) {
1136                        /* self referenced, ignore */
1137                        put_object(object);
1138                        continue;
1139                }
1140
1141                /*
1142                 * Avoid the lockdep recursive warning on object->lock being
1143                 * previously acquired in scan_object(). These locks are
1144                 * enclosed by scan_mutex.
1145                 */
1146                spin_lock_irqsave_nested(&object->lock, flags,
1147                                         SINGLE_DEPTH_NESTING);
1148                if (!color_white(object)) {
1149                        /* non-orphan, ignored or new */
1150                        spin_unlock_irqrestore(&object->lock, flags);
1151                        put_object(object);
1152                        continue;
1153                }
1154
1155                /*
1156                 * Increase the object's reference count (number of pointers
1157                 * to the memory block). If this count reaches the required
1158                 * minimum, the object's color will become gray and it will be
1159                 * added to the gray_list.
1160                 */
1161                object->count++;
1162                if (color_gray(object)) {
1163                        list_add_tail(&object->gray_list, &gray_list);
1164                        spin_unlock_irqrestore(&object->lock, flags);
1165                        continue;
1166                }
1167
1168                spin_unlock_irqrestore(&object->lock, flags);
1169                put_object(object);
1170        }
1171}
1172
1173/*
 1174 * Scan a memory block corresponding to a kmemleak_object. The caller must
 1175 * ensure that object->use_count >= 1.
1176 */
1177static void scan_object(struct kmemleak_object *object)
1178{
1179        struct kmemleak_scan_area *area;
1180        struct hlist_node *elem;
1181        unsigned long flags;
1182
1183        /*
1184         * Once the object->lock is acquired, the corresponding memory block
1185         * cannot be freed (the same lock is acquired in delete_object).
1186         */
1187        spin_lock_irqsave(&object->lock, flags);
1188        if (object->flags & OBJECT_NO_SCAN)
1189                goto out;
1190        if (!(object->flags & OBJECT_ALLOCATED))
1191                /* already freed object */
1192                goto out;
1193        if (hlist_empty(&object->area_list)) {
1194                void *start = (void *)object->pointer;
1195                void *end = (void *)(object->pointer + object->size);
1196
1197                while (start < end && (object->flags & OBJECT_ALLOCATED) &&
1198                       !(object->flags & OBJECT_NO_SCAN)) {
1199                        scan_block(start, min(start + MAX_SCAN_SIZE, end),
1200                                   object, 0);
1201                        start += MAX_SCAN_SIZE;
1202
1203                        spin_unlock_irqrestore(&object->lock, flags);
1204                        cond_resched();
1205                        spin_lock_irqsave(&object->lock, flags);
1206                }
1207        } else
1208                hlist_for_each_entry(area, elem, &object->area_list, node)
1209                        scan_block((void *)area->start,
1210                                   (void *)(area->start + area->size),
1211                                   object, 0);
1212out:
1213        spin_unlock_irqrestore(&object->lock, flags);
1214}
1215
1216/*
1217 * Scan the objects already referenced (gray objects). More objects will be
1218 * referenced and, if there are no memory leaks, all the objects are scanned.
1219 */
1220static void scan_gray_list(void)
1221{
1222        struct kmemleak_object *object, *tmp;
1223
1224        /*
1225         * The list traversal is safe for both tail additions and removals
1226         * from inside the loop. The kmemleak objects cannot be freed from
1227         * outside the loop because their use_count was incremented.
1228         */
1229        object = list_entry(gray_list.next, typeof(*object), gray_list);
1230        while (&object->gray_list != &gray_list) {
1231                cond_resched();
1232
1233                /* may add new objects to the list */
1234                if (!scan_should_stop())
1235                        scan_object(object);
1236
1237                tmp = list_entry(object->gray_list.next, typeof(*object),
1238                                 gray_list);
1239
1240                /* remove the object from the list and release it */
1241                list_del(&object->gray_list);
1242                put_object(object);
1243
1244                object = tmp;
1245        }
1246        WARN_ON(!list_empty(&gray_list));
1247}
1248
1249/*
1250 * Scan data sections and all the referenced memory blocks allocated via the
1251 * kernel's standard allocators. This function must be called with the
1252 * scan_mutex held.
1253 */
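/*
 * Overview of the phases below:
 *   1. whiten all objects (count = 0) and queue those still gray
 *      (min_count == 0)
 *   2. scan the data/bss sections, per-CPU areas, struct pages and,
 *      optionally, the task stacks
 *   3. drain the gray_list, following references transitively
 *   4. re-queue white objects whose checksum changed and drain again
 *   5. flag and count the newly unreferenced objects
 */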
1254static void kmemleak_scan(void)
1255{
1256        unsigned long flags;
1257        struct kmemleak_object *object;
1258        int i;
1259        int new_leaks = 0;
1260
1261        jiffies_last_scan = jiffies;
1262
 1263        /* prepare the kmemleak_object structures */
1264        rcu_read_lock();
1265        list_for_each_entry_rcu(object, &object_list, object_list) {
1266                spin_lock_irqsave(&object->lock, flags);
1267#ifdef DEBUG
1268                /*
1269                 * With a few exceptions there should be a maximum of
1270                 * 1 reference to any object at this point.
1271                 */
1272                if (atomic_read(&object->use_count) > 1) {
1273                        pr_debug("object->use_count = %d\n",
1274                                 atomic_read(&object->use_count));
1275                        dump_object_info(object);
1276                }
1277#endif
1278                /* reset the reference count (whiten the object) */
1279                object->count = 0;
1280                if (color_gray(object) && get_object(object))
1281                        list_add_tail(&object->gray_list, &gray_list);
1282
1283                spin_unlock_irqrestore(&object->lock, flags);
1284        }
1285        rcu_read_unlock();
1286
1287        /* data/bss scanning */
1288        scan_block(_sdata, _edata, NULL, 1);
1289        scan_block(__bss_start, __bss_stop, NULL, 1);
1290
1291#ifdef CONFIG_SMP
1292        /* per-cpu sections scanning */
1293        for_each_possible_cpu(i)
1294                scan_block(__per_cpu_start + per_cpu_offset(i),
1295                           __per_cpu_end + per_cpu_offset(i), NULL, 1);
1296#endif
1297
1298        /*
1299         * Struct page scanning for each node.
1300         */
1301        lock_memory_hotplug();
1302        for_each_online_node(i) {
1303                pg_data_t *pgdat = NODE_DATA(i);
1304                unsigned long start_pfn = pgdat->node_start_pfn;
1305                unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1306                unsigned long pfn;
1307
1308                for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1309                        struct page *page;
1310
1311                        if (!pfn_valid(pfn))
1312                                continue;
1313                        page = pfn_to_page(pfn);
1314                        /* only scan if page is in use */
1315                        if (page_count(page) == 0)
1316                                continue;
1317                        scan_block(page, page + 1, NULL, 1);
1318                }
1319        }
1320        unlock_memory_hotplug();
1321
1322        /*
1323         * Scanning the task stacks (may introduce false negatives).
1324         */
1325        if (kmemleak_stack_scan) {
1326                struct task_struct *p, *g;
1327
1328                read_lock(&tasklist_lock);
1329                do_each_thread(g, p) {
1330                        scan_block(task_stack_page(p), task_stack_page(p) +
1331                                   THREAD_SIZE, NULL, 0);
1332                } while_each_thread(g, p);
1333                read_unlock(&tasklist_lock);
1334        }
1335
1336        /*
1337         * Scan the objects already referenced from the sections scanned
1338         * above.
1339         */
1340        scan_gray_list();
1341
1342        /*
1343         * Check for new or unreferenced objects modified since the previous
1344         * scan and color them gray until the next scan.
1345         */
1346        rcu_read_lock();
1347        list_for_each_entry_rcu(object, &object_list, object_list) {
1348                spin_lock_irqsave(&object->lock, flags);
1349                if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1350                    && update_checksum(object) && get_object(object)) {
1351                        /* color it gray temporarily */
1352                        object->count = object->min_count;
1353                        list_add_tail(&object->gray_list, &gray_list);
1354                }
1355                spin_unlock_irqrestore(&object->lock, flags);
1356        }
1357        rcu_read_unlock();
1358
1359        /*
1360         * Re-scan the gray list for modified unreferenced objects.
1361         */
1362        scan_gray_list();
1363
1364        /*
1365         * If scanning was stopped do not report any new unreferenced objects.
1366         */
1367        if (scan_should_stop())
1368                return;
1369
1370        /*
1371         * Scanning result reporting.
1372         */
1373        rcu_read_lock();
1374        list_for_each_entry_rcu(object, &object_list, object_list) {
1375                spin_lock_irqsave(&object->lock, flags);
1376                if (unreferenced_object(object) &&
1377                    !(object->flags & OBJECT_REPORTED)) {
1378                        object->flags |= OBJECT_REPORTED;
1379                        new_leaks++;
1380                }
1381                spin_unlock_irqrestore(&object->lock, flags);
1382        }
1383        rcu_read_unlock();
1384
1385        if (new_leaks)
1386                pr_info("%d new suspected memory leaks (see "
1387                        "/sys/kernel/debug/kmemleak)\n", new_leaks);
1388
1389}
1390
1391/*
1392 * Thread function performing automatic memory scanning. Unreferenced objects
 1393 * at the end of a memory scan are reported, but only the first time they
 1394 * are seen.
1394 */
1395static int kmemleak_scan_thread(void *arg)
1396{
1397        static int first_run = 1;
1398
1399        pr_info("Automatic memory scanning thread started\n");
1400        set_user_nice(current, 10);
1401
1402        /*
1403         * Wait before the first scan to allow the system to fully initialize.
1404         */
1405        if (first_run) {
1406                first_run = 0;
1407                ssleep(SECS_FIRST_SCAN);
1408        }
1409
1410        while (!kthread_should_stop()) {
1411                signed long timeout = jiffies_scan_wait;
1412
1413                mutex_lock(&scan_mutex);
1414                kmemleak_scan();
1415                mutex_unlock(&scan_mutex);
1416
1417                /* wait before the next scan */
1418                while (timeout && !kthread_should_stop())
1419                        timeout = schedule_timeout_interruptible(timeout);
1420        }
1421
1422        pr_info("Automatic memory scanning thread ended\n");
1423
1424        return 0;
1425}
1426
1427/*
1428 * Start the automatic memory scanning thread. This function must be called
1429 * with the scan_mutex held.
1430 */
1431static void start_scan_thread(void)
1432{
1433        if (scan_thread)
1434                return;
1435        scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1436        if (IS_ERR(scan_thread)) {
1437                pr_warning("Failed to create the scan thread\n");
1438                scan_thread = NULL;
1439        }
1440}
1441
1442/*
1443 * Stop the automatic memory scanning thread. This function must be called
1444 * with the scan_mutex held.
1445 */
1446static void stop_scan_thread(void)
1447{
1448        if (scan_thread) {
1449                kthread_stop(scan_thread);
1450                scan_thread = NULL;
1451        }
1452}
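
/*
 * For illustration only (added example, not part of kmemleak itself): both
 * helpers above expect the scan_mutex to be held, e.g. as in the
 * "scan=<secs>" handling of kmemleak_write() below:
 */
#if 0
        mutex_lock(&scan_mutex);
        stop_scan_thread();
        jiffies_scan_wait = msecs_to_jiffies(secs * 1000);  /* secs: new period */
        start_scan_thread();
        mutex_unlock(&scan_mutex);
#endif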
1453
1454/*
1455 * Iterate over the object_list and return the first valid object at or after
1456 * the required position with its use_count incremented. Reading the file
1457 * does not trigger a memory scan; one is requested by writing "scan" to it.
1458 */
1459static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1460{
1461        struct kmemleak_object *object;
1462        loff_t n = *pos;
1463        int err;
1464
1465        err = mutex_lock_interruptible(&scan_mutex);
1466        if (err < 0)
1467                return ERR_PTR(err);
1468
1469        rcu_read_lock();
1470        list_for_each_entry_rcu(object, &object_list, object_list) {
1471                if (n-- > 0)
1472                        continue;
1473                if (get_object(object))
1474                        goto out;
1475        }
1476        object = NULL;
1477out:
1478        return object;
1479}
1480
1481/*
1482 * Return the next object in the object_list. The function decrements the
1483 * use_count of the previous object and increments that of the next one.
1484 */
1485static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1486{
1487        struct kmemleak_object *prev_obj = v;
1488        struct kmemleak_object *next_obj = NULL;
1489        struct kmemleak_object *obj = prev_obj;
1490
1491        ++(*pos);
1492
1493        list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1494                if (get_object(obj)) {
1495                        next_obj = obj;
1496                        break;
1497                }
1498        }
1499
1500        put_object(prev_obj);
1501        return next_obj;
1502}
1503
1504/*
1505 * Decrement the use_count of the last object returned, if any.
1506 */
1507static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1508{
1509        if (!IS_ERR(v)) {
1510                /*
1511                 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1512                 * waiting was interrupted, so only release it if !IS_ERR.
1513                 */
1514                rcu_read_unlock();
1515                mutex_unlock(&scan_mutex);
1516                if (v)
1517                        put_object(v);
1518        }
1519}
1520
1521/*
1522 * Print the information for an unreferenced object to the seq file.
1523 */
1524static int kmemleak_seq_show(struct seq_file *seq, void *v)
1525{
1526        struct kmemleak_object *object = v;
1527        unsigned long flags;
1528
1529        spin_lock_irqsave(&object->lock, flags);
1530        if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1531                print_unreferenced(seq, object);
1532        spin_unlock_irqrestore(&object->lock, flags);
1533        return 0;
1534}
1535
1536static const struct seq_operations kmemleak_seq_ops = {
1537        .start = kmemleak_seq_start,
1538        .next  = kmemleak_seq_next,
1539        .stop  = kmemleak_seq_stop,
1540        .show  = kmemleak_seq_show,
1541};
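
/*
 * For illustration only (not part of kmemleak itself): the seq_file core
 * drives the callbacks above roughly as sketched below for a read that
 * returns two objects. rcu_read_lock() is taken in ->start and dropped in
 * ->stop, and exactly one object is held (use_count raised) in between.
 *
 *      v = start(seq, &pos);           object 0 held
 *      show(seq, v);
 *      v = next(seq, v, &pos);         object 0 released, object 1 held
 *      show(seq, v);
 *      v = next(seq, v, &pos);         object 1 released, v == NULL
 *      stop(seq, v);                   RCU and scan_mutex released
 */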
1542
1543static int kmemleak_open(struct inode *inode, struct file *file)
1544{
1545        return seq_open(file, &kmemleak_seq_ops);
1546}
1547
1548static int kmemleak_release(struct inode *inode, struct file *file)
1549{
1550        return seq_release(inode, file);
1551}
1552
1553static int dump_str_object_info(const char *str)
1554{
1555        unsigned long flags;
1556        struct kmemleak_object *object;
1557        unsigned long addr;
1558
1559        addr = simple_strtoul(str, NULL, 0);
1560        object = find_and_get_object(addr, 0);
1561        if (!object) {
1562                pr_info("Unknown object at 0x%08lx\n", addr);
1563                return -EINVAL;
1564        }
1565
1566        spin_lock_irqsave(&object->lock, flags);
1567        dump_object_info(object);
1568        spin_unlock_irqrestore(&object->lock, flags);
1569
1570        put_object(object);
1571        return 0;
1572}
1573
1574/*
1575 * We use grey instead of black to ensure we can do future scans on the same
1576 * objects. If we never rescanned them, these black objects could later come
1577 * to contain references to newly allocated objects and we would end up with
1578 * false positives.
1579 */
1580static void kmemleak_clear(void)
1581{
1582        struct kmemleak_object *object;
1583        unsigned long flags;
1584
1585        rcu_read_lock();
1586        list_for_each_entry_rcu(object, &object_list, object_list) {
1587                spin_lock_irqsave(&object->lock, flags);
1588                if ((object->flags & OBJECT_REPORTED) &&
1589                    unreferenced_object(object))
1590                        __paint_it(object, KMEMLEAK_GREY);
1591                spin_unlock_irqrestore(&object->lock, flags);
1592        }
1593        rcu_read_unlock();
1594}
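
/*
 * For illustration only: "clear" is typically used to isolate leaks
 * introduced by a specific test, in the style described in
 * Documentation/kmemleak.txt:
 *
 *      echo clear > /sys/kernel/debug/kmemleak
 *      ... exercise the code under test ...
 *      echo scan > /sys/kernel/debug/kmemleak
 *      cat /sys/kernel/debug/kmemleak
 *
 * Only objects that became unreferenced after the "clear" are then shown.
 */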
1595
1596/*
1597 * File write operation to configure kmemleak at run-time. The following
1598 * commands can be written to the /sys/kernel/debug/kmemleak file:
1599 *   off        - disable kmemleak (irreversible)
1600 *   stack=on   - enable task stack scanning
1601 *   stack=off  - disable task stack scanning
1602 *   scan=on    - start the automatic memory scanning thread
1603 *   scan=off   - stop the automatic memory scanning thread
1604 *   scan=...   - set the automatic memory scanning period in seconds (0 to
1605 *                disable it)
1606 *   scan       - trigger a memory scan
1607 *   clear      - mark all currently reported unreferenced kmemleak objects
1608 *                as grey so that they are no longer printed
1609 *   dump=...   - dump information about the object found at the given address
1610 */
1611static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1612                              size_t size, loff_t *ppos)
1613{
1614        char buf[64];
1615        int buf_size;
1616        int ret;
1617
1618        if (!atomic_read(&kmemleak_enabled))
1619                return -EBUSY;
1620
1621        buf_size = min(size, (sizeof(buf) - 1));
1622        if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1623                return -EFAULT;
1624        buf[buf_size] = 0;
1625
1626        ret = mutex_lock_interruptible(&scan_mutex);
1627        if (ret < 0)
1628                return ret;
1629
1630        if (strncmp(buf, "off", 3) == 0)
1631                kmemleak_disable();
1632        else if (strncmp(buf, "stack=on", 8) == 0)
1633                kmemleak_stack_scan = 1;
1634        else if (strncmp(buf, "stack=off", 9) == 0)
1635                kmemleak_stack_scan = 0;
1636        else if (strncmp(buf, "scan=on", 7) == 0)
1637                start_scan_thread();
1638        else if (strncmp(buf, "scan=off", 8) == 0)
1639                stop_scan_thread();
1640        else if (strncmp(buf, "scan=", 5) == 0) {
1641                unsigned long secs;
1642
1643                ret = strict_strtoul(buf + 5, 0, &secs);
1644                if (ret < 0)
1645                        goto out;
1646                stop_scan_thread();
1647                if (secs) {
1648                        jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1649                        start_scan_thread();
1650                }
1651        } else if (strncmp(buf, "scan", 4) == 0)
1652                kmemleak_scan();
1653        else if (strncmp(buf, "clear", 5) == 0)
1654                kmemleak_clear();
1655        else if (strncmp(buf, "dump=", 5) == 0)
1656                ret = dump_str_object_info(buf + 5);
1657        else
1658                ret = -EINVAL;
1659
1660out:
1661        mutex_unlock(&scan_mutex);
1662        if (ret < 0)
1663                return ret;
1664
1665        /* ignore the rest of the buffer, only one command at a time */
1666        *ppos += size;
1667        return size;
1668}
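
/*
 * For illustration only (user-space C, not kernel code): a minimal sketch
 * of driving the interface above, equivalent to
 * "echo scan > /sys/kernel/debug/kmemleak; cat /sys/kernel/debug/kmemleak".
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char line[256];
        FILE *f;
        int fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);

        if (fd < 0)
                return 1;
        (void)write(fd, "scan", 4);     /* one command per write */
        close(fd);

        f = fopen("/sys/kernel/debug/kmemleak", "r");
        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))    /* dump any leak reports */
                fputs(line, stdout);
        fclose(f);
        return 0;
}
#endif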
1669
1670static const struct file_operations kmemleak_fops = {
1671        .owner          = THIS_MODULE,
1672        .open           = kmemleak_open,
1673        .read           = seq_read,
1674        .write          = kmemleak_write,
1675        .llseek         = seq_lseek,
1676        .release        = kmemleak_release,
1677};
1678
1679/*
1680 * Stop the memory scanning thread and free the kmemleak internal objects,
1681 * but only if no scan thread was previously running (otherwise kmemleak may
1682 * still hold useful information on memory leaks).
1683 */
1684static void kmemleak_do_cleanup(struct work_struct *work)
1685{
1686        struct kmemleak_object *object;
1687        bool cleanup = scan_thread == NULL;
1688
1689        mutex_lock(&scan_mutex);
1690        stop_scan_thread();
1691
1692        if (cleanup) {
1693                rcu_read_lock();
1694                list_for_each_entry_rcu(object, &object_list, object_list)
1695                        delete_object_full(object->pointer);
1696                rcu_read_unlock();
1697        }
1698        mutex_unlock(&scan_mutex);
1699}
1700
1701static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1702
1703/*
1704 * Disable kmemleak. No memory allocation/freeing will be traced once this
1705 * function is called. Disabling kmemleak is an irreversible operation.
1706 */
1707static void kmemleak_disable(void)
1708{
1709        /* atomically check whether it was already invoked */
1710        if (atomic_cmpxchg(&kmemleak_error, 0, 1))
1711                return;
1712
1713        /* stop any memory operation tracing */
1714        atomic_set(&kmemleak_enabled, 0);
1715
1716        /* check whether it is too early for a kernel thread */
1717        if (atomic_read(&kmemleak_initialized))
1718                schedule_work(&cleanup_work);
1719
1720        pr_info("Kernel memory leak detector disabled\n");
1721}
1722
1723/*
1724 * Allow boot-time kmemleak disabling (enabled by default).
1725 */
1726static int kmemleak_boot_config(char *str)
1727{
1728        if (!str)
1729                return -EINVAL;
1730        if (strcmp(str, "off") == 0)
1731                kmemleak_disable();
1732        else if (strcmp(str, "on") == 0)
1733                kmemleak_skip_disable = 1;
1734        else
1735                return -EINVAL;
1736        return 0;
1737}
1738early_param("kmemleak", kmemleak_boot_config);
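
/*
 * Example: booting with "kmemleak=off" disables the detector from startup,
 * while "kmemleak=on" overrides a CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF build
 * (see the kmemleak_skip_disable check in kmemleak_init() below).
 */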
1739
1740static void __init print_log_trace(struct early_log *log)
1741{
1742        struct stack_trace trace;
1743
1744        trace.nr_entries = log->trace_len;
1745        trace.entries = log->trace;
1746
1747        pr_notice("Early log backtrace:\n");
1748        print_stack_trace(&trace, 2);
1749}
1750
1751/*
1752 * Kmemleak initialization.
1753 */
1754void __init kmemleak_init(void)
1755{
1756        int i;
1757        unsigned long flags;
1758
1759#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1760        if (!kmemleak_skip_disable) {
1761                atomic_set(&kmemleak_early_log, 0);
1762                kmemleak_disable();
1763                return;
1764        }
1765#endif
1766
1767        jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1768        jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1769
1770        object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1771        scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1772
1773        if (crt_early_log >= ARRAY_SIZE(early_log))
1774                pr_warning("Early log buffer exceeded (%d), please increase "
1775                           "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
1776
1777        /* the kernel is still in UP mode, so disabling the IRQs is enough */
1778        local_irq_save(flags);
1779        atomic_set(&kmemleak_early_log, 0);
1780        if (atomic_read(&kmemleak_error)) {
1781                local_irq_restore(flags);
1782                return;
1783        } else
1784                atomic_set(&kmemleak_enabled, 1);
1785        local_irq_restore(flags);
1786
1787        /*
1788         * This is the point where tracking allocations is safe. Automatic
1789         * scanning is started during the late initcall. Add the early logged
1790         * callbacks to the kmemleak infrastructure.
1791         */
1792        for (i = 0; i < crt_early_log; i++) {
1793                struct early_log *log = &early_log[i];
1794
1795                switch (log->op_type) {
1796                case KMEMLEAK_ALLOC:
1797                        early_alloc(log);
1798                        break;
1799                case KMEMLEAK_ALLOC_PERCPU:
1800                        early_alloc_percpu(log);
1801                        break;
1802                case KMEMLEAK_FREE:
1803                        kmemleak_free(log->ptr);
1804                        break;
1805                case KMEMLEAK_FREE_PART:
1806                        kmemleak_free_part(log->ptr, log->size);
1807                        break;
1808                case KMEMLEAK_FREE_PERCPU:
1809                        kmemleak_free_percpu(log->ptr);
1810                        break;
1811                case KMEMLEAK_NOT_LEAK:
1812                        kmemleak_not_leak(log->ptr);
1813                        break;
1814                case KMEMLEAK_IGNORE:
1815                        kmemleak_ignore(log->ptr);
1816                        break;
1817                case KMEMLEAK_SCAN_AREA:
1818                        kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
1819                        break;
1820                case KMEMLEAK_NO_SCAN:
1821                        kmemleak_no_scan(log->ptr);
1822                        break;
1823                default:
1824                        kmemleak_warn("Unknown early log operation: %d\n",
1825                                      log->op_type);
1826                }
1827
1828                if (atomic_read(&kmemleak_warning)) {
1829                        print_log_trace(log);
1830                        atomic_set(&kmemleak_warning, 0);
1831                }
1832        }
1833}
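
/*
 * The early log replayed above exists because kmemleak callbacks can fire
 * before the object caches are ready. Roughly:
 *
 *      early boot:     kmemleak_alloc(ptr, size, ...)
 *                              -> recorded in early_log[crt_early_log++]
 *      kmemleak_init:  each recorded entry is replayed through the real
 *                      callback (the switch statement above)
 *
 * so allocations made before initialization are still tracked.
 */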
1834
1835/*
1836 * Late initialization function.
1837 */
1838static int __init kmemleak_late_init(void)
1839{
1840        struct dentry *dentry;
1841
1842        atomic_set(&kmemleak_initialized, 1);
1843
1844        if (atomic_read(&kmemleak_error)) {
1845                /*
1846                 * Some error occurred and kmemleak was disabled. There is a
1847                 * small chance that kmemleak_disable() was called immediately
1848                 * after kmemleak_initialized was set, so we may end up with
1849                 * two clean-up threads, though serialized by the scan_mutex.
1850                 */
1851                schedule_work(&cleanup_work);
1852                return -ENOMEM;
1853        }
1854
1855        dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1856                                     &kmemleak_fops);
1857        if (!dentry)
1858                pr_warning("Failed to create the debugfs kmemleak file\n");
1859        mutex_lock(&scan_mutex);
1860        start_scan_thread();
1861        mutex_unlock(&scan_mutex);
1862
1863        pr_info("Kernel memory leak detector initialized\n");
1864
1865        return 0;
1866}
1867late_initcall(kmemleak_late_init);
1868