linux/kernel/lockdep.c
   1/*
   2 * kernel/lockdep.c
   3 *
   4 * Runtime locking correctness validator
   5 *
   6 * Started by Ingo Molnar:
   7 *
   8 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   9 *
  10 * this code maps all the lock dependencies as they occur in a live kernel
  11 * and will warn about the following classes of locking bugs:
  12 *
  13 * - lock inversion scenarios
  14 * - circular lock dependencies
  15 * - hardirq/softirq safe/unsafe locking bugs
  16 *
  17 * Bugs are reported even if the current locking scenario does not cause
  18 * any deadlock at this point.
  19 *
   20 * I.e. if at any time in the past two locks were taken in a different order,
   21 * even if it happened in another task, and even if those were different
   22 * locks (but of the same class as this lock), this code will detect it.
  23 *
  24 * Thanks to Arjan van de Ven for coming up with the initial idea of
   25 * mapping lock dependencies at runtime.
  26 */
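
/*
 * Illustrative sketch (editorial addition, not part of the original file;
 * lock_a and lock_b are hypothetical locks): the classic AB-BA pattern is
 * the simplest inversion this validator reports, even when the two paths
 * never actually race at runtime:
 *
 *      path 1:                         path 2:
 *        spin_lock(&lock_a);             spin_lock(&lock_b);
 *        spin_lock(&lock_b);             spin_lock(&lock_a);
 *
 * Once both orders have been observed for the two lock classes, the
 * dependency graph built below contains a cycle and a warning is printed.
 */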
  27#include <linux/mutex.h>
  28#include <linux/sched.h>
  29#include <linux/delay.h>
  30#include <linux/module.h>
  31#include <linux/proc_fs.h>
  32#include <linux/seq_file.h>
  33#include <linux/spinlock.h>
  34#include <linux/kallsyms.h>
  35#include <linux/interrupt.h>
  36#include <linux/stacktrace.h>
  37#include <linux/debug_locks.h>
  38#include <linux/irqflags.h>
  39#include <linux/utsname.h>
  40
  41#include <asm/sections.h>
  42
  43#include "lockdep_internals.h"
  44
  45/*
  46 * hash_lock: protects the lockdep hashes and class/list/hash allocators.
  47 *
  48 * This is one of the rare exceptions where it's justified
   49 * to use a raw spinlock - we really don't want the spinlock
  50 * code to recurse back into the lockdep code.
  51 */
  52static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  53
  54static int lockdep_initialized;
  55
  56unsigned long nr_list_entries;
  57static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
  58
  59/*
   60 * Allocate a lockdep entry. (assumes hash_lock is held; returns
   61 * NULL on failure)
  62 */
  63static struct lock_list *alloc_list_entry(void)
  64{
  65        if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
  66                __raw_spin_unlock(&hash_lock);
  67                debug_locks_off();
  68                printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
  69                printk("turning off the locking correctness validator.\n");
  70                return NULL;
  71        }
  72        return list_entries + nr_list_entries++;
  73}
  74
  75/*
   76 * All data structures here are protected by the global hash_lock.
   77 *
   78 * The lock-class structs only get allocated once, during bootup, and
   79 * never get freed - this significantly simplifies the debugging code.
  80 */
  81unsigned long nr_lock_classes;
  82static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
  83
  84/*
  85 * We keep a global list of all lock classes. The list only grows,
  86 * never shrinks. The list is only accessed with the lockdep
   87 * spinlock held.
  88 */
  89LIST_HEAD(all_lock_classes);
  90
  91/*
  92 * The lockdep classes are in a hash-table as well, for fast lookup:
  93 */
  94#define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
  95#define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
  96#define CLASSHASH_MASK          (CLASSHASH_SIZE - 1)
  97#define __classhashfn(key)      ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
  98#define classhashentry(key)     (classhash_table + __classhashfn((key)))
  99
 100static struct list_head classhash_table[CLASSHASH_SIZE];
 101
 102unsigned long nr_lock_chains;
 103static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
 104
 105/*
 106 * We put the lock dependency chains into a hash-table as well, to cache
 107 * their existence:
 108 */
 109#define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
 110#define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
 111#define CHAINHASH_MASK          (CHAINHASH_SIZE - 1)
 112#define __chainhashfn(chain) \
 113                (((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
 114#define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
 115
 116static struct list_head chainhash_table[CHAINHASH_SIZE];
 117
 118/*
 119 * The hash key of the lock dependency chains is a hash itself too:
 120 * it's a hash of all locks taken up to that lock, including that lock.
 121 * It's a 64-bit hash, because it's important for the keys to be
 122 * unique.
 123 */
 124#define iterate_chain_key(key1, key2) \
 125        (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
 126        ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
 127        (key2))
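
/*
 * Rough sketch of how a context's chain key evolves as it takes locks
 * (editorial addition; "id" stands for the class index, class - lock_classes,
 * exactly as computed in check_chain_key() further below):
 *
 *      u64 chain_key = 0;
 *      chain_key = iterate_chain_key(chain_key, id_of_first_lock);
 *      chain_key = iterate_chain_key(chain_key, id_of_second_lock);
 *
 * Two contexts that take the same classes in the same order thus end up
 * with the same chain key and hit the chain-hash cache below.
 */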
 128
 129void lockdep_off(void)
 130{
 131        current->lockdep_recursion++;
 132}
 133
 134EXPORT_SYMBOL(lockdep_off);
 135
 136void lockdep_on(void)
 137{
 138        current->lockdep_recursion--;
 139}
 140
 141EXPORT_SYMBOL(lockdep_on);
 142
 143int lockdep_internal(void)
 144{
 145        return current->lockdep_recursion != 0;
 146}
 147
 148EXPORT_SYMBOL(lockdep_internal);
 149
 150/*
 151 * Debugging switches:
 152 */
 153
 154#define VERBOSE                 0
 155#ifdef VERBOSE
 156# define VERY_VERBOSE           0
 157#endif
 158
 159#if VERBOSE
 160# define HARDIRQ_VERBOSE        1
 161# define SOFTIRQ_VERBOSE        1
 162#else
 163# define HARDIRQ_VERBOSE        0
 164# define SOFTIRQ_VERBOSE        0
 165#endif
 166
 167#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
 168/*
 169 * Quick filtering for interesting events:
 170 */
 171static int class_filter(struct lock_class *class)
 172{
 173#if 0
 174        /* Example */
 175        if (class->name_version == 1 &&
 176                        !strcmp(class->name, "lockname"))
 177                return 1;
 178        if (class->name_version == 1 &&
 179                        !strcmp(class->name, "&struct->lockfield"))
 180                return 1;
 181#endif
  182        /* Allow everything else. Returning 0 here would filter out everything. */
 183        return 1;
 184}
 185#endif
 186
 187static int verbose(struct lock_class *class)
 188{
 189#if VERBOSE
 190        return class_filter(class);
 191#endif
 192        return 0;
 193}
 194
 195#ifdef CONFIG_TRACE_IRQFLAGS
 196
 197static int hardirq_verbose(struct lock_class *class)
 198{
 199#if HARDIRQ_VERBOSE
 200        return class_filter(class);
 201#endif
 202        return 0;
 203}
 204
 205static int softirq_verbose(struct lock_class *class)
 206{
 207#if SOFTIRQ_VERBOSE
 208        return class_filter(class);
 209#endif
 210        return 0;
 211}
 212
 213#endif
 214
 215/*
 216 * Stack-trace: tightly packed array of stack backtrace
 217 * addresses. Protected by the hash_lock.
 218 */
 219unsigned long nr_stack_trace_entries;
 220static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
 221
 222static int save_trace(struct stack_trace *trace)
 223{
 224        trace->nr_entries = 0;
 225        trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
 226        trace->entries = stack_trace + nr_stack_trace_entries;
 227
 228        trace->skip = 3;
 229        trace->all_contexts = 0;
 230
  231        /* Make sure to not recurse in case the unwinder needs to take
  232           locks. */
 233        lockdep_off();
 234        save_stack_trace(trace, NULL);
 235        lockdep_on();
 236
 237        trace->max_entries = trace->nr_entries;
 238
 239        nr_stack_trace_entries += trace->nr_entries;
 240        if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
 241                return 0;
 242
 243        if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
 244                __raw_spin_unlock(&hash_lock);
 245                if (debug_locks_off()) {
 246                        printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
 247                        printk("turning off the locking correctness validator.\n");
 248                        dump_stack();
 249                }
 250                return 0;
 251        }
 252
 253        return 1;
 254}
 255
 256unsigned int nr_hardirq_chains;
 257unsigned int nr_softirq_chains;
 258unsigned int nr_process_chains;
 259unsigned int max_lockdep_depth;
 260unsigned int max_recursion_depth;
 261
 262#ifdef CONFIG_DEBUG_LOCKDEP
 263/*
  264 * We cannot printk in early bootup code. Even early_printk()
  265 * might not work. So we mark any initialization errors and printk
  266 * about them later on, in lockdep_info().
 267 */
 268static int lockdep_init_error;
 269
 270/*
 271 * Various lockdep statistics:
 272 */
 273atomic_t chain_lookup_hits;
 274atomic_t chain_lookup_misses;
 275atomic_t hardirqs_on_events;
 276atomic_t hardirqs_off_events;
 277atomic_t redundant_hardirqs_on;
 278atomic_t redundant_hardirqs_off;
 279atomic_t softirqs_on_events;
 280atomic_t softirqs_off_events;
 281atomic_t redundant_softirqs_on;
 282atomic_t redundant_softirqs_off;
 283atomic_t nr_unused_locks;
 284atomic_t nr_cyclic_checks;
 285atomic_t nr_cyclic_check_recursions;
 286atomic_t nr_find_usage_forwards_checks;
 287atomic_t nr_find_usage_forwards_recursions;
 288atomic_t nr_find_usage_backwards_checks;
 289atomic_t nr_find_usage_backwards_recursions;
 290# define debug_atomic_inc(ptr)          atomic_inc(ptr)
 291# define debug_atomic_dec(ptr)          atomic_dec(ptr)
 292# define debug_atomic_read(ptr)         atomic_read(ptr)
 293#else
 294# define debug_atomic_inc(ptr)          do { } while (0)
 295# define debug_atomic_dec(ptr)          do { } while (0)
 296# define debug_atomic_read(ptr)         0
 297#endif
 298
 299/*
 300 * Locking printouts:
 301 */
 302
 303static const char *usage_str[] =
 304{
 305        [LOCK_USED] =                   "initial-use ",
 306        [LOCK_USED_IN_HARDIRQ] =        "in-hardirq-W",
 307        [LOCK_USED_IN_SOFTIRQ] =        "in-softirq-W",
 308        [LOCK_ENABLED_SOFTIRQS] =       "softirq-on-W",
 309        [LOCK_ENABLED_HARDIRQS] =       "hardirq-on-W",
 310        [LOCK_USED_IN_HARDIRQ_READ] =   "in-hardirq-R",
 311        [LOCK_USED_IN_SOFTIRQ_READ] =   "in-softirq-R",
 312        [LOCK_ENABLED_SOFTIRQS_READ] =  "softirq-on-R",
 313        [LOCK_ENABLED_HARDIRQS_READ] =  "hardirq-on-R",
 314};
 315
 316const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 317{
 318        unsigned long offs, size;
 319        char *modname;
 320
 321        return kallsyms_lookup((unsigned long)key, &size, &offs, &modname, str);
 322}
 323
 324void
 325get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
 326{
 327        *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
 328
 329        if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
 330                *c1 = '+';
 331        else
 332                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
 333                        *c1 = '-';
 334
 335        if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
 336                *c2 = '+';
 337        else
 338                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
 339                        *c2 = '-';
 340
 341        if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 342                *c3 = '-';
 343        if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
 344                *c3 = '+';
 345                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 346                        *c3 = '?';
 347        }
 348
 349        if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
 350                *c4 = '-';
 351        if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
 352                *c4 = '+';
 353                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
 354                        *c4 = '?';
 355        }
 356}
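
/*
 * Editorial note on the four characters filled in above: c1/c2 are the
 * hardirq/softirq write-usage columns, c3/c4 the corresponding read-usage
 * columns. '.' means no irq-relevant usage has been seen yet, '+' means
 * the class was acquired in that irq context, '-' means it was acquired
 * with that irq type enabled, and '?' (read columns only) means both have
 * been observed.
 */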
 357
 358static void print_lock_name(struct lock_class *class)
 359{
 360        char str[128], c1, c2, c3, c4;
 361        const char *name;
 362
 363        get_usage_chars(class, &c1, &c2, &c3, &c4);
 364
 365        name = class->name;
 366        if (!name) {
 367                name = __get_key_name(class->key, str);
 368                printk(" (%s", name);
 369        } else {
 370                printk(" (%s", name);
 371                if (class->name_version > 1)
 372                        printk("#%d", class->name_version);
 373                if (class->subclass)
 374                        printk("/%d", class->subclass);
 375        }
 376        printk("){%c%c%c%c}", c1, c2, c3, c4);
 377}
 378
 379static void print_lockdep_cache(struct lockdep_map *lock)
 380{
 381        const char *name;
 382        char str[128];
 383
 384        name = lock->name;
 385        if (!name)
 386                name = __get_key_name(lock->key->subkeys, str);
 387
 388        printk("%s", name);
 389}
 390
 391static void print_lock(struct held_lock *hlock)
 392{
 393        print_lock_name(hlock->class);
 394        printk(", at: ");
 395        print_ip_sym(hlock->acquire_ip);
 396}
 397
 398static void lockdep_print_held_locks(struct task_struct *curr)
 399{
 400        int i, depth = curr->lockdep_depth;
 401
 402        if (!depth) {
 403                printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
 404                return;
 405        }
 406        printk("%d lock%s held by %s/%d:\n",
 407                depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
 408
 409        for (i = 0; i < depth; i++) {
 410                printk(" #%d: ", i);
 411                print_lock(curr->held_locks + i);
 412        }
 413}
 414
 415static void print_lock_class_header(struct lock_class *class, int depth)
 416{
 417        int bit;
 418
 419        printk("%*s->", depth, "");
 420        print_lock_name(class);
 421        printk(" ops: %lu", class->ops);
 422        printk(" {\n");
 423
 424        for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
 425                if (class->usage_mask & (1 << bit)) {
 426                        int len = depth;
 427
 428                        len += printk("%*s   %s", depth, "", usage_str[bit]);
 429                        len += printk(" at:\n");
 430                        print_stack_trace(class->usage_traces + bit, len);
 431                }
 432        }
 433        printk("%*s }\n", depth, "");
 434
 435        printk("%*s ... key      at: ",depth,"");
 436        print_ip_sym((unsigned long)class->key);
 437}
 438
 439/*
 440 * printk all lock dependencies starting at <entry>:
 441 */
 442static void print_lock_dependencies(struct lock_class *class, int depth)
 443{
 444        struct lock_list *entry;
 445
 446        if (DEBUG_LOCKS_WARN_ON(depth >= 20))
 447                return;
 448
 449        print_lock_class_header(class, depth);
 450
 451        list_for_each_entry(entry, &class->locks_after, entry) {
 452                DEBUG_LOCKS_WARN_ON(!entry->class);
 453                print_lock_dependencies(entry->class, depth + 1);
 454
 455                printk("%*s ... acquired at:\n",depth,"");
 456                print_stack_trace(&entry->trace, 2);
 457                printk("\n");
 458        }
 459}
 460
 461/*
 462 * Add a new dependency to the head of the list:
 463 */
 464static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 465                            struct list_head *head, unsigned long ip)
 466{
 467        struct lock_list *entry;
 468        /*
 469         * Lock not present yet - get a new dependency struct and
 470         * add it to the list:
 471         */
 472        entry = alloc_list_entry();
 473        if (!entry)
 474                return 0;
 475
 476        entry->class = this;
 477        save_trace(&entry->trace);
 478
 479        /*
 480         * Since we never remove from the dependency list, the list can
  481         * be walked locklessly by other CPUs; it's only the allocation
  482         * that must be protected by the spinlock. But this also means
 483         * we must make new entries visible only once writes to the
 484         * entry become visible - hence the RCU op:
 485         */
 486        list_add_tail_rcu(&entry->entry, head);
 487
 488        return 1;
 489}
 490
 491/*
 492 * Recursive, forwards-direction lock-dependency checking, used for
  493 * both noncyclic checking and hardirq-unsafe/softirq-unsafe
 494 * checking.
 495 *
 496 * (to keep the stackframe of the recursive functions small we
 497 *  use these global variables, and we also mark various helper
 498 *  functions as noinline.)
 499 */
 500static struct held_lock *check_source, *check_target;
 501
 502/*
 503 * Print a dependency chain entry (this is only done when a deadlock
 504 * has been detected):
 505 */
 506static noinline int
 507print_circular_bug_entry(struct lock_list *target, unsigned int depth)
 508{
 509        if (debug_locks_silent)
 510                return 0;
 511        printk("\n-> #%u", depth);
 512        print_lock_name(target->class);
 513        printk(":\n");
 514        print_stack_trace(&target->trace, 6);
 515
 516        return 0;
 517}
 518
 519static void print_kernel_version(void)
 520{
 521        printk("%s %.*s\n", init_utsname()->release,
 522                (int)strcspn(init_utsname()->version, " "),
 523                init_utsname()->version);
 524}
 525
 526/*
 527 * When a circular dependency is detected, print the
 528 * header first:
 529 */
 530static noinline int
 531print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 532{
 533        struct task_struct *curr = current;
 534
 535        __raw_spin_unlock(&hash_lock);
 536        debug_locks_off();
 537        if (debug_locks_silent)
 538                return 0;
 539
 540        printk("\n=======================================================\n");
 541        printk(  "[ INFO: possible circular locking dependency detected ]\n");
 542        print_kernel_version();
 543        printk(  "-------------------------------------------------------\n");
 544        printk("%s/%d is trying to acquire lock:\n",
 545                curr->comm, curr->pid);
 546        print_lock(check_source);
 547        printk("\nbut task is already holding lock:\n");
 548        print_lock(check_target);
 549        printk("\nwhich lock already depends on the new lock.\n\n");
 550        printk("\nthe existing dependency chain (in reverse order) is:\n");
 551
 552        print_circular_bug_entry(entry, depth);
 553
 554        return 0;
 555}
 556
 557static noinline int print_circular_bug_tail(void)
 558{
 559        struct task_struct *curr = current;
 560        struct lock_list this;
 561
 562        if (debug_locks_silent)
 563                return 0;
 564
 565        this.class = check_source->class;
 566        save_trace(&this.trace);
 567        print_circular_bug_entry(&this, 0);
 568
 569        printk("\nother info that might help us debug this:\n\n");
 570        lockdep_print_held_locks(curr);
 571
 572        printk("\nstack backtrace:\n");
 573        dump_stack();
 574
 575        return 0;
 576}
 577
 578#define RECURSION_LIMIT 40
 579
 580static int noinline print_infinite_recursion_bug(void)
 581{
 582        __raw_spin_unlock(&hash_lock);
 583        DEBUG_LOCKS_WARN_ON(1);
 584
 585        return 0;
 586}
 587
 588/*
  589 * Prove that the dependency graph starting at <source> cannot
 590 * lead to <target>. Print an error and return 0 if it does.
 591 */
 592static noinline int
 593check_noncircular(struct lock_class *source, unsigned int depth)
 594{
 595        struct lock_list *entry;
 596
 597        debug_atomic_inc(&nr_cyclic_check_recursions);
 598        if (depth > max_recursion_depth)
 599                max_recursion_depth = depth;
 600        if (depth >= RECURSION_LIMIT)
 601                return print_infinite_recursion_bug();
 602        /*
 603         * Check this lock's dependency list:
 604         */
 605        list_for_each_entry(entry, &source->locks_after, entry) {
 606                if (entry->class == check_target->class)
 607                        return print_circular_bug_header(entry, depth+1);
 608                debug_atomic_inc(&nr_cyclic_checks);
 609                if (!check_noncircular(entry->class, depth+1))
 610                        return print_circular_bug_entry(entry, depth+1);
 611        }
 612        return 1;
 613}
 614
 615static int very_verbose(struct lock_class *class)
 616{
 617#if VERY_VERBOSE
 618        return class_filter(class);
 619#endif
 620        return 0;
 621}
 622#ifdef CONFIG_TRACE_IRQFLAGS
 623
 624/*
 625 * Forwards and backwards subgraph searching, for the purposes of
 626 * proving that two subgraphs can be connected by a new dependency
 627 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 628 */
 629static enum lock_usage_bit find_usage_bit;
 630static struct lock_class *forwards_match, *backwards_match;
 631
 632/*
 633 * Find a node in the forwards-direction dependency sub-graph starting
 634 * at <source> that matches <find_usage_bit>.
 635 *
 636 * Return 2 if such a node exists in the subgraph, and put that node
 637 * into <forwards_match>.
 638 *
 639 * Return 1 otherwise and keep <forwards_match> unchanged.
 640 * Return 0 on error.
 641 */
 642static noinline int
 643find_usage_forwards(struct lock_class *source, unsigned int depth)
 644{
 645        struct lock_list *entry;
 646        int ret;
 647
 648        if (depth > max_recursion_depth)
 649                max_recursion_depth = depth;
 650        if (depth >= RECURSION_LIMIT)
 651                return print_infinite_recursion_bug();
 652
 653        debug_atomic_inc(&nr_find_usage_forwards_checks);
 654        if (source->usage_mask & (1 << find_usage_bit)) {
 655                forwards_match = source;
 656                return 2;
 657        }
 658
 659        /*
 660         * Check this lock's dependency list:
 661         */
 662        list_for_each_entry(entry, &source->locks_after, entry) {
 663                debug_atomic_inc(&nr_find_usage_forwards_recursions);
 664                ret = find_usage_forwards(entry->class, depth+1);
 665                if (ret == 2 || ret == 0)
 666                        return ret;
 667        }
 668        return 1;
 669}
 670
 671/*
 672 * Find a node in the backwards-direction dependency sub-graph starting
 673 * at <source> that matches <find_usage_bit>.
 674 *
 675 * Return 2 if such a node exists in the subgraph, and put that node
 676 * into <backwards_match>.
 677 *
 678 * Return 1 otherwise and keep <backwards_match> unchanged.
 679 * Return 0 on error.
 680 */
 681static noinline int
 682find_usage_backwards(struct lock_class *source, unsigned int depth)
 683{
 684        struct lock_list *entry;
 685        int ret;
 686
 687        if (depth > max_recursion_depth)
 688                max_recursion_depth = depth;
 689        if (depth >= RECURSION_LIMIT)
 690                return print_infinite_recursion_bug();
 691
 692        debug_atomic_inc(&nr_find_usage_backwards_checks);
 693        if (source->usage_mask & (1 << find_usage_bit)) {
 694                backwards_match = source;
 695                return 2;
 696        }
 697
 698        /*
 699         * Check this lock's dependency list:
 700         */
 701        list_for_each_entry(entry, &source->locks_before, entry) {
 702                debug_atomic_inc(&nr_find_usage_backwards_recursions);
 703                ret = find_usage_backwards(entry->class, depth+1);
 704                if (ret == 2 || ret == 0)
 705                        return ret;
 706        }
 707        return 1;
 708}
 709
 710static int
 711print_bad_irq_dependency(struct task_struct *curr,
 712                         struct held_lock *prev,
 713                         struct held_lock *next,
 714                         enum lock_usage_bit bit1,
 715                         enum lock_usage_bit bit2,
 716                         const char *irqclass)
 717{
 718        __raw_spin_unlock(&hash_lock);
 719        debug_locks_off();
 720        if (debug_locks_silent)
 721                return 0;
 722
 723        printk("\n======================================================\n");
 724        printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
 725                irqclass, irqclass);
 726        print_kernel_version();
 727        printk(  "------------------------------------------------------\n");
 728        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
 729                curr->comm, curr->pid,
 730                curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
 731                curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
 732                curr->hardirqs_enabled,
 733                curr->softirqs_enabled);
 734        print_lock(next);
 735
 736        printk("\nand this task is already holding:\n");
 737        print_lock(prev);
 738        printk("which would create a new lock dependency:\n");
 739        print_lock_name(prev->class);
 740        printk(" ->");
 741        print_lock_name(next->class);
 742        printk("\n");
 743
 744        printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
 745                irqclass);
 746        print_lock_name(backwards_match);
 747        printk("\n... which became %s-irq-safe at:\n", irqclass);
 748
 749        print_stack_trace(backwards_match->usage_traces + bit1, 1);
 750
 751        printk("\nto a %s-irq-unsafe lock:\n", irqclass);
 752        print_lock_name(forwards_match);
 753        printk("\n... which became %s-irq-unsafe at:\n", irqclass);
 754        printk("...");
 755
 756        print_stack_trace(forwards_match->usage_traces + bit2, 1);
 757
 758        printk("\nother info that might help us debug this:\n\n");
 759        lockdep_print_held_locks(curr);
 760
 761        printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
 762        print_lock_dependencies(backwards_match, 0);
 763
 764        printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
 765        print_lock_dependencies(forwards_match, 0);
 766
 767        printk("\nstack backtrace:\n");
 768        dump_stack();
 769
 770        return 0;
 771}
 772
 773static int
 774check_usage(struct task_struct *curr, struct held_lock *prev,
 775            struct held_lock *next, enum lock_usage_bit bit_backwards,
 776            enum lock_usage_bit bit_forwards, const char *irqclass)
 777{
 778        int ret;
 779
 780        find_usage_bit = bit_backwards;
 781        /* fills in <backwards_match> */
 782        ret = find_usage_backwards(prev->class, 0);
 783        if (!ret || ret == 1)
 784                return ret;
 785
 786        find_usage_bit = bit_forwards;
 787        ret = find_usage_forwards(next->class, 0);
 788        if (!ret || ret == 1)
 789                return ret;
 790        /* ret == 2 */
 791        return print_bad_irq_dependency(curr, prev, next,
 792                        bit_backwards, bit_forwards, irqclass);
 793}
 794
 795#endif
 796
 797static int
 798print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 799                   struct held_lock *next)
 800{
 801        debug_locks_off();
 802        __raw_spin_unlock(&hash_lock);
 803        if (debug_locks_silent)
 804                return 0;
 805
 806        printk("\n=============================================\n");
 807        printk(  "[ INFO: possible recursive locking detected ]\n");
 808        print_kernel_version();
 809        printk(  "---------------------------------------------\n");
 810        printk("%s/%d is trying to acquire lock:\n",
 811                curr->comm, curr->pid);
 812        print_lock(next);
 813        printk("\nbut task is already holding lock:\n");
 814        print_lock(prev);
 815
 816        printk("\nother info that might help us debug this:\n");
 817        lockdep_print_held_locks(curr);
 818
 819        printk("\nstack backtrace:\n");
 820        dump_stack();
 821
 822        return 0;
 823}
 824
 825/*
 826 * Check whether we are holding such a class already.
 827 *
 828 * (Note that this has to be done separately, because the graph cannot
 829 * detect such classes of deadlocks.)
 830 *
 831 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 832 */
 833static int
 834check_deadlock(struct task_struct *curr, struct held_lock *next,
 835               struct lockdep_map *next_instance, int read)
 836{
 837        struct held_lock *prev;
 838        int i;
 839
 840        for (i = 0; i < curr->lockdep_depth; i++) {
 841                prev = curr->held_locks + i;
 842                if (prev->class != next->class)
 843                        continue;
 844                /*
 845                 * Allow read-after-read recursion of the same
 846                 * lock class (i.e. read_lock(lock)+read_lock(lock)):
 847                 */
 848                if ((read == 2) && prev->read)
 849                        return 2;
 850                return print_deadlock_bug(curr, prev, next);
 851        }
 852        return 1;
 853}
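
/*
 * Editorial illustration (hypothetical lock names): the check above
 * catches the simple self-recursion case
 *
 *      spin_lock(&lock_a);
 *      spin_lock(&lock_a);     <- same class already held: reported
 *
 * while read-after-read recursion of the same class (the read == 2 case),
 * e.g. read_lock(&rwlock_a) taken twice in one context, is allowed.
 */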
 854
 855/*
 856 * There was a chain-cache miss, and we are about to add a new dependency
 857 * to a previous lock. We recursively validate the following rules:
 858 *
 859 *  - would the adding of the <prev> -> <next> dependency create a
 860 *    circular dependency in the graph? [== circular deadlock]
 861 *
 862 *  - does the new prev->next dependency connect any hardirq-safe lock
 863 *    (in the full backwards-subgraph starting at <prev>) with any
 864 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 865 *    <next>)? [== illegal lock inversion with hardirq contexts]
 866 *
 867 *  - does the new prev->next dependency connect any softirq-safe lock
 868 *    (in the full backwards-subgraph starting at <prev>) with any
 869 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 870 *    <next>)? [== illegal lock inversion with softirq contexts]
 871 *
 872 * any of these scenarios could lead to a deadlock.
 873 *
 874 * Then if all the validations pass, we add the forwards and backwards
 875 * dependency.
 876 */
 877static int
 878check_prev_add(struct task_struct *curr, struct held_lock *prev,
 879               struct held_lock *next)
 880{
 881        struct lock_list *entry;
 882        int ret;
 883
 884        /*
 885         * Prove that the new <prev> -> <next> dependency would not
 886         * create a circular dependency in the graph. (We do this by
 887         * forward-recursing into the graph starting at <next>, and
 888         * checking whether we can reach <prev>.)
 889         *
 890         * We are using global variables to control the recursion, to
 891         * keep the stackframe size of the recursive functions low:
 892         */
 893        check_source = next;
 894        check_target = prev;
 895        if (!(check_noncircular(next->class, 0)))
 896                return print_circular_bug_tail();
 897
 898#ifdef CONFIG_TRACE_IRQFLAGS
 899        /*
 900         * Prove that the new dependency does not connect a hardirq-safe
 901         * lock with a hardirq-unsafe lock - to achieve this we search
 902         * the backwards-subgraph starting at <prev>, and the
 903         * forwards-subgraph starting at <next>:
 904         */
 905        if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
 906                                        LOCK_ENABLED_HARDIRQS, "hard"))
 907                return 0;
 908
 909        /*
 910         * Prove that the new dependency does not connect a hardirq-safe-read
 911         * lock with a hardirq-unsafe lock - to achieve this we search
 912         * the backwards-subgraph starting at <prev>, and the
 913         * forwards-subgraph starting at <next>:
 914         */
 915        if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
 916                                        LOCK_ENABLED_HARDIRQS, "hard-read"))
 917                return 0;
 918
 919        /*
 920         * Prove that the new dependency does not connect a softirq-safe
 921         * lock with a softirq-unsafe lock - to achieve this we search
 922         * the backwards-subgraph starting at <prev>, and the
 923         * forwards-subgraph starting at <next>:
 924         */
 925        if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
 926                                        LOCK_ENABLED_SOFTIRQS, "soft"))
 927                return 0;
 928        /*
 929         * Prove that the new dependency does not connect a softirq-safe-read
 930         * lock with a softirq-unsafe lock - to achieve this we search
 931         * the backwards-subgraph starting at <prev>, and the
 932         * forwards-subgraph starting at <next>:
 933         */
 934        if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
  935                                        LOCK_ENABLED_SOFTIRQS, "soft-read"))
 936                return 0;
 937#endif
 938        /*
 939         * For recursive read-locks we do all the dependency checks,
  940         * but we don't store read-triggered dependencies (only
 941         * write-triggered dependencies). This ensures that only the
 942         * write-side dependencies matter, and that if for example a
 943         * write-lock never takes any other locks, then the reads are
 944         * equivalent to a NOP.
 945         */
 946        if (next->read == 2 || prev->read == 2)
 947                return 1;
 948        /*
 949         * Is the <prev> -> <next> dependency already present?
 950         *
 951         * (this may occur even though this is a new chain: consider
 952         *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
 953         *  chains - the second one will be new, but L1 already has
 954         *  L2 added to its dependency list, due to the first chain.)
 955         */
 956        list_for_each_entry(entry, &prev->class->locks_after, entry) {
 957                if (entry->class == next->class)
 958                        return 2;
 959        }
 960
 961        /*
 962         * Ok, all validations passed, add the new lock
 963         * to the previous lock's dependency list:
 964         */
 965        ret = add_lock_to_list(prev->class, next->class,
 966                               &prev->class->locks_after, next->acquire_ip);
 967        if (!ret)
 968                return 0;
 969        /*
 970         * Return value of 2 signals 'dependency already added',
  971         * in that case we don't have to add the backlink either.
 972         */
 973        if (ret == 2)
 974                return 2;
 975        ret = add_lock_to_list(next->class, prev->class,
 976                               &next->class->locks_before, next->acquire_ip);
 977
 978        /*
 979         * Debugging printouts:
 980         */
 981        if (verbose(prev->class) || verbose(next->class)) {
 982                __raw_spin_unlock(&hash_lock);
 983                printk("\n new dependency: ");
 984                print_lock_name(prev->class);
 985                printk(" => ");
 986                print_lock_name(next->class);
 987                printk("\n");
 988                dump_stack();
 989                __raw_spin_lock(&hash_lock);
 990        }
 991        return 1;
 992}
 993
 994/*
 995 * Add the dependency to all directly-previous locks that are 'relevant'.
 996 * The ones that are relevant are (in increasing distance from curr):
 997 * all consecutive trylock entries and the final non-trylock entry - or
 998 * the end of this context's lock-chain - whichever comes first.
 999 */
1000static int
1001check_prevs_add(struct task_struct *curr, struct held_lock *next)
1002{
1003        int depth = curr->lockdep_depth;
1004        struct held_lock *hlock;
1005
1006        /*
1007         * Debugging checks.
1008         *
1009         * Depth must not be zero for a non-head lock:
1010         */
1011        if (!depth)
1012                goto out_bug;
1013        /*
1014         * At least two relevant locks must exist for this
1015         * to be a head:
1016         */
1017        if (curr->held_locks[depth].irq_context !=
1018                        curr->held_locks[depth-1].irq_context)
1019                goto out_bug;
1020
1021        for (;;) {
1022                hlock = curr->held_locks + depth-1;
1023                /*
1024                 * Only non-recursive-read entries get new dependencies
1025                 * added:
1026                 */
1027                if (hlock->read != 2) {
1028                        check_prev_add(curr, hlock, next);
1029                        /*
1030                         * Stop after the first non-trylock entry,
1031                         * as non-trylock entries have added their
1032                         * own direct dependencies already, so this
1033                         * lock is connected to them indirectly:
1034                         */
1035                        if (!hlock->trylock)
1036                                break;
1037                }
1038                depth--;
1039                /*
1040                 * End of lock-stack?
1041                 */
1042                if (!depth)
1043                        break;
1044                /*
1045                 * Stop the search if we cross into another context:
1046                 */
1047                if (curr->held_locks[depth].irq_context !=
1048                                curr->held_locks[depth-1].irq_context)
1049                        break;
1050        }
1051        return 1;
1052out_bug:
1053        __raw_spin_unlock(&hash_lock);
1054        DEBUG_LOCKS_WARN_ON(1);
1055
1056        return 0;
1057}
1058
1059
1060/*
1061 * Is this the address of a static object:
1062 */
1063static int static_obj(void *obj)
1064{
1065        unsigned long start = (unsigned long) &_stext,
1066                      end   = (unsigned long) &_end,
1067                      addr  = (unsigned long) obj;
1068#ifdef CONFIG_SMP
1069        int i;
1070#endif
1071
1072        /*
1073         * static variable?
1074         */
1075        if ((addr >= start) && (addr < end))
1076                return 1;
1077
1078#ifdef CONFIG_SMP
1079        /*
1080         * percpu var?
1081         */
1082        for_each_possible_cpu(i) {
1083                start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
1084                end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
1085                                        + per_cpu_offset(i);
1086
1087                if ((addr >= start) && (addr < end))
1088                        return 1;
1089        }
1090#endif
1091
1092        /*
1093         * module var?
1094         */
1095        return is_module_address(addr);
1096}
1097
1098/*
 1099 * To make lock name printouts unique, we calculate a
1100 * class->name_version generation counter:
1101 */
1102static int count_matching_names(struct lock_class *new_class)
1103{
1104        struct lock_class *class;
1105        int count = 0;
1106
1107        if (!new_class->name)
1108                return 0;
1109
1110        list_for_each_entry(class, &all_lock_classes, lock_entry) {
1111                if (new_class->key - new_class->subclass == class->key)
1112                        return class->name_version;
1113                if (class->name && !strcmp(class->name, new_class->name))
1114                        count = max(count, class->name_version);
1115        }
1116
1117        return count + 1;
1118}
1119
1120/*
 1121 * Look up a lock's class in the hash-table. Return the class if it is
 1122 * present already, NULL otherwise. (Registering a missing class and
 1123 * caching the result in the lock object is done by register_lock_class().)
1124 */
1125static inline struct lock_class *
1126look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
1127{
1128        struct lockdep_subclass_key *key;
1129        struct list_head *hash_head;
1130        struct lock_class *class;
1131
1132#ifdef CONFIG_DEBUG_LOCKDEP
1133        /*
1134         * If the architecture calls into lockdep before initializing
1135         * the hashes then we'll warn about it later. (we cannot printk
1136         * right now)
1137         */
1138        if (unlikely(!lockdep_initialized)) {
1139                lockdep_init();
1140                lockdep_init_error = 1;
1141        }
1142#endif
1143
1144        /*
1145         * Static locks do not have their class-keys yet - for them the key
1146         * is the lock object itself:
1147         */
1148        if (unlikely(!lock->key))
1149                lock->key = (void *)lock;
1150
1151        /*
1152         * NOTE: the class-key must be unique. For dynamic locks, a static
1153         * lock_class_key variable is passed in through the mutex_init()
1154         * (or spin_lock_init()) call - which acts as the key. For static
1155         * locks we use the lock object itself as the key.
1156         */
1157        BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
1158
1159        key = lock->key->subkeys + subclass;
1160
1161        hash_head = classhashentry(key);
1162
1163        /*
1164         * We can walk the hash lockfree, because the hash only
1165         * grows, and we are careful when adding entries to the end:
1166         */
1167        list_for_each_entry(class, hash_head, hash_entry)
1168                if (class->key == key)
1169                        return class;
1170
1171        return NULL;
1172}
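
/*
 * Editorial sketch of the two key flavours handled above (hypothetical
 * names): a dynamically allocated lock gets its key from the static
 * struct lock_class_key that the init macro declares at the call site,
 *
 *      spinlock_t *lock = kmalloc(sizeof(*lock), GFP_KERNEL);
 *      spin_lock_init(lock);   <- static key emitted at this call site
 *
 * whereas a statically allocated lock has no key set up front, so the
 * lock object itself is used as the key (the !lock->key case above).
 */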
1173
1174/*
1175 * Register a lock's class in the hash-table, if the class is not present
1176 * yet. Otherwise we look it up. We cache the result in the lock object
1177 * itself, so actual lookup of the hash should be once per lock object.
1178 */
1179static inline struct lock_class *
1180register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1181{
1182        struct lockdep_subclass_key *key;
1183        struct list_head *hash_head;
1184        struct lock_class *class;
1185
1186        class = look_up_lock_class(lock, subclass);
1187        if (likely(class))
1188                return class;
1189
1190        /*
1191         * Debug-check: all keys must be persistent!
1192         */
1193        if (!static_obj(lock->key)) {
1194                debug_locks_off();
1195                printk("INFO: trying to register non-static key.\n");
1196                printk("the code is fine but needs lockdep annotation.\n");
1197                printk("turning off the locking correctness validator.\n");
1198                dump_stack();
1199
1200                return NULL;
1201        }
1202
1203        key = lock->key->subkeys + subclass;
1204        hash_head = classhashentry(key);
1205
1206        __raw_spin_lock(&hash_lock);
1207        /*
1208         * We have to do the hash-walk again, to avoid races
1209         * with another CPU:
1210         */
1211        list_for_each_entry(class, hash_head, hash_entry)
1212                if (class->key == key)
1213                        goto out_unlock_set;
1214        /*
1215         * Allocate a new key from the static array, and add it to
1216         * the hash:
1217         */
1218        if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
1219                __raw_spin_unlock(&hash_lock);
1220                debug_locks_off();
1221                printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
1222                printk("turning off the locking correctness validator.\n");
1223                return NULL;
1224        }
1225        class = lock_classes + nr_lock_classes++;
1226        debug_atomic_inc(&nr_unused_locks);
1227        class->key = key;
1228        class->name = lock->name;
1229        class->subclass = subclass;
1230        INIT_LIST_HEAD(&class->lock_entry);
1231        INIT_LIST_HEAD(&class->locks_before);
1232        INIT_LIST_HEAD(&class->locks_after);
1233        class->name_version = count_matching_names(class);
1234        /*
1235         * We use RCU's safe list-add method to make
1236         * parallel walking of the hash-list safe:
1237         */
1238        list_add_tail_rcu(&class->hash_entry, hash_head);
1239
1240        if (verbose(class)) {
1241                __raw_spin_unlock(&hash_lock);
1242                printk("\nnew class %p: %s", class->key, class->name);
1243                if (class->name_version > 1)
1244                        printk("#%d", class->name_version);
1245                printk("\n");
1246                dump_stack();
1247                __raw_spin_lock(&hash_lock);
1248        }
1249out_unlock_set:
1250        __raw_spin_unlock(&hash_lock);
1251
1252        if (!subclass || force)
1253                lock->class_cache = class;
1254
1255        DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
1256
1257        return class;
1258}
1259
1260/*
1261 * Look up a dependency chain. If the key is not present yet then
 1262 * add it and return 1 - in this case the new dependency chain gets
 1263 * validated. If the key is already hashed, return 0.
1264 */
1265static inline int lookup_chain_cache(u64 chain_key)
1266{
1267        struct list_head *hash_head = chainhashentry(chain_key);
1268        struct lock_chain *chain;
1269
1270        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1271        /*
1272         * We can walk it lock-free, because entries only get added
1273         * to the hash:
1274         */
1275        list_for_each_entry(chain, hash_head, entry) {
1276                if (chain->chain_key == chain_key) {
1277cache_hit:
1278                        debug_atomic_inc(&chain_lookup_hits);
1279                        /*
1280                         * In the debugging case, force redundant checking
1281                         * by returning 1:
1282                         */
1283#ifdef CONFIG_DEBUG_LOCKDEP
1284                        __raw_spin_lock(&hash_lock);
1285                        return 1;
1286#endif
1287                        return 0;
1288                }
1289        }
1290        /*
1291         * Allocate a new chain entry from the static array, and add
1292         * it to the hash:
1293         */
1294        __raw_spin_lock(&hash_lock);
1295        /*
1296         * We have to walk the chain again locked - to avoid duplicates:
1297         */
1298        list_for_each_entry(chain, hash_head, entry) {
1299                if (chain->chain_key == chain_key) {
1300                        __raw_spin_unlock(&hash_lock);
1301                        goto cache_hit;
1302                }
1303        }
1304        if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1305                __raw_spin_unlock(&hash_lock);
1306                debug_locks_off();
1307                printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1308                printk("turning off the locking correctness validator.\n");
1309                return 0;
1310        }
1311        chain = lock_chains + nr_lock_chains++;
1312        chain->chain_key = chain_key;
1313        list_add_tail_rcu(&chain->entry, hash_head);
1314        debug_atomic_inc(&chain_lookup_misses);
1315#ifdef CONFIG_TRACE_IRQFLAGS
1316        if (current->hardirq_context)
1317                nr_hardirq_chains++;
1318        else {
1319                if (current->softirq_context)
1320                        nr_softirq_chains++;
1321                else
1322                        nr_process_chains++;
1323        }
1324#else
1325        nr_process_chains++;
1326#endif
1327
1328        return 1;
1329}
1330
1331/*
1332 * We are building curr_chain_key incrementally, so double-check
1333 * it from scratch, to make sure that it's done correctly:
1334 */
1335static void check_chain_key(struct task_struct *curr)
1336{
1337#ifdef CONFIG_DEBUG_LOCKDEP
1338        struct held_lock *hlock, *prev_hlock = NULL;
1339        unsigned int i, id;
1340        u64 chain_key = 0;
1341
1342        for (i = 0; i < curr->lockdep_depth; i++) {
1343                hlock = curr->held_locks + i;
1344                if (chain_key != hlock->prev_chain_key) {
1345                        debug_locks_off();
1346                        printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1347                                curr->lockdep_depth, i,
1348                                (unsigned long long)chain_key,
1349                                (unsigned long long)hlock->prev_chain_key);
1350                        WARN_ON(1);
1351                        return;
1352                }
1353                id = hlock->class - lock_classes;
1354                DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS);
1355                if (prev_hlock && (prev_hlock->irq_context !=
1356                                                        hlock->irq_context))
1357                        chain_key = 0;
1358                chain_key = iterate_chain_key(chain_key, id);
1359                prev_hlock = hlock;
1360        }
1361        if (chain_key != curr->curr_chain_key) {
1362                debug_locks_off();
1363                printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1364                        curr->lockdep_depth, i,
1365                        (unsigned long long)chain_key,
1366                        (unsigned long long)curr->curr_chain_key);
1367                WARN_ON(1);
1368        }
1369#endif
1370}
1371
1372#ifdef CONFIG_TRACE_IRQFLAGS
1373
1374/*
1375 * print irq inversion bug:
1376 */
1377static int
1378print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1379                        struct held_lock *this, int forwards,
1380                        const char *irqclass)
1381{
1382        __raw_spin_unlock(&hash_lock);
1383        debug_locks_off();
1384        if (debug_locks_silent)
1385                return 0;
1386
1387        printk("\n=========================================================\n");
1388        printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
1389        print_kernel_version();
1390        printk(  "---------------------------------------------------------\n");
1391        printk("%s/%d just changed the state of lock:\n",
1392                curr->comm, curr->pid);
1393        print_lock(this);
1394        if (forwards)
1395                printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1396        else
1397                printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1398        print_lock_name(other);
1399        printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1400
1401        printk("\nother info that might help us debug this:\n");
1402        lockdep_print_held_locks(curr);
1403
1404        printk("\nthe first lock's dependencies:\n");
1405        print_lock_dependencies(this->class, 0);
1406
1407        printk("\nthe second lock's dependencies:\n");
1408        print_lock_dependencies(other, 0);
1409
1410        printk("\nstack backtrace:\n");
1411        dump_stack();
1412
1413        return 0;
1414}
1415
1416/*
1417 * Prove that in the forwards-direction subgraph starting at <this>
1418 * there is no lock matching <mask>:
1419 */
1420static int
1421check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1422                     enum lock_usage_bit bit, const char *irqclass)
1423{
1424        int ret;
1425
1426        find_usage_bit = bit;
1427        /* fills in <forwards_match> */
1428        ret = find_usage_forwards(this->class, 0);
1429        if (!ret || ret == 1)
1430                return ret;
1431
1432        return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1433}
1434
1435/*
1436 * Prove that in the backwards-direction subgraph starting at <this>
1437 * there is no lock matching <mask>:
1438 */
1439static int
1440check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1441                      enum lock_usage_bit bit, const char *irqclass)
1442{
1443        int ret;
1444
1445        find_usage_bit = bit;
1446        /* fills in <backwards_match> */
1447        ret = find_usage_backwards(this->class, 0);
1448        if (!ret || ret == 1)
1449                return ret;
1450
1451        return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1452}
1453
1454static inline void print_irqtrace_events(struct task_struct *curr)
1455{
1456        printk("irq event stamp: %u\n", curr->irq_events);
1457        printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
1458        print_ip_sym(curr->hardirq_enable_ip);
1459        printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1460        print_ip_sym(curr->hardirq_disable_ip);
1461        printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
1462        print_ip_sym(curr->softirq_enable_ip);
1463        printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1464        print_ip_sym(curr->softirq_disable_ip);
1465}
1466
1467#else
1468static inline void print_irqtrace_events(struct task_struct *curr)
1469{
1470}
1471#endif
1472
1473static int
1474print_usage_bug(struct task_struct *curr, struct held_lock *this,
1475                enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1476{
1477        __raw_spin_unlock(&hash_lock);
1478        debug_locks_off();
1479        if (debug_locks_silent)
1480                return 0;
1481
1482        printk("\n=================================\n");
1483        printk(  "[ INFO: inconsistent lock state ]\n");
1484        print_kernel_version();
1485        printk(  "---------------------------------\n");
1486
1487        printk("inconsistent {%s} -> {%s} usage.\n",
1488                usage_str[prev_bit], usage_str[new_bit]);
1489
1490        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1491                curr->comm, curr->pid,
1492                trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1493                trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1494                trace_hardirqs_enabled(curr),
1495                trace_softirqs_enabled(curr));
1496        print_lock(this);
1497
1498        printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1499        print_stack_trace(this->class->usage_traces + prev_bit, 1);
1500
1501        print_irqtrace_events(curr);
1502        printk("\nother info that might help us debug this:\n");
1503        lockdep_print_held_locks(curr);
1504
1505        printk("\nstack backtrace:\n");
1506        dump_stack();
1507
1508        return 0;
1509}
1510
1511/*
1512 * Print out an error if an invalid bit is set:
1513 */
1514static inline int
1515valid_state(struct task_struct *curr, struct held_lock *this,
1516            enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1517{
1518        if (unlikely(this->class->usage_mask & (1 << bad_bit)))
1519                return print_usage_bug(curr, this, bad_bit, new_bit);
1520        return 1;
1521}
1522
1523#define STRICT_READ_CHECKS      1
1524
1525/*
1526 * Mark a lock with a usage bit, and validate the state transition:
1527 */
1528static int mark_lock(struct task_struct *curr, struct held_lock *this,
1529                     enum lock_usage_bit new_bit, unsigned long ip)
1530{
1531        unsigned int new_mask = 1 << new_bit, ret = 1;
1532
1533        /*
1534         * If already set then do not dirty the cacheline,
1535         * nor do any checks:
1536         */
1537        if (likely(this->class->usage_mask & new_mask))
1538                return 1;
1539
1540        __raw_spin_lock(&hash_lock);
1541        /*
 1542         * Make sure we didn't race:
1543         */
1544        if (unlikely(this->class->usage_mask & new_mask)) {
1545                __raw_spin_unlock(&hash_lock);
1546                return 1;
1547        }
1548
1549        this->class->usage_mask |= new_mask;
1550
1551#ifdef CONFIG_TRACE_IRQFLAGS
1552        if (new_bit == LOCK_ENABLED_HARDIRQS ||
1553                        new_bit == LOCK_ENABLED_HARDIRQS_READ)
1554                ip = curr->hardirq_enable_ip;
1555        else if (new_bit == LOCK_ENABLED_SOFTIRQS ||
1556                        new_bit == LOCK_ENABLED_SOFTIRQS_READ)
1557                ip = curr->softirq_enable_ip;
1558#endif
1559        if (!save_trace(this->class->usage_traces + new_bit))
1560                return 0;
1561
1562        switch (new_bit) {
1563#ifdef CONFIG_TRACE_IRQFLAGS
1564        case LOCK_USED_IN_HARDIRQ:
1565                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1566                        return 0;
1567                if (!valid_state(curr, this, new_bit,
1568                                 LOCK_ENABLED_HARDIRQS_READ))
1569                        return 0;
1570                /*
1571                 * just marked it hardirq-safe, check that this lock
1572                 * took no hardirq-unsafe lock in the past:
1573                 */
1574                if (!check_usage_forwards(curr, this,
1575                                          LOCK_ENABLED_HARDIRQS, "hard"))
1576                        return 0;
1577#if STRICT_READ_CHECKS
1578                /*
1579                 * just marked it hardirq-safe, check that this lock
1580                 * took no hardirq-unsafe-read lock in the past:
1581                 */
1582                if (!check_usage_forwards(curr, this,
1583                                LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1584                        return 0;
1585#endif
1586                if (hardirq_verbose(this->class))
1587                        ret = 2;
1588                break;
1589        case LOCK_USED_IN_SOFTIRQ:
1590                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1591                        return 0;
1592                if (!valid_state(curr, this, new_bit,
1593                                 LOCK_ENABLED_SOFTIRQS_READ))
1594                        return 0;
1595                /*
1596                 * just marked it softirq-safe, check that this lock
1597                 * took no softirq-unsafe lock in the past:
1598                 */
1599                if (!check_usage_forwards(curr, this,
1600                                          LOCK_ENABLED_SOFTIRQS, "soft"))
1601                        return 0;
1602#if STRICT_READ_CHECKS
1603                /*
1604                 * just marked it softirq-safe, check that this lock
1605                 * took no softirq-unsafe-read lock in the past:
1606                 */
1607                if (!check_usage_forwards(curr, this,
1608                                LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
1609                        return 0;
1610#endif
1611                if (softirq_verbose(this->class))
1612                        ret = 2;
1613                break;
1614        case LOCK_USED_IN_HARDIRQ_READ:
1615                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1616                        return 0;
1617                /*
1618                 * just marked it hardirq-read-safe, check that this lock
1619                 * took no hardirq-unsafe lock in the past:
1620                 */
1621                if (!check_usage_forwards(curr, this,
1622                                          LOCK_ENABLED_HARDIRQS, "hard"))
1623                        return 0;
1624                if (hardirq_verbose(this->class))
1625                        ret = 2;
1626                break;
1627        case LOCK_USED_IN_SOFTIRQ_READ:
1628                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1629                        return 0;
1630                /*
1631                 * just marked it softirq-read-safe, check that this lock
1632                 * took no softirq-unsafe lock in the past:
1633                 */
1634                if (!check_usage_forwards(curr, this,
1635                                          LOCK_ENABLED_SOFTIRQS, "soft"))
1636                        return 0;
1637                if (softirq_verbose(this->class))
1638                        ret = 2;
1639                break;
1640        case LOCK_ENABLED_HARDIRQS:
1641                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1642                        return 0;
1643                if (!valid_state(curr, this, new_bit,
1644                                 LOCK_USED_IN_HARDIRQ_READ))
1645                        return 0;
1646                /*
1647                 * just marked it hardirq-unsafe, check that no hardirq-safe
1648                 * lock in the system ever took it in the past:
1649                 */
1650                if (!check_usage_backwards(curr, this,
1651                                           LOCK_USED_IN_HARDIRQ, "hard"))
1652                        return 0;
1653#if STRICT_READ_CHECKS
1654                /*
1655                 * just marked it hardirq-unsafe, check that no
1656                 * hardirq-safe-read lock in the system ever took
1657                 * it in the past:
1658                 */
1659                if (!check_usage_backwards(curr, this,
1660                                   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
1661                        return 0;
1662#endif
1663                if (hardirq_verbose(this->class))
1664                        ret = 2;
1665                break;
1666        case LOCK_ENABLED_SOFTIRQS:
1667                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1668                        return 0;
1669                if (!valid_state(curr, this, new_bit,
1670                                 LOCK_USED_IN_SOFTIRQ_READ))
1671                        return 0;
1672                /*
1673                 * just marked it softirq-unsafe, check that no softirq-safe
1674                 * lock in the system ever took it in the past:
1675                 */
1676                if (!check_usage_backwards(curr, this,
1677                                           LOCK_USED_IN_SOFTIRQ, "soft"))
1678                        return 0;
1679#if STRICT_READ_CHECKS
1680                /*
1681                 * just marked it softirq-unsafe, check that no
1682                 * softirq-safe-read lock in the system ever took
1683                 * it in the past:
1684                 */
1685                if (!check_usage_backwards(curr, this,
1686                                   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
1687                        return 0;
1688#endif
1689                if (softirq_verbose(this->class))
1690                        ret = 2;
1691                break;
1692        case LOCK_ENABLED_HARDIRQS_READ:
1693                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1694                        return 0;
1695#if STRICT_READ_CHECKS
1696                /*
1697                 * just marked it hardirq-read-unsafe, check that no
1698                 * hardirq-safe lock in the system ever took it in the past:
1699                 */
1700                if (!check_usage_backwards(curr, this,
1701                                           LOCK_USED_IN_HARDIRQ, "hard"))
1702                        return 0;
1703#endif
1704                if (hardirq_verbose(this->class))
1705                        ret = 2;
1706                break;
1707        case LOCK_ENABLED_SOFTIRQS_READ:
1708                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1709                        return 0;
1710#if STRICT_READ_CHECKS
1711                /*
1712                 * just marked it softirq-read-unsafe, check that no
1713                 * softirq-safe lock in the system ever took it in the past:
1714                 */
1715                if (!check_usage_backwards(curr, this,
1716                                           LOCK_USED_IN_SOFTIRQ, "soft"))
1717                        return 0;
1718#endif
1719                if (softirq_verbose(this->class))
1720                        ret = 2;
1721                break;
1722#endif
1723        case LOCK_USED:
1724                /*
1725                 * Add it to the global list of classes:
1726                 */
1727                list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
1728                debug_atomic_dec(&nr_unused_locks);
1729                break;
1730        default:
1731                debug_locks_off();
1732                WARN_ON(1);
1733                return 0;
1734        }
1735
1736        __raw_spin_unlock(&hash_lock);
1737
1738        /*
1739         * We must printk outside of the hash_lock:
1740         */
1741        if (ret == 2) {
1742                printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
1743                print_lock(this);
1744                print_irqtrace_events(curr);
1745                dump_stack();
1746        }
1747
1748        return ret;
1749}
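
/*
 * Illustrative sketch (not part of the original file): the kind of usage
 * pattern the state checks above are designed to catch. A lock taken with
 * hardirqs enabled in process context becomes hardirq-unsafe
 * (LOCK_ENABLED_HARDIRQS); the same lock taken from a hardirq handler
 * becomes hardirq-safe (LOCK_USED_IN_HARDIRQ). Both bits on one class can
 * deadlock if the interrupt hits while the lock is already held. The lock
 * and function names below are made up for the example:
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

static void example_process_context(void)
{
        /* hardirqs are enabled here: marks the class hardirq-unsafe */
        spin_lock(&example_lock);
        /* ... critical section ... */
        spin_unlock(&example_lock);
}

static irqreturn_t example_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        /* runs in hardirq context: marks the class hardirq-safe */
        spin_lock(&example_lock);
        /* ... critical section ... */
        spin_unlock(&example_lock);
        return IRQ_HANDLED;
}
/*
 * Once both usage bits are registered for the class, valid_state() triggers
 * the "inconsistent lock state" report above; the fix is to use
 * spin_lock_irqsave() in the process-context path.
 */
#endif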
1750
1751#ifdef CONFIG_TRACE_IRQFLAGS
1752/*
1753 * Mark all held locks with a usage bit:
1754 */
1755static int
1756mark_held_locks(struct task_struct *curr, int hardirq, unsigned long ip)
1757{
1758        enum lock_usage_bit usage_bit;
1759        struct held_lock *hlock;
1760        int i;
1761
1762        for (i = 0; i < curr->lockdep_depth; i++) {
1763                hlock = curr->held_locks + i;
1764
1765                if (hardirq) {
1766                        if (hlock->read)
1767                                usage_bit = LOCK_ENABLED_HARDIRQS_READ;
1768                        else
1769                                usage_bit = LOCK_ENABLED_HARDIRQS;
1770                } else {
1771                        if (hlock->read)
1772                                usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
1773                        else
1774                                usage_bit = LOCK_ENABLED_SOFTIRQS;
1775                }
1776                if (!mark_lock(curr, hlock, usage_bit, ip))
1777                        return 0;
1778        }
1779
1780        return 1;
1781}
1782
1783/*
1784 * Debugging helper: via this flag we know that we are in
1785 * 'early bootup code', and will warn about any invalid irqs-on event:
1786 */
1787static int early_boot_irqs_enabled;
1788
1789void early_boot_irqs_off(void)
1790{
1791        early_boot_irqs_enabled = 0;
1792}
1793
1794void early_boot_irqs_on(void)
1795{
1796        early_boot_irqs_enabled = 1;
1797}
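
/*
 * Illustrative sketch (not part of the original file), assuming the generic
 * boot sequence: start_kernel() is expected to clear the flag very early and
 * set it again just before interrupts are enabled for the first time, so
 * that any stray irqs-on event in between trips the DEBUG_LOCKS_WARN_ON()
 * in trace_hardirqs_on() below. The exact call sites are an assumption:
 */
#if 0
asmlinkage void __init start_kernel(void)
{
        early_boot_irqs_off();  /* any irqs-on event from here on is a bug */
        /* ... early setup that must run with hardirqs disabled ... */
        early_boot_irqs_on();   /* irqs-on events are legitimate again */
        local_irq_enable();
        /* ... */
}
#endif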
1798
1799/*
1800 * Hardirqs will be enabled:
1801 */
1802void trace_hardirqs_on(void)
1803{
1804        struct task_struct *curr = current;
1805        unsigned long ip;
1806
1807        if (unlikely(!debug_locks || current->lockdep_recursion))
1808                return;
1809
1810        if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
1811                return;
1812
1813        if (unlikely(curr->hardirqs_enabled)) {
1814                debug_atomic_inc(&redundant_hardirqs_on);
1815                return;
1816        }
1817        /* we'll do an OFF -> ON transition: */
1818        curr->hardirqs_enabled = 1;
1819        ip = (unsigned long) __builtin_return_address(0);
1820
1821        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1822                return;
1823        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
1824                return;
1825        /*
1826         * We are going to turn hardirqs on, so set the
1827         * usage bit for all held locks:
1828         */
1829        if (!mark_held_locks(curr, 1, ip))
1830                return;
1831        /*
1832         * If we have softirqs enabled, then set the usage
1833         * bit for all held locks. (disabled hardirqs prevented
1834         * this bit from being set before)
1835         */
1836        if (curr->softirqs_enabled)
1837                if (!mark_held_locks(curr, 0, ip))
1838                        return;
1839
1840        curr->hardirq_enable_ip = ip;
1841        curr->hardirq_enable_event = ++curr->irq_events;
1842        debug_atomic_inc(&hardirqs_on_events);
1843}
1844
1845EXPORT_SYMBOL(trace_hardirqs_on);
1846
1847/*
1848 * Hardirqs were disabled:
1849 */
1850void trace_hardirqs_off(void)
1851{
1852        struct task_struct *curr = current;
1853
1854        if (unlikely(!debug_locks || current->lockdep_recursion))
1855                return;
1856
1857        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1858                return;
1859
1860        if (curr->hardirqs_enabled) {
1861                /*
1862                 * We have done an ON -> OFF transition:
1863                 */
1864                curr->hardirqs_enabled = 0;
1865                curr->hardirq_disable_ip = _RET_IP_;
1866                curr->hardirq_disable_event = ++curr->irq_events;
1867                debug_atomic_inc(&hardirqs_off_events);
1868        } else
1869                debug_atomic_inc(&redundant_hardirqs_off);
1870}
1871
1872EXPORT_SYMBOL(trace_hardirqs_off);
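
/*
 * Illustrative sketch (not part of the original file): with
 * CONFIG_TRACE_IRQFLAGS the local_irq_*() wrappers are expected to invoke
 * the two hooks above around the raw arch operations, roughly in the style
 * of <linux/irqflags.h> (reproduced from memory - details may differ):
 */
#if 0
#define local_irq_enable() \
        do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

#define local_irq_disable() \
        do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

#define local_irq_save(flags) \
        do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
#endif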
1873
1874/*
1875 * Softirqs will be enabled:
1876 */
1877void trace_softirqs_on(unsigned long ip)
1878{
1879        struct task_struct *curr = current;
1880
1881        if (unlikely(!debug_locks))
1882                return;
1883
1884        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1885                return;
1886
1887        if (curr->softirqs_enabled) {
1888                debug_atomic_inc(&redundant_softirqs_on);
1889                return;
1890        }
1891
1892        /*
1893         * We'll do an OFF -> ON transition:
1894         */
1895        curr->softirqs_enabled = 1;
1896        curr->softirq_enable_ip = ip;
1897        curr->softirq_enable_event = ++curr->irq_events;
1898        debug_atomic_inc(&softirqs_on_events);
1899        /*
1900         * We are going to turn softirqs on, so set the
1901         * usage bit for all held locks, if hardirqs are
1902         * enabled too:
1903         */
1904        if (curr->hardirqs_enabled)
1905                mark_held_locks(curr, 0, ip);
1906}
1907
1908/*
1909 * Softirqs were disabled:
1910 */
1911void trace_softirqs_off(unsigned long ip)
1912{
1913        struct task_struct *curr = current;
1914
1915        if (unlikely(!debug_locks))
1916                return;
1917
1918        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1919                return;
1920
1921        if (curr->softirqs_enabled) {
1922                /*
1923                 * We have done an ON -> OFF transition:
1924                 */
1925                curr->softirqs_enabled = 0;
1926                curr->softirq_disable_ip = ip;
1927                curr->softirq_disable_event = ++curr->irq_events;
1928                debug_atomic_inc(&softirqs_off_events);
1929                DEBUG_LOCKS_WARN_ON(!softirq_count());
1930        } else
1931                debug_atomic_inc(&redundant_softirqs_off);
1932}
1933
1934#endif
1935
1936/*
1937 * Initialize a lock instance's lock-class mapping info:
1938 */
1939void lockdep_init_map(struct lockdep_map *lock, const char *name,
1940                      struct lock_class_key *key, int subclass)
1941{
1942        if (unlikely(!debug_locks))
1943                return;
1944
1945        if (DEBUG_LOCKS_WARN_ON(!key))
1946                return;
1947        if (DEBUG_LOCKS_WARN_ON(!name))
1948                return;
1949        /*
1950         * Sanity check, the lock-class key must be persistent:
1951         */
1952        if (!static_obj(key)) {
1953                printk("BUG: key %p not in .data!\n", key);
1954                DEBUG_LOCKS_WARN_ON(1);
1955                return;
1956        }
1957        lock->name = name;
1958        lock->key = key;
1959        lock->class_cache = NULL;
1960        if (subclass)
1961                register_lock_class(lock, subclass, 1);
1962}
1963
1964EXPORT_SYMBOL_GPL(lockdep_init_map);
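
/*
 * Illustrative sketch (not part of the original file): because of the
 * static_obj() check above, lock-type init macros pass a key with static
 * storage duration, unique per initialization site, roughly in the style
 * of mutex_init()/spin_lock_init(). The "example" names are made up:
 */
#if 0
#define example_lock_init(lock)                                 \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __example_lock_init((lock), #lock, &__key);             \
} while (0)

static void __example_lock_init(struct example_lock *lock, const char *name,
                                struct lock_class_key *key)
{
        /* ... initialize the lock implementation itself ... */
        lockdep_init_map(&lock->dep_map, name, key, 0);
}
#endif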
1965
1966/*
1967 * This gets called for every mutex_lock*()/spin_lock*() operation.
1968 * We maintain the dependency maps and validate the locking attempt:
1969 */
1970static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
1971                          int trylock, int read, int check, int hardirqs_off,
1972                          unsigned long ip)
1973{
1974        struct task_struct *curr = current;
1975        struct lock_class *class = NULL;
1976        struct held_lock *hlock;
1977        unsigned int depth, id;
1978        int chain_head = 0;
1979        u64 chain_key;
1980
1981        if (unlikely(!debug_locks))
1982                return 0;
1983
1984        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1985                return 0;
1986
1987        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
1988                debug_locks_off();
1989                printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
1990                printk("turning off the locking correctness validator.\n");
1991                return 0;
1992        }
1993
1994        if (!subclass)
1995                class = lock->class_cache;
1996        /*
1997         * Not cached yet or subclass?
1998         */
1999        if (unlikely(!class)) {
2000                class = register_lock_class(lock, subclass, 0);
2001                if (!class)
2002                        return 0;
2003        }
2004        debug_atomic_inc((atomic_t *)&class->ops);
2005        if (very_verbose(class)) {
2006                printk("\nacquire class [%p] %s", class->key, class->name);
2007                if (class->name_version > 1)
2008                        printk("#%d", class->name_version);
2009                printk("\n");
2010                dump_stack();
2011        }
2012
2013        /*
2014         * Add the lock to the list of currently held locks.
2015         * (we don't increase the depth just yet, up until the
2016         * dependency checks are done)
2017         */
2018        depth = curr->lockdep_depth;
2019        if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2020                return 0;
2021
2022        hlock = curr->held_locks + depth;
2023
2024        hlock->class = class;
2025        hlock->acquire_ip = ip;
2026        hlock->instance = lock;
2027        hlock->trylock = trylock;
2028        hlock->read = read;
2029        hlock->check = check;
2030        hlock->hardirqs_off = hardirqs_off;
2031
2032        if (check != 2)
2033                goto out_calc_hash;
2034#ifdef CONFIG_TRACE_IRQFLAGS
2035        /*
2036         * If this is a non-trylock acquire in hardirq or softirq context,
2037         * then mark the lock as used in these contexts:
2038         */
2039        if (!trylock) {
2040                if (read) {
2041                        if (curr->hardirq_context)
2042                                if (!mark_lock(curr, hlock,
2043                                                LOCK_USED_IN_HARDIRQ_READ, ip))
2044                                        return 0;
2045                        if (curr->softirq_context)
2046                                if (!mark_lock(curr, hlock,
2047                                                LOCK_USED_IN_SOFTIRQ_READ, ip))
2048                                        return 0;
2049                } else {
2050                        if (curr->hardirq_context)
2051                                if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ, ip))
2052                                        return 0;
2053                        if (curr->softirq_context)
2054                                if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ, ip))
2055                                        return 0;
2056                }
2057        }
2058        if (!hardirqs_off) {
2059                if (read) {
2060                        if (!mark_lock(curr, hlock,
2061                                        LOCK_ENABLED_HARDIRQS_READ, ip))
2062                                return 0;
2063                        if (curr->softirqs_enabled)
2064                                if (!mark_lock(curr, hlock,
2065                                                LOCK_ENABLED_SOFTIRQS_READ, ip))
2066                                        return 0;
2067                } else {
2068                        if (!mark_lock(curr, hlock,
2069                                        LOCK_ENABLED_HARDIRQS, ip))
2070                                return 0;
2071                        if (curr->softirqs_enabled)
2072                                if (!mark_lock(curr, hlock,
2073                                                LOCK_ENABLED_SOFTIRQS, ip))
2074                                        return 0;
2075                }
2076        }
2077#endif
2078        /* mark it as used: */
2079        if (!mark_lock(curr, hlock, LOCK_USED, ip))
2080                return 0;
2081out_calc_hash:
2082        /*
2083         * Calculate the chain hash: it's the combined hash of all the
2084         * lock keys along the dependency chain. We save the hash value
2085         * at every step so that we can get the current hash easily
2086         * after unlock. The chain hash is then used to cache dependency
2087         * results.
2088         *
2089         * The 'key ID' (the index of the class in lock_classes[]) is the
2090         * most compact value to drive the hash with - not class->key.
2091         */
2092        id = class - lock_classes;
2093        if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2094                return 0;
2095
2096        chain_key = curr->curr_chain_key;
2097        if (!depth) {
2098                if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2099                        return 0;
2100                chain_head = 1;
2101        }
2102
2103        hlock->prev_chain_key = chain_key;
2104
2105#ifdef CONFIG_TRACE_IRQFLAGS
2106        /*
2107         * Keep track of points where we cross into an interrupt context:
2108         */
2109        hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2110                                curr->softirq_context;
2111        if (depth) {
2112                struct held_lock *prev_hlock;
2113
2114                prev_hlock = curr->held_locks + depth-1;
2115                /*
2116                 * If we cross into another context, reset the
2117                 * hash key (this also prevents the checking and the
2118                 * adding of the dependency to 'prev'):
2119                 */
2120                if (prev_hlock->irq_context != hlock->irq_context) {
2121                        chain_key = 0;
2122                        chain_head = 1;
2123                }
2124        }
2125#endif
2126        chain_key = iterate_chain_key(chain_key, id);
2127        curr->curr_chain_key = chain_key;
2128
2129        /*
2130         * Trylock needs to maintain the stack of held locks, but it
2131         * does not add new dependencies, because trylock can be done
2132         * in any order.
2133         *
2134         * We look up the chain_key and do the O(N^2) check and update of
2135         * the dependencies only if this is a new dependency chain.
2136         * (If lookup_chain_cache() returns with 1 it acquires
2137         * hash_lock for us)
2138         */
2139        if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) {
2140                /*
2141                 * Check whether last held lock:
2142                 *
2143                 * - is irq-safe, if this lock is irq-unsafe
2144                 * - is softirq-safe, if this lock is hardirq-unsafe
2145                 *
2146                 * And check whether the new lock's dependency graph
2147                 * could lead back to the previous lock.
2148                 *
2149                 * Any of these scenarios could lead to a deadlock. If all
2150                 * validations pass, the new dependencies are added below.
2151                 */
2152                int ret = check_deadlock(curr, hlock, lock, read);
2153
2154                if (!ret)
2155                        return 0;
2156                /*
2157                 * Mark recursive read, as we jump over it when
2158                 * building dependencies (just like we jump over
2159                 * trylock entries):
2160                 */
2161                if (ret == 2)
2162                        hlock->read = 2;
2163                /*
2164                 * Add dependency only if this lock is not the head
2165                 * of the chain, and if it's not a secondary read-lock:
2166                 */
2167                if (!chain_head && ret != 2)
2168                        if (!check_prevs_add(curr, hlock))
2169                                return 0;
2170                __raw_spin_unlock(&hash_lock);
2171        }
2172        curr->lockdep_depth++;
2173        check_chain_key(curr);
2174        if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2175                debug_locks_off();
2176                printk("BUG: MAX_LOCK_DEPTH too low!\n");
2177                printk("turning off the locking correctness validator.\n");
2178                return 0;
2179        }
2180        if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2181                max_lockdep_depth = curr->lockdep_depth;
2182
2183        return 1;
2184}
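
/*
 * Illustrative sketch (not part of the original file): conceptually the
 * chain key maintained above is a left fold of iterate_chain_key() over the
 * class IDs of the held locks, restarted whenever we cross into another irq
 * context, with each held_lock saving the value from before it was added
 * (->prev_chain_key) so that unlock can restore it. This is essentially
 * what check_chain_key() re-verifies (simplified):
 */
#if 0
static u64 example_recompute_chain_key(struct task_struct *curr)
{
        u64 chain_key = 0;
        int i;

        for (i = 0; i < curr->lockdep_depth; i++) {
                struct held_lock *hlock = curr->held_locks + i;
                unsigned int id = hlock->class - lock_classes;

                /* crossing into another irq context restarts the chain: */
                if (i && curr->held_locks[i-1].irq_context != hlock->irq_context)
                        chain_key = 0;
                /* hlock->prev_chain_key was saved as chain_key at this point */
                chain_key = iterate_chain_key(chain_key, id);
        }
        return chain_key;
}
#endif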
2185
2186static int
2187print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2188                           unsigned long ip)
2189{
2190        if (!debug_locks_off())
2191                return 0;
2192        if (debug_locks_silent)
2193                return 0;
2194
2195        printk("\n=====================================\n");
2196        printk(  "[ BUG: bad unlock balance detected! ]\n");
2197        printk(  "-------------------------------------\n");
2198        printk("%s/%d is trying to release lock (",
2199                curr->comm, curr->pid);
2200        print_lockdep_cache(lock);
2201        printk(") at:\n");
2202        print_ip_sym(ip);
2203        printk("but there are no more locks to release!\n");
2204        printk("\nother info that might help us debug this:\n");
2205        lockdep_print_held_locks(curr);
2206
2207        printk("\nstack backtrace:\n");
2208        dump_stack();
2209
2210        return 0;
2211}
2212
2213/*
2214 * Common debugging checks for both nested and non-nested unlock:
2215 */
2216static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2217                        unsigned long ip)
2218{
2219        if (unlikely(!debug_locks))
2220                return 0;
2221        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2222                return 0;
2223
2224        if (curr->lockdep_depth <= 0)
2225                return print_unlock_inbalance_bug(curr, lock, ip);
2226
2227        return 1;
2228}
2229
2230/*
2231 * Remove the lock from the list of currently held locks in a
2232 * potentially non-nested (out of order) manner. This is a
2233 * relatively rare operation, as all the unlock APIs default
2234 * to nested mode (which uses lock_release()):
2235 */
2236static int
2237lock_release_non_nested(struct task_struct *curr,
2238                        struct lockdep_map *lock, unsigned long ip)
2239{
2240        struct held_lock *hlock, *prev_hlock;
2241        unsigned int depth;
2242        int i;
2243
2244        /*
2245         * Check whether the lock exists in the current stack
2246         * of held locks:
2247         */
2248        depth = curr->lockdep_depth;
2249        if (DEBUG_LOCKS_WARN_ON(!depth))
2250                return 0;
2251
2252        prev_hlock = NULL;
2253        for (i = depth-1; i >= 0; i--) {
2254                hlock = curr->held_locks + i;
2255                /*
2256                 * We must not cross into another context:
2257                 */
2258                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2259                        break;
2260                if (hlock->instance == lock)
2261                        goto found_it;
2262                prev_hlock = hlock;
2263        }
2264        return print_unlock_inbalance_bug(curr, lock, ip);
2265
2266found_it:
2267        /*
2268         * We have the right lock to unlock, 'hlock' points to it.
2269         * Now we remove it from the stack, and add back the other
2270         * entries (if any), recalculating the hash along the way:
2271         */
2272        curr->lockdep_depth = i;
2273        curr->curr_chain_key = hlock->prev_chain_key;
2274
2275        for (i++; i < depth; i++) {
2276                hlock = curr->held_locks + i;
2277                if (!__lock_acquire(hlock->instance,
2278                        hlock->class->subclass, hlock->trylock,
2279                                hlock->read, hlock->check, hlock->hardirqs_off,
2280                                hlock->acquire_ip))
2281                        return 0;
2282        }
2283
2284        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2285                return 0;
2286        return 1;
2287}
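
/*
 * Illustrative sketch (not part of the original file): the non-nested path
 * above exists for perfectly legal but out-of-order release sequences, e.g.
 * (lock names made up):
 */
#if 0
static void example_out_of_order_release(void)
{
        spin_lock(&lock_a);
        spin_lock(&lock_b);
        spin_unlock(&lock_a);   /* lock_a is not the top of the held-lock stack */
        spin_unlock(&lock_b);
}
#endif
/*
 * Releasing lock_a removes it from the middle of curr->held_locks and then
 * re-runs __lock_acquire() for lock_b, recomputing the chain key as if
 * lock_b had been acquired without lock_a held.
 */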
2288
2289/*
2290 * Remove the lock from the list of currently held locks - this gets
2291 * called on mutex_unlock()/spin_unlock*() (or on a failed
2292 * mutex_lock_interruptible()). This is done for unlocks that nest
2293 * perfectly. (i.e. the current top of the lock-stack is unlocked)
2294 */
2295static int lock_release_nested(struct task_struct *curr,
2296                               struct lockdep_map *lock, unsigned long ip)
2297{
2298        struct held_lock *hlock;
2299        unsigned int depth;
2300
2301        /*
2302         * Pop off the top of the lock stack:
2303         */
2304        depth = curr->lockdep_depth - 1;
2305        hlock = curr->held_locks + depth;
2306
2307        /*
2308         * Is the unlock non-nested:
2309         */
2310        if (hlock->instance != lock)
2311                return lock_release_non_nested(curr, lock, ip);
2312        curr->lockdep_depth--;
2313
2314        if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2315                return 0;
2316
2317        curr->curr_chain_key = hlock->prev_chain_key;
2318
2319#ifdef CONFIG_DEBUG_LOCKDEP
2320        hlock->prev_chain_key = 0;
2321        hlock->class = NULL;
2322        hlock->acquire_ip = 0;
2323        hlock->irq_context = 0;
2324#endif
2325        return 1;
2326}
2327
2328/*
2329 * Remove the lock from the list of currently held locks - this gets
2330 * called on mutex_unlock()/spin_unlock*() (or on a failed
2331 * mutex_lock_interruptible()). It handles both the common nested case
2332 * and the rarer non-nested (out of order) unlock.
2333 */
2334static void
2335__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2336{
2337        struct task_struct *curr = current;
2338
2339        if (!check_unlock(curr, lock, ip))
2340                return;
2341
2342        if (nested) {
2343                if (!lock_release_nested(curr, lock, ip))
2344                        return;
2345        } else {
2346                if (!lock_release_non_nested(curr, lock, ip))
2347                        return;
2348        }
2349
2350        check_chain_key(curr);
2351}
2352
2353/*
2354 * Check whether we follow the irq-flags state precisely:
2355 */
2356static void check_flags(unsigned long flags)
2357{
2358#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
2359        if (!debug_locks)
2360                return;
2361
2362        if (irqs_disabled_flags(flags))
2363                DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled);
2364        else
2365                DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled);
2366
2367        /*
2368         * We don't accurately track softirq state in e.g.
2369         * hardirq contexts (such as on 4KSTACKS), so only
2370         * check if not in hardirq contexts:
2371         */
2372        if (!hardirq_count()) {
2373                if (softirq_count())
2374                        DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2375                else
2376                        DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2377        }
2378
2379        if (!debug_locks)
2380                print_irqtrace_events(current);
2381#endif
2382}
2383
2384/*
2385 * We are not always called with irqs disabled - do that here,
2386 * and also avoid lockdep recursion:
2387 */
2388void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2389                  int trylock, int read, int check, unsigned long ip)
2390{
2391        unsigned long flags;
2392
2393        if (unlikely(current->lockdep_recursion))
2394                return;
2395
2396        raw_local_irq_save(flags);
2397        check_flags(flags);
2398
2399        current->lockdep_recursion = 1;
2400        __lock_acquire(lock, subclass, trylock, read, check,
2401                       irqs_disabled_flags(flags), ip);
2402        current->lockdep_recursion = 0;
2403        raw_local_irq_restore(flags);
2404}
2405
2406EXPORT_SYMBOL_GPL(lock_acquire);
2407
2408void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2409{
2410        unsigned long flags;
2411
2412        if (unlikely(current->lockdep_recursion))
2413                return;
2414
2415        raw_local_irq_save(flags);
2416        check_flags(flags);
2417        current->lockdep_recursion = 1;
2418        __lock_release(lock, nested, ip);
2419        current->lockdep_recursion = 0;
2420        raw_local_irq_restore(flags);
2421}
2422
2423EXPORT_SYMBOL_GPL(lock_release);
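
/*
 * Illustrative sketch (not part of the original file): lock_acquire() and
 * lock_release() are normally reached through per-lock-type wrapper macros
 * which encode the trylock/read/check arguments for each lock type, roughly
 * in the style of <linux/lockdep.h> (reproduced from memory - details may
 * differ):
 */
#if 0
#define spin_acquire(l, s, t, i)        lock_acquire(l, s, t, 0, 2, i)
#define spin_release(l, n, i)           lock_release(l, n, i)

#define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i)

#define mutex_acquire(l, s, t, i)       lock_acquire(l, s, t, 0, 2, i)
#define mutex_release(l, n, i)          lock_release(l, n, i)
#endif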
2424
2425/*
2426 * Used by the testsuite, sanitize the validator state
2427 * after a simulated failure:
2428 */
2429
2430void lockdep_reset(void)
2431{
2432        unsigned long flags;
2433
2434        raw_local_irq_save(flags);
2435        current->curr_chain_key = 0;
2436        current->lockdep_depth = 0;
2437        current->lockdep_recursion = 0;
2438        memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
2439        nr_hardirq_chains = 0;
2440        nr_softirq_chains = 0;
2441        nr_process_chains = 0;
2442        debug_locks = 1;
2443        raw_local_irq_restore(flags);
2444}
2445
2446static void zap_class(struct lock_class *class)
2447{
2448        int i;
2449
2450        /*
2451         * Remove all dependencies this lock is
2452         * involved in:
2453         */
2454        for (i = 0; i < nr_list_entries; i++) {
2455                if (list_entries[i].class == class)
2456                        list_del_rcu(&list_entries[i].entry);
2457        }
2458        /*
2459         * Unhash the class and remove it from the all_lock_classes list:
2460         */
2461        list_del_rcu(&class->hash_entry);
2462        list_del_rcu(&class->lock_entry);
2464}
2465
2466static inline int within(void *addr, void *start, unsigned long size)
2467{
2468        return addr >= start && addr < start + size;
2469}
2470
2471void lockdep_free_key_range(void *start, unsigned long size)
2472{
2473        struct lock_class *class, *next;
2474        struct list_head *head;
2475        unsigned long flags;
2476        int i;
2477
2478        raw_local_irq_save(flags);
2479        __raw_spin_lock(&hash_lock);
2480
2481        /*
2482         * Unhash all classes that were created by this module:
2483         */
2484        for (i = 0; i < CLASSHASH_SIZE; i++) {
2485                head = classhash_table + i;
2486                if (list_empty(head))
2487                        continue;
2488                list_for_each_entry_safe(class, next, head, hash_entry)
2489                        if (within(class->key, start, size))
2490                                zap_class(class);
2491        }
2492
2493        __raw_spin_unlock(&hash_lock);
2494        raw_local_irq_restore(flags);
2495}
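
/*
 * Illustrative sketch (not part of the original file): this is meant to be
 * called when memory that may contain lock-class keys goes away, most
 * notably on module unload, roughly as below (the struct module field names
 * are an assumption and may differ):
 */
#if 0
static void example_free_module(struct module *mod)
{
        /* zap every lock class whose key lives inside the module image */
        lockdep_free_key_range(mod->module_core, mod->core_size);
        /* ... then actually free the module's memory ... */
}
#endif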
2496
2497void lockdep_reset_lock(struct lockdep_map *lock)
2498{
2499        struct lock_class *class, *next;
2500        struct list_head *head;
2501        unsigned long flags;
2502        int i, j;
2503
2504        raw_local_irq_save(flags);
2505
2506        /*
2507         * Remove all classes this lock might have:
2508         */
2509        for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
2510                /*
2511                 * If the class exists we look it up and zap it:
2512                 */
2513                class = look_up_lock_class(lock, j);
2514                if (class)
2515                        zap_class(class);
2516        }
2517        /*
2518         * Debug check: in the end all mapped classes should
2519         * be gone.
2520         */
2521        __raw_spin_lock(&hash_lock);
2522        for (i = 0; i < CLASSHASH_SIZE; i++) {
2523                head = classhash_table + i;
2524                if (list_empty(head))
2525                        continue;
2526                list_for_each_entry_safe(class, next, head, hash_entry) {
2527                        if (unlikely(class == lock->class_cache)) {
2528                                __raw_spin_unlock(&hash_lock);
2529                                DEBUG_LOCKS_WARN_ON(1);
2530                                goto out_restore;
2531                        }
2532                }
2533        }
2534        __raw_spin_unlock(&hash_lock);
2535
2536out_restore:
2537        raw_local_irq_restore(flags);
2538}
2539
2540void __init lockdep_init(void)
2541{
2542        int i;
2543
2544        /*
2545         * Some architectures have their own start_kernel()
2546         * code which calls lockdep_init(), while we also
2547         * call lockdep_init() from start_kernel() itself,
2548         * and we want to initialize the hashes only once:
2549         */
2550        if (lockdep_initialized)
2551                return;
2552
2553        for (i = 0; i < CLASSHASH_SIZE; i++)
2554                INIT_LIST_HEAD(classhash_table + i);
2555
2556        for (i = 0; i < CHAINHASH_SIZE; i++)
2557                INIT_LIST_HEAD(chainhash_table + i);
2558
2559        lockdep_initialized = 1;
2560}
2561
2562void __init lockdep_info(void)
2563{
2564        printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
2565
2566        printk("... MAX_LOCKDEP_SUBCLASSES:    %lu\n", MAX_LOCKDEP_SUBCLASSES);
2567        printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
2568        printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
2569        printk("... CLASSHASH_SIZE:           %lu\n", CLASSHASH_SIZE);
2570        printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
2571        printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
2572        printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
2573
2574        printk(" memory used by lock dependency info: %lu kB\n",
2575                (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
2576                sizeof(struct list_head) * CLASSHASH_SIZE +
2577                sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
2578                sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
2579                sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
2580
2581        printk(" per task-struct memory footprint: %lu bytes\n",
2582                sizeof(struct held_lock) * MAX_LOCK_DEPTH);
2583
2584#ifdef CONFIG_DEBUG_LOCKDEP
2585        if (lockdep_init_error)
2586                printk("WARNING: lockdep init error! Arch code didnt call lockdep_init() early enough?\n");
2587#endif
2588}
2589
2590static inline int in_range(const void *start, const void *addr, const void *end)
2591{
2592        return addr >= start && addr <= end;
2593}
2594
2595static void
2596print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
2597                     const void *mem_to, struct held_lock *hlock)
2598{
2599        if (!debug_locks_off())
2600                return;
2601        if (debug_locks_silent)
2602                return;
2603
2604        printk("\n=========================\n");
2605        printk(  "[ BUG: held lock freed! ]\n");
2606        printk(  "-------------------------\n");
2607        printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
2608                curr->comm, curr->pid, mem_from, mem_to-1);
2609        print_lock(hlock);
2610        lockdep_print_held_locks(curr);
2611
2612        printk("\nstack backtrace:\n");
2613        dump_stack();
2614}
2615
2616/*
2617 * Called when kernel memory is freed (or unmapped), or if a lock
2618 * is destroyed or reinitialized - this code checks whether there is
2619 * any held lock in the memory range of <from> to <to>:
2620 */
2621void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
2622{
2623        const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
2624        struct task_struct *curr = current;
2625        struct held_lock *hlock;
2626        unsigned long flags;
2627        int i;
2628
2629        if (unlikely(!debug_locks))
2630                return;
2631
2632        local_irq_save(flags);
2633        for (i = 0; i < curr->lockdep_depth; i++) {
2634                hlock = curr->held_locks + i;
2635
2636                lock_from = (void *)hlock->instance;
2637                lock_to = (void *)(hlock->instance + 1);
2638
2639                if (!in_range(mem_from, lock_from, mem_to) &&
2640                                        !in_range(mem_from, lock_to, mem_to))
2641                        continue;
2642
2643                print_freed_lock_bug(curr, mem_from, mem_to, hlock);
2644                break;
2645        }
2646        local_irq_restore(flags);
2647}
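
/*
 * Illustrative sketch (not part of the original file): the typical bug the
 * check above catches is freeing an object that embeds a currently held
 * lock - the allocator's free path is expected to call
 * debug_check_no_locks_freed() on the range being freed. The struct and
 * function names are made up:
 */
#if 0
struct example_obj {
        spinlock_t lock;
        /* ... payload ... */
};

static void example_buggy_teardown(struct example_obj *obj)
{
        spin_lock(&obj->lock);
        /* ... */
        kfree(obj);             /* obj->lock is still held: "held lock freed!" */
        /* the later spin_unlock(&obj->lock) would touch freed memory */
}
#endif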
2648
2649static void print_held_locks_bug(struct task_struct *curr)
2650{
2651        if (!debug_locks_off())
2652                return;
2653        if (debug_locks_silent)
2654                return;
2655
2656        printk("\n=====================================\n");
2657        printk(  "[ BUG: lock held at task exit time! ]\n");
2658        printk(  "-------------------------------------\n");
2659        printk("%s/%d is exiting with locks still held!\n",
2660                curr->comm, curr->pid);
2661        lockdep_print_held_locks(curr);
2662
2663        printk("\nstack backtrace:\n");
2664        dump_stack();
2665}
2666
2667void debug_check_no_locks_held(struct task_struct *task)
2668{
2669        if (unlikely(task->lockdep_depth > 0))
2670                print_held_locks_bug(task);
2671}
2672
2673void debug_show_all_locks(void)
2674{
2675        struct task_struct *g, *p;
2676        int count = 10;
2677        int unlock = 1;
2678
2679        printk("\nShowing all locks held in the system:\n");
2680
2681        /*
2682         * Here we try to get the tasklist_lock as hard as possible;
2683         * if we cannot get it within 2 seconds we print without it
2684         * (but keep retrying). This enables a debug printout even if a
2685         * tasklist_lock-holding task deadlocks or crashes.
2686         */
2687retry:
2688        if (!read_trylock(&tasklist_lock)) {
2689                if (count == 10)
2690                        printk("hm, tasklist_lock locked, retrying... ");
2691                if (count) {
2692                        count--;
2693                        printk(" #%d", 10-count);
2694                        mdelay(200);
2695                        goto retry;
2696                }
2697                printk(" ignoring it.\n");
2698                unlock = 0;
2699        }
2700        if (count != 10)
2701                printk(" locked it.\n");
2702
2703        do_each_thread(g, p) {
2704                if (p->lockdep_depth)
2705                        lockdep_print_held_locks(p);
2706                if (!unlock)
2707                        if (read_trylock(&tasklist_lock))
2708                                unlock = 1;
2709        } while_each_thread(g, p);
2710
2711        printk("\n");
2712        printk("=============================================\n\n");
2713
2714        if (unlock)
2715                read_unlock(&tasklist_lock);
2716}
2717
2718EXPORT_SYMBOL_GPL(debug_show_all_locks);
2719
2720void debug_show_held_locks(struct task_struct *task)
2721{
2722        lockdep_print_held_locks(task);
2723}
2724
2725EXPORT_SYMBOL_GPL(debug_show_held_locks);
2726
2727