linux/kernel/lockdep.c
   1/*
   2 * kernel/lockdep.c
   3 *
   4 * Runtime locking correctness validator
   5 *
   6 * Started by Ingo Molnar:
   7 *
   8 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   9 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  10 *
  11 * this code maps all the lock dependencies as they occur in a live kernel
  12 * and will warn about the following classes of locking bugs:
  13 *
  14 * - lock inversion scenarios
  15 * - circular lock dependencies
  16 * - hardirq/softirq safe/unsafe locking bugs
  17 *
  18 * Bugs are reported even if the current locking scenario does not cause
  19 * any deadlock at this point.
  20 *
   21 * I.e. if any time in the past two locks were taken in a different
   22 * order - even if it happened in another task, and even if those were
   23 * different lock instances of this lock's class - this code detects it.
  24 *
  25 * Thanks to Arjan van de Ven for coming up with the initial idea of
   26 * mapping lock dependencies at runtime.
  27 */
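
/*
 * Illustration (a sketch, not part of the validator): the classic AB-BA
 * inversion that lockdep reports even if it never actually deadlocks at
 * runtime:
 */
#if 0
static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

void cpu0_path(void)
{
        spin_lock(&lock_a);
        spin_lock(&lock_b);             /* records the dependency A -> B */
        spin_unlock(&lock_b);
        spin_unlock(&lock_a);
}

void cpu1_path(void)
{
        spin_lock(&lock_b);
        spin_lock(&lock_a);             /* B -> A closes a cycle: warning */
        spin_unlock(&lock_a);
        spin_unlock(&lock_b);
}
#endif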
  28#include <linux/mutex.h>
  29#include <linux/sched.h>
  30#include <linux/delay.h>
  31#include <linux/module.h>
  32#include <linux/proc_fs.h>
  33#include <linux/seq_file.h>
  34#include <linux/spinlock.h>
  35#include <linux/kallsyms.h>
  36#include <linux/interrupt.h>
  37#include <linux/stacktrace.h>
  38#include <linux/debug_locks.h>
  39#include <linux/irqflags.h>
  40#include <linux/utsname.h>
  41#include <linux/hash.h>
  42#include <linux/ftrace.h>
  43
  44#include <asm/sections.h>
  45
  46#include "lockdep_internals.h"
  47
  48#ifdef CONFIG_PROVE_LOCKING
  49int prove_locking = 1;
  50module_param(prove_locking, int, 0644);
  51#else
  52#define prove_locking 0
  53#endif
  54
  55#ifdef CONFIG_LOCK_STAT
  56int lock_stat = 1;
  57module_param(lock_stat, int, 0644);
  58#else
  59#define lock_stat 0
  60#endif
  61
  62/*
  63 * lockdep_lock: protects the lockdep graph, the hashes and the
  64 *               class/list/hash allocators.
  65 *
  66 * This is one of the rare exceptions where it's justified
   67 * to use a raw spinlock - we really don't want the spinlock
  68 * code to recurse back into the lockdep code...
  69 */
  70static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  71
  72static int graph_lock(void)
  73{
  74        __raw_spin_lock(&lockdep_lock);
  75        /*
  76         * Make sure that if another CPU detected a bug while
   77         * walking the graph we don't change it (while the other
  78         * CPU is busy printing out stuff with the graph lock
  79         * dropped already)
  80         */
  81        if (!debug_locks) {
  82                __raw_spin_unlock(&lockdep_lock);
  83                return 0;
  84        }
  85        /* prevent any recursions within lockdep from causing deadlocks */
  86        current->lockdep_recursion++;
  87        return 1;
  88}
  89
  90static inline int graph_unlock(void)
  91{
  92        if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
  93                return DEBUG_LOCKS_WARN_ON(1);
  94
  95        current->lockdep_recursion--;
  96        __raw_spin_unlock(&lockdep_lock);
  97        return 0;
  98}
  99
 100/*
 101 * Turn lock debugging off and return with 0 if it was off already,
 102 * and also release the graph lock:
 103 */
 104static inline int debug_locks_off_graph_unlock(void)
 105{
 106        int ret = debug_locks_off();
 107
 108        __raw_spin_unlock(&lockdep_lock);
 109
 110        return ret;
 111}
 112
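/*
 * Typical use of the helpers above (a sketch of the pattern that
 * register_lock_class() and friends follow later in this file): the
 * graph lock is taken with hardirqs disabled, and a 0 return from
 * graph_lock() means another CPU has already turned lockdep off:
 */
#if 0
        unsigned long flags;

        raw_local_irq_save(flags);
        if (!graph_lock()) {
                raw_local_irq_restore(flags);
                return NULL;
        }
        /* ... inspect or extend the dependency graph ... */
        graph_unlock();
        raw_local_irq_restore(flags);
#endif
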
 113static int lockdep_initialized;
 114
 115unsigned long nr_list_entries;
 116static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 117
 118/*
  119 * All data structures here are protected by the graph lock (lockdep_lock).
 120 *
  121 * Mutex key structs only get allocated once, during bootup, and never
 122 * get freed - this significantly simplifies the debugging code.
 123 */
 124unsigned long nr_lock_classes;
 125static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 126
 127static inline struct lock_class *hlock_class(struct held_lock *hlock)
 128{
 129        if (!hlock->class_idx) {
 130                DEBUG_LOCKS_WARN_ON(1);
 131                return NULL;
 132        }
 133        return lock_classes + hlock->class_idx - 1;
 134}
 135
 136#ifdef CONFIG_LOCK_STAT
 137static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 138
 139static int lock_contention_point(struct lock_class *class, unsigned long ip)
 140{
 141        int i;
 142
 143        for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
 144                if (class->contention_point[i] == 0) {
 145                        class->contention_point[i] = ip;
 146                        break;
 147                }
 148                if (class->contention_point[i] == ip)
 149                        break;
 150        }
 151
 152        return i;
 153}
 154
 155static void lock_time_inc(struct lock_time *lt, s64 time)
 156{
 157        if (time > lt->max)
 158                lt->max = time;
 159
 160        if (time < lt->min || !lt->min)
 161                lt->min = time;
 162
 163        lt->total += time;
 164        lt->nr++;
 165}
 166
 167static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 168{
  169        if (src->nr && (src->min < dst->min || !dst->nr)) dst->min = src->min;
  170        if (src->max > dst->max) dst->max = src->max;
 171        dst->total += src->total;
 172        dst->nr += src->nr;
 173}
 174
 175struct lock_class_stats lock_stats(struct lock_class *class)
 176{
 177        struct lock_class_stats stats;
 178        int cpu, i;
 179
 180        memset(&stats, 0, sizeof(struct lock_class_stats));
 181        for_each_possible_cpu(cpu) {
 182                struct lock_class_stats *pcs =
 183                        &per_cpu(lock_stats, cpu)[class - lock_classes];
 184
 185                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
 186                        stats.contention_point[i] += pcs->contention_point[i];
 187
 188                lock_time_add(&pcs->read_waittime, &stats.read_waittime);
 189                lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 190
 191                lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
 192                lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
 193
 194                for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
 195                        stats.bounces[i] += pcs->bounces[i];
 196        }
 197
 198        return stats;
 199}
 200
 201void clear_lock_stats(struct lock_class *class)
 202{
 203        int cpu;
 204
 205        for_each_possible_cpu(cpu) {
 206                struct lock_class_stats *cpu_stats =
 207                        &per_cpu(lock_stats, cpu)[class - lock_classes];
 208
 209                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
 210        }
 211        memset(class->contention_point, 0, sizeof(class->contention_point));
 212}
 213
 214static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 215{
 216        return &get_cpu_var(lock_stats)[class - lock_classes];
 217}
 218
 219static void put_lock_stats(struct lock_class_stats *stats)
 220{
 221        put_cpu_var(lock_stats);
 222}
 223
 224static void lock_release_holdtime(struct held_lock *hlock)
 225{
 226        struct lock_class_stats *stats;
 227        s64 holdtime;
 228
 229        if (!lock_stat)
 230                return;
 231
 232        holdtime = sched_clock() - hlock->holdtime_stamp;
 233
 234        stats = get_lock_stats(hlock_class(hlock));
 235        if (hlock->read)
 236                lock_time_inc(&stats->read_holdtime, holdtime);
 237        else
 238                lock_time_inc(&stats->write_holdtime, holdtime);
 239        put_lock_stats(stats);
 240}
 241#else
 242static inline void lock_release_holdtime(struct held_lock *hlock)
 243{
 244}
 245#endif
 246
 247/*
 248 * We keep a global list of all lock classes. The list only grows,
 249 * never shrinks. The list is only accessed with the lockdep
  250 * spinlock held.
 251 */
 252LIST_HEAD(all_lock_classes);
 253
 254/*
 255 * The lockdep classes are in a hash-table as well, for fast lookup:
 256 */
 257#define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
 258#define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
 259#define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
 260#define classhashentry(key)     (classhash_table + __classhashfn((key)))
 261
 262static struct list_head classhash_table[CLASSHASH_SIZE];
 263
 264/*
 265 * We put the lock dependency chains into a hash-table as well, to cache
 266 * their existence:
 267 */
 268#define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
 269#define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
 270#define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
 271#define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
 272
 273static struct list_head chainhash_table[CHAINHASH_SIZE];
 274
 275/*
 276 * The hash key of the lock dependency chains is a hash itself too:
 277 * it's a hash of all locks taken up to that lock, including that lock.
 278 * It's a 64-bit hash, because it's important for the keys to be
 279 * unique.
 280 */
 281#define iterate_chain_key(key1, key2) \
 282        (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
 283        ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
 284        (key2))
 285
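/*
 * Sketch of how the chain key evolves (the real update happens in
 * __lock_acquire()): every held lock's class index is folded into the
 * running 64-bit key, so the same sequence of classes always produces
 * the same chain key ("idx1"/"idx2" are illustrative placeholders):
 */
#if 0
        u64 chain_key = 0;

        chain_key = iterate_chain_key(chain_key, idx1); /* after lock 1 */
        chain_key = iterate_chain_key(chain_key, idx2); /* after lock 2 */
        /* taking the same locks in another order yields a different key */
#endif
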
 286void lockdep_off(void)
 287{
 288        current->lockdep_recursion++;
 289}
 290
 291EXPORT_SYMBOL(lockdep_off);
 292
 293void lockdep_on(void)
 294{
 295        current->lockdep_recursion--;
 296}
 297
 298EXPORT_SYMBOL(lockdep_on);
 299
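/*
 * lockdep_off()/lockdep_on() bracket code whose locking must not be
 * tracked - printk(), for instance, uses this to avoid recursing into
 * lockdep. A minimal sketch:
 */
#if 0
        lockdep_off();
        /* locking in here is invisible to the validator */
        lockdep_on();
#endif
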
 300/*
 301 * Debugging switches:
 302 */
 303
 304#define VERBOSE                 0
 305#define VERY_VERBOSE            0
 306
 307#if VERBOSE
 308# define HARDIRQ_VERBOSE        1
 309# define SOFTIRQ_VERBOSE        1
 310#else
 311# define HARDIRQ_VERBOSE        0
 312# define SOFTIRQ_VERBOSE        0
 313#endif
 314
 315#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
 316/*
 317 * Quick filtering for interesting events:
 318 */
 319static int class_filter(struct lock_class *class)
 320{
 321#if 0
 322        /* Example */
 323        if (class->name_version == 1 &&
 324                        !strcmp(class->name, "lockname"))
 325                return 1;
 326        if (class->name_version == 1 &&
 327                        !strcmp(class->name, "&struct->lockfield"))
 328                return 1;
 329#endif
  330        /* Filter everything else; returning 1 here would allow everything. */
 331        return 0;
 332}
 333#endif
 334
 335static int verbose(struct lock_class *class)
 336{
 337#if VERBOSE
 338        return class_filter(class);
 339#endif
 340        return 0;
 341}
 342
 343/*
 344 * Stack-trace: tightly packed array of stack backtrace
 345 * addresses. Protected by the graph_lock.
 346 */
 347unsigned long nr_stack_trace_entries;
 348static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
 349
 350static int save_trace(struct stack_trace *trace)
 351{
 352        trace->nr_entries = 0;
 353        trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
 354        trace->entries = stack_trace + nr_stack_trace_entries;
 355
 356        trace->skip = 3;
 357
 358        save_stack_trace(trace);
 359
 360        trace->max_entries = trace->nr_entries;
 361
 362        nr_stack_trace_entries += trace->nr_entries;
 363
 364        if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
 365                if (!debug_locks_off_graph_unlock())
 366                        return 0;
 367
 368                printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
 369                printk("turning off the locking correctness validator.\n");
 370                dump_stack();
 371
 372                return 0;
 373        }
 374
 375        return 1;
 376}
 377
 378unsigned int nr_hardirq_chains;
 379unsigned int nr_softirq_chains;
 380unsigned int nr_process_chains;
 381unsigned int max_lockdep_depth;
 382unsigned int max_recursion_depth;
 383
 384static unsigned int lockdep_dependency_gen_id;
 385
 386static bool lockdep_dependency_visit(struct lock_class *source,
 387                                     unsigned int depth)
 388{
 389        if (!depth)
 390                lockdep_dependency_gen_id++;
 391        if (source->dep_gen_id == lockdep_dependency_gen_id)
 392                return true;
 393        source->dep_gen_id = lockdep_dependency_gen_id;
 394        return false;
 395}
 396
 397#ifdef CONFIG_DEBUG_LOCKDEP
 398/*
  399 * We cannot printk in early bootup code, and not even early_printk()
  400 * may work yet. So we mark any initialization errors and printk
 401 * about it later on, in lockdep_info().
 402 */
 403static int lockdep_init_error;
 404static unsigned long lockdep_init_trace_data[20];
 405static struct stack_trace lockdep_init_trace = {
 406        .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
 407        .entries = lockdep_init_trace_data,
 408};
 409
 410/*
 411 * Various lockdep statistics:
 412 */
 413atomic_t chain_lookup_hits;
 414atomic_t chain_lookup_misses;
 415atomic_t hardirqs_on_events;
 416atomic_t hardirqs_off_events;
 417atomic_t redundant_hardirqs_on;
 418atomic_t redundant_hardirqs_off;
 419atomic_t softirqs_on_events;
 420atomic_t softirqs_off_events;
 421atomic_t redundant_softirqs_on;
 422atomic_t redundant_softirqs_off;
 423atomic_t nr_unused_locks;
 424atomic_t nr_cyclic_checks;
 425atomic_t nr_cyclic_check_recursions;
 426atomic_t nr_find_usage_forwards_checks;
 427atomic_t nr_find_usage_forwards_recursions;
 428atomic_t nr_find_usage_backwards_checks;
 429atomic_t nr_find_usage_backwards_recursions;
 430# define debug_atomic_inc(ptr)          atomic_inc(ptr)
 431# define debug_atomic_dec(ptr)          atomic_dec(ptr)
 432# define debug_atomic_read(ptr)         atomic_read(ptr)
 433#else
 434# define debug_atomic_inc(ptr)          do { } while (0)
 435# define debug_atomic_dec(ptr)          do { } while (0)
 436# define debug_atomic_read(ptr)         0
 437#endif
 438
 439/*
 440 * Locking printouts:
 441 */
 442
 443static const char *usage_str[] =
 444{
 445        [LOCK_USED] =                   "initial-use ",
 446        [LOCK_USED_IN_HARDIRQ] =        "in-hardirq-W",
 447        [LOCK_USED_IN_SOFTIRQ] =        "in-softirq-W",
 448        [LOCK_ENABLED_SOFTIRQS] =       "softirq-on-W",
 449        [LOCK_ENABLED_HARDIRQS] =       "hardirq-on-W",
 450        [LOCK_USED_IN_HARDIRQ_READ] =   "in-hardirq-R",
 451        [LOCK_USED_IN_SOFTIRQ_READ] =   "in-softirq-R",
 452        [LOCK_ENABLED_SOFTIRQS_READ] =  "softirq-on-R",
 453        [LOCK_ENABLED_HARDIRQS_READ] =  "hardirq-on-R",
 454};
 455
 456const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 457{
 458        return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 459}
 460
 461void
 462get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
 463{
 464        *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
 465
 466        if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
 467                *c1 = '+';
 468        else
 469                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
 470                        *c1 = '-';
 471
 472        if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
 473                *c2 = '+';
 474        else
 475                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
 476                        *c2 = '-';
 477
 478        if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 479                *c3 = '-';
 480        if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
 481                *c3 = '+';
 482                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 483                        *c3 = '?';
 484        }
 485
 486        if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
 487                *c4 = '-';
 488        if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
 489                *c4 = '+';
 490                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
 491                        *c4 = '?';
 492        }
 493}
 494
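/*
 * Example decoding (the lock name is illustrative): "(&dev->lock){-+..}"
 * means the lock has been held with hardirqs enabled ('-' in the hardirq
 * slot) and has been used in softirq context ('+' in the softirq slot);
 * the two read slots are unused ('.'). A '?' in a read slot means both
 * irqs-enabled and in-irq read usage were observed.
 */
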
 495static void print_lock_name(struct lock_class *class)
 496{
 497        char str[KSYM_NAME_LEN], c1, c2, c3, c4;
 498        const char *name;
 499
 500        get_usage_chars(class, &c1, &c2, &c3, &c4);
 501
 502        name = class->name;
 503        if (!name) {
 504                name = __get_key_name(class->key, str);
 505                printk(" (%s", name);
 506        } else {
 507                printk(" (%s", name);
 508                if (class->name_version > 1)
 509                        printk("#%d", class->name_version);
 510                if (class->subclass)
 511                        printk("/%d", class->subclass);
 512        }
 513        printk("){%c%c%c%c}", c1, c2, c3, c4);
 514}
 515
 516static void print_lockdep_cache(struct lockdep_map *lock)
 517{
 518        const char *name;
 519        char str[KSYM_NAME_LEN];
 520
 521        name = lock->name;
 522        if (!name)
 523                name = __get_key_name(lock->key->subkeys, str);
 524
 525        printk("%s", name);
 526}
 527
 528static void print_lock(struct held_lock *hlock)
 529{
 530        print_lock_name(hlock_class(hlock));
 531        printk(", at: ");
 532        print_ip_sym(hlock->acquire_ip);
 533}
 534
 535static void lockdep_print_held_locks(struct task_struct *curr)
 536{
 537        int i, depth = curr->lockdep_depth;
 538
 539        if (!depth) {
 540                printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
 541                return;
 542        }
 543        printk("%d lock%s held by %s/%d:\n",
 544                depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
 545
 546        for (i = 0; i < depth; i++) {
 547                printk(" #%d: ", i);
 548                print_lock(curr->held_locks + i);
 549        }
 550}
 551
 552static void print_lock_class_header(struct lock_class *class, int depth)
 553{
 554        int bit;
 555
 556        printk("%*s->", depth, "");
 557        print_lock_name(class);
 558        printk(" ops: %lu", class->ops);
 559        printk(" {\n");
 560
 561        for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
 562                if (class->usage_mask & (1 << bit)) {
 563                        int len = depth;
 564
 565                        len += printk("%*s   %s", depth, "", usage_str[bit]);
 566                        len += printk(" at:\n");
 567                        print_stack_trace(class->usage_traces + bit, len);
 568                }
 569        }
 570        printk("%*s }\n", depth, "");
 571
  572        printk("%*s ... key      at: ", depth, "");
 573        print_ip_sym((unsigned long)class->key);
 574}
 575
 576/*
 577 * printk all lock dependencies starting at <entry>:
 578 */
 579static void print_lock_dependencies(struct lock_class *class, int depth)
 580{
 581        struct lock_list *entry;
 582
 583        if (lockdep_dependency_visit(class, depth))
 584                return;
 585
 586        if (DEBUG_LOCKS_WARN_ON(depth >= 20))
 587                return;
 588
 589        print_lock_class_header(class, depth);
 590
 591        list_for_each_entry(entry, &class->locks_after, entry) {
 592                if (DEBUG_LOCKS_WARN_ON(!entry->class))
 593                        return;
 594
 595                print_lock_dependencies(entry->class, depth + 1);
 596
  597                printk("%*s ... acquired at:\n", depth, "");
 598                print_stack_trace(&entry->trace, 2);
 599                printk("\n");
 600        }
 601}
 602
 603static void print_kernel_version(void)
 604{
 605        printk("%s %.*s\n", init_utsname()->release,
 606                (int)strcspn(init_utsname()->version, " "),
 607                init_utsname()->version);
 608}
 609
 610static int very_verbose(struct lock_class *class)
 611{
 612#if VERY_VERBOSE
 613        return class_filter(class);
 614#endif
 615        return 0;
 616}
 617
 618/*
 619 * Is this the address of a static object:
 620 */
 621static int static_obj(void *obj)
 622{
 623        unsigned long start = (unsigned long) &_stext,
 624                      end   = (unsigned long) &_end,
 625                      addr  = (unsigned long) obj;
 626#ifdef CONFIG_SMP
 627        int i;
 628#endif
 629
 630        /*
 631         * static variable?
 632         */
 633        if ((addr >= start) && (addr < end))
 634                return 1;
 635
 636#ifdef CONFIG_SMP
 637        /*
 638         * percpu var?
 639         */
 640        for_each_possible_cpu(i) {
 641                start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
 642                end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
 643                                        + per_cpu_offset(i);
 644
 645                if ((addr >= start) && (addr < end))
 646                        return 1;
 647        }
 648#endif
 649
 650        /*
 651         * module var?
 652         */
 653        return is_module_address(addr);
 654}
 655
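/*
 * Why static_obj() matters (a sketch): a lock embedded in a heap object
 * cannot use its own address as the class key, but spin_lock_init() and
 * mutex_init() pass the address of a static lock_class_key behind the
 * scenes, so every instance of the structure shares one lock class:
 */
#if 0
struct foo {
        spinlock_t lock;
};

void make_foo(void)
{
        struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

        spin_lock_init(&f->lock);       /* the key is static, the object is not */
}
#endif
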
 656/*
 657 * To make lock name printouts unique, we calculate a unique
 658 * class->name_version generation counter:
 659 */
 660static int count_matching_names(struct lock_class *new_class)
 661{
 662        struct lock_class *class;
 663        int count = 0;
 664
 665        if (!new_class->name)
 666                return 0;
 667
 668        list_for_each_entry(class, &all_lock_classes, lock_entry) {
 669                if (new_class->key - new_class->subclass == class->key)
 670                        return class->name_version;
 671                if (class->name && !strcmp(class->name, new_class->name))
 672                        count = max(count, class->name_version);
 673        }
 674
 675        return count + 1;
 676}
 677
 678/*
  679 * Look up a lock's class in the hash-table. Returns the class if it is
  680 * present, NULL otherwise. (Registering a missing class is the job of
  681 * register_lock_class() below, which caches the result in the lock.)
 682 */
 683static inline struct lock_class *
 684look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 685{
 686        struct lockdep_subclass_key *key;
 687        struct list_head *hash_head;
 688        struct lock_class *class;
 689
 690#ifdef CONFIG_DEBUG_LOCKDEP
 691        /*
 692         * If the architecture calls into lockdep before initializing
 693         * the hashes then we'll warn about it later. (we cannot printk
 694         * right now)
 695         */
 696        if (unlikely(!lockdep_initialized)) {
 697                lockdep_init();
 698                lockdep_init_error = 1;
 699                save_stack_trace(&lockdep_init_trace);
 700        }
 701#endif
 702
 703        /*
 704         * Static locks do not have their class-keys yet - for them the key
 705         * is the lock object itself:
 706         */
 707        if (unlikely(!lock->key))
 708                lock->key = (void *)lock;
 709
 710        /*
 711         * NOTE: the class-key must be unique. For dynamic locks, a static
 712         * lock_class_key variable is passed in through the mutex_init()
 713         * (or spin_lock_init()) call - which acts as the key. For static
 714         * locks we use the lock object itself as the key.
 715         */
 716        BUILD_BUG_ON(sizeof(struct lock_class_key) >
 717                        sizeof(struct lockdep_map));
 718
 719        key = lock->key->subkeys + subclass;
 720
 721        hash_head = classhashentry(key);
 722
 723        /*
 724         * We can walk the hash lockfree, because the hash only
 725         * grows, and we are careful when adding entries to the end:
 726         */
 727        list_for_each_entry(class, hash_head, hash_entry) {
 728                if (class->key == key) {
 729                        WARN_ON_ONCE(class->name != lock->name);
 730                        return class;
 731                }
 732        }
 733
 734        return NULL;
 735}
 736
 737/*
 738 * Register a lock's class in the hash-table, if the class is not present
 739 * yet. Otherwise we look it up. We cache the result in the lock object
  740 * itself, so the hash lookup should happen only once per lock object.
 741 */
 742static inline struct lock_class *
 743register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 744{
 745        struct lockdep_subclass_key *key;
 746        struct list_head *hash_head;
 747        struct lock_class *class;
 748        unsigned long flags;
 749
 750        class = look_up_lock_class(lock, subclass);
 751        if (likely(class))
 752                return class;
 753
 754        /*
 755         * Debug-check: all keys must be persistent!
 756         */
 757        if (!static_obj(lock->key)) {
 758                debug_locks_off();
 759                printk("INFO: trying to register non-static key.\n");
 760                printk("the code is fine but needs lockdep annotation.\n");
 761                printk("turning off the locking correctness validator.\n");
 762                dump_stack();
 763
 764                return NULL;
 765        }
 766
 767        key = lock->key->subkeys + subclass;
 768        hash_head = classhashentry(key);
 769
 770        raw_local_irq_save(flags);
 771        if (!graph_lock()) {
 772                raw_local_irq_restore(flags);
 773                return NULL;
 774        }
 775        /*
 776         * We have to do the hash-walk again, to avoid races
 777         * with another CPU:
 778         */
 779        list_for_each_entry(class, hash_head, hash_entry)
 780                if (class->key == key)
 781                        goto out_unlock_set;
 782        /*
 783         * Allocate a new key from the static array, and add it to
 784         * the hash:
 785         */
 786        if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 787                if (!debug_locks_off_graph_unlock()) {
 788                        raw_local_irq_restore(flags);
 789                        return NULL;
 790                }
 791                raw_local_irq_restore(flags);
 792
 793                printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 794                printk("turning off the locking correctness validator.\n");
 795                return NULL;
 796        }
 797        class = lock_classes + nr_lock_classes++;
 798        debug_atomic_inc(&nr_unused_locks);
 799        class->key = key;
 800        class->name = lock->name;
 801        class->subclass = subclass;
 802        INIT_LIST_HEAD(&class->lock_entry);
 803        INIT_LIST_HEAD(&class->locks_before);
 804        INIT_LIST_HEAD(&class->locks_after);
 805        class->name_version = count_matching_names(class);
 806        /*
 807         * We use RCU's safe list-add method to make
 808         * parallel walking of the hash-list safe:
 809         */
 810        list_add_tail_rcu(&class->hash_entry, hash_head);
 811        /*
 812         * Add it to the global list of classes:
 813         */
 814        list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
 815
 816        if (verbose(class)) {
 817                graph_unlock();
 818                raw_local_irq_restore(flags);
 819
 820                printk("\nnew class %p: %s", class->key, class->name);
 821                if (class->name_version > 1)
 822                        printk("#%d", class->name_version);
 823                printk("\n");
 824                dump_stack();
 825
 826                raw_local_irq_save(flags);
 827                if (!graph_lock()) {
 828                        raw_local_irq_restore(flags);
 829                        return NULL;
 830                }
 831        }
 832out_unlock_set:
 833        graph_unlock();
 834        raw_local_irq_restore(flags);
 835
 836        if (!subclass || force)
 837                lock->class_cache = class;
 838
 839        if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
 840                return NULL;
 841
 842        return class;
 843}
 844
 845#ifdef CONFIG_PROVE_LOCKING
 846/*
  847 * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
  848 * NULL on failure.)
 849 */
 850static struct lock_list *alloc_list_entry(void)
 851{
 852        if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
 853                if (!debug_locks_off_graph_unlock())
 854                        return NULL;
 855
 856                printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
 857                printk("turning off the locking correctness validator.\n");
 858                return NULL;
 859        }
 860        return list_entries + nr_list_entries++;
 861}
 862
 863/*
 864 * Add a new dependency to the head of the list:
 865 */
 866static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 867                            struct list_head *head, unsigned long ip, int distance)
 868{
 869        struct lock_list *entry;
 870        /*
 871         * Lock not present yet - get a new dependency struct and
 872         * add it to the list:
 873         */
 874        entry = alloc_list_entry();
 875        if (!entry)
 876                return 0;
 877
 878        if (!save_trace(&entry->trace))
 879                return 0;
 880
 881        entry->class = this;
 882        entry->distance = distance;
 883        /*
 884         * Since we never remove from the dependency list, the list can
 885         * be walked lockless by other CPUs, it's only allocation
 886         * that must be protected by the spinlock. But this also means
 887         * we must make new entries visible only once writes to the
 888         * entry become visible - hence the RCU op:
 889         */
 890        list_add_tail_rcu(&entry->entry, head);
 891
 892        return 1;
 893}
 894
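/*
 * The reader side implied by the RCU comment above, sketched: because
 * entries are only ever appended with list_add_tail_rcu(), a walker such
 * as check_noncircular() can traverse a dependency list even while
 * another CPU is extending it - fully initialized entries are all it
 * will ever see:
 */
#if 0
        struct lock_list *entry;

        list_for_each_entry(entry, &class->locks_after, entry) {
                /* entry->class and entry->trace were written before the
                   entry became visible on the list */
        }
#endif
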
 895/*
  896 * Recursive, forwards-direction lock-dependency checking, used both
  897 * for noncyclic checking and for hardirq-unsafe/softirq-unsafe
 898 * checking.
 899 *
 900 * (to keep the stackframe of the recursive functions small we
 901 *  use these global variables, and we also mark various helper
 902 *  functions as noinline.)
 903 */
 904static struct held_lock *check_source, *check_target;
 905
 906/*
 907 * Print a dependency chain entry (this is only done when a deadlock
 908 * has been detected):
 909 */
 910static noinline int
 911print_circular_bug_entry(struct lock_list *target, unsigned int depth)
 912{
 913        if (debug_locks_silent)
 914                return 0;
 915        printk("\n-> #%u", depth);
 916        print_lock_name(target->class);
 917        printk(":\n");
 918        print_stack_trace(&target->trace, 6);
 919
 920        return 0;
 921}
 922
 923/*
 924 * When a circular dependency is detected, print the
 925 * header first:
 926 */
 927static noinline int
 928print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 929{
 930        struct task_struct *curr = current;
 931
 932        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 933                return 0;
 934
 935        printk("\n=======================================================\n");
 936        printk(  "[ INFO: possible circular locking dependency detected ]\n");
 937        print_kernel_version();
 938        printk(  "-------------------------------------------------------\n");
 939        printk("%s/%d is trying to acquire lock:\n",
 940                curr->comm, task_pid_nr(curr));
 941        print_lock(check_source);
 942        printk("\nbut task is already holding lock:\n");
 943        print_lock(check_target);
 944        printk("\nwhich lock already depends on the new lock.\n\n");
 945        printk("\nthe existing dependency chain (in reverse order) is:\n");
 946
 947        print_circular_bug_entry(entry, depth);
 948
 949        return 0;
 950}
 951
 952static noinline int print_circular_bug_tail(void)
 953{
 954        struct task_struct *curr = current;
 955        struct lock_list this;
 956
 957        if (debug_locks_silent)
 958                return 0;
 959
 960        this.class = hlock_class(check_source);
 961        if (!save_trace(&this.trace))
 962                return 0;
 963
 964        print_circular_bug_entry(&this, 0);
 965
 966        printk("\nother info that might help us debug this:\n\n");
 967        lockdep_print_held_locks(curr);
 968
 969        printk("\nstack backtrace:\n");
 970        dump_stack();
 971
 972        return 0;
 973}
 974
 975#define RECURSION_LIMIT 40
 976
  977static noinline int print_infinite_recursion_bug(void)
 978{
 979        if (!debug_locks_off_graph_unlock())
 980                return 0;
 981
 982        WARN_ON(1);
 983
 984        return 0;
 985}
 986
 987unsigned long __lockdep_count_forward_deps(struct lock_class *class,
 988                                           unsigned int depth)
 989{
 990        struct lock_list *entry;
 991        unsigned long ret = 1;
 992
 993        if (lockdep_dependency_visit(class, depth))
 994                return 0;
 995
 996        /*
 997         * Recurse this class's dependency list:
 998         */
 999        list_for_each_entry(entry, &class->locks_after, entry)
1000                ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1001
1002        return ret;
1003}
1004
1005unsigned long lockdep_count_forward_deps(struct lock_class *class)
1006{
1007        unsigned long ret, flags;
1008
1009        local_irq_save(flags);
1010        __raw_spin_lock(&lockdep_lock);
1011        ret = __lockdep_count_forward_deps(class, 0);
1012        __raw_spin_unlock(&lockdep_lock);
1013        local_irq_restore(flags);
1014
1015        return ret;
1016}
1017
1018unsigned long __lockdep_count_backward_deps(struct lock_class *class,
1019                                            unsigned int depth)
1020{
1021        struct lock_list *entry;
1022        unsigned long ret = 1;
1023
1024        if (lockdep_dependency_visit(class, depth))
1025                return 0;
1026        /*
1027         * Recurse this class's dependency list:
1028         */
1029        list_for_each_entry(entry, &class->locks_before, entry)
1030                ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1031
1032        return ret;
1033}
1034
1035unsigned long lockdep_count_backward_deps(struct lock_class *class)
1036{
1037        unsigned long ret, flags;
1038
1039        local_irq_save(flags);
1040        __raw_spin_lock(&lockdep_lock);
1041        ret = __lockdep_count_backward_deps(class, 0);
1042        __raw_spin_unlock(&lockdep_lock);
1043        local_irq_restore(flags);
1044
1045        return ret;
1046}
1047
1048/*
 1049 * Prove that the dependency graph starting at <entry> cannot
1050 * lead to <target>. Print an error and return 0 if it does.
1051 */
1052static noinline int
1053check_noncircular(struct lock_class *source, unsigned int depth)
1054{
1055        struct lock_list *entry;
1056
1057        if (lockdep_dependency_visit(source, depth))
1058                return 1;
1059
1060        debug_atomic_inc(&nr_cyclic_check_recursions);
1061        if (depth > max_recursion_depth)
1062                max_recursion_depth = depth;
1063        if (depth >= RECURSION_LIMIT)
1064                return print_infinite_recursion_bug();
1065        /*
1066         * Check this lock's dependency list:
1067         */
1068        list_for_each_entry(entry, &source->locks_after, entry) {
1069                if (entry->class == hlock_class(check_target))
1070                        return print_circular_bug_header(entry, depth+1);
1071                debug_atomic_inc(&nr_cyclic_checks);
1072                if (!check_noncircular(entry->class, depth+1))
1073                        return print_circular_bug_entry(entry, depth+1);
1074        }
1075        return 1;
1076}
1077
1078#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1079/*
1080 * Forwards and backwards subgraph searching, for the purposes of
1081 * proving that two subgraphs can be connected by a new dependency
1082 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1083 */
1084static enum lock_usage_bit find_usage_bit;
1085static struct lock_class *forwards_match, *backwards_match;
1086
1087/*
1088 * Find a node in the forwards-direction dependency sub-graph starting
1089 * at <source> that matches <find_usage_bit>.
1090 *
1091 * Return 2 if such a node exists in the subgraph, and put that node
1092 * into <forwards_match>.
1093 *
1094 * Return 1 otherwise and keep <forwards_match> unchanged.
1095 * Return 0 on error.
1096 */
1097static noinline int
1098find_usage_forwards(struct lock_class *source, unsigned int depth)
1099{
1100        struct lock_list *entry;
1101        int ret;
1102
1103        if (lockdep_dependency_visit(source, depth))
1104                return 1;
1105
1106        if (depth > max_recursion_depth)
1107                max_recursion_depth = depth;
1108        if (depth >= RECURSION_LIMIT)
1109                return print_infinite_recursion_bug();
1110
1111        debug_atomic_inc(&nr_find_usage_forwards_checks);
1112        if (source->usage_mask & (1 << find_usage_bit)) {
1113                forwards_match = source;
1114                return 2;
1115        }
1116
1117        /*
1118         * Check this lock's dependency list:
1119         */
1120        list_for_each_entry(entry, &source->locks_after, entry) {
1121                debug_atomic_inc(&nr_find_usage_forwards_recursions);
1122                ret = find_usage_forwards(entry->class, depth+1);
1123                if (ret == 2 || ret == 0)
1124                        return ret;
1125        }
1126        return 1;
1127}
1128
1129/*
1130 * Find a node in the backwards-direction dependency sub-graph starting
1131 * at <source> that matches <find_usage_bit>.
1132 *
1133 * Return 2 if such a node exists in the subgraph, and put that node
1134 * into <backwards_match>.
1135 *
1136 * Return 1 otherwise and keep <backwards_match> unchanged.
1137 * Return 0 on error.
1138 */
1139static noinline int
1140find_usage_backwards(struct lock_class *source, unsigned int depth)
1141{
1142        struct lock_list *entry;
1143        int ret;
1144
 1145        /* must check for NULL before any dereference of <source> */
 1146        if (!source && debug_locks_off_graph_unlock()) {
 1147                WARN_ON(1);
 1148                return 0;
 1149        }
 1150        if (lockdep_dependency_visit(source, depth))
 1151                return 1;
 1152
 1153        if (!__raw_spin_is_locked(&lockdep_lock))
 1154                return DEBUG_LOCKS_WARN_ON(1);
 1155
 1156        if (depth > max_recursion_depth)
 1157                max_recursion_depth = depth;
 1158        if (depth >= RECURSION_LIMIT)
 1159                return print_infinite_recursion_bug();
 1160
 1161        debug_atomic_inc(&nr_find_usage_backwards_checks);
 1162        if (source->usage_mask & (1 << find_usage_bit)) {
 1163                backwards_match = source;
 1164                return 2;
 1165        }
1166
1167        /*
1168         * Check this lock's dependency list:
1169         */
1170        list_for_each_entry(entry, &source->locks_before, entry) {
1171                debug_atomic_inc(&nr_find_usage_backwards_recursions);
1172                ret = find_usage_backwards(entry->class, depth+1);
1173                if (ret == 2 || ret == 0)
1174                        return ret;
1175        }
1176        return 1;
1177}
1178
1179static int
1180print_bad_irq_dependency(struct task_struct *curr,
1181                         struct held_lock *prev,
1182                         struct held_lock *next,
1183                         enum lock_usage_bit bit1,
1184                         enum lock_usage_bit bit2,
1185                         const char *irqclass)
1186{
1187        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1188                return 0;
1189
1190        printk("\n======================================================\n");
1191        printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1192                irqclass, irqclass);
1193        print_kernel_version();
1194        printk(  "------------------------------------------------------\n");
1195        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1196                curr->comm, task_pid_nr(curr),
1197                curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1198                curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1199                curr->hardirqs_enabled,
1200                curr->softirqs_enabled);
1201        print_lock(next);
1202
1203        printk("\nand this task is already holding:\n");
1204        print_lock(prev);
1205        printk("which would create a new lock dependency:\n");
1206        print_lock_name(hlock_class(prev));
1207        printk(" ->");
1208        print_lock_name(hlock_class(next));
1209        printk("\n");
1210
1211        printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1212                irqclass);
1213        print_lock_name(backwards_match);
1214        printk("\n... which became %s-irq-safe at:\n", irqclass);
1215
1216        print_stack_trace(backwards_match->usage_traces + bit1, 1);
1217
1218        printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1219        print_lock_name(forwards_match);
1220        printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1221        printk("...");
1222
1223        print_stack_trace(forwards_match->usage_traces + bit2, 1);
1224
1225        printk("\nother info that might help us debug this:\n\n");
1226        lockdep_print_held_locks(curr);
1227
1228        printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
1229        print_lock_dependencies(backwards_match, 0);
1230
1231        printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
1232        print_lock_dependencies(forwards_match, 0);
1233
1234        printk("\nstack backtrace:\n");
1235        dump_stack();
1236
1237        return 0;
1238}
1239
1240static int
1241check_usage(struct task_struct *curr, struct held_lock *prev,
1242            struct held_lock *next, enum lock_usage_bit bit_backwards,
1243            enum lock_usage_bit bit_forwards, const char *irqclass)
1244{
1245        int ret;
1246
1247        find_usage_bit = bit_backwards;
1248        /* fills in <backwards_match> */
1249        ret = find_usage_backwards(hlock_class(prev), 0);
1250        if (!ret || ret == 1)
1251                return ret;
1252
1253        find_usage_bit = bit_forwards;
1254        ret = find_usage_forwards(hlock_class(next), 0);
1255        if (!ret || ret == 1)
1256                return ret;
1257        /* ret == 2 */
1258        return print_bad_irq_dependency(curr, prev, next,
1259                        bit_backwards, bit_forwards, irqclass);
1260}
1261
1262static int
1263check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1264                struct held_lock *next)
1265{
1266        /*
1267         * Prove that the new dependency does not connect a hardirq-safe
1268         * lock with a hardirq-unsafe lock - to achieve this we search
1269         * the backwards-subgraph starting at <prev>, and the
1270         * forwards-subgraph starting at <next>:
1271         */
1272        if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
1273                                        LOCK_ENABLED_HARDIRQS, "hard"))
1274                return 0;
1275
1276        /*
1277         * Prove that the new dependency does not connect a hardirq-safe-read
1278         * lock with a hardirq-unsafe lock - to achieve this we search
1279         * the backwards-subgraph starting at <prev>, and the
1280         * forwards-subgraph starting at <next>:
1281         */
1282        if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
1283                                        LOCK_ENABLED_HARDIRQS, "hard-read"))
1284                return 0;
1285
1286        /*
1287         * Prove that the new dependency does not connect a softirq-safe
1288         * lock with a softirq-unsafe lock - to achieve this we search
1289         * the backwards-subgraph starting at <prev>, and the
1290         * forwards-subgraph starting at <next>:
1291         */
1292        if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
1293                                        LOCK_ENABLED_SOFTIRQS, "soft"))
1294                return 0;
1295        /*
1296         * Prove that the new dependency does not connect a softirq-safe-read
1297         * lock with a softirq-unsafe lock - to achieve this we search
1298         * the backwards-subgraph starting at <prev>, and the
1299         * forwards-subgraph starting at <next>:
1300         */
1301        if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
 1302                                        LOCK_ENABLED_SOFTIRQS, "soft-read"))
1303                return 0;
1304
1305        return 1;
1306}
1307
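/*
 * The single-CPU deadlock that these four checks prevent, sketched:
 * L1 is hardirq-safe (taken from an interrupt handler), L2 is
 * hardirq-unsafe (taken with interrupts enabled). Once an L1 -> L2
 * dependency exists, this can happen:
 */
#if 0
        spin_lock(&L2);         /* process context, hardirqs enabled */
                /* --- hardirq arrives on this CPU --- */
                spin_lock(&L1); /* fine: L1 is hardirq-safe */
                spin_lock(&L2); /* deadlock: L2's owner is interrupted */
#endif
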
1308static void inc_chains(void)
1309{
1310        if (current->hardirq_context)
1311                nr_hardirq_chains++;
1312        else {
1313                if (current->softirq_context)
1314                        nr_softirq_chains++;
1315                else
1316                        nr_process_chains++;
1317        }
1318}
1319
1320#else
1321
1322static inline int
1323check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1324                struct held_lock *next)
1325{
1326        return 1;
1327}
1328
1329static inline void inc_chains(void)
1330{
1331        nr_process_chains++;
1332}
1333
1334#endif
1335
1336static int
1337print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1338                   struct held_lock *next)
1339{
1340        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1341                return 0;
1342
1343        printk("\n=============================================\n");
1344        printk(  "[ INFO: possible recursive locking detected ]\n");
1345        print_kernel_version();
1346        printk(  "---------------------------------------------\n");
1347        printk("%s/%d is trying to acquire lock:\n",
1348                curr->comm, task_pid_nr(curr));
1349        print_lock(next);
1350        printk("\nbut task is already holding lock:\n");
1351        print_lock(prev);
1352
1353        printk("\nother info that might help us debug this:\n");
1354        lockdep_print_held_locks(curr);
1355
1356        printk("\nstack backtrace:\n");
1357        dump_stack();
1358
1359        return 0;
1360}
1361
1362/*
1363 * Check whether we are holding such a class already.
1364 *
1365 * (Note that this has to be done separately, because the graph cannot
1366 * detect such classes of deadlocks.)
1367 *
1368 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1369 */
1370static int
1371check_deadlock(struct task_struct *curr, struct held_lock *next,
1372               struct lockdep_map *next_instance, int read)
1373{
1374        struct held_lock *prev;
1375        struct held_lock *nest = NULL;
1376        int i;
1377
1378        for (i = 0; i < curr->lockdep_depth; i++) {
1379                prev = curr->held_locks + i;
1380
1381                if (prev->instance == next->nest_lock)
1382                        nest = prev;
1383
1384                if (hlock_class(prev) != hlock_class(next))
1385                        continue;
1386
1387                /*
1388                 * Allow read-after-read recursion of the same
1389                 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1390                 */
1391                if ((read == 2) && prev->read)
1392                        return 2;
1393
1394                /*
1395                 * We're holding the nest_lock, which serializes this lock's
1396                 * nesting behaviour.
1397                 */
1398                if (nest)
1399                        return 2;
1400
1401                return print_deadlock_bug(curr, prev, next);
1402        }
1403        return 1;
1404}
1405
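/*
 * What the "read == 2" exception above permits, sketched: recursive
 * readers may nest on the same class, plain relocking may not:
 */
#if 0
        read_lock(&tasklist_lock);
        read_lock(&tasklist_lock);      /* read == 2: allowed */

        spin_lock(&lock);
        spin_lock(&lock);               /* same class, not a recursive
                                           read: print_deadlock_bug() */
#endif
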
1406/*
1407 * There was a chain-cache miss, and we are about to add a new dependency
1408 * to a previous lock. We recursively validate the following rules:
1409 *
1410 *  - would the adding of the <prev> -> <next> dependency create a
1411 *    circular dependency in the graph? [== circular deadlock]
1412 *
1413 *  - does the new prev->next dependency connect any hardirq-safe lock
1414 *    (in the full backwards-subgraph starting at <prev>) with any
1415 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1416 *    <next>)? [== illegal lock inversion with hardirq contexts]
1417 *
1418 *  - does the new prev->next dependency connect any softirq-safe lock
1419 *    (in the full backwards-subgraph starting at <prev>) with any
1420 *    softirq-unsafe lock (in the full forwards-subgraph starting at
1421 *    <next>)? [== illegal lock inversion with softirq contexts]
1422 *
 1423 * Any of these scenarios could lead to a deadlock.
1424 *
1425 * Then if all the validations pass, we add the forwards and backwards
1426 * dependency.
1427 */
1428static int
1429check_prev_add(struct task_struct *curr, struct held_lock *prev,
1430               struct held_lock *next, int distance)
1431{
1432        struct lock_list *entry;
1433        int ret;
1434
1435        /*
1436         * Prove that the new <prev> -> <next> dependency would not
1437         * create a circular dependency in the graph. (We do this by
1438         * forward-recursing into the graph starting at <next>, and
1439         * checking whether we can reach <prev>.)
1440         *
1441         * We are using global variables to control the recursion, to
1442         * keep the stackframe size of the recursive functions low:
1443         */
1444        check_source = next;
1445        check_target = prev;
1446        if (!(check_noncircular(hlock_class(next), 0)))
1447                return print_circular_bug_tail();
1448
1449        if (!check_prev_add_irq(curr, prev, next))
1450                return 0;
1451
1452        /*
1453         * For recursive read-locks we do all the dependency checks,
1454         * but we dont store read-triggered dependencies (only
1455         * write-triggered dependencies). This ensures that only the
1456         * write-side dependencies matter, and that if for example a
1457         * write-lock never takes any other locks, then the reads are
1458         * equivalent to a NOP.
1459         */
1460        if (next->read == 2 || prev->read == 2)
1461                return 1;
1462        /*
1463         * Is the <prev> -> <next> dependency already present?
1464         *
1465         * (this may occur even though this is a new chain: consider
1466         *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1467         *  chains - the second one will be new, but L1 already has
1468         *  L2 added to its dependency list, due to the first chain.)
1469         */
1470        list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1471                if (entry->class == hlock_class(next)) {
1472                        if (distance == 1)
1473                                entry->distance = 1;
1474                        return 2;
1475                }
1476        }
1477
1478        /*
1479         * Ok, all validations passed, add the new lock
1480         * to the previous lock's dependency list:
1481         */
1482        ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1483                               &hlock_class(prev)->locks_after,
1484                               next->acquire_ip, distance);
1485
1486        if (!ret)
1487                return 0;
1488
1489        ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1490                               &hlock_class(next)->locks_before,
1491                               next->acquire_ip, distance);
1492        if (!ret)
1493                return 0;
1494
1495        /*
1496         * Debugging printouts:
1497         */
1498        if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1499                graph_unlock();
1500                printk("\n new dependency: ");
1501                print_lock_name(hlock_class(prev));
1502                printk(" => ");
1503                print_lock_name(hlock_class(next));
1504                printk("\n");
1505                dump_stack();
1506                return graph_lock();
1507        }
1508        return 1;
1509}
1510
1511/*
1512 * Add the dependency to all directly-previous locks that are 'relevant'.
1513 * The ones that are relevant are (in increasing distance from curr):
1514 * all consecutive trylock entries and the final non-trylock entry - or
1515 * the end of this context's lock-chain - whichever comes first.
1516 */
1517static int
1518check_prevs_add(struct task_struct *curr, struct held_lock *next)
1519{
1520        int depth = curr->lockdep_depth;
1521        struct held_lock *hlock;
1522
1523        /*
1524         * Debugging checks.
1525         *
1526         * Depth must not be zero for a non-head lock:
1527         */
1528        if (!depth)
1529                goto out_bug;
1530        /*
1531         * At least two relevant locks must exist for this
1532         * to be a head:
1533         */
1534        if (curr->held_locks[depth].irq_context !=
1535                        curr->held_locks[depth-1].irq_context)
1536                goto out_bug;
1537
1538        for (;;) {
1539                int distance = curr->lockdep_depth - depth + 1;
1540                hlock = curr->held_locks + depth-1;
1541                /*
1542                 * Only non-recursive-read entries get new dependencies
1543                 * added:
1544                 */
1545                if (hlock->read != 2) {
1546                        if (!check_prev_add(curr, hlock, next, distance))
1547                                return 0;
1548                        /*
1549                         * Stop after the first non-trylock entry,
1550                         * as non-trylock entries have added their
1551                         * own direct dependencies already, so this
1552                         * lock is connected to them indirectly:
1553                         */
1554                        if (!hlock->trylock)
1555                                break;
1556                }
1557                depth--;
1558                /*
1559                 * End of lock-stack?
1560                 */
1561                if (!depth)
1562                        break;
1563                /*
1564                 * Stop the search if we cross into another context:
1565                 */
1566                if (curr->held_locks[depth].irq_context !=
1567                                curr->held_locks[depth-1].irq_context)
1568                        break;
1569        }
1570        return 1;
1571out_bug:
1572        if (!debug_locks_off_graph_unlock())
1573                return 0;
1574
1575        WARN_ON(1);
1576
1577        return 0;
1578}
1579
1580unsigned long nr_lock_chains;
1581struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1582int nr_chain_hlocks;
1583static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1584
1585struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1586{
1587        return lock_classes + chain_hlocks[chain->base + i];
1588}
1589
1590/*
1591 * Look up a dependency chain. If the key is not present yet then
1592 * add it and return 1 - in this case the new dependency chain is
1593 * validated. If the key is already hashed, return 0.
1594 * (On return with 1 graph_lock is held.)
1595 */
1596static inline int lookup_chain_cache(struct task_struct *curr,
1597                                     struct held_lock *hlock,
1598                                     u64 chain_key)
1599{
1600        struct lock_class *class = hlock_class(hlock);
1601        struct list_head *hash_head = chainhashentry(chain_key);
1602        struct lock_chain *chain;
1603        struct held_lock *hlock_curr, *hlock_next;
1604        int i, j, n, cn;
1605
1606        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1607                return 0;
1608        /*
1609         * We can walk it lock-free, because entries only get added
1610         * to the hash:
1611         */
1612        list_for_each_entry(chain, hash_head, entry) {
1613                if (chain->chain_key == chain_key) {
1614cache_hit:
1615                        debug_atomic_inc(&chain_lookup_hits);
1616                        if (very_verbose(class))
1617                                printk("\nhash chain already cached, key: "
1618                                        "%016Lx tail class: [%p] %s\n",
1619                                        (unsigned long long)chain_key,
1620                                        class->key, class->name);
1621                        return 0;
1622                }
1623        }
1624        if (very_verbose(class))
1625                printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1626                        (unsigned long long)chain_key, class->key, class->name);
1627        /*
1628         * Allocate a new chain entry from the static array, and add
1629         * it to the hash:
1630         */
1631        if (!graph_lock())
1632                return 0;
1633        /*
1634         * We have to walk the chain again locked - to avoid duplicates:
1635         */
1636        list_for_each_entry(chain, hash_head, entry) {
1637                if (chain->chain_key == chain_key) {
1638                        graph_unlock();
1639                        goto cache_hit;
1640                }
1641        }
1642        if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1643                if (!debug_locks_off_graph_unlock())
1644                        return 0;
1645
1646                printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1647                printk("turning off the locking correctness validator.\n");
1648                return 0;
1649        }
1650        chain = lock_chains + nr_lock_chains++;
1651        chain->chain_key = chain_key;
1652        chain->irq_context = hlock->irq_context;
1653        /* Find the first held_lock of current chain */
1654        hlock_next = hlock;
1655        for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1656                hlock_curr = curr->held_locks + i;
1657                if (hlock_curr->irq_context != hlock_next->irq_context)
1658                        break;
1659                hlock_next = hlock;
1660        }
1661        i++;
1662        chain->depth = curr->lockdep_depth + 1 - i;
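        /*
         * Atomically reserve chain->depth slots in chain_hlocks[]:
         * retry the cmpxchg if another updater advanced nr_chain_hlocks
         * under us, and give up if the array would overflow:
         */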
1663        cn = nr_chain_hlocks;
1664        while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
1665                n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
1666                if (n == cn)
1667                        break;
1668                cn = n;
1669        }
1670        if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1671                chain->base = cn;
1672                for (j = 0; j < chain->depth - 1; j++, i++) {
1673                        int lock_id = curr->held_locks[i].class_idx - 1;
1674                        chain_hlocks[chain->base + j] = lock_id;
1675                }
1676                chain_hlocks[chain->base + j] = class - lock_classes;
1677        }
1678        list_add_tail_rcu(&chain->entry, hash_head);
1679        debug_atomic_inc(&chain_lookup_misses);
1680        inc_chains();
1681
1682        return 1;
1683}
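
/*
 * The lookup above is an instance of optimistic double-checked lookup:
 * a lock-free scan of the (add-only) hash chain, then on a miss a second
 * scan under graph_lock before inserting. A minimal sketch of the
 * pattern (illustrative only, not kernel code):
 *
 *	if (find(hash, key))		// lock-free: entries are only
 *		return 0;		// ever appended to the chain
 *	lock(&l);
 *	if (find(hash, key)) {		// re-check: somebody else may
 *		unlock(&l);		// have added it meanwhile
 *		return 0;
 *	}
 *	insert(hash, key);		// still holding the lock
 *	return 1;
 */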
1684
1685static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1686                struct held_lock *hlock, int chain_head, u64 chain_key)
1687{
1688        /*
1689         * Trylock needs to maintain the stack of held locks, but it
1690         * does not add new dependencies, because trylocks can be taken
1691         * in any order (they never block; see the example below).
1692         *
1693         * We look up the chain_key and do the O(N^2) check and update of
1694         * the dependencies only if this is a new dependency chain.
1695         * (If lookup_chain_cache() returns with 1 it acquires
1696         * graph_lock for us)
1697         */
1698        if (!hlock->trylock && (hlock->check == 2) &&
1699            lookup_chain_cache(curr, hlock, chain_key)) {
1700                /*
1701                 * Check whether last held lock:
1702                 *
1703                 * - is irq-safe, if this lock is irq-unsafe
1704                 * - is softirq-safe, if this lock is hardirq-unsafe
1705                 *
1706                 * And check whether the new lock's dependency graph
1707                 * could lead back to the previous lock.
1708                 *
1709                 * Any of these scenarios could lead to a deadlock. If
1710                 * all validations pass, the new dependencies get added:
1711                 */
1712                int ret = check_deadlock(curr, hlock, lock, hlock->read);
1713
1714                if (!ret)
1715                        return 0;
1716                /*
1717                 * Mark recursive read, as we jump over it when
1718                 * building dependencies (just like we jump over
1719                 * trylock entries):
1720                 */
1721                if (ret == 2)
1722                        hlock->read = 2;
1723                /*
1724                 * Add dependency only if this lock is not the head
1725                 * of the chain, and if it's not a secondary read-lock:
1726                 */
1727                if (!chain_head && ret != 2)
1728                        if (!check_prevs_add(curr, hlock))
1729                                return 0;
1730                graph_unlock();
1731        } else
1732                /* after lookup_chain_cache(): */
1733                if (unlikely(!debug_locks))
1734                        return 0;
1735
1736        return 1;
1737}
1738#else
1739static inline int validate_chain(struct task_struct *curr,
1740                struct lockdep_map *lock, struct held_lock *hlock,
1741                int chain_head, u64 chain_key)
1742{
1743        return 1;
1744}
1745#endif
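
/*
 * Example of why trylocks need no new dependencies in validate_chain()
 * above: a trylock never blocks, so an apparent lock-order inversion
 * through it cannot deadlock (an illustrative sketch):
 *
 *	spin_lock(&A);
 *	if (spin_trylock(&B)) {		// cannot block on B, so the
 *		...			// A -> B vs B -> A inversion
 *	}				// is harmless here
 */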
1746
1747/*
1748 * We are building curr_chain_key incrementally, so double-check
1749 * it from scratch, to make sure that it's done correctly:
1750 */
1751static void check_chain_key(struct task_struct *curr)
1752{
1753#ifdef CONFIG_DEBUG_LOCKDEP
1754        struct held_lock *hlock, *prev_hlock = NULL;
1755        unsigned int i, id;
1756        u64 chain_key = 0;
1757
1758        for (i = 0; i < curr->lockdep_depth; i++) {
1759                hlock = curr->held_locks + i;
1760                if (chain_key != hlock->prev_chain_key) {
1761                        debug_locks_off();
1762                        WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1763                                curr->lockdep_depth, i,
1764                                (unsigned long long)chain_key,
1765                                (unsigned long long)hlock->prev_chain_key);
1766                        return;
1767                }
1768                id = hlock->class_idx - 1;
1769                if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1770                        return;
1771
1772                if (prev_hlock && (prev_hlock->irq_context !=
1773                                                        hlock->irq_context))
1774                        chain_key = 0;
1775                chain_key = iterate_chain_key(chain_key, id);
1776                prev_hlock = hlock;
1777        }
1778        if (chain_key != curr->curr_chain_key) {
1779                debug_locks_off();
1780                WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1781                        curr->lockdep_depth, i,
1782                        (unsigned long long)chain_key,
1783                        (unsigned long long)curr->curr_chain_key);
1784        }
1785#endif
1786}
1787
1788static int
1789print_usage_bug(struct task_struct *curr, struct held_lock *this,
1790                enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1791{
1792        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1793                return 0;
1794
1795        printk("\n=================================\n");
1796        printk(  "[ INFO: inconsistent lock state ]\n");
1797        print_kernel_version();
1798        printk(  "---------------------------------\n");
1799
1800        printk("inconsistent {%s} -> {%s} usage.\n",
1801                usage_str[prev_bit], usage_str[new_bit]);
1802
1803        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1804                curr->comm, task_pid_nr(curr),
1805                trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1806                trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1807                trace_hardirqs_enabled(curr),
1808                trace_softirqs_enabled(curr));
1809        print_lock(this);
1810
1811        printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1812        print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
1813
1814        print_irqtrace_events(curr);
1815        printk("\nother info that might help us debug this:\n");
1816        lockdep_print_held_locks(curr);
1817
1818        printk("\nstack backtrace:\n");
1819        dump_stack();
1820
1821        return 0;
1822}
1823
1824/*
1825 * Print out an error if an invalid bit is set:
1826 */
1827static inline int
1828valid_state(struct task_struct *curr, struct held_lock *this,
1829            enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1830{
1831        if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
1832                return print_usage_bug(curr, this, bad_bit, new_bit);
1833        return 1;
1834}
1835
1836static int mark_lock(struct task_struct *curr, struct held_lock *this,
1837                     enum lock_usage_bit new_bit);
1838
1839#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1840
1841/*
1842 * print irq inversion bug:
1843 */
1844static int
1845print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1846                        struct held_lock *this, int forwards,
1847                        const char *irqclass)
1848{
1849        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1850                return 0;
1851
1852        printk("\n=========================================================\n");
1853        printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
1854        print_kernel_version();
1855        printk(  "---------------------------------------------------------\n");
1856        printk("%s/%d just changed the state of lock:\n",
1857                curr->comm, task_pid_nr(curr));
1858        print_lock(this);
1859        if (forwards)
1860                printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1861        else
1862                printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1863        print_lock_name(other);
1864        printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1865
1866        printk("\nother info that might help us debug this:\n");
1867        lockdep_print_held_locks(curr);
1868
1869        printk("\nthe first lock's dependencies:\n");
1870        print_lock_dependencies(hlock_class(this), 0);
1871
1872        printk("\nthe second lock's dependencies:\n");
1873        print_lock_dependencies(other, 0);
1874
1875        printk("\nstack backtrace:\n");
1876        dump_stack();
1877
1878        return 0;
1879}
1880
1881/*
1882 * Prove that in the forwards-direction subgraph starting at <this>
1883 * there is no lock with the usage bit <bit> set:
1884 */
1885static int
1886check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1887                     enum lock_usage_bit bit, const char *irqclass)
1888{
1889        int ret;
1890
1891        find_usage_bit = bit;
1892        /* fills in <forwards_match> */
1893        ret = find_usage_forwards(hlock_class(this), 0);
1894        if (!ret || ret == 1)
1895                return ret;
1896
1897        return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1898}
1899
1900/*
1901 * Prove that in the backwards-direction subgraph starting at <this>
1902 * there is no lock with the usage bit <bit> set:
1903 */
1904static int
1905check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1906                      enum lock_usage_bit bit, const char *irqclass)
1907{
1908        int ret;
1909
1910        find_usage_bit = bit;
1911        /* fills in <backwards_match> */
1912        ret = find_usage_backwards(hlock_class(this), 0);
1913        if (!ret || ret == 1)
1914                return ret;
1915
1916        return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1917}
1918
1919void print_irqtrace_events(struct task_struct *curr)
1920{
1921        printk("irq event stamp: %u\n", curr->irq_events);
1922        printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
1923        print_ip_sym(curr->hardirq_enable_ip);
1924        printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1925        print_ip_sym(curr->hardirq_disable_ip);
1926        printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
1927        print_ip_sym(curr->softirq_enable_ip);
1928        printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1929        print_ip_sym(curr->softirq_disable_ip);
1930}
1931
1932static int hardirq_verbose(struct lock_class *class)
1933{
1934#if HARDIRQ_VERBOSE
1935        return class_filter(class);
1936#endif
1937        return 0;
1938}
1939
1940static int softirq_verbose(struct lock_class *class)
1941{
1942#if SOFTIRQ_VERBOSE
1943        return class_filter(class);
1944#endif
1945        return 0;
1946}
1947
1948#define STRICT_READ_CHECKS      1
1949
1950static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1951                enum lock_usage_bit new_bit)
1952{
1953        int ret = 1;
1954
1955        switch(new_bit) {
1956        case LOCK_USED_IN_HARDIRQ:
1957                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1958                        return 0;
1959                if (!valid_state(curr, this, new_bit,
1960                                 LOCK_ENABLED_HARDIRQS_READ))
1961                        return 0;
1962                /*
1963                 * just marked it hardirq-safe, check that this lock
1964                 * took no hardirq-unsafe lock in the past:
1965                 */
1966                if (!check_usage_forwards(curr, this,
1967                                          LOCK_ENABLED_HARDIRQS, "hard"))
1968                        return 0;
1969#if STRICT_READ_CHECKS
1970                /*
1971                 * just marked it hardirq-safe, check that this lock
1972                 * took no hardirq-unsafe-read lock in the past:
1973                 */
1974                if (!check_usage_forwards(curr, this,
1975                                LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1976                        return 0;
1977#endif
1978                if (hardirq_verbose(hlock_class(this)))
1979                        ret = 2;
1980                break;
1981        case LOCK_USED_IN_SOFTIRQ:
1982                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1983                        return 0;
1984                if (!valid_state(curr, this, new_bit,
1985                                 LOCK_ENABLED_SOFTIRQS_READ))
1986                        return 0;
1987                /*
1988                 * just marked it softirq-safe, check that this lock
1989                 * took no softirq-unsafe lock in the past:
1990                 */
1991                if (!check_usage_forwards(curr, this,
1992                                          LOCK_ENABLED_SOFTIRQS, "soft"))
1993                        return 0;
1994#if STRICT_READ_CHECKS
1995                /*
1996                 * just marked it softirq-safe, check that this lock
1997                 * took no softirq-unsafe-read lock in the past:
1998                 */
1999                if (!check_usage_forwards(curr, this,
2000                                LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
2001                        return 0;
2002#endif
2003                if (softirq_verbose(hlock_class(this)))
2004                        ret = 2;
2005                break;
2006        case LOCK_USED_IN_HARDIRQ_READ:
2007                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
2008                        return 0;
2009                /*
2010                 * just marked it hardirq-read-safe, check that this lock
2011                 * took no hardirq-unsafe lock in the past:
2012                 */
2013                if (!check_usage_forwards(curr, this,
2014                                          LOCK_ENABLED_HARDIRQS, "hard"))
2015                        return 0;
2016                if (hardirq_verbose(hlock_class(this)))
2017                        ret = 2;
2018                break;
2019        case LOCK_USED_IN_SOFTIRQ_READ:
2020                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
2021                        return 0;
2022                /*
2023                 * just marked it softirq-read-safe, check that this lock
2024                 * took no softirq-unsafe lock in the past:
2025                 */
2026                if (!check_usage_forwards(curr, this,
2027                                          LOCK_ENABLED_SOFTIRQS, "soft"))
2028                        return 0;
2029                if (softirq_verbose(hlock_class(this)))
2030                        ret = 2;
2031                break;
2032        case LOCK_ENABLED_HARDIRQS:
2033                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2034                        return 0;
2035                if (!valid_state(curr, this, new_bit,
2036                                 LOCK_USED_IN_HARDIRQ_READ))
2037                        return 0;
2038                /*
2039                 * just marked it hardirq-unsafe, check that no hardirq-safe
2040                 * lock in the system ever took it in the past:
2041                 */
2042                if (!check_usage_backwards(curr, this,
2043                                           LOCK_USED_IN_HARDIRQ, "hard"))
2044                        return 0;
2045#if STRICT_READ_CHECKS
2046                /*
2047                 * just marked it hardirq-unsafe, check that no
2048                 * hardirq-safe-read lock in the system ever took
2049                 * it in the past:
2050                 */
2051                if (!check_usage_backwards(curr, this,
2052                                   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
2053                        return 0;
2054#endif
2055                if (hardirq_verbose(hlock_class(this)))
2056                        ret = 2;
2057                break;
2058        case LOCK_ENABLED_SOFTIRQS:
2059                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2060                        return 0;
2061                if (!valid_state(curr, this, new_bit,
2062                                 LOCK_USED_IN_SOFTIRQ_READ))
2063                        return 0;
2064                /*
2065                 * just marked it softirq-unsafe, check that no softirq-safe
2066                 * lock in the system ever took it in the past:
2067                 */
2068                if (!check_usage_backwards(curr, this,
2069                                           LOCK_USED_IN_SOFTIRQ, "soft"))
2070                        return 0;
2071#if STRICT_READ_CHECKS
2072                /*
2073                 * just marked it softirq-unsafe, check that no
2074                 * softirq-safe-read lock in the system ever took
2075                 * it in the past:
2076                 */
2077                if (!check_usage_backwards(curr, this,
2078                                   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
2079                        return 0;
2080#endif
2081                if (softirq_verbose(hlock_class(this)))
2082                        ret = 2;
2083                break;
2084        case LOCK_ENABLED_HARDIRQS_READ:
2085                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2086                        return 0;
2087#if STRICT_READ_CHECKS
2088                /*
2089                 * just marked it hardirq-read-unsafe, check that no
2090                 * hardirq-safe lock in the system ever took it in the past:
2091                 */
2092                if (!check_usage_backwards(curr, this,
2093                                           LOCK_USED_IN_HARDIRQ, "hard"))
2094                        return 0;
2095#endif
2096                if (hardirq_verbose(hlock_class(this)))
2097                        ret = 2;
2098                break;
2099        case LOCK_ENABLED_SOFTIRQS_READ:
2100                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2101                        return 0;
2102#if STRICT_READ_CHECKS
2103                /*
2104                 * just marked it softirq-read-unsafe, check that no
2105                 * softirq-safe lock in the system ever took it in the past:
2106                 */
2107                if (!check_usage_backwards(curr, this,
2108                                           LOCK_USED_IN_SOFTIRQ, "soft"))
2109                        return 0;
2110#endif
2111                if (softirq_verbose(hlock_class(this)))
2112                        ret = 2;
2113                break;
2114        default:
2115                WARN_ON(1);
2116                break;
2117        }
2118
2119        return ret;
2120}
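
/*
 * The classic single-CPU deadlock that the above checks catch
 * (an illustrative sketch; A is hardirq-safe, B is hardirq-unsafe):
 *
 *	process context:		hardirq handler:
 *	spin_lock(&B);
 *	<hardirq arrives>
 *					spin_lock(&A);
 *					spin_lock(&B);	<- B is held by the
 *							   interrupted task:
 *							   deadlock
 */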
2121
2122/*
2123 * Mark all held locks with a usage bit:
2124 */
2125static int
2126mark_held_locks(struct task_struct *curr, int hardirq)
2127{
2128        enum lock_usage_bit usage_bit;
2129        struct held_lock *hlock;
2130        int i;
2131
2132        for (i = 0; i < curr->lockdep_depth; i++) {
2133                hlock = curr->held_locks + i;
2134
2135                if (hardirq) {
2136                        if (hlock->read)
2137                                usage_bit = LOCK_ENABLED_HARDIRQS_READ;
2138                        else
2139                                usage_bit = LOCK_ENABLED_HARDIRQS;
2140                } else {
2141                        if (hlock->read)
2142                                usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
2143                        else
2144                                usage_bit = LOCK_ENABLED_SOFTIRQS;
2145                }
2146                if (!mark_lock(curr, hlock, usage_bit))
2147                        return 0;
2148        }
2149
2150        return 1;
2151}
2152
2153/*
2154 * Debugging helper: via this flag we know that we are in
2155 * 'early bootup code', and will warn about any invalid irqs-on event:
2156 */
2157static int early_boot_irqs_enabled;
2158
2159void early_boot_irqs_off(void)
2160{
2161        early_boot_irqs_enabled = 0;
2162}
2163
2164void early_boot_irqs_on(void)
2165{
2166        early_boot_irqs_enabled = 1;
2167}
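
/*
 * These are expected to bracket the interrupts-must-stay-off part of
 * bootup; roughly (a sketch of the intended call order, details are
 * architecture dependent):
 *
 *	start_kernel()
 *		early_boot_irqs_off();
 *		...
 *		early_boot_irqs_on();
 *		local_irq_enable();
 */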
2168
2169/*
2170 * Hardirqs will be enabled:
2171 */
2172void trace_hardirqs_on_caller(unsigned long a0)
2173{
2174        struct task_struct *curr = current;
2175        unsigned long ip;
2176
2177        time_hardirqs_on(CALLER_ADDR0, a0);
2178
2179        if (unlikely(!debug_locks || current->lockdep_recursion))
2180                return;
2181
2182        if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2183                return;
2184
2185        if (unlikely(curr->hardirqs_enabled)) {
2186                debug_atomic_inc(&redundant_hardirqs_on);
2187                return;
2188        }
2189        /* we'll do an OFF -> ON transition: */
2190        curr->hardirqs_enabled = 1;
2191        ip = (unsigned long) __builtin_return_address(0);
2192
2193        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2194                return;
2195        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2196                return;
2197        /*
2198         * We are going to turn hardirqs on, so set the
2199         * usage bit for all held locks:
2200         */
2201        if (!mark_held_locks(curr, 1))
2202                return;
2203        /*
2204         * If we have softirqs enabled, then set the usage
2205         * bit for all held locks. (disabled hardirqs prevented
2206         * this bit from being set before)
2207         */
2208        if (curr->softirqs_enabled)
2209                if (!mark_held_locks(curr, 0))
2210                        return;
2211
2212        curr->hardirq_enable_ip = ip;
2213        curr->hardirq_enable_event = ++curr->irq_events;
2214        debug_atomic_inc(&hardirqs_on_events);
2215}
2216EXPORT_SYMBOL(trace_hardirqs_on_caller);
2217
2218void trace_hardirqs_on(void)
2219{
2220        trace_hardirqs_on_caller(CALLER_ADDR0);
2221}
2222EXPORT_SYMBOL(trace_hardirqs_on);
2223
2224/*
2225 * Hardirqs were disabled:
2226 */
2227void trace_hardirqs_off_caller(unsigned long a0)
2228{
2229        struct task_struct *curr = current;
2230
2231        time_hardirqs_off(CALLER_ADDR0, a0);
2232
2233        if (unlikely(!debug_locks || current->lockdep_recursion))
2234                return;
2235
2236        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2237                return;
2238
2239        if (curr->hardirqs_enabled) {
2240                /*
2241                 * We have done an ON -> OFF transition:
2242                 */
2243                curr->hardirqs_enabled = 0;
2244                curr->hardirq_disable_ip = _RET_IP_;
2245                curr->hardirq_disable_event = ++curr->irq_events;
2246                debug_atomic_inc(&hardirqs_off_events);
2247        } else
2248                debug_atomic_inc(&redundant_hardirqs_off);
2249}
2250EXPORT_SYMBOL(trace_hardirqs_off_caller);
2251
2252void trace_hardirqs_off(void)
2253{
2254        trace_hardirqs_off_caller(CALLER_ADDR0);
2255}
2256EXPORT_SYMBOL(trace_hardirqs_off);
2257
2258/*
2259 * Softirqs will be enabled:
2260 */
2261void trace_softirqs_on(unsigned long ip)
2262{
2263        struct task_struct *curr = current;
2264
2265        if (unlikely(!debug_locks))
2266                return;
2267
2268        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2269                return;
2270
2271        if (curr->softirqs_enabled) {
2272                debug_atomic_inc(&redundant_softirqs_on);
2273                return;
2274        }
2275
2276        /*
2277         * We'll do an OFF -> ON transition:
2278         */
2279        curr->softirqs_enabled = 1;
2280        curr->softirq_enable_ip = ip;
2281        curr->softirq_enable_event = ++curr->irq_events;
2282        debug_atomic_inc(&softirqs_on_events);
2283        /*
2284         * We are going to turn softirqs on, so set the
2285         * usage bit for all held locks, if hardirqs are
2286         * enabled too:
2287         */
2288        if (curr->hardirqs_enabled)
2289                mark_held_locks(curr, 0);
2290}
2291
2292/*
2293 * Softirqs were disabled:
2294 */
2295void trace_softirqs_off(unsigned long ip)
2296{
2297        struct task_struct *curr = current;
2298
2299        if (unlikely(!debug_locks))
2300                return;
2301
2302        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2303                return;
2304
2305        if (curr->softirqs_enabled) {
2306                /*
2307                 * We have done an ON -> OFF transition:
2308                 */
2309                curr->softirqs_enabled = 0;
2310                curr->softirq_disable_ip = ip;
2311                curr->softirq_disable_event = ++curr->irq_events;
2312                debug_atomic_inc(&softirqs_off_events);
2313                DEBUG_LOCKS_WARN_ON(!softirq_count());
2314        } else
2315                debug_atomic_inc(&redundant_softirqs_off);
2316}
2317
2318static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2319{
2320        /*
2321         * If this is a non-trylock use in a hardirq or softirq
2322         * context, mark the lock as used in that context:
2323         */
2324        if (!hlock->trylock) {
2325                if (hlock->read) {
2326                        if (curr->hardirq_context)
2327                                if (!mark_lock(curr, hlock,
2328                                                LOCK_USED_IN_HARDIRQ_READ))
2329                                        return 0;
2330                        if (curr->softirq_context)
2331                                if (!mark_lock(curr, hlock,
2332                                                LOCK_USED_IN_SOFTIRQ_READ))
2333                                        return 0;
2334                } else {
2335                        if (curr->hardirq_context)
2336                                if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2337                                        return 0;
2338                        if (curr->softirq_context)
2339                                if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2340                                        return 0;
2341                }
2342        }
2343        if (!hlock->hardirqs_off) {
2344                if (hlock->read) {
2345                        if (!mark_lock(curr, hlock,
2346                                        LOCK_ENABLED_HARDIRQS_READ))
2347                                return 0;
2348                        if (curr->softirqs_enabled)
2349                                if (!mark_lock(curr, hlock,
2350                                                LOCK_ENABLED_SOFTIRQS_READ))
2351                                        return 0;
2352                } else {
2353                        if (!mark_lock(curr, hlock,
2354                                        LOCK_ENABLED_HARDIRQS))
2355                                return 0;
2356                        if (curr->softirqs_enabled)
2357                                if (!mark_lock(curr, hlock,
2358                                                LOCK_ENABLED_SOFTIRQS))
2359                                        return 0;
2360                }
2361        }
2362
2363        return 1;
2364}
2365
2366static int separate_irq_context(struct task_struct *curr,
2367                struct held_lock *hlock)
2368{
2369        unsigned int depth = curr->lockdep_depth;
2370
2371        /*
2372         * Keep track of points where we cross into an interrupt context:
2373         */
2374        hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2375                                curr->softirq_context;
2376        if (depth) {
2377                struct held_lock *prev_hlock;
2378
2379                prev_hlock = curr->held_locks + depth-1;
2380                /*
2381                 * If we cross into another context, reset the
2382                 * hash key (this also prevents the checking and the
2383                 * adding of the dependency to 'prev'):
2384                 */
2385                if (prev_hlock->irq_context != hlock->irq_context)
2386                        return 1;
2387        }
2388        return 0;
2389}
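
/*
 * Note on the encoding above: with the usual 0/1 context counters,
 * irq_context is 0 for process context, 1 for softirq context, 2 for
 * hardirq context and 3 for a hardirq that interrupted a softirq.
 * A boundary in the held-lock stack (say a process-context lock
 * followed by a softirq-taken lock) starts a fresh chain, see
 * __lock_acquire().
 */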
2390
2391#else
2392
2393static inline
2394int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2395                enum lock_usage_bit new_bit)
2396{
2397        WARN_ON(1);
2398        return 1;
2399}
2400
2401static inline int mark_irqflags(struct task_struct *curr,
2402                struct held_lock *hlock)
2403{
2404        return 1;
2405}
2406
2407static inline int separate_irq_context(struct task_struct *curr,
2408                struct held_lock *hlock)
2409{
2410        return 0;
2411}
2412
2413#endif
2414
2415/*
2416 * Mark a lock with a usage bit, and validate the state transition:
2417 */
2418static int mark_lock(struct task_struct *curr, struct held_lock *this,
2419                             enum lock_usage_bit new_bit)
2420{
2421        unsigned int new_mask = 1 << new_bit, ret = 1;
2422
2423        /*
2424         * If already set then do not dirty the cacheline,
2425         * nor do any checks:
2426         */
2427        if (likely(hlock_class(this)->usage_mask & new_mask))
2428                return 1;
2429
2430        if (!graph_lock())
2431                return 0;
2432        /*
2433         * Make sure we didn't race:
2434         */
2435        if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2436                graph_unlock();
2437                return 1;
2438        }
2439
2440        hlock_class(this)->usage_mask |= new_mask;
2441
2442        if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2443                return 0;
2444
2445        switch (new_bit) {
2446        case LOCK_USED_IN_HARDIRQ:
2447        case LOCK_USED_IN_SOFTIRQ:
2448        case LOCK_USED_IN_HARDIRQ_READ:
2449        case LOCK_USED_IN_SOFTIRQ_READ:
2450        case LOCK_ENABLED_HARDIRQS:
2451        case LOCK_ENABLED_SOFTIRQS:
2452        case LOCK_ENABLED_HARDIRQS_READ:
2453        case LOCK_ENABLED_SOFTIRQS_READ:
2454                ret = mark_lock_irq(curr, this, new_bit);
2455                if (!ret)
2456                        return 0;
2457                break;
2458        case LOCK_USED:
2459                debug_atomic_dec(&nr_unused_locks);
2460                break;
2461        default:
2462                if (!debug_locks_off_graph_unlock())
2463                        return 0;
2464                WARN_ON(1);
2465                return 0;
2466        }
2467
2468        graph_unlock();
2469
2470        /*
2471         * We must printk outside of the graph_lock:
2472         */
2473        if (ret == 2) {
2474                printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2475                print_lock(this);
2476                print_irqtrace_events(curr);
2477                dump_stack();
2478        }
2479
2480        return ret;
2481}
2482
2483/*
2484 * Initialize a lock instance's lock-class mapping info:
2485 */
2486void lockdep_init_map(struct lockdep_map *lock, const char *name,
2487                      struct lock_class_key *key, int subclass)
2488{
2489        if (unlikely(!debug_locks))
2490                return;
2491
2492        if (DEBUG_LOCKS_WARN_ON(!key))
2493                return;
2494        if (DEBUG_LOCKS_WARN_ON(!name))
2495                return;
2496        /*
2497         * Sanity check, the lock-class key must be persistent:
2498         */
2499        if (!static_obj(key)) {
2500                printk("BUG: key %p not in .data!\n", key);
2501                DEBUG_LOCKS_WARN_ON(1);
2502                return;
2503        }
2504        lock->name = name;
2505        lock->key = key;
2506        lock->class_cache = NULL;
2507#ifdef CONFIG_LOCK_STAT
2508        lock->cpu = raw_smp_processor_id();
2509#endif
2510        if (subclass)
2511                register_lock_class(lock, subclass, 1);
2512}
2513
2514EXPORT_SYMBOL_GPL(lockdep_init_map);
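
/*
 * Typical usage (a sketch; the field and lock names are illustrative):
 * the key must be a static object so that the class it anchors stays
 * valid for the lock's whole lifetime:
 *
 *	static struct lock_class_key __key;	// static: passes static_obj()
 *
 *	lockdep_init_map(&lock->dep_map, "mylock", &__key, 0);
 */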
2515
2516/*
2517 * This gets called for every mutex_lock*()/spin_lock*() operation.
2518 * We maintain the dependency maps and validate the locking attempt:
2519 */
2520static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2521                          int trylock, int read, int check, int hardirqs_off,
2522                          struct lockdep_map *nest_lock, unsigned long ip)
2523{
2524        struct task_struct *curr = current;
2525        struct lock_class *class = NULL;
2526        struct held_lock *hlock;
2527        unsigned int depth, id;
2528        int chain_head = 0;
2529        u64 chain_key;
2530
2531        if (!prove_locking)
2532                check = 1;
2533
2534        if (unlikely(!debug_locks))
2535                return 0;
2536
2537        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2538                return 0;
2539
2540        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2541                debug_locks_off();
2542                printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2543                printk("turning off the locking correctness validator.\n");
2544                return 0;
2545        }
2546
2547        if (!subclass)
2548                class = lock->class_cache;
2549        /*
2550         * Not cached yet or subclass?
2551         */
2552        if (unlikely(!class)) {
2553                class = register_lock_class(lock, subclass, 0);
2554                if (!class)
2555                        return 0;
2556        }
2557        debug_atomic_inc((atomic_t *)&class->ops);
2558        if (very_verbose(class)) {
2559                printk("\nacquire class [%p] %s", class->key, class->name);
2560                if (class->name_version > 1)
2561                        printk("#%d", class->name_version);
2562                printk("\n");
2563                dump_stack();
2564        }
2565
2566        /*
2567         * Add the lock to the list of currently held locks.
2568         * (we don't increase the depth just yet, not until the
2569         * dependency checks are done)
2570         */
2571        depth = curr->lockdep_depth;
2572        if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2573                return 0;
2574
2575        hlock = curr->held_locks + depth;
2576        if (DEBUG_LOCKS_WARN_ON(!class))
2577                return 0;
2578        hlock->class_idx = class - lock_classes + 1;
2579        hlock->acquire_ip = ip;
2580        hlock->instance = lock;
2581        hlock->nest_lock = nest_lock;
2582        hlock->trylock = trylock;
2583        hlock->read = read;
2584        hlock->check = check;
2585        hlock->hardirqs_off = !!hardirqs_off;
2586#ifdef CONFIG_LOCK_STAT
2587        hlock->waittime_stamp = 0;
2588        hlock->holdtime_stamp = sched_clock();
2589#endif
2590
2591        if (check == 2 && !mark_irqflags(curr, hlock))
2592                return 0;
2593
2594        /* mark it as used: */
2595        if (!mark_lock(curr, hlock, LOCK_USED))
2596                return 0;
2597
2598        /*
2599         * Calculate the chain hash: it's the combined hash of all the
2600         * lock keys along the dependency chain. We save the hash value
2601         * at every step so that we can get the current hash easily
2602         * after unlock. The chain hash is then used to cache dependency
2603         * results.
2604         *
2605         * The 'key ID' (the class index) is used to drive the hash
2606         * rather than class->key, as it is the most compact value.
2607         */
2608        id = class - lock_classes;
2609        if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2610                return 0;
2611
2612        chain_key = curr->curr_chain_key;
2613        if (!depth) {
2614                if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2615                        return 0;
2616                chain_head = 1;
2617        }
2618
2619        hlock->prev_chain_key = chain_key;
2620        if (separate_irq_context(curr, hlock)) {
2621                chain_key = 0;
2622                chain_head = 1;
2623        }
2624        chain_key = iterate_chain_key(chain_key, id);
2625
2626        if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
2627                return 0;
2628
2629        curr->curr_chain_key = chain_key;
2630        curr->lockdep_depth++;
2631        check_chain_key(curr);
2632#ifdef CONFIG_DEBUG_LOCKDEP
2633        if (unlikely(!debug_locks))
2634                return 0;
2635#endif
2636        if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2637                debug_locks_off();
2638                printk("BUG: MAX_LOCK_DEPTH too low!\n");
2639                printk("turning off the locking correctness validator.\n");
2640                return 0;
2641        }
2642
2643        if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2644                max_lockdep_depth = curr->lockdep_depth;
2645
2646        return 1;
2647}
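
/*
 * Illustration of the chain-key computation above (a sketch, not
 * literal kernel code): the key is a running 64-bit hash of the class
 * ids in acquisition order, so A -> B -> C and A -> C -> B differ:
 *
 *	u64 key = 0;				// chain head
 *	key = iterate_chain_key(key, id_A);
 *	key = iterate_chain_key(key, id_B);	// == prev_chain_key of C
 *	key = iterate_chain_key(key, id_C);	// == curr->curr_chain_key
 */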
2648
2649static int
2650print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2651                           unsigned long ip)
2652{
2653        if (!debug_locks_off())
2654                return 0;
2655        if (debug_locks_silent)
2656                return 0;
2657
2658        printk("\n=====================================\n");
2659        printk(  "[ BUG: bad unlock balance detected! ]\n");
2660        printk(  "-------------------------------------\n");
2661        printk("%s/%d is trying to release lock (",
2662                curr->comm, task_pid_nr(curr));
2663        print_lockdep_cache(lock);
2664        printk(") at:\n");
2665        print_ip_sym(ip);
2666        printk("but there are no more locks to release!\n");
2667        printk("\nother info that might help us debug this:\n");
2668        lockdep_print_held_locks(curr);
2669
2670        printk("\nstack backtrace:\n");
2671        dump_stack();
2672
2673        return 0;
2674}
2675
2676/*
2677 * Common debugging checks for both nested and non-nested unlock:
2678 */
2679static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2680                        unsigned long ip)
2681{
2682        if (unlikely(!debug_locks))
2683                return 0;
2684        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2685                return 0;
2686
2687        if (curr->lockdep_depth <= 0)
2688                return print_unlock_inbalance_bug(curr, lock, ip);
2689
2690        return 1;
2691}
2692
2693static int
2694__lock_set_subclass(struct lockdep_map *lock,
2695                    unsigned int subclass, unsigned long ip)
2696{
2697        struct task_struct *curr = current;
2698        struct held_lock *hlock, *prev_hlock;
2699        struct lock_class *class;
2700        unsigned int depth;
2701        int i;
2702
2703        depth = curr->lockdep_depth;
2704        if (DEBUG_LOCKS_WARN_ON(!depth))
2705                return 0;
2706
2707        prev_hlock = NULL;
2708        for (i = depth-1; i >= 0; i--) {
2709                hlock = curr->held_locks + i;
2710                /*
2711                 * We must not cross into another context:
2712                 */
2713                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2714                        break;
2715                if (hlock->instance == lock)
2716                        goto found_it;
2717                prev_hlock = hlock;
2718        }
2719        return print_unlock_inbalance_bug(curr, lock, ip);
2720
2721found_it:
2722        class = register_lock_class(lock, subclass, 0);
2723        hlock->class_idx = class - lock_classes + 1;
2724
2725        curr->lockdep_depth = i;
2726        curr->curr_chain_key = hlock->prev_chain_key;
2727
2728        for (; i < depth; i++) {
2729                hlock = curr->held_locks + i;
2730                if (!__lock_acquire(hlock->instance,
2731                        hlock_class(hlock)->subclass, hlock->trylock,
2732                                hlock->read, hlock->check, hlock->hardirqs_off,
2733                                hlock->nest_lock, hlock->acquire_ip))
2734                        return 0;
2735        }
2736
2737        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2738                return 0;
2739        return 1;
2740}
2741
2742/*
2743 * Remove a lock from the list of currently held locks in a
2744 * potentially non-nested (out of order) manner. This is a
2745 * relatively rare operation, as all the unlock APIs default
2746 * to nested mode (which uses lock_release_nested()):
2747 */
2748static int
2749lock_release_non_nested(struct task_struct *curr,
2750                        struct lockdep_map *lock, unsigned long ip)
2751{
2752        struct held_lock *hlock, *prev_hlock;
2753        unsigned int depth;
2754        int i;
2755
2756        /*
2757         * Check whether the lock exists in the current stack
2758         * of held locks:
2759         */
2760        depth = curr->lockdep_depth;
2761        if (DEBUG_LOCKS_WARN_ON(!depth))
2762                return 0;
2763
2764        prev_hlock = NULL;
2765        for (i = depth-1; i >= 0; i--) {
2766                hlock = curr->held_locks + i;
2767                /*
2768                 * We must not cross into another context:
2769                 */
2770                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2771                        break;
2772                if (hlock->instance == lock)
2773                        goto found_it;
2774                prev_hlock = hlock;
2775        }
2776        return print_unlock_inbalance_bug(curr, lock, ip);
2777
2778found_it:
2779        lock_release_holdtime(hlock);
2780
2781        /*
2782         * We have the right lock to unlock, 'hlock' points to it.
2783         * Now we remove it from the stack, and add back the other
2784         * entries (if any), recalculating the hash along the way:
2785         */
2786        curr->lockdep_depth = i;
2787        curr->curr_chain_key = hlock->prev_chain_key;
2788
2789        for (i++; i < depth; i++) {
2790                hlock = curr->held_locks + i;
2791                if (!__lock_acquire(hlock->instance,
2792                        hlock_class(hlock)->subclass, hlock->trylock,
2793                                hlock->read, hlock->check, hlock->hardirqs_off,
2794                                hlock->nest_lock, hlock->acquire_ip))
2795                        return 0;
2796        }
2797
2798        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2799                return 0;
2800        return 1;
2801}
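
/*
 * Example of a release that takes this path (a sketch): the unlock
 * order does not mirror the lock order, so the lock being released
 * is not on top of the held-lock stack:
 *
 *	spin_lock(&A);
 *	spin_lock(&B);
 *	spin_unlock(&A);	// non-nested: B is still above A
 *	spin_unlock(&B);
 */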
2802
2803/*
2804 * Remove a lock from the list of currently held locks - this gets
2805 * called on mutex_unlock()/spin_unlock*() (or on a failed
2806 * mutex_lock_interruptible()). This is done for unlocks that nest
2807 * perfectly (i.e. the current top of the lock stack is unlocked).
2808 */
2809static int lock_release_nested(struct task_struct *curr,
2810                               struct lockdep_map *lock, unsigned long ip)
2811{
2812        struct held_lock *hlock;
2813        unsigned int depth;
2814
2815        /*
2816         * Pop off the top of the lock stack:
2817         */
2818        depth = curr->lockdep_depth - 1;
2819        hlock = curr->held_locks + depth;
2820
2821        /*
2822         * Is the unlock non-nested:
2823         */
2824        if (hlock->instance != lock)
2825                return lock_release_non_nested(curr, lock, ip);
2826        curr->lockdep_depth--;
2827
2828        if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2829                return 0;
2830
2831        curr->curr_chain_key = hlock->prev_chain_key;
2832
2833        lock_release_holdtime(hlock);
2834
2835#ifdef CONFIG_DEBUG_LOCKDEP
2836        hlock->prev_chain_key = 0;
2837        hlock->class_idx = 0;
2838        hlock->acquire_ip = 0;
2839        hlock->irq_context = 0;
2840#endif
2841        return 1;
2842}
2843
2844/*
2845 * Remove a lock from the list of currently held locks - this is
2846 * the common entry point for all unlocks: it performs the shared
2847 * sanity checks, then dispatches to the nested or the non-nested
2848 * (out of order) release path.
2849 */
2850static void
2851__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2852{
2853        struct task_struct *curr = current;
2854
2855        if (!check_unlock(curr, lock, ip))
2856                return;
2857
2858        if (nested) {
2859                if (!lock_release_nested(curr, lock, ip))
2860                        return;
2861        } else {
2862                if (!lock_release_non_nested(curr, lock, ip))
2863                        return;
2864        }
2865
2866        check_chain_key(curr);
2867}
2868
2869/*
2870 * Check whether we follow the irq-flags state precisely:
2871 */
2872static void check_flags(unsigned long flags)
2873{
2874#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
2875    defined(CONFIG_TRACE_IRQFLAGS)
2876        if (!debug_locks)
2877                return;
2878
2879        if (irqs_disabled_flags(flags)) {
2880                if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
2881                        printk("possible reason: unannotated irqs-off.\n");
2882                }
2883        } else {
2884                if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
2885                        printk("possible reason: unannotated irqs-on.\n");
2886                }
2887        }
2888
2889        /*
2890         * We don't accurately track softirq state in e.g.
2891         * hardirq contexts (such as on 4KSTACKS), so only
2892         * check when not in a hardirq context:
2893         */
2894        if (!hardirq_count()) {
2895                if (softirq_count())
2896                        DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2897                else
2898                        DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2899        }
2900
2901        if (!debug_locks)
2902                print_irqtrace_events(current);
2903#endif
2904}
2905
2906void
2907lock_set_subclass(struct lockdep_map *lock,
2908                  unsigned int subclass, unsigned long ip)
2909{
2910        unsigned long flags;
2911
2912        if (unlikely(current->lockdep_recursion))
2913                return;
2914
2915        raw_local_irq_save(flags);
2916        current->lockdep_recursion = 1;
2917        check_flags(flags);
2918        if (__lock_set_subclass(lock, subclass, ip))
2919                check_chain_key(current);
2920        current->lockdep_recursion = 0;
2921        raw_local_irq_restore(flags);
2922}
2923
2924EXPORT_SYMBOL_GPL(lock_set_subclass);
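
/*
 * Example caller (a hedged sketch - the scheduler is the expected
 * user): after a double_lock_balance() dance, the still-held rq->lock
 * is re-annotated back to its default subclass:
 *
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */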
2925
2926/*
2927 * We are not always called with irqs disabled - do that here,
2928 * and also avoid lockdep recursion:
2929 */
2930void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2931                          int trylock, int read, int check,
2932                          struct lockdep_map *nest_lock, unsigned long ip)
2933{
2934        unsigned long flags;
2935
2936        if (unlikely(current->lockdep_recursion))
2937                return;
2938
2939        raw_local_irq_save(flags);
2940        check_flags(flags);
2941
2942        current->lockdep_recursion = 1;
2943        __lock_acquire(lock, subclass, trylock, read, check,
2944                       irqs_disabled_flags(flags), nest_lock, ip);
2945        current->lockdep_recursion = 0;
2946        raw_local_irq_restore(flags);
2947}
2948
2949EXPORT_SYMBOL_GPL(lock_acquire);
2950
2951void lock_release(struct lockdep_map *lock, int nested,
2952                          unsigned long ip)
2953{
2954        unsigned long flags;
2955
2956        if (unlikely(current->lockdep_recursion))
2957                return;
2958
2959        raw_local_irq_save(flags);
2960        check_flags(flags);
2961        current->lockdep_recursion = 1;
2962        __lock_release(lock, nested, ip);
2963        current->lockdep_recursion = 0;
2964        raw_local_irq_restore(flags);
2965}
2966
2967EXPORT_SYMBOL_GPL(lock_release);
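
/*
 * These entry points are wired up by the per-type lock wrappers; a
 * simplified sketch of the mapping in include/linux/lockdep.h (with
 * CONFIG_PROVE_LOCKING):
 *
 *	#define spin_acquire(l, s, t, i)  lock_acquire(l, s, t, 0, 2, NULL, i)
 *	#define spin_release(l, n, i)     lock_release(l, n, i)
 *
 * i.e. spinlocks pass read == 0 and check == 2 (full validation).
 */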
2968
2969#ifdef CONFIG_LOCK_STAT
2970static int
2971print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
2972                           unsigned long ip)
2973{
2974        if (!debug_locks_off())
2975                return 0;
2976        if (debug_locks_silent)
2977                return 0;
2978
2979        printk("\n=================================\n");
2980        printk(  "[ BUG: bad contention detected! ]\n");
2981        printk(  "---------------------------------\n");
2982        printk("%s/%d is trying to contend lock (",
2983                curr->comm, task_pid_nr(curr));
2984        print_lockdep_cache(lock);
2985        printk(") at:\n");
2986        print_ip_sym(ip);
2987        printk("but there are no locks held!\n");
2988        printk("\nother info that might help us debug this:\n");
2989        lockdep_print_held_locks(curr);
2990
2991        printk("\nstack backtrace:\n");
2992        dump_stack();
2993
2994        return 0;
2995}
2996
2997static void
2998__lock_contended(struct lockdep_map *lock, unsigned long ip)
2999{
3000        struct task_struct *curr = current;
3001        struct held_lock *hlock, *prev_hlock;
3002        struct lock_class_stats *stats;
3003        unsigned int depth;
3004        int i, point;
3005
3006        depth = curr->lockdep_depth;
3007        if (DEBUG_LOCKS_WARN_ON(!depth))
3008                return;
3009
3010        prev_hlock = NULL;
3011        for (i = depth-1; i >= 0; i--) {
3012                hlock = curr->held_locks + i;
3013                /*
3014                 * We must not cross into another context:
3015                 */
3016                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3017                        break;
3018                if (hlock->instance == lock)
3019                        goto found_it;
3020                prev_hlock = hlock;
3021        }
3022        print_lock_contention_bug(curr, lock, ip);
3023        return;
3024
3025found_it:
3026        hlock->waittime_stamp = sched_clock();
3027
3028        point = lock_contention_point(hlock_class(hlock), ip);
3029
3030        stats = get_lock_stats(hlock_class(hlock));
3031        if (point < ARRAY_SIZE(stats->contention_point))
3032                stats->contention_point[point]++;
3033        if (lock->cpu != smp_processor_id())
3034                stats->bounces[bounce_contended + !!hlock->read]++;
3035        put_lock_stats(stats);
3036}
3037
3038static void
3039__lock_acquired(struct lockdep_map *lock)
3040{
3041        struct task_struct *curr = current;
3042        struct held_lock *hlock, *prev_hlock;
3043        struct lock_class_stats *stats;
3044        unsigned int depth;
3045        u64 now;
3046        s64 waittime = 0;
3047        int i, cpu;
3048
3049        depth = curr->lockdep_depth;
3050        if (DEBUG_LOCKS_WARN_ON(!depth))
3051                return;
3052
3053        prev_hlock = NULL;
3054        for (i = depth-1; i >= 0; i--) {
3055                hlock = curr->held_locks + i;
3056                /*
3057                 * We must not cross into another context:
3058                 */
3059                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3060                        break;
3061                if (hlock->instance == lock)
3062                        goto found_it;
3063                prev_hlock = hlock;
3064        }
3065        print_lock_contention_bug(curr, lock, _RET_IP_);
3066        return;
3067
3068found_it:
3069        cpu = smp_processor_id();
3070        if (hlock->waittime_stamp) {
3071                now = sched_clock();
3072                waittime = now - hlock->waittime_stamp;
3073                hlock->holdtime_stamp = now;
3074        }
3075
3076        stats = get_lock_stats(hlock_class(hlock));
3077        if (waittime) {
3078                if (hlock->read)
3079                        lock_time_inc(&stats->read_waittime, waittime);
3080                else
3081                        lock_time_inc(&stats->write_waittime, waittime);
3082        }
3083        if (lock->cpu != cpu)
3084                stats->bounces[bounce_acquired + !!hlock->read]++;
3085        put_lock_stats(stats);
3086
3087        lock->cpu = cpu;
3088}
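
/*
 * The wait/hold times and bounce counts collected above are what
 * /proc/lock_stat reports (with CONFIG_LOCK_STAT). A typical session,
 * a sketch per Documentation/lockstat.txt:
 *
 *	echo 0 > /proc/lock_stat	# clear the statistics
 *	<run workload>
 *	cat /proc/lock_stat
 */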
3089
3090void lock_contended(struct lockdep_map *lock, unsigned long ip)
3091{
3092        unsigned long flags;
3093
3094        if (unlikely(!lock_stat))
3095                return;
3096
3097        if (unlikely(current->lockdep_recursion))
3098                return;
3099
3100        raw_local_irq_save(flags);
3101        check_flags(flags);
3102        current->lockdep_recursion = 1;
3103        __lock_contended(lock, ip);
3104        current->lockdep_recursion = 0;
3105        raw_local_irq_restore(flags);
3106}
3107EXPORT_SYMBOL_GPL(lock_contended);
3108
3109void lock_acquired(struct lockdep_map *lock)
3110{
3111        unsigned long flags;
3112
3113        if (unlikely(!lock_stat))
3114                return;
3115
3116        if (unlikely(current->lockdep_recursion))
3117                return;
3118
3119        raw_local_irq_save(flags);
3120        check_flags(flags);
3121        current->lockdep_recursion = 1;
3122        __lock_acquired(lock);
3123        current->lockdep_recursion = 0;
3124        raw_local_irq_restore(flags);
3125}
3126EXPORT_SYMBOL_GPL(lock_acquired);
3127#endif
3128
3129/*
3130 * Used by the testsuite to sanitize the validator state
3131 * after a simulated failure:
3132 */
3133
3134void lockdep_reset(void)
3135{
3136        unsigned long flags;
3137        int i;
3138
3139        raw_local_irq_save(flags);
3140        current->curr_chain_key = 0;
3141        current->lockdep_depth = 0;
3142        current->lockdep_recursion = 0;
3143        memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3144        nr_hardirq_chains = 0;
3145        nr_softirq_chains = 0;
3146        nr_process_chains = 0;
3147        debug_locks = 1;
3148        for (i = 0; i < CHAINHASH_SIZE; i++)
3149                INIT_LIST_HEAD(chainhash_table + i);
3150        raw_local_irq_restore(flags);
3151}
3152
3153static void zap_class(struct lock_class *class)
3154{
3155        int i;
3156
3157        /*
3158         * Remove all dependencies this lock is
3159         * involved in:
3160         */
3161        for (i = 0; i < nr_list_entries; i++) {
3162                if (list_entries[i].class == class)
3163                        list_del_rcu(&list_entries[i].entry);
3164        }
3165        /*
3166         * Unhash the class and remove it from the all_lock_classes list:
3167         */
3168        list_del_rcu(&class->hash_entry);
3169        list_del_rcu(&class->lock_entry);
3170
3171        class->key = NULL;
3172}
3173
3174static inline int within(const void *addr, void *start, unsigned long size)
3175{
3176        return addr >= start && addr < start + size;
3177}
3178
3179void lockdep_free_key_range(void *start, unsigned long size)
3180{
3181        struct lock_class *class, *next;
3182        struct list_head *head;
3183        unsigned long flags;
3184        int i;
3185        int locked;
3186
3187        raw_local_irq_save(flags);
3188        locked = graph_lock();
3189
3190        /*
3191         * Unhash all classes that were created by this module:
3192         */
3193        for (i = 0; i < CLASSHASH_SIZE; i++) {
3194                head = classhash_table + i;
3195                if (list_empty(head))
3196                        continue;
3197                list_for_each_entry_safe(class, next, head, hash_entry) {
3198                        if (within(class->key, start, size))
3199                                zap_class(class);
3200                        else if (within(class->name, start, size))
3201                                zap_class(class);
3202                }
3203        }
3204
3205        if (locked)
3206                graph_unlock();
3207        raw_local_irq_restore(flags);
3208}
3209
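/*
 * Sketch of the intended caller (an assumption modeled on the module
 * unload path): before module memory is freed, every lock class whose
 * key or name string lives inside that image must be zapped, e.g.:
 *
 *	lockdep_free_key_range(mod->module_core, mod->core_size);
 *
 * Otherwise stale classes would keep dangling pointers into freed
 * memory.
 */
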
3210void lockdep_reset_lock(struct lockdep_map *lock)
3211{
3212        struct lock_class *class, *next;
3213        struct list_head *head;
3214        unsigned long flags;
3215        int i, j;
3216        int locked;
3217
3218        raw_local_irq_save(flags);
3219
3220        /*
3221         * Remove all classes this lock might have:
3222         */
3223        for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3224                /*
3225                 * Look the class up; if it exists, zap it:
3226                 */
3227                class = look_up_lock_class(lock, j);
3228                if (class)
3229                        zap_class(class);
3230        }
3231        /*
3232         * Debug check: in the end all mapped classes should
3233         * be gone.
3234         */
3235        locked = graph_lock();
3236        for (i = 0; i < CLASSHASH_SIZE; i++) {
3237                head = classhash_table + i;
3238                if (list_empty(head))
3239                        continue;
3240                list_for_each_entry_safe(class, next, head, hash_entry) {
3241                        if (unlikely(class == lock->class_cache)) {
3242                                if (debug_locks_off_graph_unlock())
3243                                        WARN_ON(1);
3244                                goto out_restore;
3245                        }
3246                }
3247        }
3248        if (locked)
3249                graph_unlock();
3250
3251out_restore:
3252        raw_local_irq_restore(flags);
3253}
3254
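/*
 * Example (a hedged sketch of how the locking self-tests might use
 * the two reset interfaces above between test cases; the lock names
 * below are made up):
 *
 *	static void reset_test_locks(void)
 *	{
 *		lockdep_reset_lock(&test_lock_A.dep_map);
 *		lockdep_reset_lock(&test_lock_B.dep_map);
 *		lockdep_reset();
 *	}
 *
 * After a simulated deadlock has turned debug_locks off, this brings
 * the validator back to a clean state for the next test case.
 */
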
3255void lockdep_init(void)
3256{
3257        int i;
3258
3259        /*
3260         * Some architectures call lockdep_init() from their own
3261         * start_kernel() code, and the generic start_kernel()
3262         * calls it as well; the hashes must be initialized
3263         * only once:
3264         */
3265        if (lockdep_initialized)
3266                return;
3267
3268        for (i = 0; i < CLASSHASH_SIZE; i++)
3269                INIT_LIST_HEAD(classhash_table + i);
3270
3271        for (i = 0; i < CHAINHASH_SIZE; i++)
3272                INIT_LIST_HEAD(chainhash_table + i);
3273
3274        lockdep_initialized = 1;
3275}
3276
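/*
 * Ordering sketch: the hash tables must be set up before the first
 * lock class is ever looked up, so conceptually the very first thing
 * early init code does is:
 *
 *	asmlinkage void __init start_kernel(void)
 *	{
 *		lockdep_init();
 *		...
 *	}
 *
 * A second call is harmless thanks to the lockdep_initialized guard
 * above.
 */
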
3277void __init lockdep_info(void)
3278{
3279        printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3280
3281        printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
3282        printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
3283        printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
3284        printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
3285        printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
3286        printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
3287        printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
3288
3289        printk(" memory used by lock dependency info: %lu kB\n",
3290                (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3291                sizeof(struct list_head) * CLASSHASH_SIZE +
3292                sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3293                sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3294                sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
3295
3296        printk(" per task-struct memory footprint: %lu bytes\n",
3297                sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3298
3299#ifdef CONFIG_DEBUG_LOCKDEP
3300        if (lockdep_init_error) {
3301                printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3302                printk("Call stack leading to lockdep invocation was:\n");
3303                print_stack_trace(&lockdep_init_trace, 0);
3304        }
3305#endif
3306}
3307
3308static void
3309print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3310                     const void *mem_to, struct held_lock *hlock)
3311{
3312        if (!debug_locks_off())
3313                return;
3314        if (debug_locks_silent)
3315                return;
3316
3317        printk("\n=========================\n");
3318        printk(  "[ BUG: held lock freed! ]\n");
3319        printk(  "-------------------------\n");
3320        printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3321                curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3322        print_lock(hlock);
3323        lockdep_print_held_locks(curr);
3324
3325        printk("\nstack backtrace:\n");
3326        dump_stack();
3327}
3328
3329static inline int not_in_range(const void *mem_from, unsigned long mem_len,
3330                                const void *lock_from, unsigned long lock_len)
3331{
3332        return lock_from + lock_len <= mem_from ||
3333                mem_from + mem_len <= lock_from;
3334}
3335
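/*
 * This is the usual disjointness test for two half-open intervals,
 * [mem_from, mem_from + mem_len) and [lock_from, lock_from + lock_len):
 * they are disjoint iff one ends at or before the other begins. E.g.
 * a 16-byte free at 0x1000 and an 8-byte lock at 0x1010 do not overlap
 * (0x1000 + 0x10 <= 0x1010), while a lock at 0x100c would.
 */
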
3336/*
3337 * Called when kernel memory is freed (or unmapped), or if a lock
3338 * is destroyed or reinitialized: this code checks whether any held
3339 * lock lies in the range [mem_from, mem_from + mem_len):
3340 */
3341void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3342{
3343        struct task_struct *curr = current;
3344        struct held_lock *hlock;
3345        unsigned long flags;
3346        int i;
3347
3348        if (unlikely(!debug_locks))
3349                return;
3350
3351        local_irq_save(flags);
3352        for (i = 0; i < curr->lockdep_depth; i++) {
3353                hlock = curr->held_locks + i;
3354
3355                if (not_in_range(mem_from, mem_len, hlock->instance,
3356                                        sizeof(*hlock->instance)))
3357                        continue;
3358
3359                print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3360                break;
3361        }
3362        local_irq_restore(flags);
3363}
3364EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
3365
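/*
 * Illustrative caller (an assumption modeled on the allocator free
 * paths): the hook is invoked before an object's memory is reused,
 * e.g.:
 *
 *	void example_free(void *obj, unsigned long size)
 *	{
 *		debug_check_no_locks_freed(obj, size);
 *		... hand the memory back to the allocator ...
 *	}
 *
 * so that freeing an object which still embeds a held lock is
 * reported at the free site rather than as silent corruption later.
 */
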
3366static void print_held_locks_bug(struct task_struct *curr)
3367{
3368        if (!debug_locks_off())
3369                return;
3370        if (debug_locks_silent)
3371                return;
3372
3373        printk("\n=====================================\n");
3374        printk(  "[ BUG: lock held at task exit time! ]\n");
3375        printk(  "-------------------------------------\n");
3376        printk("%s/%d is exiting with locks still held!\n",
3377                curr->comm, task_pid_nr(curr));
3378        lockdep_print_held_locks(curr);
3379
3380        printk("\nstack backtrace:\n");
3381        dump_stack();
3382}
3383
3384void debug_check_no_locks_held(struct task_struct *task)
3385{
3386        if (unlikely(task->lockdep_depth > 0))
3387                print_held_locks_bug(task);
3388}
3389
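/*
 * Typical call site (an assumption: the task exit path), conceptually:
 *
 *	void do_exit(long code)
 *	{
 *		...
 *		debug_check_no_locks_held(current);
 *		...
 *	}
 *
 * A task must never exit with locks still held.
 */
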
3390void debug_show_all_locks(void)
3391{
3392        struct task_struct *g, *p;
3393        int count = 10;
3394        int unlock = 1;
3395
3396        if (unlikely(!debug_locks)) {
3397                printk("INFO: lockdep is turned off.\n");
3398                return;
3399        }
3400        printk("\nShowing all locks held in the system:\n");
3401
3402        /*
3403         * Here we try hard to take the tasklist_lock; if we cannot
3404         * get it within 2 seconds we proceed without it (but keep
3405         * retrying below). This enables a debug printout even if a
3406         * tasklist_lock-holding task has deadlocked or crashed.
3407         */
3408retry:
3409        if (!read_trylock(&tasklist_lock)) {
3410                if (count == 10)
3411                        printk("hm, tasklist_lock locked, retrying... ");
3412                if (count) {
3413                        count--;
3414                        printk(" #%d", 10-count);
3415                        mdelay(200);
3416                        goto retry;
3417                }
3418                printk(" ignoring it.\n");
3419                unlock = 0;
3420        }
3421        if (count != 10)
3422                printk(" locked it.\n");
3423
3424        do_each_thread(g, p) {
3425                /*
3426                 * Printing a task's held locks is only reliable if
3427                 * the task is sleeping or is the current task:
3429                 */
3430                if (p->state == TASK_RUNNING && p != current)
3431                        continue;
3432                if (p->lockdep_depth)
3433                        lockdep_print_held_locks(p);
3434                if (!unlock)
3435                        if (read_trylock(&tasklist_lock))
3436                                unlock = 1;
3437        } while_each_thread(g, p);
3438
3439        printk("\n");
3440        printk("=============================================\n\n");
3441
3442        if (unlock)
3443                read_unlock(&tasklist_lock);
3444}
3446EXPORT_SYMBOL_GPL(debug_show_all_locks);
3447
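/*
 * Example consumer (an assumption: the magic SysRq 'd' handler in
 * drivers/char/sysrq.c), roughly:
 *
 *	static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
 *	{
 *		debug_show_all_locks();
 *	}
 *
 * Since this may run from interrupt context on a wedged machine, the
 * trylock/retry dance on tasklist_lock above is what keeps it usable.
 */
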
3448/*
3449 * Careful: only use this function if you are sure that
3450 * the task cannot run in parallel!
3451 */
3452void __debug_show_held_locks(struct task_struct *task)
3453{
3454        if (unlikely(!debug_locks)) {
3455                printk("INFO: lockdep is turned off.\n");
3456                return;
3457        }
3458        lockdep_print_held_locks(task);
3459}
3460EXPORT_SYMBOL_GPL(__debug_show_held_locks);
3461
3462void debug_show_held_locks(struct task_struct *task)
3463{
3464        __debug_show_held_locks(task);
3465}
3467EXPORT_SYMBOL_GPL(debug_show_held_locks);
3468
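/*
 * Example consumer (an assumption: watchdog style debugging, e.g. a
 * soft-lockup report printing the current task's locks), roughly:
 *
 *	printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n", cpu);
 *	debug_show_held_locks(current);
 *
 * Passing 'current' is always safe here: a task cannot run in
 * parallel with itself, so the stronger requirement documented on
 * __debug_show_held_locks() is trivially met.
 */
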
3469void lockdep_sys_exit(void)
3470{
3471        struct task_struct *curr = current;
3472
3473        if (unlikely(curr->lockdep_depth)) {
3474                if (!debug_locks_off())
3475                        return;
3476                printk("\n================================================\n");
3477                printk(  "[ BUG: lock held when returning to user space! ]\n");
3478                printk(  "------------------------------------------------\n");
3479                printk("%s/%d is leaving the kernel with locks still held!\n",
3480                                curr->comm, curr->pid);
3481                lockdep_print_held_locks(curr);
3482        }
3483}
3484
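/*
 * Sketch of where the hook above fires (an assumption: the
 * architecture's syscall return path invokes it, on x86 via a small
 * assembly thunk), conceptually:
 *
 *	on the way back to user space, after the syscall body:
 *		lockdep_sys_exit();
 *
 * A task returning to user space can never legitimately hold a
 * kernel lock, so any nonzero lockdep_depth here is a bug worth
 * reporting.
 */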