// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/lockdep.c
 *
 * Runtime locking correctness validator, started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * This code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 *  - lock inversion scenarios
 *  - circular lock dependencies
 *  - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at all.
 */
29#define DISABLE_BRANCH_PROFILING
30#include <linux/mutex.h>
31#include <linux/sched.h>
32#include <linux/sched/clock.h>
33#include <linux/sched/task.h>
34#include <linux/sched/mm.h>
35#include <linux/delay.h>
36#include <linux/module.h>
37#include <linux/proc_fs.h>
38#include <linux/seq_file.h>
39#include <linux/spinlock.h>
40#include <linux/kallsyms.h>
41#include <linux/interrupt.h>
42#include <linux/stacktrace.h>
43#include <linux/debug_locks.h>
44#include <linux/irqflags.h>
45#include <linux/utsname.h>
46#include <linux/hash.h>
47#include <linux/ftrace.h>
48#include <linux/stringify.h>
49#include <linux/bitmap.h>
50#include <linux/bitops.h>
51#include <linux/gfp.h>
52#include <linux/random.h>
53#include <linux/jhash.h>
54#include <linux/nmi.h>
55#include <linux/rcupdate.h>
56#include <linux/kprobes.h>
57#include <linux/lockdep.h>
58
59#include <asm/sections.h>
60
61#include "lockdep_internals.h"
62
63#define CREATE_TRACE_POINTS
64#include <trace/events/lock.h>
65
66#ifdef CONFIG_PROVE_LOCKING
67int prove_locking = 1;
68module_param(prove_locking, int, 0644);
69#else
70#define prove_locking 0
71#endif
72
73#ifdef CONFIG_LOCK_STAT
74int lock_stat = 1;
75module_param(lock_stat, int, 0644);
76#else
77#define lock_stat 0
78#endif
79
80DEFINE_PER_CPU(unsigned int, lockdep_recursion);
81EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
82
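/*
 * lockdep is active only while debug_locks is set and neither this CPU nor
 * the current task is already inside lockdep: the per-CPU and per-task
 * recursion counters guard against lockdep recursing into itself.
 */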
83static __always_inline bool lockdep_enabled(void)
84{
85 if (!debug_locks)
86 return false;
87
88 if (this_cpu_read(lockdep_recursion))
89 return false;
90
91 if (current->lockdep_recursion)
92 return false;
93
94 return true;
95}
96
/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code.
 */
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *__owner;

static inline void lockdep_lock(void)
{
	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	__this_cpu_inc(lockdep_recursion);
	arch_spin_lock(&__lock);
	__owner = current;
}

static inline void lockdep_unlock(void)
{
	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
		return;

	__owner = NULL;
	arch_spin_unlock(&__lock);
	__this_cpu_dec(lockdep_recursion);
}
128
129static inline bool lockdep_assert_locked(void)
130{
131 return DEBUG_LOCKS_WARN_ON(__owner != current);
132}
133
134static struct task_struct *lockdep_selftest_task_struct;
135
136
static int graph_lock(void)
{
	lockdep_lock();
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already).
	 */
	if (!debug_locks) {
		lockdep_unlock();
		return 0;
	}
	return 1;
}
152
153static inline void graph_unlock(void)
154{
155 lockdep_unlock();
156}
157
/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and return 1 if we turned it off and got the graph lock successfully.
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	lockdep_unlock();

	return ret;
}
170
171unsigned long nr_list_entries;
172static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
173static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
174
/*
 * All data structures here are protected by the global debug_lock.
 *
 * nr_lock_classes is the number of elements of lock_classes[] that is
 * in use.
 */
#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
184unsigned long nr_lock_classes;
185unsigned long nr_zapped_classes;
186#ifndef CONFIG_DEBUG_LOCKDEP
187static
188#endif
189struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
190static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
191
192static inline struct lock_class *hlock_class(struct held_lock *hlock)
193{
194 unsigned int class_idx = hlock->class_idx;
195
	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
	barrier();

	if (!test_bit(class_idx, lock_classes_in_use)) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}

	/*
	 * At this point, if the passed hlock->class_idx is still garbage,
	 * we just have to live with it.
	 */
	return lock_classes + class_idx;
212}
213
214#ifdef CONFIG_LOCK_STAT
215static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
216
217static inline u64 lockstat_clock(void)
218{
219 return local_clock();
220}
221
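/*
 * Record @ip in the first free slot of @points[], or return the index of
 * the slot that already holds it. At most LOCKSTAT_POINTS distinct
 * contention/acquisition points are tracked per lock class.
 */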
222static int lock_point(unsigned long points[], unsigned long ip)
223{
224 int i;
225
226 for (i = 0; i < LOCKSTAT_POINTS; i++) {
227 if (points[i] == 0) {
228 points[i] = ip;
229 break;
230 }
231 if (points[i] == ip)
232 break;
233 }
234
235 return i;
236}
237
238static void lock_time_inc(struct lock_time *lt, u64 time)
239{
240 if (time > lt->max)
241 lt->max = time;
242
243 if (time < lt->min || !lt->nr)
244 lt->min = time;
245
246 lt->total += time;
247 lt->nr++;
248}
249
250static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
251{
252 if (!src->nr)
253 return;
254
255 if (src->max > dst->max)
256 dst->max = src->max;
257
258 if (src->min < dst->min || !dst->nr)
259 dst->min = src->min;
260
261 dst->total += src->total;
262 dst->nr += src->nr;
263}
264
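/*
 * Sum up the per-CPU statistics of @class into a single
 * struct lock_class_stats snapshot.
 */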
265struct lock_class_stats lock_stats(struct lock_class *class)
266{
267 struct lock_class_stats stats;
268 int cpu, i;
269
270 memset(&stats, 0, sizeof(struct lock_class_stats));
271 for_each_possible_cpu(cpu) {
272 struct lock_class_stats *pcs =
273 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
274
275 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
276 stats.contention_point[i] += pcs->contention_point[i];
277
278 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
279 stats.contending_point[i] += pcs->contending_point[i];
280
281 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
282 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
283
284 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
285 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
286
287 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
288 stats.bounces[i] += pcs->bounces[i];
289 }
290
291 return stats;
292}
293
294void clear_lock_stats(struct lock_class *class)
295{
296 int cpu;
297
298 for_each_possible_cpu(cpu) {
299 struct lock_class_stats *cpu_stats =
300 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
301
302 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
303 }
304 memset(class->contention_point, 0, sizeof(class->contention_point));
305 memset(class->contending_point, 0, sizeof(class->contending_point));
306}
307
308static struct lock_class_stats *get_lock_stats(struct lock_class *class)
309{
310 return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
311}
312
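/*
 * On release, and only when lock statistics are enabled, account how long
 * the lock was held, separately for read and write holds.
 */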
313static void lock_release_holdtime(struct held_lock *hlock)
314{
315 struct lock_class_stats *stats;
316 u64 holdtime;
317
318 if (!lock_stat)
319 return;
320
321 holdtime = lockstat_clock() - hlock->holdtime_stamp;
322
323 stats = get_lock_stats(hlock_class(hlock));
324 if (hlock->read)
325 lock_time_inc(&stats->read_holdtime, holdtime);
326 else
327 lock_time_inc(&stats->write_holdtime, holdtime);
328}
329#else
330static inline void lock_release_holdtime(struct held_lock *hlock)
331{
332}
333#endif
334
/*
 * We keep a global list of all lock classes. The list is only accessed with
 * the lockdep spinlock lock held. free_lock_classes is a list with free
 * elements. These elements are linked together by the lock_entry member in
 * struct lock_class.
 */
LIST_HEAD(all_lock_classes);
static LIST_HEAD(free_lock_classes);
343
/**
 * struct pending_free - information about data structures about to be freed
 * @zapped: Head of a list with struct lock_class elements.
 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[]
 *	entries are being freed.
 */
struct pending_free {
	struct list_head zapped;
	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
};
354
/**
 * struct delayed_free - data structures used for delayed freeing
 *
 * A data structure for delayed freeing of data structures that may be
 * accessed by RCU readers at the time these were freed.
 *
 * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
 * @index:     Index of @pf to which freed data structures are added.
 * @scheduled: Whether or not an RCU callback has been scheduled.
 * @pf:        Array with information about data structures about to be freed.
 */
static struct delayed_free {
	struct rcu_head		rcu_head;
	int			index;
	int			scheduled;
	struct pending_free	pf[2];
} delayed_free;
372
/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];
382
/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];
393
/*
 * The id of held_lock
 */
static inline u16 hlock_id(struct held_lock *hlock)
{
	BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16);

	return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
}
403
404static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
405{
406 return hlock_id & (MAX_LOCKDEP_KEYS - 1);
407}
408
/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
	u32 k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1);

	return k0 | (u64)k1 << 32;
}
423
424void lockdep_init_task(struct task_struct *task)
425{
426 task->lockdep_depth = 0;
427 task->curr_chain_key = INITIAL_CHAIN_KEY;
428 task->lockdep_recursion = 0;
429}
430
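/*
 * Per-CPU recursion guard: lockdep_recursion_inc() marks this CPU as being
 * inside lockdep, lockdep_recursion_finish() clears the marker and warns
 * once if the counter was left unbalanced.
 */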
431static __always_inline void lockdep_recursion_inc(void)
432{
433 __this_cpu_inc(lockdep_recursion);
434}
435
436static __always_inline void lockdep_recursion_finish(void)
437{
438 if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
439 __this_cpu_write(lockdep_recursion, 0);
440}
441
442void lockdep_set_selftest_task(struct task_struct *task)
443{
444 lockdep_selftest_task_struct = task;
445}
446
447
448
449
450
451#define VERBOSE 0
452#define VERY_VERBOSE 0
453
454#if VERBOSE
455# define HARDIRQ_VERBOSE 1
456# define SOFTIRQ_VERBOSE 1
457#else
458# define HARDIRQ_VERBOSE 0
459# define SOFTIRQ_VERBOSE 0
460#endif
461
462#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
463
464
465
466static int class_filter(struct lock_class *class)
467{
468#if 0
469
470 if (class->name_version == 1 &&
471 !strcmp(class->name, "lockname"))
472 return 1;
473 if (class->name_version == 1 &&
474 !strcmp(class->name, "&struct->lockfield"))
475 return 1;
476#endif
477
478 return 0;
479}
480#endif
481
482static int verbose(struct lock_class *class)
483{
484#if VERBOSE
485 return class_filter(class);
486#endif
487 return 0;
488}
489
490static void print_lockdep_off(const char *bug_msg)
491{
492 printk(KERN_DEBUG "%s\n", bug_msg);
493 printk(KERN_DEBUG "turning off the locking correctness validator.\n");
494#ifdef CONFIG_LOCK_STAT
495 printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
496#endif
497}
498
499unsigned long nr_stack_trace_entries;
500
501#ifdef CONFIG_PROVE_LOCKING
502
503
504
505
506
507
508
509struct lock_trace {
510 struct hlist_node hash_entry;
511 u32 hash;
512 u32 nr_entries;
513 unsigned long entries[] __aligned(sizeof(unsigned long));
514};
515#define LOCK_TRACE_SIZE_IN_LONGS \
516 (sizeof(struct lock_trace) / sizeof(unsigned long))
517
518
519
520static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
521static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
522
523static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
524{
525 return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
526 memcmp(t1->entries, t2->entries,
527 t1->nr_entries * sizeof(t1->entries[0])) == 0;
528}
529
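/*
 * Save the current stack trace into the stack_trace[] array, deduplicating
 * identical traces via stack_trace_hash[]. Returns the (possibly
 * pre-existing) trace, or NULL when the array is exhausted, in which case
 * lock debugging is turned off. Must be called with the graph lock held.
 */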
530static struct lock_trace *save_trace(void)
531{
532 struct lock_trace *trace, *t2;
533 struct hlist_head *hash_head;
534 u32 hash;
535 int max_entries;
536
537 BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
538 BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
539
540 trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
541 max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
542 LOCK_TRACE_SIZE_IN_LONGS;
543
544 if (max_entries <= 0) {
545 if (!debug_locks_off_graph_unlock())
546 return NULL;
547
548 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
549 dump_stack();
550
551 return NULL;
552 }
553 trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
554
555 hash = jhash(trace->entries, trace->nr_entries *
556 sizeof(trace->entries[0]), 0);
557 trace->hash = hash;
558 hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
559 hlist_for_each_entry(t2, hash_head, hash_entry) {
560 if (traces_identical(trace, t2))
561 return t2;
562 }
563 nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
564 hlist_add_head(&trace->hash_entry, hash_head);
565
566 return trace;
567}
568
569
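/* Return the number of stack traces in the stack_trace[] array. */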
570u64 lockdep_stack_trace_count(void)
571{
572 struct lock_trace *trace;
573 u64 c = 0;
574 int i;
575
576 for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
577 hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
578 c++;
579 }
580 }
581
582 return c;
583}
584
585
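/* Return the number of stack_trace_hash[] chains that have at least one trace. */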
586u64 lockdep_stack_hash_count(void)
587{
588 u64 c = 0;
589 int i;
590
591 for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
592 if (!hlist_empty(&stack_trace_hash[i]))
593 c++;
594
595 return c;
596}
597#endif
598
599unsigned int nr_hardirq_chains;
600unsigned int nr_softirq_chains;
601unsigned int nr_process_chains;
602unsigned int max_lockdep_depth;
603
604#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
609#endif
610
611#ifdef CONFIG_PROVE_LOCKING
612
613
614
615
616#define __USAGE(__STATE) \
617 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
618 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
619 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
620 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
621
622static const char *usage_str[] =
623{
624#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
625#include "lockdep_states.h"
626#undef LOCKDEP_STATE
627 [LOCK_USED] = "INITIAL USE",
628 [LOCK_USED_READ] = "INITIAL READ USE",
	/* abused as string storage for verify_lock_unused() */
630 [LOCK_USAGE_STATES] = "IN-NMI",
631};
632#endif
633
634const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
635{
636 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
637}
638
639static inline unsigned long lock_flag(enum lock_usage_bit bit)
640{
641 return 1UL << bit;
642}
643
644static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
645{
	/*
	 * The usage character defaults to '.' (i.e., irqs disabled and not in
	 * irq context), which is the safest usage category.
	 */
	char c = '.';

	/*
	 * The order of the following usage checks matters, which will
	 * result in the outcome character as follows:
	 *
	 * - '+': irq is enabled and not in irq context
	 * - '-': in irq context and irq is disabled
	 * - '?': in irq context and irq is enabled
	 */
660 if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
661 c = '+';
662 if (class->usage_mask & lock_flag(bit))
663 c = '?';
664 } else if (class->usage_mask & lock_flag(bit))
665 c = '-';
666
667 return c;
668}
669
670void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
671{
672 int i = 0;
673
674#define LOCKDEP_STATE(__STATE) \
675 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
676 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
677#include "lockdep_states.h"
678#undef LOCKDEP_STATE
679
680 usage[i] = '\0';
681}
682
683static void __print_lock_name(struct lock_class *class)
684{
685 char str[KSYM_NAME_LEN];
686 const char *name;
687
688 name = class->name;
689 if (!name) {
690 name = __get_key_name(class->key, str);
691 printk(KERN_CONT "%s", name);
692 } else {
693 printk(KERN_CONT "%s", name);
694 if (class->name_version > 1)
695 printk(KERN_CONT "#%d", class->name_version);
696 if (class->subclass)
697 printk(KERN_CONT "/%d", class->subclass);
698 }
699}
700
701static void print_lock_name(struct lock_class *class)
702{
703 char usage[LOCK_USAGE_CHARS];
704
705 get_usage_chars(class, usage);
706
707 printk(KERN_CONT " (");
708 __print_lock_name(class);
709 printk(KERN_CONT "){%s}-{%d:%d}", usage,
710 class->wait_type_outer ?: class->wait_type_inner,
711 class->wait_type_inner);
712}
713
714static void print_lockdep_cache(struct lockdep_map *lock)
715{
716 const char *name;
717 char str[KSYM_NAME_LEN];
718
719 name = lock->name;
720 if (!name)
721 name = __get_key_name(lock->key->subkeys, str);
722
723 printk(KERN_CONT "%s", name);
724}
725
726static void print_lock(struct held_lock *hlock)
727{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 *
	 * If this indeed happens, lets pretend it does not hurt to continue
	 * to print the lock unless the hlock class_idx does not point to a
	 * registered class. The rationale here is: since we don't attempt
	 * to distinguish whether we are in this situation, if it just
	 * happened we can't count on class_idx to tell either.
	 */
738 struct lock_class *lock = hlock_class(hlock);
739
740 if (!lock) {
741 printk(KERN_CONT "<RELEASED>\n");
742 return;
743 }
744
745 printk(KERN_CONT "%px", hlock->instance);
746 print_lock_name(lock);
747 printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
748}
749
750static void lockdep_print_held_locks(struct task_struct *p)
751{
752 int i, depth = READ_ONCE(p->lockdep_depth);
753
754 if (!depth)
755 printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
756 else
757 printk("%d lock%s held by %s/%d:\n", depth,
758 depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
	/*
	 * It's not reliable to print a task's held locks if it's not sleeping
	 * and it's not the current task.
	 */
763 if (p != current && task_is_running(p))
764 return;
765 for (i = 0; i < depth; i++) {
766 printk(" #%d: ", i);
767 print_lock(p->held_locks + i);
768 }
769}
770
771static void print_kernel_ident(void)
772{
773 printk("%s %.*s %s\n", init_utsname()->release,
774 (int)strcspn(init_utsname()->version, " "),
775 init_utsname()->version,
776 print_tainted());
777}
778
779static int very_verbose(struct lock_class *class)
780{
781#if VERY_VERBOSE
782 return class_filter(class);
783#endif
784 return 0;
785}

/*
 * Is this the address of a static object:
 */
790#ifdef __KERNEL__
791static int static_obj(const void *obj)
792{
793 unsigned long start = (unsigned long) &_stext,
794 end = (unsigned long) &_end,
795 addr = (unsigned long) obj;
796
797 if (arch_is_kernel_initmem_freed(addr))
798 return 0;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
819}
820#endif

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter. The caller must hold the graph
 * lock.
 */
827static int count_matching_names(struct lock_class *new_class)
828{
829 struct lock_class *class;
830 int count = 0;
831
832 if (!new_class->name)
833 return 0;
834
835 list_for_each_entry(class, &all_lock_classes, lock_entry) {
836 if (new_class->key - new_class->subclass == class->key)
837 return class->name_version;
838 if (class->name && !strcmp(class->name, new_class->name))
839 count = max(count, class->name_version);
840 }
841
842 return count + 1;
843}
844
845
846static noinstr struct lock_class *
847look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
848{
849 struct lockdep_subclass_key *key;
850 struct hlist_head *hash_head;
851 struct lock_class *class;
852
853 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
854 instrumentation_begin();
855 debug_locks_off();
856 printk(KERN_ERR
857 "BUG: looking up invalid subclass: %u\n", subclass);
858 printk(KERN_ERR
859 "turning off the locking correctness validator.\n");
860 dump_stack();
861 instrumentation_end();
862 return NULL;
863 }
864
865
866
867
868
869 if (unlikely(!lock->key))
870 return NULL;
871
872
873
874
875
876
877
878 BUILD_BUG_ON(sizeof(struct lock_class_key) >
879 sizeof(struct lockdep_map));
880
881 key = lock->key->subkeys + subclass;
882
883 hash_head = classhashentry(key);
884
885
886
887
888 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
889 return NULL;
890
891 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
892 if (class->key == key) {
893
894
895
896
897 WARN_ON_ONCE(class->name != lock->name &&
898 lock->key != &__lockdep_no_validate__);
899 return class;
900 }
901 }
902
903 return NULL;
904}
905
906
907
908
909
910
911static bool assign_lock_key(struct lockdep_map *lock)
912{
913 unsigned long can_addr, addr = (unsigned long)lock;
914
915#ifdef __KERNEL__
916
917
918
919
920
921
922
923 BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
924#endif
925
926 if (__is_kernel_percpu_address(addr, &can_addr))
927 lock->key = (void *)can_addr;
928 else if (__is_module_percpu_address(addr, &can_addr))
929 lock->key = (void *)can_addr;
930 else if (static_obj(lock))
931 lock->key = (void *)lock;
932 else {
933
934 debug_locks_off();
935 pr_err("INFO: trying to register non-static key.\n");
936 pr_err("The code is fine but needs lockdep annotation, or maybe\n");
937 pr_err("you didn't initialize this object before use?\n");
938 pr_err("turning off the locking correctness validator.\n");
939 dump_stack();
940 return false;
941 }
942
943 return true;
944}
945
946#ifdef CONFIG_DEBUG_LOCKDEP
947
948
949static bool in_list(struct list_head *e, struct list_head *h)
950{
951 struct list_head *f;
952
953 list_for_each(f, h) {
954 if (e == f)
955 return true;
956 }
957
958 return false;
959}
960
961
962
963
964
965static bool in_any_class_list(struct list_head *e)
966{
967 struct lock_class *class;
968 int i;
969
970 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
971 class = &lock_classes[i];
972 if (in_list(e, &class->locks_after) ||
973 in_list(e, &class->locks_before))
974 return true;
975 }
976 return false;
977}
978
979static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
980{
981 struct lock_list *e;
982
983 list_for_each_entry(e, h, entry) {
984 if (e->links_to != c) {
985 printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
986 c->name ? : "(?)",
987 (unsigned long)(e - list_entries),
988 e->links_to && e->links_to->name ?
989 e->links_to->name : "(?)",
990 e->class && e->class->name ? e->class->name :
991 "(?)");
992 return false;
993 }
994 }
995 return true;
996}
997
998#ifdef CONFIG_PROVE_LOCKING
999static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1000#endif
1001
1002static bool check_lock_chain_key(struct lock_chain *chain)
1003{
1004#ifdef CONFIG_PROVE_LOCKING
1005 u64 chain_key = INITIAL_CHAIN_KEY;
1006 int i;
1007
1008 for (i = chain->base; i < chain->base + chain->depth; i++)
1009 chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
1010
1011
1012
1013
1014 if (chain->chain_key != chain_key) {
1015 printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
1016 (unsigned long long)(chain - lock_chains),
1017 (unsigned long long)chain->chain_key,
1018 (unsigned long long)chain_key);
1019 return false;
1020 }
1021#endif
1022 return true;
1023}
1024
1025static bool in_any_zapped_class_list(struct lock_class *class)
1026{
1027 struct pending_free *pf;
1028 int i;
1029
1030 for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
1031 if (in_list(&class->lock_entry, &pf->zapped))
1032 return true;
1033 }
1034
1035 return false;
1036}
1037
1038static bool __check_data_structures(void)
1039{
1040 struct lock_class *class;
1041 struct lock_chain *chain;
1042 struct hlist_head *head;
1043 struct lock_list *e;
1044 int i;
1045
1046
1047 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1048 class = &lock_classes[i];
1049 if (!in_list(&class->lock_entry, &all_lock_classes) &&
1050 !in_list(&class->lock_entry, &free_lock_classes) &&
1051 !in_any_zapped_class_list(class)) {
1052 printk(KERN_INFO "class %px/%s is not in any class list\n",
1053 class, class->name ? : "(?)");
1054 return false;
1055 }
1056 }
1057
1058
1059 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1060 class = &lock_classes[i];
1061 if (!class_lock_list_valid(class, &class->locks_before))
1062 return false;
1063 if (!class_lock_list_valid(class, &class->locks_after))
1064 return false;
1065 }
1066
1067
1068 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
1069 head = chainhash_table + i;
1070 hlist_for_each_entry_rcu(chain, head, entry) {
1071 if (!check_lock_chain_key(chain))
1072 return false;
1073 }
1074 }
1075
1076
1077
1078
1079
1080 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1081 e = list_entries + i;
1082 if (!in_any_class_list(&e->entry)) {
1083 printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
1084 (unsigned int)(e - list_entries),
1085 e->class->name ? : "(?)",
1086 e->links_to->name ? : "(?)");
1087 return false;
1088 }
1089 }
1090
1091
1092
1093
1094
1095 for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1096 e = list_entries + i;
1097 if (in_any_class_list(&e->entry)) {
1098 printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
1099 (unsigned int)(e - list_entries),
1100 e->class && e->class->name ? e->class->name :
1101 "(?)",
1102 e->links_to && e->links_to->name ?
1103 e->links_to->name : "(?)");
1104 return false;
1105 }
1106 }
1107
1108 return true;
1109}
1110
1111int check_consistency = 0;
1112module_param(check_consistency, int, 0644);
1113
1114static void check_data_structures(void)
1115{
1116 static bool once = false;
1117
1118 if (check_consistency && !once) {
1119 if (!__check_data_structures()) {
1120 once = true;
1121 WARN_ON(once);
1122 }
1123 }
1124}
1125
1126#else
1127
1128static inline void check_data_structures(void) { }
1129
1130#endif
1131
1132static void init_chain_block_buckets(void);

/*
 * Initialize the lock_classes[] array elements, the free_lock_classes list
 * and also the delayed_free structure.
 */
1138static void init_data_structures_once(void)
1139{
1140 static bool __read_mostly ds_initialized, rcu_head_initialized;
1141 int i;
1142
1143 if (likely(rcu_head_initialized))
1144 return;
1145
1146 if (system_state >= SYSTEM_SCHEDULING) {
1147 init_rcu_head(&delayed_free.rcu_head);
1148 rcu_head_initialized = true;
1149 }
1150
1151 if (ds_initialized)
1152 return;
1153
1154 ds_initialized = true;
1155
1156 INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
1157 INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
1158
1159 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1160 list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
1161 INIT_LIST_HEAD(&lock_classes[i].locks_after);
1162 INIT_LIST_HEAD(&lock_classes[i].locks_before);
1163 }
1164 init_chain_block_buckets();
1165}
1166
1167static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
1168{
1169 unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
1170
1171 return lock_keys_hash + hash;
1172}
1173
/* Register a dynamically allocated key. */
1175void lockdep_register_key(struct lock_class_key *key)
1176{
1177 struct hlist_head *hash_head;
1178 struct lock_class_key *k;
1179 unsigned long flags;
1180
1181 if (WARN_ON_ONCE(static_obj(key)))
1182 return;
1183 hash_head = keyhashentry(key);
1184
1185 raw_local_irq_save(flags);
1186 if (!graph_lock())
1187 goto restore_irqs;
1188 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1189 if (WARN_ON_ONCE(k == key))
1190 goto out_unlock;
1191 }
1192 hlist_add_head_rcu(&key->hash_entry, hash_head);
1193out_unlock:
1194 graph_unlock();
1195restore_irqs:
1196 raw_local_irq_restore(flags);
1197}
1198EXPORT_SYMBOL_GPL(lockdep_register_key);
1199
/* Check whether a key has been registered as a dynamic key. */
1201static bool is_dynamic_key(const struct lock_class_key *key)
1202{
1203 struct hlist_head *hash_head;
1204 struct lock_class_key *k;
1205 bool found = false;
1206
1207 if (WARN_ON_ONCE(static_obj(key)))
1208 return false;
1209
	/*
	 * If lock debugging is disabled lock_keys_hash[] may contain
	 * pointers to memory that has already been freed. Avoid triggering
	 * a use-after-free in that case by returning early.
	 */
1215 if (!debug_locks)
1216 return true;
1217
1218 hash_head = keyhashentry(key);
1219
1220 rcu_read_lock();
1221 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1222 if (k == key) {
1223 found = true;
1224 break;
1225 }
1226 }
1227 rcu_read_unlock();
1228
1229 return found;
1230}
1231
/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
1237static struct lock_class *
1238register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1239{
1240 struct lockdep_subclass_key *key;
1241 struct hlist_head *hash_head;
1242 struct lock_class *class;
1243
1244 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1245
1246 class = look_up_lock_class(lock, subclass);
1247 if (likely(class))
1248 goto out_set_class_cache;
1249
1250 if (!lock->key) {
1251 if (!assign_lock_key(lock))
1252 return NULL;
1253 } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
1254 return NULL;
1255 }
1256
1257 key = lock->key->subkeys + subclass;
1258 hash_head = classhashentry(key);
1259
1260 if (!graph_lock()) {
1261 return NULL;
1262 }
1263
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
1267 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
1268 if (class->key == key)
1269 goto out_unlock_set;
1270 }
1271
1272 init_data_structures_once();
1273
	/* Allocate a new lock class and add it to the hash. */
1275 class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1276 lock_entry);
1277 if (!class) {
1278 if (!debug_locks_off_graph_unlock()) {
1279 return NULL;
1280 }
1281
1282 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1283 dump_stack();
1284 return NULL;
1285 }
1286 nr_lock_classes++;
1287 __set_bit(class - lock_classes, lock_classes_in_use);
1288 debug_atomic_inc(nr_unused_locks);
1289 class->key = key;
1290 class->name = lock->name;
1291 class->subclass = subclass;
1292 WARN_ON_ONCE(!list_empty(&class->locks_before));
1293 WARN_ON_ONCE(!list_empty(&class->locks_after));
1294 class->name_version = count_matching_names(class);
1295 class->wait_type_inner = lock->wait_type_inner;
1296 class->wait_type_outer = lock->wait_type_outer;
1297 class->lock_type = lock->lock_type;
1298
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
1302 hlist_add_head_rcu(&class->hash_entry, hash_head);
1303
	/*
	 * Add it to the global list of classes:
	 */
1307 list_move_tail(&class->lock_entry, &all_lock_classes);
1308
1309 if (verbose(class)) {
1310 graph_unlock();
1311
1312 printk("\nnew class %px: %s", class->key, class->name);
1313 if (class->name_version > 1)
1314 printk(KERN_CONT "#%d", class->name_version);
1315 printk(KERN_CONT "\n");
1316 dump_stack();
1317
1318 if (!graph_lock()) {
1319 return NULL;
1320 }
1321 }
1322out_unlock_set:
1323 graph_unlock();
1324
1325out_set_class_cache:
1326 if (!subclass || force)
1327 lock->class_cache[0] = class;
1328 else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
1329 lock->class_cache[subclass] = class;

	/*
	 * Hash collision, did we smoke some? We found a class with a matching
	 * hash but the subclass -- which is hashed in -- didn't match.
	 */
1335 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1336 return NULL;
1337
1338 return class;
1339}
1340
1341#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
1346static struct lock_list *alloc_list_entry(void)
1347{
1348 int idx = find_first_zero_bit(list_entries_in_use,
1349 ARRAY_SIZE(list_entries));
1350
1351 if (idx >= ARRAY_SIZE(list_entries)) {
1352 if (!debug_locks_off_graph_unlock())
1353 return NULL;
1354
1355 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
1356 dump_stack();
1357 return NULL;
1358 }
1359 nr_list_entries++;
1360 __set_bit(idx, list_entries_in_use);
1361 return list_entries + idx;
1362}
1363
/*
 * Add a new dependency to the head of the list:
 */
1367static int add_lock_to_list(struct lock_class *this,
1368 struct lock_class *links_to, struct list_head *head,
1369 unsigned long ip, u16 distance, u8 dep,
1370 const struct lock_trace *trace)
1371{
1372 struct lock_list *entry;
1373
1374
1375
1376
1377 entry = alloc_list_entry();
1378 if (!entry)
1379 return 0;
1380
1381 entry->class = this;
1382 entry->links_to = links_to;
1383 entry->dep = dep;
1384 entry->distance = distance;
1385 entry->trace = trace;
1386
1387
1388
1389
1390
1391 list_add_tail_rcu(&entry->entry, head);
1392
1393 return 1;
1394}
1395
1396
1397
1398
1399#define MAX_CIRCULAR_QUEUE_SIZE (1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS)
1400#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
1401
/*
 * The circular_queue and helpers are used to implement graph
 * breadth-first search (BFS) algorithm, by which we can determine
 * whether there is a path from a lock to another. In deadlock checks,
 * a path from the next lock to be acquired to a previous held lock
 * indicates that adding the <prev> -> <next> lock dependency will
 * produce a circle in the graph. Breadth-first search instead of
 * depth-first search is used in order to find the shortest (circular)
 * path.
 */
1412struct circular_queue {
1413 struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
1414 unsigned int front, rear;
1415};
1416
1417static struct circular_queue lock_cq;
1418
1419unsigned int max_bfs_queue_depth;
1420
1421static unsigned int lockdep_dependency_gen_id;
1422
1423static inline void __cq_init(struct circular_queue *cq)
1424{
1425 cq->front = cq->rear = 0;
1426 lockdep_dependency_gen_id++;
1427}
1428
1429static inline int __cq_empty(struct circular_queue *cq)
1430{
1431 return (cq->front == cq->rear);
1432}
1433
1434static inline int __cq_full(struct circular_queue *cq)
1435{
1436 return ((cq->rear + 1) & CQ_MASK) == cq->front;
1437}
1438
1439static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
1440{
1441 if (__cq_full(cq))
1442 return -1;
1443
1444 cq->element[cq->rear] = elem;
1445 cq->rear = (cq->rear + 1) & CQ_MASK;
1446 return 0;
1447}
1448
1449
1450
1451
1452
1453static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
1454{
1455 struct lock_list * lock;
1456
1457 if (__cq_empty(cq))
1458 return NULL;
1459
1460 lock = cq->element[cq->front];
1461 cq->front = (cq->front + 1) & CQ_MASK;
1462
1463 return lock;
1464}
1465
1466static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
1467{
1468 return (cq->rear - cq->front) & CQ_MASK;
1469}
1470
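/*
 * BFS bookkeeping: a lock_list entry is marked accessed by stamping its
 * class with the current search generation (lockdep_dependency_gen_id), so
 * each class is visited at most once per search.
 */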
1471static inline void mark_lock_accessed(struct lock_list *lock)
1472{
1473 lock->class->dep_gen_id = lockdep_dependency_gen_id;
1474}
1475
1476static inline void visit_lock_entry(struct lock_list *lock,
1477 struct lock_list *parent)
1478{
1479 lock->parent = parent;
1480}
1481
1482static inline unsigned long lock_accessed(struct lock_list *lock)
1483{
1484 return lock->class->dep_gen_id == lockdep_dependency_gen_id;
1485}
1486
1487static inline struct lock_list *get_lock_parent(struct lock_list *child)
1488{
1489 return child->parent;
1490}
1491
1492static inline int get_lock_depth(struct lock_list *child)
1493{
1494 int depth = 0;
1495 struct lock_list *parent;
1496
1497 while ((parent = get_lock_parent(child))) {
1498 child = parent;
1499 depth++;
1500 }
1501 return depth;
1502}
1503
/*
 * Return the forward or backward dependency list.
 *
 * @lock:   the lock_list to get its class's dependency list
 * @offset: the offset to struct lock_class to determine whether it is
 *          locks_after or locks_before
 */
1511static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
1512{
1513 void *lock_class = lock->class;
1514
1515 return lock_class + offset;
1516}
1517
/*
 * Return values of a BFS search:
 *
 * BFS_E* indicates an error
 * BFS_R* indicates a result (match or not)
 *
 * BFS_EINVALIDNODE: Find an invalid node in the graph.
 *
 * BFS_EQUEUEFULL: The queue is full while doing the bfs.
 *
 * BFS_RMATCH: Find the matched node in the graph, and put that node into
 *             *@target_entry.
 *
 * BFS_RNOMATCH: Haven't found the matched node and keep *@target_entry
 *               unchanged.
 */
1533enum bfs_result {
1534 BFS_EINVALIDNODE = -2,
1535 BFS_EQUEUEFULL = -1,
1536 BFS_RMATCH = 0,
1537 BFS_RNOMATCH = 1,
1538};
1539
/*
 * bfs_result < 0 means error
 */
1543static inline bool bfs_error(enum bfs_result res)
1544{
1545 return res < 0;
1546}
1547
/*
 * DEP_*_BIT in lock_list::dep
 *
 * For dependency @prev -> @next:
 *
 *   SR: @prev is shared reader (->read != 0) and @next is recursive reader
 *       (->read == 2)
 *   ER: @prev is exclusive locker (->read == 0) and @next is recursive reader
 *   SN: @prev is shared reader and @next is non-recursive locker (->read != 2)
 *   EN: @prev is exclusive locker and @next is non-recursive locker
 *
 * Note that we define the value of DEP_*_BITs so that:
 *   bit0 is prev->read == 0
 *   bit1 is next->read != 2
 */
1563#define DEP_SR_BIT (0 + (0 << 1))
1564#define DEP_ER_BIT (1 + (0 << 1))
1565#define DEP_SN_BIT (0 + (1 << 1))
1566#define DEP_EN_BIT (1 + (1 << 1))
1567
1568#define DEP_SR_MASK (1U << (DEP_SR_BIT))
1569#define DEP_ER_MASK (1U << (DEP_ER_BIT))
1570#define DEP_SN_MASK (1U << (DEP_SN_BIT))
1571#define DEP_EN_MASK (1U << (DEP_EN_BIT))
1572
1573static inline unsigned int
1574__calc_dep_bit(struct held_lock *prev, struct held_lock *next)
1575{
1576 return (prev->read == 0) + ((next->read != 2) << 1);
1577}
1578
1579static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
1580{
1581 return 1U << __calc_dep_bit(prev, next);
1582}
1583
1584
1585
1586
1587
1588static inline unsigned int
1589__calc_dep_bitb(struct held_lock *prev, struct held_lock *next)
1590{
1591 return (next->read != 2) + ((prev->read == 0) << 1);
1592}
1593
1594static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
1595{
1596 return 1U << __calc_dep_bitb(prev, next);
1597}
1598
1599
1600
1601
1602
1603static inline void __bfs_init_root(struct lock_list *lock,
1604 struct lock_class *class)
1605{
1606 lock->class = class;
1607 lock->parent = NULL;
1608 lock->only_xr = 0;
1609}
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619static inline void bfs_init_root(struct lock_list *lock,
1620 struct held_lock *hlock)
1621{
1622 __bfs_init_root(lock, hlock_class(hlock));
1623 lock->only_xr = (hlock->read == 2);
1624}
1625
1626
1627
1628
1629
1630
1631
1632
1633static inline void bfs_init_rootb(struct lock_list *lock,
1634 struct held_lock *hlock)
1635{
1636 __bfs_init_root(lock, hlock_class(hlock));
1637 lock->only_xr = (hlock->read != 0);
1638}
1639
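/*
 * Return the next sibling dependency of @lock in its parent's dependency
 * list (forwards or backwards depending on @offset), or NULL when the list
 * is exhausted.
 */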
1640static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset)
1641{
1642 if (!lock || !lock->parent)
1643 return NULL;
1644
1645 return list_next_or_null_rcu(get_dep_list(lock->parent, offset),
1646 &lock->entry, struct lock_list, entry);
1647}
1648
/*
 * Breadth-First Search to find a strong path in the dependency graph.
 *
 * @source_entry: the source of the path, we have to walk from this
 * @data: data used for the second parameter of @match function
 * @match: match function for the search
 * @skip: skip function for the search; a lock_list for which this returns
 *        true is not matched and not expanded any further
 * @target_entry: pointer to the target of a matched path
 * @offset: the offset to struct lock_class to determine whether it is
 *          locks_after or locks_before
 *
 * We may have multiple edges (considering different kinds of dependencies,
 * e.g. ER and SN) between two nodes in the dependency graph. But
 * only the strong dependency path in the graph is relevant to deadlocks. A
 * strong path is one that has no -(*R)-> dependency followed by a -(S*)->
 * dependency, and __bfs() only extends the search along strong paths.
 *
 * Note that only the first entry of each dependency list is enqueued; the
 * remaining siblings are reached via __bfs_next(), which keeps the queue
 * small.
 */
1676static enum bfs_result __bfs(struct lock_list *source_entry,
1677 void *data,
1678 bool (*match)(struct lock_list *entry, void *data),
1679 bool (*skip)(struct lock_list *entry, void *data),
1680 struct lock_list **target_entry,
1681 int offset)
1682{
1683 struct circular_queue *cq = &lock_cq;
1684 struct lock_list *lock = NULL;
1685 struct lock_list *entry;
1686 struct list_head *head;
1687 unsigned int cq_depth;
1688 bool first;
1689
1690 lockdep_assert_locked();
1691
1692 __cq_init(cq);
1693 __cq_enqueue(cq, source_entry);
1694
1695 while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) {
1696 if (!lock->class)
1697 return BFS_EINVALIDNODE;
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708 if (lock_accessed(lock))
1709 continue;
1710 else
1711 mark_lock_accessed(lock);
1712
1713
1714
1715
1716
1717 if (lock->parent) {
1718 u8 dep = lock->dep;
1719 bool prev_only_xr = lock->parent->only_xr;
1720
1721
1722
1723
1724
1725
1726 if (prev_only_xr)
1727 dep &= ~(DEP_SR_MASK | DEP_SN_MASK);
1728
1729
1730 if (!dep)
1731 continue;
1732
1733
1734 lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
1735 }
1736
1737
1738
1739
1740
1741
1742
1743 if (skip && skip(lock, data))
1744 continue;
1745
1746 if (match(lock, data)) {
1747 *target_entry = lock;
1748 return BFS_RMATCH;
1749 }
1750
1751
1752
1753
1754
1755
1756 first = true;
1757 head = get_dep_list(lock, offset);
1758 list_for_each_entry_rcu(entry, head, entry) {
1759 visit_lock_entry(entry, lock);
1760
1761
1762
1763
1764
1765
1766
1767 if (!first)
1768 continue;
1769
1770 first = false;
1771
1772 if (__cq_enqueue(cq, entry))
1773 return BFS_EQUEUEFULL;
1774
1775 cq_depth = __cq_get_elem_count(cq);
1776 if (max_bfs_queue_depth < cq_depth)
1777 max_bfs_queue_depth = cq_depth;
1778 }
1779 }
1780
1781 return BFS_RNOMATCH;
1782}
1783
1784static inline enum bfs_result
1785__bfs_forwards(struct lock_list *src_entry,
1786 void *data,
1787 bool (*match)(struct lock_list *entry, void *data),
1788 bool (*skip)(struct lock_list *entry, void *data),
1789 struct lock_list **target_entry)
1790{
1791 return __bfs(src_entry, data, match, skip, target_entry,
1792 offsetof(struct lock_class, locks_after));
1793
1794}
1795
1796static inline enum bfs_result
1797__bfs_backwards(struct lock_list *src_entry,
1798 void *data,
1799 bool (*match)(struct lock_list *entry, void *data),
1800 bool (*skip)(struct lock_list *entry, void *data),
1801 struct lock_list **target_entry)
1802{
1803 return __bfs(src_entry, data, match, skip, target_entry,
1804 offsetof(struct lock_class, locks_before));
1805
1806}
1807
1808static void print_lock_trace(const struct lock_trace *trace,
1809 unsigned int spaces)
1810{
1811 stack_trace_print(trace->entries, trace->nr_entries, spaces);
1812}
1813
/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
1818static noinline void
1819print_circular_bug_entry(struct lock_list *target, int depth)
1820{
1821 if (debug_locks_silent)
1822 return;
1823 printk("\n-> #%u", depth);
1824 print_lock_name(target->class);
1825 printk(KERN_CONT ":\n");
1826 print_lock_trace(target->trace, 6);
1827}
1828
1829static void
1830print_circular_lock_scenario(struct held_lock *src,
1831 struct held_lock *tgt,
1832 struct lock_list *prt)
1833{
1834 struct lock_class *source = hlock_class(src);
1835 struct lock_class *target = hlock_class(tgt);
1836 struct lock_class *parent = prt->class;
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851 if (parent != source) {
1852 printk("Chain exists of:\n ");
1853 __print_lock_name(source);
1854 printk(KERN_CONT " --> ");
1855 __print_lock_name(parent);
1856 printk(KERN_CONT " --> ");
1857 __print_lock_name(target);
1858 printk(KERN_CONT "\n\n");
1859 }
1860
1861 printk(" Possible unsafe locking scenario:\n\n");
1862 printk(" CPU0 CPU1\n");
1863 printk(" ---- ----\n");
1864 printk(" lock(");
1865 __print_lock_name(target);
1866 printk(KERN_CONT ");\n");
1867 printk(" lock(");
1868 __print_lock_name(parent);
1869 printk(KERN_CONT ");\n");
1870 printk(" lock(");
1871 __print_lock_name(target);
1872 printk(KERN_CONT ");\n");
1873 printk(" lock(");
1874 __print_lock_name(source);
1875 printk(KERN_CONT ");\n");
1876 printk("\n *** DEADLOCK ***\n\n");
1877}
1878
/*
 * When a circular dependency is detected, print the
 * header first:
 */
1883static noinline void
1884print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1885 struct held_lock *check_src,
1886 struct held_lock *check_tgt)
1887{
1888 struct task_struct *curr = current;
1889
1890 if (debug_locks_silent)
1891 return;
1892
1893 pr_warn("\n");
1894 pr_warn("======================================================\n");
1895 pr_warn("WARNING: possible circular locking dependency detected\n");
1896 print_kernel_ident();
1897 pr_warn("------------------------------------------------------\n");
1898 pr_warn("%s/%d is trying to acquire lock:\n",
1899 curr->comm, task_pid_nr(curr));
1900 print_lock(check_src);
1901
1902 pr_warn("\nbut task is already holding lock:\n");
1903
1904 print_lock(check_tgt);
1905 pr_warn("\nwhich lock already depends on the new lock.\n\n");
1906 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1907
1908 print_circular_bug_entry(entry, depth);
1909}
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937static inline bool hlock_equal(struct lock_list *entry, void *data)
1938{
1939 struct held_lock *hlock = (struct held_lock *)data;
1940
1941 return hlock_class(hlock) == entry->class &&
1942 (hlock->read == 2 ||
1943 !entry->only_xr);
1944}
1945
/*
 * We are about to add B -> A into the dependency graph, and in __bfs() a
 * strong dependency path A -> .. -> B is found: hlock_class equals
 * entry->class.
 *
 * We will have a deadlock case (conflict) if A -> .. -> B -> A is a strong
 * dependency cycle, that means:
 *
 * Either
 *
 *     a) B -> A is -(E*)->
 *
 * or
 *
 *     b) A -> .. -> B is -(*N)-> (i.e. A -> .. -(*N)-> B)
 *
 * as then we don't have -(*R)-> -(S*)-> in the cycle.
 */
1964static inline bool hlock_conflict(struct lock_list *entry, void *data)
1965{
1966 struct held_lock *hlock = (struct held_lock *)data;
1967
1968 return hlock_class(hlock) == entry->class &&
1969 (hlock->read == 0 ||
1970 !entry->only_xr);
1971}
1972
1973static noinline void print_circular_bug(struct lock_list *this,
1974 struct lock_list *target,
1975 struct held_lock *check_src,
1976 struct held_lock *check_tgt)
1977{
1978 struct task_struct *curr = current;
1979 struct lock_list *parent;
1980 struct lock_list *first_parent;
1981 int depth;
1982
1983 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1984 return;
1985
1986 this->trace = save_trace();
1987 if (!this->trace)
1988 return;
1989
1990 depth = get_lock_depth(target);
1991
1992 print_circular_bug_header(target, depth, check_src, check_tgt);
1993
1994 parent = get_lock_parent(target);
1995 first_parent = parent;
1996
1997 while (parent) {
1998 print_circular_bug_entry(parent, --depth);
1999 parent = get_lock_parent(parent);
2000 }
2001
2002 printk("\nother info that might help us debug this:\n\n");
2003 print_circular_lock_scenario(check_src, check_tgt,
2004 first_parent);
2005
2006 lockdep_print_held_locks(curr);
2007
2008 printk("\nstack backtrace:\n");
2009 dump_stack();
2010}
2011
2012static noinline void print_bfs_bug(int ret)
2013{
2014 if (!debug_locks_off_graph_unlock())
2015 return;
2016
2017
2018
2019
2020 WARN(1, "lockdep bfs error:%d\n", ret);
2021}
2022
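/* BFS "match" callback that only counts visited entries and never matches. */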
2023static bool noop_count(struct lock_list *entry, void *data)
2024{
2025 (*(unsigned long *)data)++;
2026 return false;
2027}
2028
2029static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
2030{
2031 unsigned long count = 0;
2032 struct lock_list *target_entry;
2033
2034 __bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);
2035
2036 return count;
2037}
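
/*
 * Count all forward dependencies of @class, taking the graph lock around
 * the BFS walk.
 */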
2038unsigned long lockdep_count_forward_deps(struct lock_class *class)
2039{
2040 unsigned long ret, flags;
2041 struct lock_list this;
2042
2043 __bfs_init_root(&this, class);
2044
2045 raw_local_irq_save(flags);
2046 lockdep_lock();
2047 ret = __lockdep_count_forward_deps(&this);
2048 lockdep_unlock();
2049 raw_local_irq_restore(flags);
2050
2051 return ret;
2052}
2053
2054static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
2055{
2056 unsigned long count = 0;
2057 struct lock_list *target_entry;
2058
2059 __bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);
2060
2061 return count;
2062}
2063
2064unsigned long lockdep_count_backward_deps(struct lock_class *class)
2065{
2066 unsigned long ret, flags;
2067 struct lock_list this;
2068
2069 __bfs_init_root(&this, class);
2070
2071 raw_local_irq_save(flags);
2072 lockdep_lock();
2073 ret = __lockdep_count_backward_deps(&this);
2074 lockdep_unlock();
2075 raw_local_irq_restore(flags);
2076
2077 return ret;
2078}
2079
2080
2081
2082
2083
2084static noinline enum bfs_result
2085check_path(struct held_lock *target, struct lock_list *src_entry,
2086 bool (*match)(struct lock_list *entry, void *data),
2087 bool (*skip)(struct lock_list *entry, void *data),
2088 struct lock_list **target_entry)
2089{
2090 enum bfs_result ret;
2091
2092 ret = __bfs_forwards(src_entry, target, match, skip, target_entry);
2093
2094 if (unlikely(bfs_error(ret)))
2095 print_bfs_bug(ret);
2096
2097 return ret;
2098}
2099
/*
 * Prove that the dependency graph starting at <src> can not
 * lead to <target>. If it can, there is a circle when adding
 * <target> -> <src> dependency.
 *
 * Print an error and return BFS_RMATCH if it does.
 */
2107static noinline enum bfs_result
2108check_noncircular(struct held_lock *src, struct held_lock *target,
2109 struct lock_trace **const trace)
2110{
2111 enum bfs_result ret;
2112 struct lock_list *target_entry;
2113 struct lock_list src_entry;
2114
2115 bfs_init_root(&src_entry, src);
2116
2117 debug_atomic_inc(nr_cyclic_checks);
2118
2119 ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry);
2120
2121 if (unlikely(ret == BFS_RMATCH)) {
2122 if (!*trace) {
2123
2124
2125
2126
2127
2128 *trace = save_trace();
2129 }
2130
2131 print_circular_bug(&src_entry, target_entry, src, target);
2132 }
2133
2134 return ret;
2135}
2136
2137#ifdef CONFIG_TRACE_IRQFLAGS
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180static inline bool usage_accumulate(struct lock_list *entry, void *mask)
2181{
2182 if (!entry->only_xr)
2183 *(unsigned long *)mask |= entry->class->usage_mask;
2184 else
2185 *(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ);
2186
2187 return false;
2188}
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199static inline bool usage_match(struct lock_list *entry, void *mask)
2200{
2201 if (!entry->only_xr)
2202 return !!(entry->class->usage_mask & *(unsigned long *)mask);
2203 else
2204 return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
2205}
2206
2207static inline bool usage_skip(struct lock_list *entry, void *mask)
2208{
	/*
	 * Skip local_lock() for irq inversion detection.
	 *
	 * For !RT, local_lock() is not a real lock, so it doesn't carry any
	 * dependency.
	 *
	 * For RT, an irq inversion happens when we have lock A and B, and on
	 * some CPU we can have:
	 *
	 *	lock(A);
	 *	<interrupted>
	 *	  lock(B);
	 *
	 * where lock(B) cannot sleep, and we have a dependency B -> ... -> A.
	 *
	 * Now we prove local_lock() cannot exist in that dependency. First we
	 * have the observation for any lock chain L1 -> ... -> Ln, for any
	 * 1 <= i <= n, Li.inner_wait_type <= L1.inner_wait_type, otherwise
	 * wait context check will complain. And since B is not a sleep lock,
	 * therefore B.inner_wait_type >= 2, and since the inner_wait_type of
	 * local_lock() is 3, which is greater than 2, therefore there is no
	 * way the local_lock() exists in the dependency B -> ... -> A.
	 *
	 * As a result, we will skip local_lock() if we find it in the
	 * dependency path.
	 */
2235 if (entry->class->lock_type == LD_LOCK_PERCPU) {
2236 if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
2237 return false;
2238
2239 return true;
2240 }
2241
2242 return false;
2243}
2244
/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @usage_mask.
 *
 * Return BFS_RMATCH if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 */
2252static enum bfs_result
2253find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
2254 struct lock_list **target_entry)
2255{
2256 enum bfs_result result;
2257
2258 debug_atomic_inc(nr_find_usage_forwards_checks);
2259
2260 result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2261
2262 return result;
2263}
2264
/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @usage_mask.
 */
2269static enum bfs_result
2270find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
2271 struct lock_list **target_entry)
2272{
2273 enum bfs_result result;
2274
2275 debug_atomic_inc(nr_find_usage_backwards_checks);
2276
2277 result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2278
2279 return result;
2280}
2281
2282static void print_lock_class_header(struct lock_class *class, int depth)
2283{
2284 int bit;
2285
2286 printk("%*s->", depth, "");
2287 print_lock_name(class);
2288#ifdef CONFIG_DEBUG_LOCKDEP
2289 printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
2290#endif
2291 printk(KERN_CONT " {\n");
2292
2293 for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
2294 if (class->usage_mask & (1 << bit)) {
2295 int len = depth;
2296
2297 len += printk("%*s %s", depth, "", usage_str[bit]);
2298 len += printk(KERN_CONT " at:\n");
2299 print_lock_trace(class->usage_traces[bit], len);
2300 }
2301 }
2302 printk("%*s }\n", depth, "");
2303
2304 printk("%*s ... key at: [<%px>] %pS\n",
2305 depth, "", class->key, class->key);
2306}
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360static void __used
2361print_shortest_lock_dependencies(struct lock_list *leaf,
2362 struct lock_list *root)
2363{
2364 struct lock_list *entry = leaf;
2365 int depth;
2366
2367
2368 depth = get_lock_depth(leaf);
2369
2370 do {
2371 print_lock_class_header(entry->class, depth);
2372 printk("%*s ... acquired at:\n", depth, "");
2373 print_lock_trace(entry->trace, 2);
2374 printk("\n");
2375
2376 if (depth == 0 && (entry != root)) {
2377 printk("lockdep:%s bad path found in chain graph\n", __func__);
2378 break;
2379 }
2380
2381 entry = get_lock_parent(entry);
2382 depth--;
2383 } while (entry && (depth >= 0));
2384}
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406static void __used
2407print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
2408 struct lock_list *root)
2409{
2410 struct lock_list *entry = leaf;
2411 const struct lock_trace *trace = NULL;
2412 int depth;
2413
2414
2415 depth = get_lock_depth(leaf);
2416
2417 do {
2418 print_lock_class_header(entry->class, depth);
2419 if (trace) {
2420 printk("%*s ... acquired at:\n", depth, "");
2421 print_lock_trace(trace, 2);
2422 printk("\n");
2423 }
2424
2425
2426
2427
2428
2429 trace = entry->trace;
2430
2431 if (depth == 0 && (entry != root)) {
2432 printk("lockdep:%s bad path found in chain graph\n", __func__);
2433 break;
2434 }
2435
2436 entry = get_lock_parent(entry);
2437 depth--;
2438 } while (entry && (depth >= 0));
2439}
2440
2441static void
2442print_irq_lock_scenario(struct lock_list *safe_entry,
2443 struct lock_list *unsafe_entry,
2444 struct lock_class *prev_class,
2445 struct lock_class *next_class)
2446{
2447 struct lock_class *safe_class = safe_entry->class;
2448 struct lock_class *unsafe_class = unsafe_entry->class;
2449 struct lock_class *middle_class = prev_class;
2450
2451 if (middle_class == safe_class)
2452 middle_class = next_class;
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467 if (middle_class != unsafe_class) {
2468 printk("Chain exists of:\n ");
2469 __print_lock_name(safe_class);
2470 printk(KERN_CONT " --> ");
2471 __print_lock_name(middle_class);
2472 printk(KERN_CONT " --> ");
2473 __print_lock_name(unsafe_class);
2474 printk(KERN_CONT "\n\n");
2475 }
2476
2477 printk(" Possible interrupt unsafe locking scenario:\n\n");
2478 printk(" CPU0 CPU1\n");
2479 printk(" ---- ----\n");
2480 printk(" lock(");
2481 __print_lock_name(unsafe_class);
2482 printk(KERN_CONT ");\n");
2483 printk(" local_irq_disable();\n");
2484 printk(" lock(");
2485 __print_lock_name(safe_class);
2486 printk(KERN_CONT ");\n");
2487 printk(" lock(");
2488 __print_lock_name(middle_class);
2489 printk(KERN_CONT ");\n");
2490 printk(" <Interrupt>\n");
2491 printk(" lock(");
2492 __print_lock_name(safe_class);
2493 printk(KERN_CONT ");\n");
2494 printk("\n *** DEADLOCK ***\n\n");
2495}
2496
2497static void
2498print_bad_irq_dependency(struct task_struct *curr,
2499 struct lock_list *prev_root,
2500 struct lock_list *next_root,
2501 struct lock_list *backwards_entry,
2502 struct lock_list *forwards_entry,
2503 struct held_lock *prev,
2504 struct held_lock *next,
2505 enum lock_usage_bit bit1,
2506 enum lock_usage_bit bit2,
2507 const char *irqclass)
2508{
2509 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2510 return;
2511
2512 pr_warn("\n");
2513 pr_warn("=====================================================\n");
2514 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
2515 irqclass, irqclass);
2516 print_kernel_ident();
2517 pr_warn("-----------------------------------------------------\n");
2518 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
2519 curr->comm, task_pid_nr(curr),
2520 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
2521 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
2522 lockdep_hardirqs_enabled(),
2523 curr->softirqs_enabled);
2524 print_lock(next);
2525
2526 pr_warn("\nand this task is already holding:\n");
2527 print_lock(prev);
2528 pr_warn("which would create a new lock dependency:\n");
2529 print_lock_name(hlock_class(prev));
2530 pr_cont(" ->");
2531 print_lock_name(hlock_class(next));
2532 pr_cont("\n");
2533
2534 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
2535 irqclass);
2536 print_lock_name(backwards_entry->class);
2537 pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
2538
2539 print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
2540
2541 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
2542 print_lock_name(forwards_entry->class);
2543 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
2544 pr_warn("...");
2545
2546 print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
2547
2548 pr_warn("\nother info that might help us debug this:\n\n");
2549 print_irq_lock_scenario(backwards_entry, forwards_entry,
2550 hlock_class(prev), hlock_class(next));
2551
2552 lockdep_print_held_locks(curr);
2553
2554 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
2555 print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
2556
2557 pr_warn("\nthe dependencies between the lock to be acquired");
2558 pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
2559 next_root->trace = save_trace();
2560 if (!next_root->trace)
2561 return;
2562 print_shortest_lock_dependencies(forwards_entry, next_root);
2563
2564 pr_warn("\nstack backtrace:\n");
2565 dump_stack();
2566}
2567
2568static const char *state_names[] = {
2569#define LOCKDEP_STATE(__STATE) \
2570 __stringify(__STATE),
2571#include "lockdep_states.h"
2572#undef LOCKDEP_STATE
2573};
2574
2575static const char *state_rnames[] = {
2576#define LOCKDEP_STATE(__STATE) \
2577 __stringify(__STATE)"-READ",
2578#include "lockdep_states.h"
2579#undef LOCKDEP_STATE
2580};
2581
2582static inline const char *state_name(enum lock_usage_bit bit)
2583{
2584 if (bit & LOCK_USAGE_READ_MASK)
2585 return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
2586 else
2587 return state_names[bit >> LOCK_USAGE_DIR_MASK];
2588}
2589
2590
2591
2592
2593
2594
2595
2596
2597static int exclusive_bit(int new_bit)
2598{
2599 int state = new_bit & LOCK_USAGE_STATE_MASK;
2600 int dir = new_bit & LOCK_USAGE_DIR_MASK;
2601
	/*
	 * keep state, bit flip the direction and strip read.
	 */
2605 return state | (dir ^ LOCK_USAGE_DIR_MASK);
2606}
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622static unsigned long invert_dir_mask(unsigned long mask)
2623{
2624 unsigned long excl = 0;
2625
2626
2627 excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
2628 excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
2629
2630 return excl;
2631}
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661static unsigned long exclusive_mask(unsigned long mask)
2662{
2663 unsigned long excl = invert_dir_mask(mask);
2664
2665 excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2666 excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2667
2668 return excl;
2669}
2670
2671
2672
2673
2674
2675
2676
2677
2678static unsigned long original_mask(unsigned long mask)
2679{
2680 unsigned long excl = invert_dir_mask(mask);
2681
2682
2683 excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2684 excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2685
2686 return excl;
2687}
2688
2689
2690
2691
2692
2693static int find_exclusive_match(unsigned long mask,
2694 unsigned long excl_mask,
2695 enum lock_usage_bit *bitp,
2696 enum lock_usage_bit *excl_bitp)
2697{
2698 int bit, excl, excl_read;
2699
2700 for_each_set_bit(bit, &mask, LOCK_USED) {
2701
2702
2703
2704
2705
2706 excl = exclusive_bit(bit);
2707 excl_read = excl | LOCK_USAGE_READ_MASK;
2708 if (excl_mask & lock_flag(excl)) {
2709 *bitp = bit;
2710 *excl_bitp = excl;
2711 return 0;
2712 } else if (excl_mask & lock_flag(excl_read)) {
2713 *bitp = bit;
2714 *excl_bitp = excl_read;
2715 return 0;
2716 }
2717 }
2718 return -1;
2719}
2720
2721
2722
2723
2724
2725
2726
2727static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2728 struct held_lock *next)
2729{
2730 unsigned long usage_mask = 0, forward_mask, backward_mask;
2731 enum lock_usage_bit forward_bit = 0, backward_bit = 0;
2732 struct lock_list *target_entry1;
2733 struct lock_list *target_entry;
2734 struct lock_list this, that;
2735 enum bfs_result ret;
2736
2737
2738
2739
2740
2741 bfs_init_rootb(&this, prev);
2742
2743 ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
2744 if (bfs_error(ret)) {
2745 print_bfs_bug(ret);
2746 return 0;
2747 }
2748
2749 usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2750 if (!usage_mask)
2751 return 1;
2752
2753
2754
2755
2756
2757 forward_mask = exclusive_mask(usage_mask);
2758
2759 bfs_init_root(&that, next);
2760
2761 ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2762 if (bfs_error(ret)) {
2763 print_bfs_bug(ret);
2764 return 0;
2765 }
2766 if (ret == BFS_RNOMATCH)
2767 return 1;
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
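	/*
	 * The forward search found a lock with IRQ-unsafe usage reachable
	 * from @next. Translate its enabled-IRQ bits into the conflicting
	 * usage bits and search backwards from @prev for a lock that
	 * actually has one of them set.
	 */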
2784 backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
2785
2786 ret = find_usage_backwards(&this, backward_mask, &target_entry);
2787 if (bfs_error(ret)) {
2788 print_bfs_bug(ret);
2789 return 0;
2790 }
2791 if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH))
2792 return 1;
2793
2794
2795
2796
2797
2798 ret = find_exclusive_match(target_entry->class->usage_mask,
2799 target_entry1->class->usage_mask,
2800 &backward_bit, &forward_bit);
2801 if (DEBUG_LOCKS_WARN_ON(ret == -1))
2802 return 1;
2803
2804 print_bad_irq_dependency(curr, &this, &that,
2805 target_entry, target_entry1,
2806 prev, next,
2807 backward_bit, forward_bit,
2808 state_name(backward_bit));
2809
2810 return 0;
2811}
2812
2813#else
2814
2815static inline int check_irq_usage(struct task_struct *curr,
2816 struct held_lock *prev, struct held_lock *next)
2817{
2818 return 1;
2819}
2820
2821static inline bool usage_skip(struct lock_list *entry, void *mask)
2822{
2823 return false;
2824}
2825
2826#endif
2827
2828#ifndef CONFIG_LOCKDEP_SMALL
2829
2830
2831
2832
2833
2834
2835
2836
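/*
 * Check whether the dependency graph already contains a path from @src to
 * @target that is at least as strong as the new <src> -> <target> link. If
 * so, the new link is redundant and need not be recorded. This check is
 * compiled out when CONFIG_LOCKDEP_SMALL is set.
 */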
2837static noinline enum bfs_result
2838check_redundant(struct held_lock *src, struct held_lock *target)
2839{
2840 enum bfs_result ret;
2841 struct lock_list *target_entry;
2842 struct lock_list src_entry;
2843
2844 bfs_init_root(&src_entry, src);
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
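	/*
	 * A path only makes the new link redundant if it is at least as
	 * strong: if @src is taken for write (exclusive), only consider
	 * paths that start with an exclusive dependency; if it is a read
	 * lock, both shared and exclusive first steps qualify.
	 */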
2855 src_entry.only_xr = src->read == 0;
2856
2857 debug_atomic_inc(nr_redundant_checks);
2858
2859
2860
2861
2862
2863
2864 ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry);
2865
2866 if (ret == BFS_RMATCH)
2867 debug_atomic_inc(nr_redundant);
2868
2869 return ret;
2870}
2871
2872#else
2873
2874static inline enum bfs_result
2875check_redundant(struct held_lock *src, struct held_lock *target)
2876{
2877 return BFS_RNOMATCH;
2878}
2879
2880#endif
2881
2882static void inc_chains(int irq_context)
2883{
2884 if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2885 nr_hardirq_chains++;
2886 else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2887 nr_softirq_chains++;
2888 else
2889 nr_process_chains++;
2890}
2891
2892static void dec_chains(int irq_context)
2893{
2894 if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2895 nr_hardirq_chains--;
2896 else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2897 nr_softirq_chains--;
2898 else
2899 nr_process_chains--;
2900}
2901
2902static void
2903print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
2904{
2905 struct lock_class *next = hlock_class(nxt);
2906 struct lock_class *prev = hlock_class(prv);
2907
2908 printk(" Possible unsafe locking scenario:\n\n");
2909 printk(" CPU0\n");
2910 printk(" ----\n");
2911 printk(" lock(");
2912 __print_lock_name(prev);
2913 printk(KERN_CONT ");\n");
2914 printk(" lock(");
2915 __print_lock_name(next);
2916 printk(KERN_CONT ");\n");
2917 printk("\n *** DEADLOCK ***\n\n");
2918 printk(" May be due to missing lock nesting notation\n\n");
2919}
2920
2921static void
2922print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
2923 struct held_lock *next)
2924{
2925 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2926 return;
2927
2928 pr_warn("\n");
2929 pr_warn("============================================\n");
2930 pr_warn("WARNING: possible recursive locking detected\n");
2931 print_kernel_ident();
2932 pr_warn("--------------------------------------------\n");
2933 pr_warn("%s/%d is trying to acquire lock:\n",
2934 curr->comm, task_pid_nr(curr));
2935 print_lock(next);
2936 pr_warn("\nbut task is already holding lock:\n");
2937 print_lock(prev);
2938
2939 pr_warn("\nother info that might help us debug this:\n");
2940 print_deadlock_scenario(next, prev);
2941 lockdep_print_held_locks(curr);
2942
2943 pr_warn("\nstack backtrace:\n");
2944 dump_stack();
2945}
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
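/*
 * Check whether @next is already held by the current task in a way that
 * would self-deadlock (same lock class, not a read-after-read recursion).
 * Returns 0 on deadlock, 2 if a nest_lock annotation permits the nesting,
 * and 1 if no problem was found.
 */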
2957static int
2958check_deadlock(struct task_struct *curr, struct held_lock *next)
2959{
2960 struct held_lock *prev;
2961 struct held_lock *nest = NULL;
2962 int i;
2963
2964 for (i = 0; i < curr->lockdep_depth; i++) {
2965 prev = curr->held_locks + i;
2966
2967 if (prev->instance == next->nest_lock)
2968 nest = prev;
2969
2970 if (hlock_class(prev) != hlock_class(next))
2971 continue;
2972
2973
2974
2975
2976
2977 if ((next->read == 2) && prev->read)
2978 continue;
2979
2980
2981
2982
2983
2984 if (nest)
2985 return 2;
2986
2987 print_deadlock_bug(curr, prev, next);
2988 return 0;
2989 }
2990 return 1;
2991}
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
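/*
 * Add the dependency <prev> -> <next> to the graph: verify that it creates
 * neither a cycle nor an irq-usage inversion, skip it if it is already
 * present or redundant, and otherwise link the two classes into both the
 * locks_after and locks_before lists.
 */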
3015static int
3016check_prev_add(struct task_struct *curr, struct held_lock *prev,
3017 struct held_lock *next, u16 distance,
3018 struct lock_trace **const trace)
3019{
3020 struct lock_list *entry;
3021 enum bfs_result ret;
3022
3023 if (!hlock_class(prev)->key || !hlock_class(next)->key) {
3024
3025
3026
3027
3028
3029
3030 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
3031 "Detected use-after-free of lock class %px/%s\n",
3032 hlock_class(prev),
3033 hlock_class(prev)->name);
3034 WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
3035 "Detected use-after-free of lock class %px/%s\n",
3036 hlock_class(next),
3037 hlock_class(next)->name);
3038 return 2;
3039 }
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051 ret = check_noncircular(next, prev, trace);
3052 if (unlikely(bfs_error(ret) || ret == BFS_RMATCH))
3053 return 0;
3054
3055 if (!check_irq_usage(curr, prev, next))
3056 return 0;
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
3067 if (entry->class == hlock_class(next)) {
3068 if (distance == 1)
3069 entry->distance = 1;
3070 entry->dep |= calc_dep(prev, next);
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
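			/*
			 * The forward dependency already exists; make sure
			 * the matching reverse entry in <next>'s locks_before
			 * list is updated as well (reusing @entry as cursor).
			 */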
3088 list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) {
3089 if (entry->class == hlock_class(prev)) {
3090 if (distance == 1)
3091 entry->distance = 1;
3092 entry->dep |= calc_depb(prev, next);
3093 return 1;
3094 }
3095 }
3096
3097
3098 return 0;
3099 }
3100 }
3101
3102
3103
3104
3105 ret = check_redundant(prev, next);
3106 if (bfs_error(ret))
3107 return 0;
3108 else if (ret == BFS_RMATCH)
3109 return 2;
3110
3111 if (!*trace) {
3112 *trace = save_trace();
3113 if (!*trace)
3114 return 0;
3115 }
3116
3117
3118
3119
3120
3121 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
3122 &hlock_class(prev)->locks_after,
3123 next->acquire_ip, distance,
3124 calc_dep(prev, next),
3125 *trace);
3126
3127 if (!ret)
3128 return 0;
3129
3130 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
3131 &hlock_class(next)->locks_before,
3132 next->acquire_ip, distance,
3133 calc_depb(prev, next),
3134 *trace);
3135 if (!ret)
3136 return 0;
3137
3138 return 2;
3139}
3140
3141
3142
3143
3144
3145
3146
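/*
 * Add dependencies from the locks currently held in this irq context to the
 * new lock @next, walking backwards from the most recently acquired one and
 * stopping at the first non-trylock entry, since earlier held locks are
 * already connected to it transitively.
 */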
3147static int
3148check_prevs_add(struct task_struct *curr, struct held_lock *next)
3149{
3150 struct lock_trace *trace = NULL;
3151 int depth = curr->lockdep_depth;
3152 struct held_lock *hlock;
3153
3154
3155
3156
3157
3158
3159 if (!depth)
3160 goto out_bug;
3161
3162
3163
3164
3165 if (curr->held_locks[depth].irq_context !=
3166 curr->held_locks[depth-1].irq_context)
3167 goto out_bug;
3168
3169 for (;;) {
3170 u16 distance = curr->lockdep_depth - depth + 1;
3171 hlock = curr->held_locks + depth - 1;
3172
3173 if (hlock->check) {
3174 int ret = check_prev_add(curr, hlock, next, distance, &trace);
3175 if (!ret)
3176 return 0;
3177
3178
3179
3180
3181
3182
3183
3184 if (!hlock->trylock)
3185 break;
3186 }
3187
3188 depth--;
3189
3190
3191
3192 if (!depth)
3193 break;
3194
3195
3196
3197 if (curr->held_locks[depth].irq_context !=
3198 curr->held_locks[depth-1].irq_context)
3199 break;
3200 }
3201 return 1;
3202out_bug:
3203 if (!debug_locks_off_graph_unlock())
3204 return 0;
3205
3206
3207
3208
3209
3210
3211 WARN_ON(1);
3212
3213 return 0;
3214}
3215
3216struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
3217static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
3218static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
3219unsigned long nr_zapped_lock_chains;
3220unsigned int nr_free_chain_hlocks;
3221unsigned int nr_lost_chain_hlocks;
3222unsigned int nr_large_chain_blocks;
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
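/*
 * chain_hlocks[] is carved into variable sized blocks holding the per-chain
 * lists of held lock ids. Freed blocks are kept on per-size freelists:
 * bucket 0 collects the large (> MAX_CHAIN_BUCKETS) variable sized blocks,
 * the other buckets hold blocks of one fixed size each. The freelist links
 * are encoded in the entries themselves, using CHAIN_BLK_FLAG as an escape
 * bit to distinguish them from lock ids.
 */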
3241#define MAX_CHAIN_BUCKETS 16
3242#define CHAIN_BLK_FLAG (1U << 15)
3243#define CHAIN_BLK_LIST_END 0xFFFFU
3244
3245static int chain_block_buckets[MAX_CHAIN_BUCKETS];
3246
3247static inline int size_to_bucket(int size)
3248{
3249 if (size > MAX_CHAIN_BUCKETS)
3250 return 0;
3251
3252 return size - 1;
3253}
3254
3255
3256
3257
3258#define for_each_chain_block(bucket, prev, curr) \
3259 for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
3260 (curr) >= 0; \
3261 (prev) = (curr), (curr) = chain_block_next(curr))
3262
3263
3264
3265
3266static inline int chain_block_next(int offset)
3267{
3268 int next = chain_hlocks[offset];
3269
3270 WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG));
3271
3272 if (next == CHAIN_BLK_LIST_END)
3273 return -1;
3274
3275 next &= ~CHAIN_BLK_FLAG;
3276 next <<= 16;
3277 next |= chain_hlocks[offset + 1];
3278
3279 return next;
3280}
3281
3282
3283
3284
3285static inline int chain_block_size(int offset)
3286{
3287 return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
3288}
3289
3290static inline void init_chain_block(int offset, int next, int bucket, int size)
3291{
3292 chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG;
3293 chain_hlocks[offset + 1] = (u16)next;
3294
3295 if (size && !bucket) {
3296 chain_hlocks[offset + 2] = size >> 16;
3297 chain_hlocks[offset + 3] = (u16)size;
3298 }
3299}
3300
3301static inline void add_chain_block(int offset, int size)
3302{
3303 int bucket = size_to_bucket(size);
3304 int next = chain_block_buckets[bucket];
3305 int prev, curr;
3306
3307 if (unlikely(size < 2)) {
3308
3309
3310
3311
3312
3313
3314
3315 if (size)
3316 nr_lost_chain_hlocks++;
3317 return;
3318 }
3319
3320 nr_free_chain_hlocks += size;
3321 if (!bucket) {
3322 nr_large_chain_blocks++;
3323
3324
3325
3326
3327 for_each_chain_block(0, prev, curr) {
3328 if (size >= chain_block_size(curr))
3329 break;
3330 }
3331 init_chain_block(offset, curr, 0, size);
3332 if (prev < 0)
3333 chain_block_buckets[0] = offset;
3334 else
3335 init_chain_block(prev, offset, 0, 0);
3336 return;
3337 }
3338
3339
3340
3341 init_chain_block(offset, next, bucket, size);
3342 chain_block_buckets[bucket] = offset;
3343}
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
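/*
 * Detach the block at the head of @bucket's freelist (the caller already
 * looked it up) and account for the entries it hands out.
 */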
3356static inline void del_chain_block(int bucket, int size, int next)
3357{
3358 nr_free_chain_hlocks -= size;
3359 chain_block_buckets[bucket] = next;
3360
3361 if (!bucket)
3362 nr_large_chain_blocks--;
3363}
3364
3365static void init_chain_block_buckets(void)
3366{
3367 int i;
3368
3369 for (i = 0; i < MAX_CHAIN_BUCKETS; i++)
3370 chain_block_buckets[i] = -1;
3371
3372 add_chain_block(0, ARRAY_SIZE(chain_hlocks));
3373}
3374
3375
3376
3377
3378
3379
3380
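/*
 * Allocate @req entries from chain_hlocks[]: try the exact-size bucket
 * first, then carve the request out of the largest variable sized block,
 * and as a last resort split a block from a bigger fixed-size bucket.
 * Returns the offset of the allocation, or -1 if nothing fits.
 */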
3381static int alloc_chain_hlocks(int req)
3382{
3383 int bucket, curr, size;
3384
3385
3386
3387
3388
3389 BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG);
3390
3391 init_data_structures_once();
3392
3393 if (nr_free_chain_hlocks < req)
3394 return -1;
3395
3396
3397
3398
3399
3400 req = max(req, 2);
3401 bucket = size_to_bucket(req);
3402 curr = chain_block_buckets[bucket];
3403
3404 if (bucket) {
3405 if (curr >= 0) {
3406 del_chain_block(bucket, req, chain_block_next(curr));
3407 return curr;
3408 }
3409
3410 curr = chain_block_buckets[0];
3411 }
3412
3413
3414
3415
3416
3417 if (curr >= 0) {
3418 size = chain_block_size(curr);
3419 if (likely(size >= req)) {
3420 del_chain_block(0, size, chain_block_next(curr));
3421 add_chain_block(curr + req, size - req);
3422 return curr;
3423 }
3424 }
3425
3426
3427
3428
3429 for (size = MAX_CHAIN_BUCKETS; size > req; size--) {
3430 bucket = size_to_bucket(size);
3431 curr = chain_block_buckets[bucket];
3432 if (curr < 0)
3433 continue;
3434
3435 del_chain_block(bucket, size, chain_block_next(curr));
3436 add_chain_block(curr + req, size - req);
3437 return curr;
3438 }
3439
3440 return -1;
3441}
3442
3443static inline void free_chain_hlocks(int base, int size)
3444{
3445 add_chain_block(base, max(size, 2));
3446}
3447
3448struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
3449{
3450 u16 chain_hlock = chain_hlocks[chain->base + i];
3451 unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
3452
3453	return lock_classes + class_idx;
3454}
3455
3456
3457
3458
3459static inline int get_first_held_lock(struct task_struct *curr,
3460 struct held_lock *hlock)
3461{
3462 int i;
3463 struct held_lock *hlock_curr;
3464
3465 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
3466 hlock_curr = curr->held_locks + i;
3467 if (hlock_curr->irq_context != hlock->irq_context)
3468 break;
3469
3470 }
3471
3472 return ++i;
3473}
3474
3475#ifdef CONFIG_DEBUG_LOCKDEP
3476
3477
3478
3479static u64 print_chain_key_iteration(u16 hlock_id, u64 chain_key)
3480{
3481 u64 new_chain_key = iterate_chain_key(chain_key, hlock_id);
3482
3483 printk(" hlock_id:%d -> chain_key:%016Lx",
3484 (unsigned int)hlock_id,
3485 (unsigned long long)new_chain_key);
3486 return new_chain_key;
3487}
3488
3489static void
3490print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
3491{
3492 struct held_lock *hlock;
3493 u64 chain_key = INITIAL_CHAIN_KEY;
3494 int depth = curr->lockdep_depth;
3495 int i = get_first_held_lock(curr, hlock_next);
3496
3497 printk("depth: %u (irq_context %u)\n", depth - i + 1,
3498 hlock_next->irq_context);
3499 for (; i < depth; i++) {
3500 hlock = curr->held_locks + i;
3501 chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key);
3502
3503 print_lock(hlock);
3504 }
3505
3506 print_chain_key_iteration(hlock_id(hlock_next), chain_key);
3507 print_lock(hlock_next);
3508}
3509
3510static void print_chain_keys_chain(struct lock_chain *chain)
3511{
3512 int i;
3513 u64 chain_key = INITIAL_CHAIN_KEY;
3514 u16 hlock_id;
3515
3516 printk("depth: %u\n", chain->depth);
3517 for (i = 0; i < chain->depth; i++) {
3518 hlock_id = chain_hlocks[chain->base + i];
3519 chain_key = print_chain_key_iteration(hlock_id, chain_key);
3520
3521		print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
3522 printk("\n");
3523 }
3524}
3525
3526static void print_collision(struct task_struct *curr,
3527 struct held_lock *hlock_next,
3528 struct lock_chain *chain)
3529{
3530 pr_warn("\n");
3531 pr_warn("============================\n");
3532 pr_warn("WARNING: chain_key collision\n");
3533 print_kernel_ident();
3534 pr_warn("----------------------------\n");
3535 pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
3536 pr_warn("Hash chain already cached but the contents don't match!\n");
3537
3538 pr_warn("Held locks:");
3539 print_chain_keys_held_locks(curr, hlock_next);
3540
3541 pr_warn("Locks in cached chain:");
3542 print_chain_keys_chain(chain);
3543
3544 pr_warn("\nstack backtrace:\n");
3545 dump_stack();
3546}
3547#endif
3548
3549
3550
3551
3552
3553
3554
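/*
 * Sanity check (CONFIG_DEBUG_LOCKDEP only): the cached chain found for this
 * chain_key must describe exactly the locks currently held in this irq
 * context, otherwise we have hit a chain_key collision.
 */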
3555static int check_no_collision(struct task_struct *curr,
3556 struct held_lock *hlock,
3557 struct lock_chain *chain)
3558{
3559#ifdef CONFIG_DEBUG_LOCKDEP
3560 int i, j, id;
3561
3562 i = get_first_held_lock(curr, hlock);
3563
3564 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
3565 print_collision(curr, hlock, chain);
3566 return 0;
3567 }
3568
3569 for (j = 0; j < chain->depth - 1; j++, i++) {
3570 id = hlock_id(&curr->held_locks[i]);
3571
3572 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
3573 print_collision(curr, hlock, chain);
3574 return 0;
3575 }
3576 }
3577#endif
3578 return 1;
3579}
3580
3581
3582
3583
3584
3585long lockdep_next_lockchain(long i)
3586{
3587 i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
3588 return i < ARRAY_SIZE(lock_chains) ? i : -2;
3589}
3590
3591unsigned long lock_chain_count(void)
3592{
3593 return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
3594}
3595
3596
3597static struct lock_chain *alloc_lock_chain(void)
3598{
3599 int idx = find_first_zero_bit(lock_chains_in_use,
3600 ARRAY_SIZE(lock_chains));
3601
3602 if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
3603 return NULL;
3604 __set_bit(idx, lock_chains_in_use);
3605 return lock_chains + idx;
3606}
3607
3608
3609
3610
3611
3612
3613
3614
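/*
 * Allocate a new lock_chain and a chain_hlocks[] block for it, record the
 * held lock ids of the current irq context and add the chain to the hash.
 * Must be called with the graph lock held.
 */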
3615static inline int add_chain_cache(struct task_struct *curr,
3616 struct held_lock *hlock,
3617 u64 chain_key)
3618{
3619 struct hlist_head *hash_head = chainhashentry(chain_key);
3620 struct lock_chain *chain;
3621 int i, j;
3622
3623
3624
3625
3626
3627
3628 if (lockdep_assert_locked())
3629 return 0;
3630
3631 chain = alloc_lock_chain();
3632 if (!chain) {
3633 if (!debug_locks_off_graph_unlock())
3634 return 0;
3635
3636 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
3637 dump_stack();
3638 return 0;
3639 }
3640 chain->chain_key = chain_key;
3641 chain->irq_context = hlock->irq_context;
3642 i = get_first_held_lock(curr, hlock);
3643 chain->depth = curr->lockdep_depth + 1 - i;
3644
3645 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
3646 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
3647 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
3648
3649 j = alloc_chain_hlocks(chain->depth);
3650 if (j < 0) {
3651 if (!debug_locks_off_graph_unlock())
3652 return 0;
3653
3654 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
3655 dump_stack();
3656 return 0;
3657 }
3658
3659 chain->base = j;
3660 for (j = 0; j < chain->depth - 1; j++, i++) {
3661 int lock_id = hlock_id(curr->held_locks + i);
3662
3663 chain_hlocks[chain->base + j] = lock_id;
3664 }
3665 chain_hlocks[chain->base + j] = hlock_id(hlock);
3666 hlist_add_head_rcu(&chain->entry, hash_head);
3667 debug_atomic_inc(chain_lookup_misses);
3668 inc_chains(chain->irq_context);
3669
3670 return 1;
3671}
3672
3673
3674
3675
3676
3677static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
3678{
3679 struct hlist_head *hash_head = chainhashentry(chain_key);
3680 struct lock_chain *chain;
3681
3682 hlist_for_each_entry_rcu(chain, hash_head, entry) {
3683 if (READ_ONCE(chain->chain_key) == chain_key) {
3684 debug_atomic_inc(chain_lookup_hits);
3685 return chain;
3686 }
3687 }
3688 return NULL;
3689}
3690
3691
3692
3693
3694
3695
3696
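/*
 * Look up the chain for @chain_key and add it to the cache if it is not
 * there yet. Returns 1 if a new chain was added, in which case the graph
 * lock is left held for the caller; returns 0 on a cache hit (after
 * collision checking) or on error.
 */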
3697static inline int lookup_chain_cache_add(struct task_struct *curr,
3698 struct held_lock *hlock,
3699 u64 chain_key)
3700{
3701 struct lock_class *class = hlock_class(hlock);
3702 struct lock_chain *chain = lookup_chain_cache(chain_key);
3703
3704 if (chain) {
3705cache_hit:
3706 if (!check_no_collision(curr, hlock, chain))
3707 return 0;
3708
3709 if (very_verbose(class)) {
3710 printk("\nhash chain already cached, key: "
3711 "%016Lx tail class: [%px] %s\n",
3712 (unsigned long long)chain_key,
3713 class->key, class->name);
3714 }
3715
3716 return 0;
3717 }
3718
3719 if (very_verbose(class)) {
3720 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
3721 (unsigned long long)chain_key, class->key, class->name);
3722 }
3723
3724 if (!graph_lock())
3725 return 0;
3726
3727
3728
3729
3730 chain = lookup_chain_cache(chain_key);
3731 if (chain) {
3732 graph_unlock();
3733 goto cache_hit;
3734 }
3735
3736 if (!add_chain_cache(curr, hlock, chain_key))
3737 return 0;
3738
3739 return 1;
3740}
3741
3742static int validate_chain(struct task_struct *curr,
3743 struct held_lock *hlock,
3744 int chain_head, u64 chain_key)
3745{
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756 if (!hlock->trylock && hlock->check &&
3757 lookup_chain_cache_add(curr, hlock, chain_key)) {
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776 int ret = check_deadlock(curr, hlock);
3777
3778 if (!ret)
3779 return 0;
3780
3781
3782
3783
3784
3785
3786
3787
3788 if (!chain_head && ret != 2) {
3789 if (!check_prevs_add(curr, hlock))
3790 return 0;
3791 }
3792
3793 graph_unlock();
3794 } else {
3795
3796 if (unlikely(!debug_locks))
3797 return 0;
3798 }
3799
3800 return 1;
3801}
3802#else
3803static inline int validate_chain(struct task_struct *curr,
3804 struct held_lock *hlock,
3805 int chain_head, u64 chain_key)
3806{
3807 return 1;
3808}
3809
3810static void init_chain_block_buckets(void) { }
3811#endif
3812
3813
3814
3815
3816
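/*
 * Debug check (CONFIG_DEBUG_LOCKDEP only): recompute the chain key from the
 * held lock stack and compare it against the cached per-lock and per-task
 * values.
 */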
3817static void check_chain_key(struct task_struct *curr)
3818{
3819#ifdef CONFIG_DEBUG_LOCKDEP
3820 struct held_lock *hlock, *prev_hlock = NULL;
3821 unsigned int i;
3822 u64 chain_key = INITIAL_CHAIN_KEY;
3823
3824 for (i = 0; i < curr->lockdep_depth; i++) {
3825 hlock = curr->held_locks + i;
3826 if (chain_key != hlock->prev_chain_key) {
3827 debug_locks_off();
3828
3829
3830
3831
3832 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
3833 curr->lockdep_depth, i,
3834 (unsigned long long)chain_key,
3835 (unsigned long long)hlock->prev_chain_key);
3836 return;
3837 }
3838
3839
3840
3841
3842
3843 if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
3844 return;
3845
3846 if (prev_hlock && (prev_hlock->irq_context !=
3847 hlock->irq_context))
3848 chain_key = INITIAL_CHAIN_KEY;
3849 chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
3850 prev_hlock = hlock;
3851 }
3852 if (chain_key != curr->curr_chain_key) {
3853 debug_locks_off();
3854
3855
3856
3857
3858 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
3859 curr->lockdep_depth, i,
3860 (unsigned long long)chain_key,
3861 (unsigned long long)curr->curr_chain_key);
3862 }
3863#endif
3864}
3865
3866#ifdef CONFIG_PROVE_LOCKING
3867static int mark_lock(struct task_struct *curr, struct held_lock *this,
3868 enum lock_usage_bit new_bit);
3869
3870static void print_usage_bug_scenario(struct held_lock *lock)
3871{
3872 struct lock_class *class = hlock_class(lock);
3873
3874 printk(" Possible unsafe locking scenario:\n\n");
3875 printk(" CPU0\n");
3876 printk(" ----\n");
3877 printk(" lock(");
3878 __print_lock_name(class);
3879 printk(KERN_CONT ");\n");
3880 printk(" <Interrupt>\n");
3881 printk(" lock(");
3882 __print_lock_name(class);
3883 printk(KERN_CONT ");\n");
3884 printk("\n *** DEADLOCK ***\n\n");
3885}
3886
3887static void
3888print_usage_bug(struct task_struct *curr, struct held_lock *this,
3889 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
3890{
3891 if (!debug_locks_off() || debug_locks_silent)
3892 return;
3893
3894 pr_warn("\n");
3895 pr_warn("================================\n");
3896 pr_warn("WARNING: inconsistent lock state\n");
3897 print_kernel_ident();
3898 pr_warn("--------------------------------\n");
3899
3900 pr_warn("inconsistent {%s} -> {%s} usage.\n",
3901 usage_str[prev_bit], usage_str[new_bit]);
3902
3903 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
3904 curr->comm, task_pid_nr(curr),
3905 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
3906 lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
3907 lockdep_hardirqs_enabled(),
3908 lockdep_softirqs_enabled(curr));
3909 print_lock(this);
3910
3911 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
3912 print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
3913
3914 print_irqtrace_events(curr);
3915 pr_warn("\nother info that might help us debug this:\n");
3916 print_usage_bug_scenario(this);
3917
3918 lockdep_print_held_locks(curr);
3919
3920 pr_warn("\nstack backtrace:\n");
3921 dump_stack();
3922}
3923
3924
3925
3926
3927static inline int
3928valid_state(struct task_struct *curr, struct held_lock *this,
3929 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
3930{
3931 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
3932 graph_unlock();
3933 print_usage_bug(curr, this, bad_bit, new_bit);
3934 return 0;
3935 }
3936 return 1;
3937}
3938
3939
3940
3941
3942
3943static void
3944print_irq_inversion_bug(struct task_struct *curr,
3945 struct lock_list *root, struct lock_list *other,
3946 struct held_lock *this, int forwards,
3947 const char *irqclass)
3948{
3949 struct lock_list *entry = other;
3950 struct lock_list *middle = NULL;
3951 int depth;
3952
3953 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3954 return;
3955
3956 pr_warn("\n");
3957 pr_warn("========================================================\n");
3958 pr_warn("WARNING: possible irq lock inversion dependency detected\n");
3959 print_kernel_ident();
3960 pr_warn("--------------------------------------------------------\n");
3961 pr_warn("%s/%d just changed the state of lock:\n",
3962 curr->comm, task_pid_nr(curr));
3963 print_lock(this);
3964 if (forwards)
3965 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
3966 else
3967 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
3968 print_lock_name(other->class);
3969 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
3970
3971 pr_warn("\nother info that might help us debug this:\n");
3972
3973
3974 depth = get_lock_depth(other);
3975 do {
3976 if (depth == 0 && (entry != root)) {
3977 pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
3978 break;
3979 }
3980 middle = entry;
3981 entry = get_lock_parent(entry);
3982 depth--;
3983 } while (entry && entry != root && (depth >= 0));
3984 if (forwards)
3985 print_irq_lock_scenario(root, other,
3986 middle ? middle->class : root->class, other->class);
3987 else
3988 print_irq_lock_scenario(other, root,
3989 middle ? middle->class : other->class, root->class);
3990
3991 lockdep_print_held_locks(curr);
3992
3993 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
3994 root->trace = save_trace();
3995 if (!root->trace)
3996 return;
3997 print_shortest_lock_dependencies(other, root);
3998
3999 pr_warn("\nstack backtrace:\n");
4000 dump_stack();
4001}
4002
4003
4004
4005
4006
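/*
 * Prove that no lock reachable forwards from @this (i.e. one that can be
 * taken while holding it) already has the conflicting usage bit set, in
 * either its read or write variant.
 */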
4007static int
4008check_usage_forwards(struct task_struct *curr, struct held_lock *this,
4009 enum lock_usage_bit bit)
4010{
4011 enum bfs_result ret;
4012 struct lock_list root;
4013 struct lock_list *target_entry;
4014 enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
4015 unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
4016
4017 bfs_init_root(&root, this);
4018 ret = find_usage_forwards(&root, usage_mask, &target_entry);
4019 if (bfs_error(ret)) {
4020 print_bfs_bug(ret);
4021 return 0;
4022 }
4023 if (ret == BFS_RNOMATCH)
4024 return 1;
4025
4026
4027 if (target_entry->class->usage_mask & lock_flag(bit)) {
4028 print_irq_inversion_bug(curr, &root, target_entry,
4029 this, 1, state_name(bit));
4030 } else {
4031 print_irq_inversion_bug(curr, &root, target_entry,
4032 this, 1, state_name(read_bit));
4033 }
4034
4035 return 0;
4036}
4037
4038
4039
4040
4041
4042static int
4043check_usage_backwards(struct task_struct *curr, struct held_lock *this,
4044 enum lock_usage_bit bit)
4045{
4046 enum bfs_result ret;
4047 struct lock_list root;
4048 struct lock_list *target_entry;
4049 enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
4050 unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
4051
4052 bfs_init_rootb(&root, this);
4053 ret = find_usage_backwards(&root, usage_mask, &target_entry);
4054 if (bfs_error(ret)) {
4055 print_bfs_bug(ret);
4056 return 0;
4057 }
4058 if (ret == BFS_RNOMATCH)
4059 return 1;
4060
4061
4062 if (target_entry->class->usage_mask & lock_flag(bit)) {
4063 print_irq_inversion_bug(curr, &root, target_entry,
4064 this, 0, state_name(bit));
4065 } else {
4066 print_irq_inversion_bug(curr, &root, target_entry,
4067 this, 0, state_name(read_bit));
4068 }
4069
4070 return 0;
4071}
4072
4073void print_irqtrace_events(struct task_struct *curr)
4074{
4075 const struct irqtrace_events *trace = &curr->irqtrace;
4076
4077 printk("irq event stamp: %u\n", trace->irq_events);
4078 printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
4079 trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
4080 (void *)trace->hardirq_enable_ip);
4081 printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
4082 trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
4083 (void *)trace->hardirq_disable_ip);
4084 printk("softirqs last enabled at (%u): [<%px>] %pS\n",
4085 trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
4086 (void *)trace->softirq_enable_ip);
4087 printk("softirqs last disabled at (%u): [<%px>] %pS\n",
4088 trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
4089 (void *)trace->softirq_disable_ip);
4090}
4091
4092static int HARDIRQ_verbose(struct lock_class *class)
4093{
4094#if HARDIRQ_VERBOSE
4095 return class_filter(class);
4096#endif
4097 return 0;
4098}
4099
4100static int SOFTIRQ_verbose(struct lock_class *class)
4101{
4102#if SOFTIRQ_VERBOSE
4103 return class_filter(class);
4104#endif
4105 return 0;
4106}
4107
4108static int (*state_verbose_f[])(struct lock_class *class) = {
4109#define LOCKDEP_STATE(__STATE) \
4110 __STATE##_verbose,
4111#include "lockdep_states.h"
4112#undef LOCKDEP_STATE
4113};
4114
4115static inline int state_verbose(enum lock_usage_bit bit,
4116 struct lock_class *class)
4117{
4118 return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
4119}
4120
4121typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
4122 enum lock_usage_bit bit, const char *name);
4123
4124static int
4125mark_lock_irq(struct task_struct *curr, struct held_lock *this,
4126 enum lock_usage_bit new_bit)
4127{
4128 int excl_bit = exclusive_bit(new_bit);
4129 int read = new_bit & LOCK_USAGE_READ_MASK;
4130 int dir = new_bit & LOCK_USAGE_DIR_MASK;
4131
4132
4133
4134
4135
4136 if (!valid_state(curr, this, new_bit, excl_bit))
4137 return 0;
4138
4139
4140
4141
4142 if (!read && !valid_state(curr, this, new_bit,
4143 excl_bit + LOCK_USAGE_READ_MASK))
4144 return 0;
4145
4146
4147
4148
4149
4150
4151 if (dir) {
4152
4153
4154
4155
4156 if (!check_usage_backwards(curr, this, excl_bit))
4157 return 0;
4158 } else {
4159
4160
4161
4162
4163 if (!check_usage_forwards(curr, this, excl_bit))
4164 return 0;
4165 }
4166
4167 if (state_verbose(new_bit, hlock_class(this)))
4168 return 2;
4169
4170 return 1;
4171}
4172
4173
4174
4175
4176static int
4177mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
4178{
4179 struct held_lock *hlock;
4180 int i;
4181
4182 for (i = 0; i < curr->lockdep_depth; i++) {
4183 enum lock_usage_bit hlock_bit = base_bit;
4184 hlock = curr->held_locks + i;
4185
4186 if (hlock->read)
4187 hlock_bit += LOCK_USAGE_READ_MASK;
4188
4189 BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
4190
4191 if (!hlock->check)
4192 continue;
4193
4194 if (!mark_lock(curr, hlock, hlock_bit))
4195 return 0;
4196 }
4197
4198 return 1;
4199}
4200
4201
4202
4203
4204static void __trace_hardirqs_on_caller(void)
4205{
4206 struct task_struct *curr = current;
4207
4208
4209
4210
4211
4212 if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
4213 return;
4214
4215
4216
4217
4218
4219 if (curr->softirqs_enabled)
4220 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
4221}
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
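/*
 * Called before hardirqs are actually (re-)enabled, with interrupts still
 * disabled: do the expensive part of hardirq tracing here by marking all
 * held locks LOCK_ENABLED_HARDIRQ (and LOCK_ENABLED_SOFTIRQ where
 * applicable); lockdep_hardirqs_on() later just flips the per-CPU state.
 */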
4232void lockdep_hardirqs_on_prepare(unsigned long ip)
4233{
4234 if (unlikely(!debug_locks))
4235 return;
4236
4237
4238
4239
4240 if (unlikely(in_nmi()))
4241 return;
4242
4243 if (unlikely(this_cpu_read(lockdep_recursion)))
4244 return;
4245
4246 if (unlikely(lockdep_hardirqs_enabled())) {
4247
4248
4249
4250
4251
4252 __debug_atomic_inc(redundant_hardirqs_on);
4253 return;
4254 }
4255
4256
4257
4258
4259
4260
4261 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4262 return;
4263
4264
4265
4266
4267 if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
4268 return;
4269
4270
4271
4272
4273
4274 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
4275 return;
4276
4277 current->hardirq_chain_key = current->curr_chain_key;
4278
4279 lockdep_recursion_inc();
4280 __trace_hardirqs_on_caller();
4281 lockdep_recursion_finish();
4282}
4283EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
4284
4285void noinstr lockdep_hardirqs_on(unsigned long ip)
4286{
4287	struct irqtrace_events *trace = &current->irqtrace;
4288
4289 if (unlikely(!debug_locks))
4290 return;
4291
4292
4293
4294
4295
4296
4297
4298
4299 if (unlikely(in_nmi())) {
4300 if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4301 return;
4302
4303
4304
4305
4306
4307
4308
4309 goto skip_checks;
4310 }
4311
4312 if (unlikely(this_cpu_read(lockdep_recursion)))
4313 return;
4314
4315 if (lockdep_hardirqs_enabled()) {
4316
4317
4318
4319
4320
4321 __debug_atomic_inc(redundant_hardirqs_on);
4322 return;
4323 }
4324
4325
4326
4327
4328
4329
4330 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4331 return;
4332
4333
4334
4335
4336
4337 DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
4338 current->curr_chain_key);
4339
4340skip_checks:
4341
4342 __this_cpu_write(hardirqs_enabled, 1);
4343 trace->hardirq_enable_ip = ip;
4344 trace->hardirq_enable_event = ++trace->irq_events;
4345 debug_atomic_inc(hardirqs_on_events);
4346}
4347EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
4348
4349
4350
4351
4352void noinstr lockdep_hardirqs_off(unsigned long ip)
4353{
4354 if (unlikely(!debug_locks))
4355 return;
4356
4357
4358
4359
4360
4361
4362 if (in_nmi()) {
4363 if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4364 return;
4365 } else if (__this_cpu_read(lockdep_recursion))
4366 return;
4367
4368
4369
4370
4371
4372 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4373 return;
4374
4375 if (lockdep_hardirqs_enabled()) {
4376		struct irqtrace_events *trace = &current->irqtrace;
4377
4378
4379
4380
4381 __this_cpu_write(hardirqs_enabled, 0);
4382 trace->hardirq_disable_ip = ip;
4383 trace->hardirq_disable_event = ++trace->irq_events;
4384 debug_atomic_inc(hardirqs_off_events);
4385 } else {
4386 debug_atomic_inc(redundant_hardirqs_off);
4387 }
4388}
4389EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
4390
4391
4392
4393
4394void lockdep_softirqs_on(unsigned long ip)
4395{
4396	struct irqtrace_events *trace = &current->irqtrace;
4397
4398 if (unlikely(!lockdep_enabled()))
4399 return;
4400
4401
4402
4403
4404
4405 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4406 return;
4407
4408 if (current->softirqs_enabled) {
4409 debug_atomic_inc(redundant_softirqs_on);
4410 return;
4411 }
4412
4413 lockdep_recursion_inc();
4414
4415
4416
4417 current->softirqs_enabled = 1;
4418 trace->softirq_enable_ip = ip;
4419 trace->softirq_enable_event = ++trace->irq_events;
4420 debug_atomic_inc(softirqs_on_events);
4421
4422
4423
4424
4425
4426 if (lockdep_hardirqs_enabled())
4427 mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
4428 lockdep_recursion_finish();
4429}
4430
4431
4432
4433
4434void lockdep_softirqs_off(unsigned long ip)
4435{
4436 if (unlikely(!lockdep_enabled()))
4437 return;
4438
4439
4440
4441
4442 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4443 return;
4444
4445 if (current->softirqs_enabled) {
4446		struct irqtrace_events *trace = &current->irqtrace;
4447
4448
4449
4450
4451 current->softirqs_enabled = 0;
4452 trace->softirq_disable_ip = ip;
4453 trace->softirq_disable_event = ++trace->irq_events;
4454 debug_atomic_inc(softirqs_off_events);
4455
4456
4457
4458 DEBUG_LOCKS_WARN_ON(!softirq_count());
4459 } else
4460 debug_atomic_inc(redundant_softirqs_off);
4461}
4462
4463static int
4464mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4465{
4466 if (!check)
4467 goto lock_used;
4468
4469
4470
4471
4472
4473 if (!hlock->trylock) {
4474 if (hlock->read) {
4475 if (lockdep_hardirq_context())
4476 if (!mark_lock(curr, hlock,
4477 LOCK_USED_IN_HARDIRQ_READ))
4478 return 0;
4479 if (curr->softirq_context)
4480 if (!mark_lock(curr, hlock,
4481 LOCK_USED_IN_SOFTIRQ_READ))
4482 return 0;
4483 } else {
4484 if (lockdep_hardirq_context())
4485 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
4486 return 0;
4487 if (curr->softirq_context)
4488 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
4489 return 0;
4490 }
4491 }
4492 if (!hlock->hardirqs_off) {
4493 if (hlock->read) {
4494 if (!mark_lock(curr, hlock,
4495 LOCK_ENABLED_HARDIRQ_READ))
4496 return 0;
4497 if (curr->softirqs_enabled)
4498 if (!mark_lock(curr, hlock,
4499 LOCK_ENABLED_SOFTIRQ_READ))
4500 return 0;
4501 } else {
4502 if (!mark_lock(curr, hlock,
4503 LOCK_ENABLED_HARDIRQ))
4504 return 0;
4505 if (curr->softirqs_enabled)
4506 if (!mark_lock(curr, hlock,
4507 LOCK_ENABLED_SOFTIRQ))
4508 return 0;
4509 }
4510 }
4511
4512lock_used:
4513
4514 if (!mark_lock(curr, hlock, LOCK_USED))
4515 return 0;
4516
4517 return 1;
4518}
4519
4520static inline unsigned int task_irq_context(struct task_struct *task)
4521{
4522 return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
4523 LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
4524}
4525
4526static int separate_irq_context(struct task_struct *curr,
4527 struct held_lock *hlock)
4528{
4529 unsigned int depth = curr->lockdep_depth;
4530
4531
4532
4533
4534 if (depth) {
4535 struct held_lock *prev_hlock;
4536
4537 prev_hlock = curr->held_locks + depth-1;
4538
4539
4540
4541
4542
4543 if (prev_hlock->irq_context != hlock->irq_context)
4544 return 1;
4545 }
4546 return 0;
4547}
4548
4549
4550
4551
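/*
 * Record a new usage bit for @this lock: save a stack trace for it and, for
 * irq related bits, verify that the new usage does not conflict with the
 * existing dependency graph.
 */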
4552static int mark_lock(struct task_struct *curr, struct held_lock *this,
4553 enum lock_usage_bit new_bit)
4554{
4555 unsigned int new_mask, ret = 1;
4556
4557 if (new_bit >= LOCK_USAGE_STATES) {
4558 DEBUG_LOCKS_WARN_ON(1);
4559 return 0;
4560 }
4561
4562 if (new_bit == LOCK_USED && this->read)
4563 new_bit = LOCK_USED_READ;
4564
4565 new_mask = 1 << new_bit;
4566
4567
4568
4569
4570
4571 if (likely(hlock_class(this)->usage_mask & new_mask))
4572 return 1;
4573
4574 if (!graph_lock())
4575 return 0;
4576
4577
4578
4579 if (unlikely(hlock_class(this)->usage_mask & new_mask))
4580 goto unlock;
4581
4582 if (!hlock_class(this)->usage_mask)
4583 debug_atomic_dec(nr_unused_locks);
4584
4585 hlock_class(this)->usage_mask |= new_mask;
4586
4587 if (new_bit < LOCK_TRACE_STATES) {
4588 if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
4589 return 0;
4590 }
4591
4592 if (new_bit < LOCK_USED) {
4593 ret = mark_lock_irq(curr, this, new_bit);
4594 if (!ret)
4595 return 0;
4596 }
4597
4598unlock:
4599 graph_unlock();
4600
4601
4602
4603
4604 if (ret == 2) {
4605 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
4606 print_lock(this);
4607 print_irqtrace_events(curr);
4608 dump_stack();
4609 }
4610
4611 return ret;
4612}
4613
4614static inline short task_wait_context(struct task_struct *curr)
4615{
4616
4617
4618
4619
4620 if (lockdep_hardirq_context()) {
4621
4622
4623
4624 if (curr->hardirq_threaded || curr->irq_config)
4625 return LD_WAIT_CONFIG;
4626
4627 return LD_WAIT_SPIN;
4628 } else if (curr->softirq_context) {
4629
4630
4631
4632 return LD_WAIT_CONFIG;
4633 }
4634
4635 return LD_WAIT_MAX;
4636}
4637
4638static int
4639print_lock_invalid_wait_context(struct task_struct *curr,
4640 struct held_lock *hlock)
4641{
4642 short curr_inner;
4643
4644 if (!debug_locks_off())
4645 return 0;
4646 if (debug_locks_silent)
4647 return 0;
4648
4649 pr_warn("\n");
4650 pr_warn("=============================\n");