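/*
 * Mutexes: blocking mutual-exclusion locks.
 *
 * The lock owner is stored in the atomic word lock->owner together with
 * three low state-flag bits (see the MUTEX_FLAG_* definitions below);
 * NULL means unlocked.
 */
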
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

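/*
 * Initialise the mutex to the unlocked state, with an empty wait list and,
 * when optimistic spinning is configured, an unlocked OSQ node.
 */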
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_long_set(&lock->owner, 0);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        osq_lock_init(&lock->osq);
#endif

        debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

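/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */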
#define MUTEX_FLAG_WAITERS      0x01
#define MUTEX_FLAG_HANDOFF      0x02
#define MUTEX_FLAG_PICKUP       0x04

#define MUTEX_FLAGS             0x07

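/*
 * Internal helper function; C doesn't allow us to hide it :-)
 *
 * DO NOT USE (outside of mutex code).
 */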
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
        return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
        return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
        return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
        return owner & MUTEX_FLAGS;
}

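/*
 * Trylock variant that returns the owning task on failure.
 */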
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
        unsigned long owner, curr = (unsigned long)current;

        owner = atomic_long_read(&lock->owner);
        for (;;) { /* must loop, can race against a flag */
                unsigned long old, flags = __owner_flags(owner);
                unsigned long task = owner & ~MUTEX_FLAGS;

                if (task) {
                        if (likely(task != curr))
                                break;

                        if (likely(!(flags & MUTEX_FLAG_PICKUP)))
                                break;

                        flags &= ~MUTEX_FLAG_PICKUP;
                } else {
#ifdef CONFIG_DEBUG_MUTEXES
                        DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
                }

                /*
                 * We set the HANDOFF bit, we must make sure it doesn't live
                 * past the point where we acquire it. This would be possible
                 * if we (accidentally) set the bit on an unlocked mutex.
                 */
                flags &= ~MUTEX_FLAG_HANDOFF;

                old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
                if (old == owner)
                        return NULL;

                owner = old;
        }

        return __owner_task(owner);
}

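/*
 * Actual trylock that will work on any unlocked state.
 */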
static inline bool __mutex_trylock(struct mutex *lock)
{
        return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC

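/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */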
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
        unsigned long curr = (unsigned long)current;
        unsigned long zero = 0UL;

        if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
                return true;

        return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
        unsigned long curr = (unsigned long)current;

        if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
                return true;

        return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
        atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
        atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
        return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

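/*
 * Add @waiter to a given location in the lock wait_list, and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */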
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
                   struct list_head *list)
{
        debug_mutex_add_waiter(lock, waiter, current);

        list_add_tail(&waiter->list, list);
        if (__mutex_waiter_is_first(lock, waiter))
                __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
        list_del(&waiter->list);
        if (likely(list_empty(&lock->wait_list)))
                __mutex_clear_flag(lock, MUTEX_FLAGS);

        debug_mutex_remove_waiter(lock, waiter, current);
}

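/*
 * Give up ownership to a specific task, when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock, the
 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 */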
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
        unsigned long owner = atomic_long_read(&lock->owner);

        for (;;) {
                unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
                DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

                new = (owner & MUTEX_FLAG_WAITERS);
                new |= (unsigned long)task;
                if (task)
                        new |= MUTEX_FLAG_PICKUP;

                old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
                if (old == owner)
                        break;

                owner = old;
        }
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC

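/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */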
static void __sched __mutex_lock_slowpath(struct mutex *lock);

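/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked.
 *
 * This function is similar to (but not equivalent to) down().
 */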
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();

        if (!__mutex_trylock_fast(lock))
                __mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

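/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */

/*
 * Associate the ww_acquire_ctx @ww_ctx with the ww_mutex @ww we just
 * acquired; under CONFIG_DEBUG_MUTEXES also sanity-check the context.
 */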
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        /*
         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
         * but released with a normal mutex_unlock in this call.
         *
         * This should never happen, always use ww_mutex_unlock.
         */
        DEBUG_LOCKS_WARN_ON(ww->ctx);

        /*
         * Not quite done after calling ww_acquire_done() ?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

        if (ww_ctx->contending_lock) {
                /*
                 * After -EDEADLK you tried to
                 * acquire a different ww_mutex? Bad!
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

                /*
                 * You called ww_mutex_lock after receiving -EDEADLK,
                 * but 'forgot' to unlock everything else first?
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
                ww_ctx->contending_lock = NULL;
        }

        /*
         * Naughty, using a different class will lead to undefined behavior!
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
        ww_ctx->acquired++;
        ww->ctx = ww_ctx;
}

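/*
 * Determine if context @a is 'after' context @b; IOW, @a is a younger
 * transaction than @b.
 *
 * Return true if A is after B; i.e. A is younger than B.
 */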
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
        return (signed long)(a->stamp - b->stamp) > 0;
}

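/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */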
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
               struct ww_acquire_ctx *ww_ctx)
{
        if (!ww_ctx->is_wait_die)
                return false;

        if (waiter->ww_ctx->acquired > 0 &&
                        __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
                debug_mutex_wake_waiter(lock, waiter);
                wake_up_process(waiter->task);
        }

        return true;
}

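/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holders. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */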
static bool __ww_mutex_wound(struct mutex *lock,
                             struct ww_acquire_ctx *ww_ctx,
                             struct ww_acquire_ctx *hold_ctx)
{
        struct task_struct *owner = __mutex_owner(lock);

        lockdep_assert_held(&lock->wait_lock);

        /*
         * Possible through __ww_mutex_add_waiter() when we race with
         * ww_mutex_set_context_fastpath(). In that case we'll get here again
         * through __ww_mutex_check_waiters().
         */
        if (!hold_ctx)
                return false;

        /*
         * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
         * it cannot go away because we'll have FLAG_WAITERS set and hold
         * wait_lock.
         */
        if (!owner)
                return false;

        if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
                hold_ctx->wounded = 1;

                /*
                 * wake_up_process() paired with set_current_state()
                 * inserts sufficient barriers to make sure @owner either sees
                 * it's wounded in __ww_mutex_check_kill() or has a
                 * wakeup pending to re-read the wounded state.
                 */
                if (owner != current)
                        wake_up_process(owner);

                return true;
        }

        return false;
}

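/*
 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */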
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
        struct mutex_waiter *cur;

        lockdep_assert_held(&lock->wait_lock);

        list_for_each_entry(cur, &lock->wait_list, list) {
                if (!cur->ww_ctx)
                        continue;

                if (__ww_mutex_die(lock, cur, ww_ctx) ||
                    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
                        break;
        }
}

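/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */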
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        ww_mutex_lock_acquired(lock, ctx);

        /*
         * The lock->ctx update should be visible on all cores before
         * the WAITERS check is done, otherwise contended waiters might be
         * missed. The contended waiters will either see ww_ctx == NULL
         * and keep spinning, or it will acquire wait_lock, add itself
         * to waiter list and sleep.
         */
        smp_mb(); /* See comments above and below. */

        /*
         * The memory barrier above pairs with the memory barrier in
         * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
         * and/or a non-empty wait list.
         */
        if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
                return;

        /*
         * Uh oh, we raced in fastpath, check if any of the waiters need to
         * die or wound us.
         */
        spin_lock(&lock->base.wait_lock);
        __ww_mutex_check_waiters(&lock->base, ctx);
        spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
                            struct mutex_waiter *waiter)
{
        struct ww_mutex *ww;

        ww = container_of(lock, struct ww_mutex, base);

        /*
         * If ww->ctx is set the contents are undefined, only
         * by acquiring wait_lock there is a guarantee that
         * they are not invalid when reading.
         *
         * As such, when deadlock detection needs to be
         * performed the optimistic spinning cannot be done.
         *
         * Check this in every inner iteration because we may
         * be racing against another thread's ww_mutex_lock.
         */
        if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
                return false;

        /*
         * If we aren't on the wait list yet, cancel the spin
         * if there are waiters. We want to avoid stealing the
         * lock from a waiter with an earlier stamp, since the
         * other thread may already own a lock that we also
         * need.
         */
        if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
                return false;

        /*
         * Similarly, stop spinning if we are no longer the
         * first waiter.
         */
        if (waiter && !__mutex_waiter_is_first(lock, waiter))
                return false;

        return true;
}

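/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */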
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
                         struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
        bool ret = true;

        rcu_read_lock();
        while (__mutex_owner(lock) == owner) {
                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking lock->owner still matches owner. If that fails,
                 * owner might point to freed memory. If it still matches,
                 * the rcu_read_lock() ensures the memory stays valid.
                 */
                barrier();

                /*
                 * Use vcpu_is_preempted() to detect lock holder preemption.
                 */
                if (!owner->on_cpu || need_resched() ||
                                vcpu_is_preempted(task_cpu(owner))) {
                        ret = false;
                        break;
                }

                if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
                        ret = false;
                        break;
                }

                cpu_relax();
        }
        rcu_read_unlock();

        return ret;
}

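/*
 * Initial check for entering the mutex spinning loop
 */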
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
        struct task_struct *owner;
        int retval = 1;

        if (need_resched())
                return 0;

        rcu_read_lock();
        owner = __mutex_owner(lock);

        /*
         * As lock holder preemption issue, we both skip spinning if task is
         * not on cpu or its cpu is preempted.
         */
        if (owner)
                retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
        rcu_read_unlock();

        /*
         * If lock->owner is not set, the mutex has been released. Return true
         * such that we'll trylock in the spin path, which is a faster option
         * than the blocking slow path.
         */
        return retval;
}

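/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */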
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
                      struct mutex_waiter *waiter)
{
        if (!waiter) {
                /*
                 * mutex_can_spin_on_owner() avoids the osq_lock()/osq_unlock()
                 * overhead when spinning isn't possible. A waiter-spinner is
                 * not going to take the OSQ lock anyway, so it skips this.
                 */
                if (!mutex_can_spin_on_owner(lock))
                        goto fail;

                /*
                 * In order to avoid a stampede of mutex spinners trying to
                 * acquire the mutex all at once, the spinners need to take a
                 * MCS (queued) lock first before spinning on the owner field.
                 */
                if (!osq_lock(&lock->osq))
                        goto fail;
        }

        for (;;) {
                struct task_struct *owner;

                /* Try to acquire the mutex... */
                owner = __mutex_trylock_or_owner(lock);
                if (!owner)
                        break;

                /*
                 * There's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
                        goto fail_unlock;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax();
        }

        if (!waiter)
                osq_unlock(&lock->osq);

        return true;

fail_unlock:
        if (!waiter)
                osq_unlock(&lock->osq);

fail:
        /*
         * If we fell out of the spin path because of need_resched(),
         * reschedule now, before we try-lock the mutex. This avoids getting
         * scheduled out right after we obtained the mutex.
         */
        if (need_resched()) {
                /*
                 * We _should_ have TASK_RUNNING here, but just in case
                 * we do not, make it so, otherwise we might get stuck.
                 */
                __set_current_state(TASK_RUNNING);
                schedule_preempt_disabled();
        }

        return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
                      struct mutex_waiter *waiter)
{
        return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

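/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */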
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
        if (__mutex_unlock_fast(lock))
                return;
#endif
        __mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

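/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */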
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
                if (lock->ctx->acquired > 0)
                        lock->ctx->acquired--;
                lock->ctx = NULL;
        }

        mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

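/*
 * Kill the current (younger) transaction: return -EDEADLK and, under
 * CONFIG_DEBUG_MUTEXES, record the contended lock so a correct retry
 * can later be verified.
 */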
static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
        if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
                struct ww_mutex *ww;

                ww = container_of(lock, struct ww_mutex, base);
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
                ww_ctx->contending_lock = ww;
#endif
                return -EDEADLK;
        }

        return 0;
}

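/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourself.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */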
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
                      struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
        struct mutex_waiter *cur;

        if (ctx->acquired == 0)
                return 0;

        if (!ctx->is_wait_die) {
                if (ctx->wounded)
                        return __ww_mutex_kill(lock, ctx);

                return 0;
        }

        if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
                return __ww_mutex_kill(lock, ctx);

        /*
         * If there is a waiter in front of us that has a context, then its
         * stamp is earlier than ours and we must kill ourself.
         */
        cur = waiter;
        list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
                if (!cur->ww_ctx)
                        continue;

                return __ww_mutex_kill(lock, ctx);
        }

        return 0;
}

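/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first. Such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourself immediately when possible (there are
 * older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */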
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
                      struct mutex *lock,
                      struct ww_acquire_ctx *ww_ctx)
{
        struct mutex_waiter *cur;
        struct list_head *pos;
        bool is_wait_die;

        if (!ww_ctx) {
                __mutex_add_waiter(lock, waiter, &lock->wait_list);
                return 0;
        }

        is_wait_die = ww_ctx->is_wait_die;

        /*
         * Add the waiter before the first waiter with a higher stamp.
         * Waiters without a context are skipped to avoid starving
         * them. Wait-Die waiters may die here. Wound-Wait waiters
         * never die here, but they are sorted in stamp order and
         * may wound the lock holder.
         */
        pos = &lock->wait_list;
        list_for_each_entry_reverse(cur, &lock->wait_list, list) {
                if (!cur->ww_ctx)
                        continue;

                if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
                        /*
                         * Wait-Die: if we find an older context waiting, there
                         * is no point in queueing behind it, as we'd have to
                         * die the moment it would acquire the lock.
                         */
                        if (is_wait_die) {
                                int ret = __ww_mutex_kill(lock, ww_ctx);

                                if (ret)
                                        return ret;
                        }

                        break;
                }

                pos = &cur->list;

                /* Wait-Die: ensure younger waiters die. */
                __ww_mutex_die(lock, cur, ww_ctx);
        }

        __mutex_add_waiter(lock, waiter, pos);

        /*
         * Wound-Wait: if we're blocking on a mutex owned by a younger context,
         * wound that such that we might proceed.
         */
        if (!is_wait_die) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

                /*
                 * See ww_mutex_set_context_fastpath(). Orders setting
                 * MUTEX_FLAG_WAITERS vs the ww->ctx load, such that either we
                 * or the fastpath will wound @ww->ctx.
                 */
                smp_mb();
                __ww_mutex_wound(lock, ww_ctx, ww->ctx);
        }

        return 0;
}

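/*
 * Lock a mutex (possibly interruptible), slowpath:
 */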
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
        struct mutex_waiter waiter;
        bool first = false;
        struct ww_mutex *ww;
        int ret;

        if (!use_ww_ctx)
                ww_ctx = NULL;

        might_sleep();

#ifdef CONFIG_DEBUG_MUTEXES
        DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

        ww = container_of(lock, struct ww_mutex, base);
        if (ww_ctx) {
                if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
                        return -EALREADY;

                /*
                 * Reset the wounded flag after a kill. No other process can
                 * race and wound us here since they can't have a valid owner
                 * pointer if we don't have any locks held.
                 */
                if (ww_ctx->acquired == 0)
                        ww_ctx->wounded = 0;
        }

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

        if (__mutex_trylock(lock) ||
            mutex_optimistic_spin(lock, ww_ctx, NULL)) {
                /* got the lock, yay! */
                lock_acquired(&lock->dep_map, ip);
                if (ww_ctx)
                        ww_mutex_set_context_fastpath(ww, ww_ctx);
                preempt_enable();
                return 0;
        }

        spin_lock(&lock->wait_lock);
        /*
         * After waiting to acquire the wait_lock, try again.
         */
        if (__mutex_trylock(lock)) {
                if (ww_ctx)
                        __ww_mutex_check_waiters(lock, ww_ctx);

                goto skip_wait;
        }

        debug_mutex_lock_common(lock, &waiter);

        lock_contended(&lock->dep_map, ip);

        if (!use_ww_ctx) {
                /* add waiting tasks to the end of the waitqueue (FIFO): */
                __mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
                waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
        } else {
                /*
                 * Add in stamp order, waking up waiters that must kill
                 * themselves.
                 */
                ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
                if (ret)
                        goto err_early_kill;

                waiter.ww_ctx = ww_ctx;
        }

        waiter.task = current;

        set_current_state(state);
        for (;;) {
                /*
                 * Once we hold wait_lock, we're serialized against
                 * mutex_unlock() handing the lock off to us, do a trylock
                 * before testing the error conditions to make sure we pick up
                 * the handoff.
                 */
                if (__mutex_trylock(lock))
                        goto acquired;

                /*
                 * Check for signals and kill conditions while holding
                 * wait_lock. This ensures the lock cancellation is ordered
                 * against mutex_unlock() and wake-ups do not go missing.
                 */
                if (signal_pending_state(state, current)) {
                        ret = -EINTR;
                        goto err;
                }

                if (ww_ctx) {
                        ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
                        if (ret)
                                goto err;
                }

                spin_unlock(&lock->wait_lock);
                schedule_preempt_disabled();

                /*
                 * ww_mutex needs to always recheck its position since its
                 * waiter list is not FIFO ordered.
                 */
                if (ww_ctx || !first) {
                        first = __mutex_waiter_is_first(lock, &waiter);
                        if (first)
                                __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
                }

                set_current_state(state);
                /*
                 * Here we order against unlock; we must either see it change
                 * state back to RUNNING and fall through the next schedule(),
                 * or we must see its unlock and acquire.
                 */
                if (__mutex_trylock(lock) ||
                    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
                        break;

                spin_lock(&lock->wait_lock);
        }
        spin_lock(&lock->wait_lock);
acquired:
        __set_current_state(TASK_RUNNING);

        if (ww_ctx) {
                /*
                 * Wound-Wait; we stole the lock (!first_waiter), check the
                 * waiters as anyone might want to wound us.
                 */
                if (!ww_ctx->is_wait_die &&
                    !__mutex_waiter_is_first(lock, &waiter))
                        __ww_mutex_check_waiters(lock, ww_ctx);
        }

        __mutex_remove_waiter(lock, &waiter);

        debug_mutex_free_waiter(&waiter);

skip_wait:
        /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);

        if (ww_ctx)
                ww_mutex_lock_acquired(ww, ww_ctx);

        spin_unlock(&lock->wait_lock);
        preempt_enable();
        return 0;

err:
        __set_current_state(TASK_RUNNING);
        __mutex_remove_waiter(lock, &waiter);
err_early_kill:
        spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, ip);
        preempt_enable();
        return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
             struct lockdep_map *nest_lock, unsigned long ip)
{
        return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
                struct lockdep_map *nest_lock, unsigned long ip,
                struct ww_acquire_ctx *ww_ctx)
{
        return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
        int token;

        might_sleep();

        token = io_schedule_prepare();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            subclass, NULL, _RET_IP_, NULL, 0);
        io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

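/*
 * Under CONFIG_DEBUG_WW_MUTEX_SLOWPATH, arbitrarily inject -EDEADLK (and drop
 * the lock) every deadlock_inject_interval acquisitions, to exercise the
 * -EDEADLK handling paths in ww_mutex users.
 */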
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
        unsigned tmp;

        if (ctx->deadlock_inject_countdown-- == 0) {
                tmp = ctx->deadlock_inject_interval;
                if (tmp > UINT_MAX/4)
                        tmp = UINT_MAX;
                else
                        tmp = tmp*2 + tmp + tmp/2;

                ctx->deadlock_inject_interval = tmp;
                ctx->deadlock_inject_countdown = tmp;
                ctx->contending_lock = lock;

                ww_mutex_unlock(lock);

                return -EDEADLK;
        }
#endif

        return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
                              ctx);
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
                              ctx);

        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif
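/*
 * Release the lock, slowpath:
 */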
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
        struct task_struct *next = NULL;
        DEFINE_WAKE_Q(wake_q);
        unsigned long owner;

        mutex_release(&lock->dep_map, ip);

        /*
         * Release the lock before (potentially) taking the spinlock such that
         * other contenders can get on with things ASAP.
         *
         * Except when HANDOFF, in that case we must not clear the owner field,
         * but instead set it to the top waiter.
         */
        owner = atomic_long_read(&lock->owner);
        for (;;) {
                unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
                DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

                if (owner & MUTEX_FLAG_HANDOFF)
                        break;

                old = atomic_long_cmpxchg_release(&lock->owner, owner,
                                                  __owner_flags(owner));
                if (old == owner) {
                        if (owner & MUTEX_FLAG_WAITERS)
                                break;

                        return;
                }

                owner = old;
        }

        spin_lock(&lock->wait_lock);
        debug_mutex_unlock(lock);
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                        list_first_entry(&lock->wait_list,
                                         struct mutex_waiter, list);

                next = waiter->task;

                debug_mutex_wake_waiter(lock, waiter);
                wake_q_add(&wake_q, next);
        }

        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);

        spin_unlock(&lock->wait_lock);

        wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC

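/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */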
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

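/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */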
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();

        if (__mutex_trylock_fast(lock))
                return 0;

        return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

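/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */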
int __sched mutex_lock_killable(struct mutex *lock)
{
        might_sleep();

        if (__mutex_trylock_fast(lock))
                return 0;

        return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

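/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */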
void __sched mutex_lock_io(struct mutex *lock)
{
        int token;

        token = io_schedule_prepare();
        mutex_lock(lock);
        io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
        return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
        return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
                               _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                       struct ww_acquire_ctx *ctx)
{
        return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
                               _RET_IP_, ctx);
}

#endif

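/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */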
int __sched mutex_trylock(struct mutex *lock)
{
        bool locked;

#ifdef CONFIG_DEBUG_MUTEXES
        DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

        locked = __mutex_trylock(lock);
        if (locked)
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

        return locked;
}
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        might_sleep();

        if (__mutex_trylock_fast(&lock->base)) {
                if (ctx)
                        ww_mutex_set_context_fastpath(lock, ctx);
                return 0;
        }

        return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        might_sleep();

        if (__mutex_trylock_fast(&lock->base)) {
                if (ctx)
                        ww_mutex_set_context_fastpath(lock, ctx);
                return 0;
        }

        return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif
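/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */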
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);