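/*
 * Read-Copy Update mechanism for mutual exclusion, realtime
 * (preemptible) implementation.
 */
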
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/rcupreempt_trace.h>
#include <asm/byteorder.h>
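
/*
 * PREEMPT_RCU data structures.
 *
 * Each grace period is divided into GP_STAGES stages.  Callbacks queued
 * by call_rcu() move from ->nextlist through the ->waitlist[] stages to
 * ->donelist as the global grace-period counter (->completed) advances,
 * and are invoked from ->donelist by rcu_process_callbacks().
 */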
#define GP_STAGES 2
struct rcu_data {
	spinlock_t	lock;		/* Protect rcu_data fields. */
	long		completed;	/* Number of last completed batch. */
	int		waitlistcount;
	struct rcu_head *nextlist;
	struct rcu_head **nexttail;
	struct rcu_head *waitlist[GP_STAGES];
	struct rcu_head **waittail[GP_STAGES];
	struct rcu_head *donelist;	/* Ready to invoke. */
	struct rcu_head **donetail;
	long		rcu_flipctr[2];	/* Per-phase read-side counters. */
	struct rcu_head *nextschedlist;
	struct rcu_head **nextschedtail;
	struct rcu_head *waitschedlist;
	struct rcu_head **waitschedtail;
	int		rcu_sched_sleeping;
#ifdef CONFIG_RCU_TRACE
	struct rcupreempt_trace trace;
#endif
};
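
/*
 * Grace-period state machine, driven by rcu_try_flip().  A full grace
 * period requires walking through all four states in order.
 */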
enum rcu_try_flip_states {

	/*
	 * Stay here if nothing is happening.  Flip the counter if
	 * something starts happening.
	 */
	rcu_try_flip_idle_state,

	/*
	 * Wait here for all CPUs to notice that the counter has flipped.
	 * This prevents the old set of counters from ever being
	 * incremented once we leave this state, which in turn is
	 * necessary because we cannot test any individual counter for
	 * zero -- we can only check the sum.
	 */
	rcu_try_flip_waitack_state,

	/*
	 * Wait here for the sum of the old per-CPU counters to reach zero.
	 */
	rcu_try_flip_waitzero_state,

	/*
	 * Wait here for each of the other CPUs to execute a memory
	 * barrier.  This is necessary to ensure that these other CPUs
	 * really have completed executing their RCU read-side critical
	 * sections, despite their CPUs wildly reordering memory.
	 */
	rcu_try_flip_waitmb_state,
};

/*
 * States of the sleep/wakeup handshake between the rcu_sched
 * grace-period kthread and call_rcu_sched().
 */
enum rcu_sched_sleep_states {
	rcu_sched_not_sleeping,	/* Not sleeping, callbacks need GP.  */
	rcu_sched_sleep_prep,	/* Thinking of sleeping, rechecking. */
	rcu_sched_sleeping,	/* Sleeping, awaken if GP needed. */
};

struct rcu_ctrlblk {
	spinlock_t	fliplock;	/* Protect state-machine transitions. */
	long		completed;	/* Number of last completed batch. */
	enum rcu_try_flip_states rcu_try_flip_state; /* State machine state. */

	spinlock_t	schedlock;	/* Protect rcu_sched sleep state. */
	enum rcu_sched_sleep_states sched_sleep; /* rcu_sched state. */
	wait_queue_head_t sched_wq;	/* Place for rcu_sched to sleep. */
};

static DEFINE_PER_CPU(struct rcu_data, rcu_data);
static struct rcu_ctrlblk rcu_ctrlblk = {
	.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
	.completed = 0,
	.rcu_try_flip_state = rcu_try_flip_idle_state,
	.schedlock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.schedlock),
	.sched_sleep = rcu_sched_not_sleeping,
	.sched_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_ctrlblk.sched_wq),
};

static struct task_struct *rcu_sched_grace_period_task;

#ifdef CONFIG_RCU_TRACE
static char *rcu_try_flip_state_names[] =
	{ "idle", "waitack", "waitzero", "waitmb" };
#endif

static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE;
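
/*
 * Enum and per-CPU flag to determine when each CPU has seen
 * the most recently completed counter flip.
 */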
enum rcu_flip_flag_values {
	rcu_flip_seen,		/* Steady/initial state, last flip seen. */
				/* Only grace-period detector can update. */
	rcu_flipped		/* Flip just completed, need confirmation. */
				/* Only corresponding CPU can update. */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_flip_flag_values, rcu_flip_flag)
								= rcu_flip_seen;
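
/*
 * Enum and per-CPU flag to determine when each CPU has executed the
 * memory barrier needed to fence in its prior RCU read-side critical
 * sections at the end of a grace period.
 */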
enum rcu_mb_flag_values {
	rcu_mb_done,		/* Steady/initial state, no mb()s required. */
				/* Only grace-period detector can update. */
	rcu_mb_needed		/* Flip just completed, need an mb(). */
				/* Only corresponding CPU can update. */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag)
								= rcu_mb_done;

/*
 * Macros to access the rcu_data structure of the running CPU or of a
 * specified CPU.
 */
#define RCU_DATA_ME() (&__get_cpu_var(rcu_data))
#define RCU_DATA_CPU(cpu) (&per_cpu(rcu_data, cpu))

/*
 * Helper macro for tracing when the appropriate rcu_data is not
 * cached in a local variable, but where the CPU number is so cached.
 */
#define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace));

/*
 * Helper macro for tracing when the appropriate rcu_data is not
 * cached in a local variable.
 */
#define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace));

/*
 * Helper macro for tracing when the appropriate rcu_data is pointed
 * to by a local variable.
 */
#define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace));

#define RCU_SCHED_BATCH_TIME (HZ / 50)

/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);
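
/*
 * Mark the beginning of an RCU read-side critical section.  On the
 * outermost nesting level this increments this CPU's rcu_flipctr for
 * the current grace-period phase; nested calls just bump the task's
 * nesting count.  Interrupts are disabled so that the counter update
 * and the recording of the counter index are atomic with respect to
 * interrupt handlers on this CPU.
 */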
void __rcu_read_lock(void)
{
	int idx;
	struct task_struct *t = current;
	int nesting;

	nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
	if (nesting != 0) {

		/* An earlier rcu_read_lock() covers us.  Just count it. */

		t->rcu_read_lock_nesting = nesting + 1;

	} else {
		unsigned long flags;

		/*
		 * Disable local interrupts to prevent the grace-period
		 * detection state machine from seeing us half-done.
		 */

		local_irq_save(flags);

		/*
		 * Outermost nesting of rcu_read_lock(), so increment
		 * this CPU's counter for the current grace-period phase.
		 */

		idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1;
		ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])++;

		/*
		 * Now that the counter is incremented, it is safe to let
		 * the grace-period machinery see this task's read-side
		 * critical section.
		 */

		ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting + 1;

		/*
		 * Record the counter index used, so that the matching
		 * rcu_read_unlock() decrements the same counter even if
		 * a counter flip happens in the meantime.
		 */

		ACCESS_ONCE(t->rcu_flipctr_idx) = idx;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

void __rcu_read_unlock(void)
{
	int idx;
	struct task_struct *t = current;
	int nesting;

	nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
	if (nesting > 1) {

		/* Still nested within an enclosing rcu_read_lock(). */

		t->rcu_read_lock_nesting = nesting - 1;

	} else {
		unsigned long flags;

		/*
		 * Disable local interrupts to prevent the grace-period
		 * detection state machine from seeing us half-done.
		 */

		local_irq_save(flags);

		/*
		 * Use the counter index recorded by the matching
		 * rcu_read_lock(); a counter flip may have occurred since
		 * then, and the old counter is the one to decrement.
		 */

		idx = ACCESS_ONCE(t->rcu_flipctr_idx);

		/* Announce that this task is exiting its critical section. */

		ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting - 1;

		/*
		 * Decrement the counter that the matching rcu_read_lock()
		 * incremented, possibly allowing the grace period to
		 * advance.
		 */

		ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])--;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
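
/*
 * If a counter flip has occurred since this CPU's callbacks were last
 * advanced, advance them: move callbacks that have waited long enough
 * to the donelist, shift the remaining wait stages, and pull newly
 * queued callbacks onto the first wait stage.  Then acknowledge any
 * pending counter flip on behalf of this CPU.  Caller must hold
 * rdp->lock.
 */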
static void __rcu_advance_callbacks(struct rcu_data *rdp)
{
	int cpu;
	int i;
	int wlc = 0;

	if (rdp->completed != rcu_ctrlblk.completed) {
		if (rdp->waitlist[GP_STAGES - 1] != NULL) {
			*rdp->donetail = rdp->waitlist[GP_STAGES - 1];
			rdp->donetail = rdp->waittail[GP_STAGES - 1];
			RCU_TRACE_RDP(rcupreempt_trace_move2done, rdp);
		}
		for (i = GP_STAGES - 2; i >= 0; i--) {
			if (rdp->waitlist[i] != NULL) {
				rdp->waitlist[i + 1] = rdp->waitlist[i];
				rdp->waittail[i + 1] = rdp->waittail[i];
				wlc++;
			} else {
				rdp->waitlist[i + 1] = NULL;
				rdp->waittail[i + 1] =
					&rdp->waitlist[i + 1];
			}
		}
		if (rdp->nextlist != NULL) {
			rdp->waitlist[0] = rdp->nextlist;
			rdp->waittail[0] = rdp->nexttail;
			wlc++;
			rdp->nextlist = NULL;
			rdp->nexttail = &rdp->nextlist;
			RCU_TRACE_RDP(rcupreempt_trace_move2wait, rdp);
		} else {
			rdp->waitlist[0] = NULL;
			rdp->waittail[0] = &rdp->waitlist[0];
		}
		rdp->waitlistcount = wlc;
		rdp->completed = rcu_ctrlblk.completed;
	}

	/*
	 * Check to see if this CPU needs to report that it has seen the
	 * most recent counter flip, thereby declaring that all subsequent
	 * rcu_read_lock() invocations will respect this flip.
	 */

	cpu = raw_smp_processor_id();
	if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
		smp_mb();  /* Subsequent counter accesses must see new value. */
		per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
		smp_mb();  /* Subsequent RCU read-side critical sections */
			   /* are seen -after- the acknowledgement. */
	}
}

DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
	.dynticks = 1,
};

#ifdef CONFIG_NO_HZ
static DEFINE_PER_CPU(int, rcu_update_flag);
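
/**
 * rcu_irq_enter - Called from hard irq and NMI/SMI handlers.
 *
 * If the CPU was idle with dynamic ticks active, update
 * rcu_dyntick_sched.dynticks to let the RCU handling know that the
 * CPU is active again.
 */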
void rcu_irq_enter(void)
{
	int cpu = smp_processor_id();
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	if (per_cpu(rcu_update_flag, cpu))
		per_cpu(rcu_update_flag, cpu)++;

	/*
	 * Only update if we are coming from a stopped-ticks mode
	 * (rcu_dyntick_sched.dynticks is even).
	 */
	if (!in_interrupt() &&
	    (rdssp->dynticks & 0x1) == 0) {

		/*
		 * The CPU was in dynticks-idle mode, so it is now entering
		 * an "active" state as far as RCU is concerned.  Make the
		 * dynticks counter odd to record this.
		 */

		rdssp->dynticks++;

		/*
		 * The following memory barrier ensures that any
		 * rcu_read_lock() primitives in the irq handler are seen
		 * by other CPUs to follow the above increment of
		 * rcu_dyntick_sched.dynticks, which other CPUs rely on
		 * when deciding whether it is safe to advance the
		 * grace-period state machine.
		 */

		smp_mb(); /* see above block comment. */

		/*
		 * Since the dynticks counter alone no longer tells us
		 * whether we came from an idle state with ticks stopped,
		 * record that fact in rcu_update_flag as well.
		 */

		per_cpu(rcu_update_flag, cpu)++;

		/*
		 * If an NMI/SMI arrives now, it will also increment
		 * rcu_update_flag, and will therefore not update
		 * rcu_dyntick_sched.dynticks on exit; that is left
		 * to this IRQ.
		 */
	}
}
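
/**
 * rcu_irq_exit - Called when exiting hard irq context.
 *
 * If the CPU was idle with dynamic ticks active before the interrupt,
 * update rcu_dyntick_sched.dynticks to put the CPU back into
 * dynticks-idle mode.
 */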
void rcu_irq_exit(void)
{
	int cpu = smp_processor_id();
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	/*
	 * rcu_update_flag is set if we interrupted the CPU while it was
	 * in dynticks-idle mode with ticks stopped.  Once set, we track
	 * interrupt nesting so that only the IRQ that made the dynticks
	 * counter odd is the one that makes it even again on exit.
	 */
	if (per_cpu(rcu_update_flag, cpu)) {
		if (--per_cpu(rcu_update_flag, cpu))
			return;

		/* This must match the interrupt nesting level. */
		WARN_ON(in_interrupt());

		/*
		 * The following memory barrier ensures that any
		 * rcu_read_unlock() primitives in the irq handler are
		 * seen by other CPUs to precede the following increment
		 * of rcu_dyntick_sched.dynticks, which other CPUs rely
		 * on when deciding whether it is safe to advance the
		 * grace-period state machine.
		 */

		smp_mb(); /* see above block comment. */
		rdssp->dynticks++;
		WARN_ON(rdssp->dynticks & 0x1);
	}
}

static void dyntick_save_progress_counter(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->dynticks_snap = rdssp->dynticks;
}

static inline int
rcu_try_flip_waitack_needed(int cpu)
{
	long curr;
	long snap;
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	curr = rdssp->dynticks;
	snap = rdssp->dynticks_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU remained in dynticks mode for the entire time and
	 * didn't take any interrupts, NMIs, SMIs, or whatever, then it
	 * cannot be in the middle of an rcu_read_lock(), so the next
	 * rcu_read_lock() it executes must use the new value of the
	 * counter.  We can therefore safely pretend that this CPU
	 * already acknowledged the counter.
	 */

	if ((curr == snap) && ((curr & 0x1) == 0))
		return 0;

	/*
	 * If the CPU passed through or entered a dynticks-idle phase with
	 * no active irq handlers, then, as above, we can safely pretend
	 * that this CPU already acknowledged the counter.
	 */

	if ((curr - snap) > 2 || (curr & 0x1) == 0)
		return 0;

	/* We need this CPU to explicitly acknowledge the counter flip. */

	return 1;
}

static inline int
rcu_try_flip_waitmb_needed(int cpu)
{
	long curr;
	long snap;
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	curr = rdssp->dynticks;
	snap = rdssp->dynticks_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU remained in dynticks mode for the entire time and
	 * didn't take any interrupts, NMIs, SMIs, or whatever, then it
	 * cannot have executed an RCU read-side critical section during
	 * that time, so there is no need for it to execute a memory
	 * barrier.
	 */

	if ((curr == snap) && ((curr & 0x1) == 0))
		return 0;

	/*
	 * If the CPU either entered or exited an outermost interrupt,
	 * SMI, NMI, or whatever handler, it executed a memory barrier
	 * when doing so, so we don't need another one.
	 */

	if (curr != snap)
		return 0;

	/* We need the CPU to execute a memory barrier. */

	return 1;
}

static void dyntick_save_progress_counter_sched(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->sched_dynticks_snap = rdssp->dynticks;
}

static int rcu_qsctr_inc_needed_dyntick(int cpu)
{
	long curr;
	long snap;
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	curr = rdssp->dynticks;
	snap = rdssp->sched_dynticks_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU remained in dynticks mode for the entire time and
	 * didn't take any interrupts, NMIs, SMIs, or whatever, then it
	 * has been in a quiescent state the entire time, and we don't
	 * need to wait for it.
	 */

	if ((curr == snap) && ((curr & 0x1) == 0))
		return 0;

	/*
	 * If the CPU passed through or entered a dynticks-idle phase with
	 * no active irq handlers, then, as above, it has already passed
	 * through a quiescent state.
	 */

	if ((curr - snap) > 2 || (snap & 0x1) == 0)
		return 0;

	/* We need this CPU to go through a quiescent state. */

	return 1;
}

#else

# define dyntick_save_progress_counter(cpu) do { } while (0)
# define rcu_try_flip_waitack_needed(cpu) (1)
# define rcu_try_flip_waitmb_needed(cpu) (1)

# define dyntick_save_progress_counter_sched(cpu) do { } while (0)
# define rcu_qsctr_inc_needed_dyntick(cpu) (1)

#endif

static void save_qsctr_sched(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->sched_qs_snap = rdssp->sched_qs;
}

static inline int rcu_qsctr_inc_needed(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	/*
	 * If the CPU has passed through a quiescent state since the
	 * beginning of the grace period, we don't need to wait for it.
	 */

	if (rdssp->sched_qs != rdssp->sched_qs_snap) {
		smp_mb(); /* force ordering with the quiescent-state update. */
		return 0;
	}

	/* We need this CPU to go through a quiescent state. */

	return 1;
}
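
/*
 * Called while the state machine is idle.  If there is RCU work pending
 * on this CPU, start a new grace period: increment the grace-period
 * counter (the "flip") and ask each online CPU to acknowledge it.
 * Returns non-zero if the state machine should advance to the
 * wait-for-acknowledgment state.
 */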
static int
rcu_try_flip_idle(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_i1);
	if (!rcu_pending(smp_processor_id())) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_ie1);
		return 0;
	}

	/*
	 * Do the flip.
	 */

	RCU_TRACE_ME(rcupreempt_trace_try_flip_g1);
	rcu_ctrlblk.completed++;

	/*
	 * Need a memory barrier so that other CPUs see the new counter
	 * value before they see the subsequent change of all the
	 * rcu_flip_flag instances to rcu_flipped.
	 */

	smp_mb();	/* see above block comment. */

	/* Ask each CPU to acknowledge the flip, snapshotting dynticks. */

	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
		dyntick_save_progress_counter(cpu);
	}

	return 1;
}
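
/*
 * Wait for CPUs to acknowledge the flip.  Returns non-zero once every
 * online CPU has either acknowledged the new grace-period counter or
 * been shown (via dynticks) not to need to acknowledge it.
 */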
static int
rcu_try_flip_waitack(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
		if (rcu_try_flip_waitack_needed(cpu) &&
		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
			return 0;
		}

	/*
	 * Make sure our checks above don't bleed into the subsequent
	 * wait for the sum of the counters to reach zero.
	 */

	smp_mb();	/* see above block comment. */
	RCU_TRACE_ME(rcupreempt_trace_try_flip_a2);
	return 1;
}
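
/*
 * Wait for the sum of the old ("last") per-CPU counters to reach zero,
 * then tell all CPUs to do an end-of-grace-period memory barrier.
 */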
static int
rcu_try_flip_waitzero(void)
{
	int cpu;
	int lastidx = !(rcu_ctrlblk.completed & 0x1);
	int sum = 0;

	/* Check to see if the sum of the "last" counters is zero. */

	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
	if (sum != 0) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
		return 0;
	}

	/*
	 * This ensures that the other CPUs see the call for memory
	 * barriers -after- the sum-to-zero has been detected here.
	 */

	smp_mb();	/* see above block comment. */

	/* Call for a memory barrier from each CPU. */

	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
		dyntick_save_progress_counter(cpu);
	}

	RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
	return 1;
}
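
/*
 * Wait for all CPUs to do their end-of-grace-period memory barriers.
 * Returns non-zero once all CPUs have either done so or been shown
 * (via dynticks) not to need to.
 */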
static int
rcu_try_flip_waitmb(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
		if (rcu_try_flip_waitmb_needed(cpu) &&
		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
			return 0;
		}

	smp_mb(); /* Ensure that the above checks precede any following flip. */
	RCU_TRACE_ME(rcupreempt_trace_try_flip_m2);
	return 1;
}
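
/*
 * Attempt to advance the grace-period state machine by one step.  This
 * is invoked opportunistically (with a trylock on fliplock) from
 * rcu_check_callbacks() and rcu_advance_callbacks(); if another CPU is
 * already running the state machine, simply return.
 */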
static void rcu_try_flip(void)
{
	unsigned long flags;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_1);
	if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_e1);
		return;
	}

	/*
	 * Take the next transition if it can be done without further
	 * waiting.
	 */

	switch (rcu_ctrlblk.rcu_try_flip_state) {
	case rcu_try_flip_idle_state:
		if (rcu_try_flip_idle())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitack_state;
		break;
	case rcu_try_flip_waitack_state:
		if (rcu_try_flip_waitack())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitzero_state;
		break;
	case rcu_try_flip_waitzero_state:
		if (rcu_try_flip_waitzero())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitmb_state;
		break;
	case rcu_try_flip_waitmb_state:
		if (rcu_try_flip_waitmb())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_idle_state;
	}
	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
}
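
/*
 * If this CPU has been asked to execute an end-of-grace-period memory
 * barrier, do so and report it done.
 */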
static void rcu_check_mb(int cpu)
{
	if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) {
		smp_mb();
		per_cpu(rcu_mb_flag, cpu) = rcu_mb_done;
	}
}

void rcu_check_callbacks(int cpu, int user)
{
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	/*
	 * If this CPU took its interrupt from user mode or from the idle
	 * loop, and this is not a nested interrupt, then this CPU is
	 * outside any RCU read-side critical section and has therefore
	 * passed through a quiescent state.  The memory barrier orders
	 * the CPU's prior accesses before the quiescent-state report.
	 */

	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		smp_mb();	/* see above block comment. */
		rcu_qsctr_inc(cpu);
	}

	rcu_check_mb(cpu);
	if (rcu_ctrlblk.completed == rdp->completed)
		rcu_try_flip();
	spin_lock_irqsave(&rdp->lock, flags);
	RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
	__rcu_advance_callbacks(rdp);
	if (rdp->donelist == NULL) {
		spin_unlock_irqrestore(&rdp->lock, flags);
	} else {
		spin_unlock_irqrestore(&rdp->lock, flags);
		raise_softirq(RCU_SOFTIRQ);
	}
}
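
/*
 * Advance this CPU's callbacks without waiting for the next
 * scheduling-clock interrupt: run the grace-period state machine if
 * this CPU has not yet seen a newer grace period, then advance the
 * callback lists under rdp->lock.
 */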
void rcu_advance_callbacks(int cpu, int user)
{
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	if (rcu_ctrlblk.completed == rdp->completed) {
		rcu_try_flip();
		if (rcu_ctrlblk.completed == rdp->completed)
			return;
	}
	spin_lock_irqsave(&rdp->lock, flags);
	RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
	__rcu_advance_callbacks(rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
#define rcu_offline_cpu_enqueue(srclist, srctail, dstlist, dsttail) do { \
		*dsttail = srclist; \
		if (srclist != NULL) { \
			dsttail = srctail; \
			srclist = NULL; \
			srctail = &srclist;\
		} \
	} while (0)

void rcu_offline_cpu(int cpu)
{
	int i;
	struct rcu_head *list = NULL;
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);
	struct rcu_head *schedlist = NULL;
	struct rcu_head **schedtail = &schedlist;
	struct rcu_head **tail = &list;

	/*
	 * Remove all of the outgoing CPU's callbacks, preserving
	 * their order.
	 */

	spin_lock_irqsave(&rdp->lock, flags);
	rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail);
	for (i = GP_STAGES - 1; i >= 0; i--)
		rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i],
					list, tail);
	rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail);
	rcu_offline_cpu_enqueue(rdp->waitschedlist, rdp->waitschedtail,
				schedlist, schedtail);
	rcu_offline_cpu_enqueue(rdp->nextschedlist, rdp->nextschedtail,
				schedlist, schedtail);
	rdp->rcu_sched_sleeping = 0;
	spin_unlock_irqrestore(&rdp->lock, flags);
	rdp->waitlistcount = 0;

	/* Disengage the outgoing CPU from the grace-period computation. */

	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
	rcu_check_mb(cpu);
	if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
		smp_mb();  /* Subsequent counter accesses must see new value. */
		per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
		smp_mb();  /* Subsequent RCU read-side critical sections */
			   /* are seen -after- the acknowledgement. */
	}

	RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0];
	RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1];

	RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
	RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;

	cpu_clear(cpu, rcu_cpu_online_map);

	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);

	/*
	 * Place the removed callbacks on the current CPU's queue so that
	 * they start a new grace period and are eventually invoked.
	 */

	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	*rdp->nexttail = list;
	if (list)
		rdp->nexttail = tail;
	*rdp->nextschedtail = schedlist;
	if (schedlist)
		rdp->nextschedtail = schedtail;
	spin_unlock_irqrestore(&rdp->lock, flags);
}

#else

void rcu_offline_cpu(int cpu)
{
}

#endif

void __cpuinit rcu_online_cpu(int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;

	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
	cpu_set(cpu, rcu_cpu_online_map);
	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);

	/*
	 * The rcu_sched grace-period processing might have bypassed this
	 * CPU, given that it was not in rcu_cpu_online_map when the
	 * grace-period scan started, so the grace-period kthread might
	 * be sleeping.  Mark this CPU as sleeping so that the first
	 * callback posted to it wakes up the grace-period kthread if
	 * need be.
	 */

	rdp = RCU_DATA_CPU(cpu);
	spin_lock_irqsave(&rdp->lock, flags);
	rdp->rcu_sched_sleeping = 1;
	spin_unlock_irqrestore(&rdp->lock, flags);
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
	unsigned long flags;
	struct rcu_head *next, *list;
	struct rcu_data *rdp;

	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	list = rdp->donelist;
	if (list == NULL) {
		spin_unlock_irqrestore(&rdp->lock, flags);
		return;
	}
	rdp->donelist = NULL;
	rdp->donetail = &rdp->donelist;
	RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
	while (list) {
		next = list->next;
		list->func(list);
		list = next;
		RCU_TRACE_ME(rcupreempt_trace_invoke);
	}
}

void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	__rcu_advance_callbacks(rdp);
	*rdp->nexttail = head;
	rdp->nexttail = &head->next;
	RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
}
EXPORT_SYMBOL_GPL(call_rcu);

void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;
	int wake_gp = 0;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	*rdp->nextschedtail = head;
	rdp->nextschedtail = &head->next;
	if (rdp->rcu_sched_sleeping) {

		/* Grace-period processing might be sleeping... */

		rdp->rcu_sched_sleeping = 0;
		wake_gp = 1;
	}
	spin_unlock_irqrestore(&rdp->lock, flags);
	if (wake_gp) {

		/* Wake up grace-period processing, unless someone beat us. */

		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleeping)
			wake_gp = 0;
		rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping;
		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		if (wake_gp)
			wake_up_interruptible(&rcu_ctrlblk.sched_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
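
/*
 * Wait until all currently running preempt_disable() code segments
 * (including hardware-irq-disable segments) complete.  Implemented in
 * terms of call_rcu_sched() via the synchronize_rcu_xxx() template.
 */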
synchronize_rcu_xxx(__synchronize_sched, call_rcu_sched)
EXPORT_SYMBOL_GPL(__synchronize_sched);
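
/*
 * kthread function that manages call_rcu_sched() grace periods.
 */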
static int rcu_sched_grace_period(void *arg)
{
	int couldsleep;		/* might sleep after current pass. */
	int couldsleepnext = 0;	/* might sleep after next pass. */
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;
	int ret;

	/*
	 * Each pass through the following loop handles one
	 * rcu_sched grace-period cycle.
	 */
	do {
		/* Save each CPU's current state. */

		for_each_online_cpu(cpu) {
			dyntick_save_progress_counter_sched(cpu);
			save_qsctr_sched(cpu);
		}

		/*
		 * Sleep for about a grace period's worth to allow
		 * better batching and to consume less CPU.
		 */
		schedule_timeout_interruptible(RCU_SCHED_BATCH_TIME);

		/*
		 * If there was nothing to do last time, prepare to
		 * sleep at the end of the current grace-period cycle.
		 */
		couldsleep = couldsleepnext;
		couldsleepnext = 1;
		if (couldsleep) {
			spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
			rcu_ctrlblk.sched_sleep = rcu_sched_sleep_prep;
			spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		}

		/*
		 * Wait on each CPU in turn to have either visited a
		 * quiescent state or been in dynticks-idle mode.
		 */
		for_each_online_cpu(cpu) {
			while (rcu_qsctr_inc_needed(cpu) &&
			       rcu_qsctr_inc_needed_dyntick(cpu)) {
				schedule_timeout_interruptible(1);
			}
		}

		/* Advance callbacks for each CPU.  */

		for_each_online_cpu(cpu) {

			rdp = RCU_DATA_CPU(cpu);
			spin_lock_irqsave(&rdp->lock, flags);

			/*
			 * Anything that was waiting through the interval
			 * that just completed may now be invoked, so move
			 * it to the donelist (shared with normal RCU, which
			 * invokes callbacks the same way), and start newly
			 * posted callbacks waiting.
			 */
			if (rdp->waitschedlist != NULL) {
				*rdp->donetail = rdp->waitschedlist;
				rdp->donetail = rdp->waitschedtail;

				/*
				 * The next rcu_check_callbacks() will see
				 * the non-empty donelist and do the
				 * required raise_softirq().
				 */
			}
			if (rdp->nextschedlist != NULL) {
				rdp->waitschedlist = rdp->nextschedlist;
				rdp->waitschedtail = rdp->nextschedtail;
				couldsleep = 0;
				couldsleepnext = 0;
			} else {
				rdp->waitschedlist = NULL;
				rdp->waitschedtail = &rdp->waitschedlist;
			}
			rdp->nextschedlist = NULL;
			rdp->nextschedtail = &rdp->nextschedlist;

			/* Mark sleep intention. */

			rdp->rcu_sched_sleeping = couldsleep;

			spin_unlock_irqrestore(&rdp->lock, flags);
		}

		/* If we saw callbacks on the last scan, go deal with them. */

		if (!couldsleep)
			continue;

		/* Attempt to block... */

		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleep_prep) {

			/*
			 * Someone posted a callback after we scanned.
			 * Go take care of it.
			 */
			spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
			couldsleepnext = 0;
			continue;
		}

		/* Block until the next person posts a callback. */

		rcu_ctrlblk.sched_sleep = rcu_sched_sleeping;
		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		ret = 0;
		__wait_event_interruptible(rcu_ctrlblk.sched_wq,
			rcu_ctrlblk.sched_sleep != rcu_sched_sleeping,
			ret);

		/*
		 * Signals would prevent us from sleeping, and we cannot
		 * do much with them in any case.  So flush them.
		 */
		if (ret)
			flush_signals(current);
		couldsleepnext = 0;

	} while (!kthread_should_stop());

	return 0;
}
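
/*
 * Check to see if any future RCU-related work will need to be done by
 * the current CPU, even if none need be done immediately, returning 1
 * if so.  Used to decide whether the CPU may enter dynticks-idle mode.
 */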
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	return (rdp->donelist != NULL ||
		!!rdp->waitlistcount ||
		rdp->nextlist != NULL ||
		rdp->nextschedlist != NULL ||
		rdp->waitschedlist != NULL);
}

int rcu_pending(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	/* The CPU has at least one callback queued somewhere. */

	if (rdp->donelist != NULL ||
	    !!rdp->waitlistcount ||
	    rdp->nextlist != NULL ||
	    rdp->nextschedlist != NULL ||
	    rdp->waitschedlist != NULL)
		return 1;

	/* The RCU core needs an acknowledgement from this CPU. */

	if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) ||
	    (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed))
		return 1;

	/* The RCU core has gone ahead of this CPU; catch up. */

	if (rdp->completed != rcu_ctrlblk.completed)
		return 1;

	/* Nothing needed from this CPU. */

	return 0;
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_online_cpu(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call = rcu_cpu_notify,
};

void __init __rcu_init(void)
{
	int cpu;
	int i;
	struct rcu_data *rdp;

	printk(KERN_NOTICE "Preemptible RCU implementation.\n");
	for_each_possible_cpu(cpu) {
		rdp = RCU_DATA_CPU(cpu);
		spin_lock_init(&rdp->lock);
		rdp->completed = 0;
		rdp->waitlistcount = 0;
		rdp->nextlist = NULL;
		rdp->nexttail = &rdp->nextlist;
		for (i = 0; i < GP_STAGES; i++) {
			rdp->waitlist[i] = NULL;
			rdp->waittail[i] = &rdp->waitlist[i];
		}
		rdp->donelist = NULL;
		rdp->donetail = &rdp->donelist;
		rdp->rcu_flipctr[0] = 0;
		rdp->rcu_flipctr[1] = 0;
		rdp->nextschedlist = NULL;
		rdp->nextschedtail = &rdp->nextschedlist;
		rdp->waitschedlist = NULL;
		rdp->waitschedtail = &rdp->waitschedlist;
		rdp->rcu_sched_sleeping = 0;
	}
	register_cpu_notifier(&rcu_nb);

	/*
	 * Mark any CPUs that are already online, since the notifier
	 * registered above will only see CPUs that come online later.
	 */

	for_each_online_cpu(cpu)
		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu);

	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
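
/*
 * Late-boot-time RCU initialization that must wait until after the
 * scheduler has been initialized: spawn the rcu_sched grace-period
 * kthread.
 */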
void __init rcu_init_sched(void)
{
	rcu_sched_grace_period_task = kthread_run(rcu_sched_grace_period,
						  NULL,
						  "rcu_sched_grace_period");
	WARN_ON(IS_ERR(rcu_sched_grace_period_task));
}

#ifdef CONFIG_RCU_TRACE
long *rcupreempt_flipctr(int cpu)
{
	return &RCU_DATA_CPU(cpu)->rcu_flipctr[0];
}
EXPORT_SYMBOL_GPL(rcupreempt_flipctr);

int rcupreempt_flip_flag(int cpu)
{
	return per_cpu(rcu_flip_flag, cpu);
}
EXPORT_SYMBOL_GPL(rcupreempt_flip_flag);

int rcupreempt_mb_flag(int cpu)
{
	return per_cpu(rcu_mb_flag, cpu);
}
EXPORT_SYMBOL_GPL(rcupreempt_mb_flag);

char *rcupreempt_try_flip_state_name(void)
{
	return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state];
}
EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name);

struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	return &rdp->trace;
}
EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu);

#endif