/*
 * Read-Copy Update mechanism for mutual exclusion, the Bonsai Edition
 * ("Tiny RCU"): a minimal RCU implementation for uniprocessor (!SMP)
 * kernels.
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of the Read-Copy Update mechanism see
 * Documentation/RCU.
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

/* Interrupt/process nesting level, used for dyntick-idle tracking. */
static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;

/*
 * Common code for rcu_idle_enter() and rcu_irq_exit().  If the new value
 * of ->dynticks_nesting is zero, we have fully entered dyntick-idle mode,
 * which is an extended quiescent state.
 */
static void rcu_idle_enter_common(long long oldval)
{
	if (rcu_dynticks_nesting) {
		RCU_TRACE(trace_rcu_dyntick("--=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm);
	}
	rcu_sched_qs(0); /* also records an rcu_bh quiescent state */
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting = 0;
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting--;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}

/*
 * Common code for rcu_idle_exit() and rcu_irq_enter().  If the old value
 * of ->dynticks_nesting was zero, we are leaving dyntick-idle mode, so
 * RCU once again watches this CPU.
 */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick("++=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm);
	}
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(oldval != 0);
	rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
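
/*
 * For illustration only: a schematic trace of rcu_dynticks_nesting on a
 * CPU that goes idle and takes one interrupt while idle.  The exact
 * DYNTICK_TASK_NESTING value is a large constant; what matters is
 * whether the counter is zero (idle) or not (RCU must watch this CPU):
 *
 *	running task		nesting == DYNTICK_TASK_NESTING
 *	rcu_idle_enter()	nesting == 0  (extended quiescent state)
 *	rcu_irq_enter()		nesting == 1  (RCU watches again)
 *	rcu_irq_exit()		nesting == 0  (back to dyntick-idle)
 *	rcu_idle_exit()		nesting == DYNTICK_TASK_NESTING
 */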

#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
	return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count, we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 0;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}
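
/*
 * For illustration only: each rcu_ctrlblk keeps a single callback list
 * partitioned by two tail pointers.  Callbacks before *->donetail have
 * already waited out a grace period and are ready to invoke; those up to
 * *->curtail still await a quiescent state.  For example:
 *
 *	->rcucblist -> cb1 -> cb2 -> cb3 -> NULL
 *	                  ^                ^
 *	            ->donetail        ->curtail
 *
 * Here cb1 is ready to invoke while cb2 and cb3 are still waiting.
 * rcu_qsctr_help() advances ->donetail to ->curtail, marking everything
 * currently queued as ready for __rcu_process_callbacks().
 */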

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      rcu_is_callbacks_kthread()));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
				      is_idle_task(current),
				      rcu_is_callbacks_kthread()));
}

/*
 * Process callbacks for all flavors of RCU.  Runs in softirq context.
 */
static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
	rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
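
/*
 * For illustration only: a minimal sketch of the classic update-side
 * pattern that relies on synchronize_sched().  The "struct foo" element
 * and the list it lives on are hypothetical, not part of this file:
 *
 *	list_del_rcu(&p->list);	// unlink; readers may still hold p
 *	synchronize_sched();	// wait for pre-existing readers to finish
 *	kfree(p);		// no reader can reference p any longer
 */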

/*
 * Helper function for call_rcu() and call_rcu_bh(): enqueue the callback
 * at the tail of the specified rcu_ctrlblk list.
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
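
/*
 * For illustration only: a hypothetical caller embeds an rcu_head in its
 * own structure and frees that structure from the callback.  The names
 * "struct foo" and foo_reclaim() are examples, not part of this file:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	call_rcu_sched(&p->rcu, foo_reclaim);	// p freed after grace period
 */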

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);