// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * CPU stop machinery: run callbacks from the highest-priority
 * "migration/N" stopper threads on one or more CPUs, optionally with
 * interrupts disabled on every online CPU (stop_machine()).
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
	unsigned long		caller;
	cpu_stop_fn_t		fn;
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

void print_stop_info(const char *log_lvl, struct task_struct *task)
{
	/*
	 * If @task is a stopper task, it cannot migrate and task_cpu() is
	 * stable.
	 */
	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));

	if (task != stopper->thread)
		return;

	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
}

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* drop one todo reference; fire the completion when the last one is dropped */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work,
				  struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/*
 * Queue @work to @stopper.  If the stopper is disabled (its CPU is
 * offline or parked), @work is completed immediately instead.
 */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority, preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee that @cpu stays online until @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * Returns:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;

	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
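
/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * caller of stop_one_cpu().  The callback "report_cpu" is hypothetical.
 * It runs in the stopper thread with preemption disabled, so it must not
 * sleep.
 *
 *	static int report_cpu(void *arg)
 *	{
 *		pr_info("stopper callback on CPU%d\n", smp_processor_id());
 *		return 0;
 *	}
 *
 *	int err = stop_one_cpu(1, report_cpu, NULL);
 *	if (err == -ENOENT)
 *		pr_warn("CPU1 was offline, callback was not run\n");
 */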

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}
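
/*
 * Editor's note, not part of the original file: a worked example of the
 * set_state()/ack_state() handshake.  With num_threads == 3,
 * set_state(MULTI_STOP_PREPARE) sets thread_ack to 3.  Each stopper that
 * observes the new state in multi_cpu_stop() calls ack_state(); the third
 * (last) ack advances state to MULTI_STOP_DISABLE_IRQ and resets
 * thread_ack to 3 again, and so on through MULTI_STOP_RUN to
 * MULTI_STOP_EXIT, so all participating CPUs step through the states in
 * lockstep.
 */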

notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop.  Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
		rcu_momentary_dyntick_idle();
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us. This will cause us to not wake up the other
	 * stopper forever.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done,
		.caller = _RET_IP_,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
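
/*
 * Editor's illustrative sketch, not part of the original file: pairing two
 * CPUs the way the scheduler does for cross-CPU task swaps.  The callback
 * "pair_fn" is hypothetical.
 *
 *	static int pair_fn(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	int err = stop_two_cpus(src_cpu, dst_cpu, pair_fn, NULL);
 *
 * @fn runs once, on @cpu1's stopper, while both stoppers spin in
 * multi_cpu_stop() with interrupts disabled; -ENOENT is returned if either
 * stopper is disabled.
 */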

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until the stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * Returns:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
	return cpu_stop_queue_work(cpu, work_buf);
}
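
/*
 * Editor's illustrative sketch, not part of the original file: fire-and-
 * forget queueing.  @work_buf must remain untouched until the stopper has
 * started executing @fn, which is why real callers embed the cpu_stop_work
 * in a longer-lived object.  "struct kicker" and "kick_fn" are
 * hypothetical.
 *
 *	struct kicker {
 *		struct cpu_stop_work work;
 *		int reason;
 *	};
 *
 *	static int kick_fn(void *arg)
 *	{
 *		struct kicker *k = arg;
 *
 *		pr_info("kicked, reason %d\n", k->reason);
 *		return 0;
 *	}
 *
 *	if (!stop_one_cpu_nowait(cpu, kick_fn, k, &k->work))
 *		pr_warn("stopper on CPU%u is not available\n", cpu);
 */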

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		work->caller = _RET_IP_;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority,
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * until @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized, making it safe for @fn to wait
 * for all cpu stoppers to enter @fn and accomplish its purpose.
 *
 * CONTEXT:
 * Might sleep.
 *
 * Returns:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		stopper->caller = work->caller;
		stopper->fn = fn;
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		stopper->fn = NULL;
		stopper->caller = 0;
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks; until then it is fine to queue
	 * new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot, before the stopper threads have been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
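
/*
 * Editor's illustrative sketch, not part of the original file: the classic
 * stop_machine() pattern for flipping global state that no CPU may observe
 * half-done.  "struct update", "apply_update" and "some_global" are
 * hypothetical.
 *
 *	struct update {
 *		unsigned long new_value;
 *	};
 *
 *	static int apply_update(void *arg)
 *	{
 *		struct update *u = arg;
 *
 *		some_global = u->new_value;
 *		return 0;
 *	}
 *
 *	struct update u = { .new_value = 42 };
 *	int err = stop_machine(apply_update, &u, NULL);
 *
 * With @cpus == NULL the callback runs on the first online CPU while every
 * other online CPU spins in multi_cpu_stop() with interrupts disabled, so
 * the callback must not sleep or take locks that another CPU might hold.
 */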

#ifdef CONFIG_SCHED_SMT
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);

	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = cpumask_weight(smt_mask),
		.active_cpus = smt_mask,
	};

	lockdep_assert_cpus_held();

	/* Set the initial state and stop all CPUs in the SMT mask. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(smt_mask, multi_cpu_stop, &msdata);
}
EXPORT_SYMBOL_GPL(stop_core_cpuslocked);
#endif
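
/*
 * Editor's illustrative sketch, not part of the original file: stopping
 * only the SMT siblings of one core, e.g. before touching per-core
 * hardware state.  The caller must already hold the CPU hotplug read lock;
 * "core_fn" is hypothetical and runs on every sibling in the core.
 *
 *	cpus_read_lock();
 *	err = stop_core_cpuslocked(cpu, core_fn, NULL);
 *	cpus_read_unlock();
 */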

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = run on the first online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start), not marked active, and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such a case by
 * busy-waiting on the mutex and the completion instead of sleeping, and
 * by running multi_cpu_stop() directly on the local CPU.
 *
 * CONTEXT:
 * Local cpu is inactive.  Temporarily stops all active cpus.
 *
 * Returns:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for the local CPU */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}