// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic, or that try to execute more
 * than their reserved bandwidth, will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 */
18#include "sched.h"
19#include "pelt.h"
20
21struct dl_bandwidth def_dl_bandwidth;
22
23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24{
25 return container_of(dl_se, struct task_struct, dl);
26}
27
28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29{
30 return container_of(dl_rq, struct rq, dl);
31}
32
33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34{
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
37
38 return &rq->dl;
39}
40
41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42{
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
44}
45
46#ifdef CONFIG_RT_MUTEXES
47static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
48{
49 return dl_se->pi_se;
50}
51
52static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
53{
54 return pi_of(dl_se) != dl_se;
55}
56#else
57static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
58{
59 return dl_se;
60}
61
62static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
63{
64 return false;
65}
66#endif
67
68#ifdef CONFIG_SMP
69static inline struct dl_bw *dl_bw_of(int i)
70{
71 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
72 "sched RCU must be held");
73 return &cpu_rq(i)->rd->dl_bw;
74}
75
76static inline int dl_bw_cpus(int i)
77{
78 struct root_domain *rd = cpu_rq(i)->rd;
79 int cpus;
80
81 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
82 "sched RCU must be held");
83
84 if (cpumask_subset(rd->span, cpu_active_mask))
85 return cpumask_weight(rd->span);
86
87 cpus = 0;
88
89 for_each_cpu_and(i, rd->span, cpu_active_mask)
90 cpus++;
91
92 return cpus;
93}
94
95static inline unsigned long __dl_bw_capacity(int i)
96{
97 struct root_domain *rd = cpu_rq(i)->rd;
98 unsigned long cap = 0;
99
100 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
101 "sched RCU must be held");
102
103 for_each_cpu_and(i, rd->span, cpu_active_mask)
104 cap += capacity_orig_of(i);
105
106 return cap;
107}
108
/*
 * Capacity available to SCHED_DEADLINE admission control: the sum of the
 * original capacities of the root domain's active CPUs. On symmetric
 * systems where every CPU has the default capacity this is simply the
 * number of active CPUs scaled by SCHED_CAPACITY_SCALE, which is what the
 * fast path below returns.
 */
113static inline unsigned long dl_bw_capacity(int i)
114{
115 if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
116 capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
117 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
118 } else {
119 return __dl_bw_capacity(i);
120 }
121}
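/*
 * Example: on a 4-CPU symmetric system with the default per-CPU capacity
 * (SCHED_CAPACITY_SCALE == 1024), the fast path above returns
 * 4 << SCHED_CAPACITY_SHIFT == 4096.
 */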
122
123static inline bool dl_bw_visited(int cpu, u64 gen)
124{
125 struct root_domain *rd = cpu_rq(cpu)->rd;
126
127 if (rd->visit_gen == gen)
128 return true;
129
130 rd->visit_gen = gen;
131 return false;
132}
133#else
134static inline struct dl_bw *dl_bw_of(int i)
135{
136 return &cpu_rq(i)->dl.dl_bw;
137}
138
139static inline int dl_bw_cpus(int i)
140{
141 return 1;
142}
143
144static inline unsigned long dl_bw_capacity(int i)
145{
146 return SCHED_CAPACITY_SCALE;
147}
148
149static inline bool dl_bw_visited(int cpu, u64 gen)
150{
151 return false;
152}
153#endif
154
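/*
 * dl_rq->this_bw is the sum of the bandwidth (dl_bw) of all deadline tasks
 * admitted on this runqueue, runnable or blocked; dl_rq->running_bw only
 * counts tasks that are "active" for GRUB purposes, i.e. contending or
 * blocked but not yet past their 0-lag time. running_bw <= this_bw must
 * hold at all times, which is what the SCHED_WARN_ON()s below check.
 */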
155static inline
156void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
157{
158 u64 old = dl_rq->running_bw;
159
160 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
161 dl_rq->running_bw += dl_bw;
162 SCHED_WARN_ON(dl_rq->running_bw < old);
163 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
164
165 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
166}
167
168static inline
169void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
170{
171 u64 old = dl_rq->running_bw;
172
173 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
174 dl_rq->running_bw -= dl_bw;
175 SCHED_WARN_ON(dl_rq->running_bw > old);
176 if (dl_rq->running_bw > old)
177 dl_rq->running_bw = 0;
178
179 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
180}
181
182static inline
183void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
184{
185 u64 old = dl_rq->this_bw;
186
187 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
188 dl_rq->this_bw += dl_bw;
189 SCHED_WARN_ON(dl_rq->this_bw < old);
190}
191
192static inline
193void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
194{
195 u64 old = dl_rq->this_bw;
196
197 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
198 dl_rq->this_bw -= dl_bw;
199 SCHED_WARN_ON(dl_rq->this_bw > old);
200 if (dl_rq->this_bw > old)
201 dl_rq->this_bw = 0;
202 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
203}
204
205static inline
206void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
207{
208 if (!dl_entity_is_special(dl_se))
209 __add_rq_bw(dl_se->dl_bw, dl_rq);
210}
211
212static inline
213void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
214{
215 if (!dl_entity_is_special(dl_se))
216 __sub_rq_bw(dl_se->dl_bw, dl_rq);
217}
218
219static inline
220void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
221{
222 if (!dl_entity_is_special(dl_se))
223 __add_running_bw(dl_se->dl_bw, dl_rq);
224}
225
226static inline
227void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
228{
229 if (!dl_entity_is_special(dl_se))
230 __sub_running_bw(dl_se->dl_bw, dl_rq);
231}
232
233static void dl_change_utilization(struct task_struct *p, u64 new_bw)
234{
235 struct rq *rq;
236
237 BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
238
239 if (task_on_rq_queued(p))
240 return;
241
242 rq = task_rq(p);
243 if (p->dl.dl_non_contending) {
244 sub_running_bw(&p->dl, &rq->dl);
245 p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer() will
		 * see that dl_non_contending is not set and will not
		 * touch the rq's active utilization, so we are still safe.
		 */
253 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
254 put_task_struct(p);
255 }
256 __sub_rq_bw(p->dl.dl_bw, &rq->dl);
257 __add_rq_bw(new_bw, &rq->dl);
258}
259
/*
 * The utilization of a blocking task cannot be removed from the rq's
 * running_bw (active utilization) right away, otherwise the GRUB reclaiming
 * and frequency-scaling guarantees would be broken. Instead, the task is
 * marked "non contending" and the "inactive timer" is armed to fire at the
 * task's 0-lag time; only if the task has not woken up by then is its
 * bandwidth removed from running_bw. If the task wakes up earlier, the
 * timer is cancelled and the active utilization does not change.
 */
314static void task_non_contending(struct task_struct *p)
315{
316 struct sched_dl_entity *dl_se = &p->dl;
317 struct hrtimer *timer = &dl_se->inactive_timer;
318 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
319 struct rq *rq = rq_of_dl_rq(dl_rq);
320 s64 zerolag_time;
321
	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
326 if (dl_se->dl_runtime == 0)
327 return;
328
329 if (dl_entity_is_special(dl_se))
330 return;
331
332 WARN_ON(dl_se->dl_non_contending);
333
334 zerolag_time = dl_se->deadline -
335 div64_long((dl_se->runtime * dl_se->dl_period),
336 dl_se->dl_runtime);
337
338
339
340
341
342 zerolag_time -= rq_clock(rq);
343
	/*
	 * If the "0-lag time" is already in the past, decrease the active
	 * utilization now instead of arming the inactive timer.
	 */
348 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
349 if (dl_task(p))
350 sub_running_bw(dl_se, dl_rq);
351 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
352 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
353
354 if (READ_ONCE(p->__state) == TASK_DEAD)
355 sub_rq_bw(&p->dl, &rq->dl);
356 raw_spin_lock(&dl_b->lock);
357 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
358 __dl_clear_params(p);
359 raw_spin_unlock(&dl_b->lock);
360 }
361
362 return;
363 }
364
365 dl_se->dl_non_contending = 1;
366 get_task_struct(p);
367 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
368}
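/*
 * Example: with dl_runtime = 10 ms, dl_period = 100 ms and 2 ms of runtime
 * left, the computation above places the 0-lag time 2 * 100 / 10 = 20 ms
 * before the absolute deadline.
 */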
369
370static void task_contending(struct sched_dl_entity *dl_se, int flags)
371{
372 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
373
374
375
376
377
378 if (dl_se->dl_runtime == 0)
379 return;
380
381 if (flags & ENQUEUE_MIGRATED)
382 add_rq_bw(dl_se, dl_rq);
383
384 if (dl_se->dl_non_contending) {
385 dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer() will
		 * see that dl_non_contending is not set and will not
		 * touch the rq's active utilization, so we are still safe.
		 */
393 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
394 put_task_struct(dl_task_of(dl_se));
395 } else {
		/*
		 * Since "dl_non_contending" is not set, the task's
		 * utilization has already been removed from the active
		 * utilization (either when the task blocked or when the
		 * "inactive timer" fired), so add it back.
		 */
403 add_running_bw(dl_se, dl_rq);
404 }
405}
406
407static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
408{
409 struct sched_dl_entity *dl_se = &p->dl;
410
411 return dl_rq->root.rb_leftmost == &dl_se->rb_node;
412}
413
414static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
415
416void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
417{
418 raw_spin_lock_init(&dl_b->dl_runtime_lock);
419 dl_b->dl_period = period;
420 dl_b->dl_runtime = runtime;
421}
422
423void init_dl_bw(struct dl_bw *dl_b)
424{
425 raw_spin_lock_init(&dl_b->lock);
426 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
427 if (global_rt_runtime() == RUNTIME_INF)
428 dl_b->bw = -1;
429 else
430 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
431 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
432 dl_b->total_bw = 0;
433}
434
435void init_dl_rq(struct dl_rq *dl_rq)
436{
437 dl_rq->root = RB_ROOT_CACHED;
438
439#ifdef CONFIG_SMP
440
441 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
442
443 dl_rq->dl_nr_migratory = 0;
444 dl_rq->overloaded = 0;
445 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
446#else
447 init_dl_bw(&dl_rq->dl_bw);
448#endif
449
450 dl_rq->running_bw = 0;
451 dl_rq->this_bw = 0;
452 init_dl_rq_bw_ratio(dl_rq);
453}
454
455#ifdef CONFIG_SMP
456
457static inline int dl_overloaded(struct rq *rq)
458{
459 return atomic_read(&rq->rd->dlo_count);
460}
461
462static inline void dl_set_overload(struct rq *rq)
463{
464 if (!rq->online)
465 return;
466
467 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
468
	/*
	 * Matched by the barrier in pull_dl_task(): the dlo_mask bit must be
	 * visible before the overload count is incremented.
	 */
474 smp_wmb();
475 atomic_inc(&rq->rd->dlo_count);
476}
477
478static inline void dl_clear_overload(struct rq *rq)
479{
480 if (!rq->online)
481 return;
482
483 atomic_dec(&rq->rd->dlo_count);
484 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
485}
486
487static void update_dl_migration(struct dl_rq *dl_rq)
488{
489 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
490 if (!dl_rq->overloaded) {
491 dl_set_overload(rq_of_dl_rq(dl_rq));
492 dl_rq->overloaded = 1;
493 }
494 } else if (dl_rq->overloaded) {
495 dl_clear_overload(rq_of_dl_rq(dl_rq));
496 dl_rq->overloaded = 0;
497 }
498}
499
500static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
501{
502 struct task_struct *p = dl_task_of(dl_se);
503
504 if (p->nr_cpus_allowed > 1)
505 dl_rq->dl_nr_migratory++;
506
507 update_dl_migration(dl_rq);
508}
509
510static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
511{
512 struct task_struct *p = dl_task_of(dl_se);
513
514 if (p->nr_cpus_allowed > 1)
515 dl_rq->dl_nr_migratory--;
516
517 update_dl_migration(dl_rq);
518}
519
520#define __node_2_pdl(node) \
521 rb_entry((node), struct task_struct, pushable_dl_tasks)
522
523static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
524{
525 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
526}
527
/*
 * The list of pushable -deadline tasks is not a plist, like in sched_rt.c:
 * it is an rb-tree with tasks ordered by (absolute) deadline.
 */
532static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
533{
534 struct rb_node *leftmost;
535
536 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
537
538 leftmost = rb_add_cached(&p->pushable_dl_tasks,
539 &rq->dl.pushable_dl_tasks_root,
540 __pushable_less);
541 if (leftmost)
542 rq->dl.earliest_dl.next = p->dl.deadline;
543}
544
545static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
546{
547 struct dl_rq *dl_rq = &rq->dl;
548 struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
549 struct rb_node *leftmost;
550
551 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
552 return;
553
554 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
555 if (leftmost)
556 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
557
558 RB_CLEAR_NODE(&p->pushable_dl_tasks);
559}
560
561static inline int has_pushable_dl_tasks(struct rq *rq)
562{
563 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
564}
565
566static int push_dl_task(struct rq *rq);
567
568static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
569{
570 return rq->online && dl_task(prev);
571}
572
573static DEFINE_PER_CPU(struct callback_head, dl_push_head);
574static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
575
576static void push_dl_tasks(struct rq *);
577static void pull_dl_task(struct rq *);
578
579static inline void deadline_queue_push_tasks(struct rq *rq)
580{
581 if (!has_pushable_dl_tasks(rq))
582 return;
583
584 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
585}
586
587static inline void deadline_queue_pull_task(struct rq *rq)
588{
589 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
590}
591
592static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
593
594static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
595{
596 struct rq *later_rq = NULL;
597 struct dl_bw *dl_b;
598
599 later_rq = find_lock_later_rq(p, rq);
600 if (!later_rq) {
601 int cpu;
602
603
604
605
606
607 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
608 if (cpu >= nr_cpu_ids) {
609
610
611
612
613 BUG_ON(dl_bandwidth_enabled());
614
615
616
617
618
619
620 cpu = cpumask_any(cpu_active_mask);
621 }
622 later_rq = cpu_rq(cpu);
623 double_lock_balance(rq, later_rq);
624 }
625
626 if (p->dl.dl_non_contending || p->dl.dl_throttled) {
627
628
629
630
631
632
633 sub_running_bw(&p->dl, &rq->dl);
634 sub_rq_bw(&p->dl, &rq->dl);
635
636 add_rq_bw(&p->dl, &later_rq->dl);
637 add_running_bw(&p->dl, &later_rq->dl);
638 } else {
639 sub_rq_bw(&p->dl, &rq->dl);
640 add_rq_bw(&p->dl, &later_rq->dl);
641 }
642
643
644
645
646
647
648 dl_b = &rq->rd->dl_bw;
649 raw_spin_lock(&dl_b->lock);
650 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
651 raw_spin_unlock(&dl_b->lock);
652
653 dl_b = &later_rq->rd->dl_bw;
654 raw_spin_lock(&dl_b->lock);
655 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
656 raw_spin_unlock(&dl_b->lock);
657
658 set_task_cpu(p, later_rq->cpu);
659 double_unlock_balance(later_rq, rq);
660
661 return later_rq;
662}
663
664#else
665
666static inline
667void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
668{
669}
670
671static inline
672void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
673{
674}
675
676static inline
677void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
678{
679}
680
681static inline
682void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
683{
684}
685
686static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
687{
688 return false;
689}
690
691static inline void pull_dl_task(struct rq *rq)
692{
693}
694
695static inline void deadline_queue_push_tasks(struct rq *rq)
696{
697}
698
699static inline void deadline_queue_pull_task(struct rq *rq)
700{
701}
702#endif
703
704static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
705static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
706static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
707
/*
 * We are being explicitly informed that a new instance is starting,
 * which means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * This is useful when a -deadline entity wants to (try to!) synchronize
 * its behaviour with the scheduler's "known" behaviour, e.g. periodic
 * real-time tasks at the start of each period.
 */
720static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
721{
722 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
723 struct rq *rq = rq_of_dl_rq(dl_rq);
724
725 WARN_ON(is_dl_boosted(dl_se));
726 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
727
	/*
	 * We are racing with the deadline timer, so do nothing: the timer
	 * handler will take care of recharging the runtime and postponing
	 * the deadline.
	 */
733 if (dl_se->dl_throttled)
734 return;
735
736
737
738
739
740
741 dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
742 dl_se->runtime = dl_se->dl_runtime;
743}
744
/*
 * Pure EDF does not cope with an entity executing for longer than it
 * declared, thus exhausting its runtime. We want overruns to be possible,
 * but a misbehaving entity must not affect the scheduling of everybody
 * else; that is what the Constant Bandwidth Server (CBS) provides, by
 * confining each entity within its own bandwidth.
 *
 * This function implements the CBS replenishment rule: whenever the
 * runtime of an entity is replenished, its deadline is also postponed, so
 * the overrunning entity cannot make other entities miss their deadlines.
 */
763static void replenish_dl_entity(struct sched_dl_entity *dl_se)
764{
765 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
766 struct rq *rq = rq_of_dl_rq(dl_rq);
767
768 BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
769
770
771
772
773
774 if (dl_se->dl_deadline == 0) {
775 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
776 dl_se->runtime = pi_of(dl_se)->dl_runtime;
777 }
778
779 if (dl_se->dl_yielded && dl_se->runtime > 0)
780 dl_se->runtime = 0;
781
	/*
	 * Keep moving the deadline away until we get some runtime available
	 * for the entity; this correctly handles arbitrarily large overruns.
	 */
788 while (dl_se->runtime <= 0) {
789 dl_se->deadline += pi_of(dl_se)->dl_period;
790 dl_se->runtime += pi_of(dl_se)->dl_runtime;
791 }
792
	/*
	 * At this point the deadline really should be "in the future" with
	 * respect to rq->clock; if it is not, we are lagging too much. After
	 * warning once, keep things going by resetting the deadline and the
	 * budget of the entity.
	 */
802 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
803 printk_deferred_once("sched: DL replenish lagged too much\n");
804 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
805 dl_se->runtime = pi_of(dl_se)->dl_runtime;
806 }
807
808 if (dl_se->dl_yielded)
809 dl_se->dl_yielded = 0;
810 if (dl_se->dl_throttled)
811 dl_se->dl_throttled = 0;
812}
813
/*
 * CBS wakeup rule: check whether --at time t-- the entity (which is being
 * enqueued or otherwise [re]activated) can keep using its remaining runtime
 * with its current deadline without exceeding its reserved bandwidth, i.e.
 * whether
 *
 *	runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * holds. The function returns true in that case ("overflow"), and the
 * caller then assigns a fresh deadline and a full runtime instead.
 */
838static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
839{
840 u64 left, right;
841
	/*
	 * The check is done by cross-multiplying, and both factors are
	 * shifted right by DL_SCALE first so the products cannot overflow
	 * 64 bits; this loses a little precision, which is acceptable here.
	 */
860 left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
861 right = ((dl_se->deadline - t) >> DL_SCALE) *
862 (pi_of(dl_se)->dl_runtime >> DL_SCALE);
863
864 return dl_time_before(right, left);
865}
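/*
 * Example: 2 ms of runtime left and 5 ms until the current deadline
 * require a bandwidth of 0.4; with dl_runtime / dl_deadline = 10 / 20 =
 * 0.5 there is no overflow, so the current deadline and runtime are kept.
 */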
866
/*
 * Revised wakeup rule for constrained-deadline tasks (dl_deadline <
 * dl_period): when such a task wakes up before its deadline but the
 * standard CBS check reports an overflow, refilling the full runtime while
 * keeping the current deadline could exceed the task's density. Instead,
 * the runtime is scaled to the time left until the current deadline
 * ("laxity") using the task's density (dl_runtime / dl_deadline).
 */
885static void
886update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
887{
888 u64 laxity = dl_se->deadline - rq_clock(rq);
889
890
891
892
893
894
895
896 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
897
898 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
899}
900
/*
 * A task with an implicit deadline has dl_deadline == dl_period; the
 * classic CBS rules were designed for this case. Constrained-deadline
 * tasks (dl_deadline < dl_period) need the extra handling in
 * dl_check_constrained_dl() and update_dl_revised_wakeup().
 */
912static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
913{
914 return dl_se->dl_deadline == dl_se->dl_period;
915}
916
/*
 * When a deadline entity is placed back on the runqueue, its runtime and
 * deadline might need to be updated, per the CBS wakeup rule: if the old
 * deadline is in the past, or using the residual runtime up to the old
 * deadline would exceed the reserved bandwidth (dl_entity_overflow()), the
 * entity gets a new deadline (now + dl_deadline) and a full runtime.
 * Non-boosted constrained-deadline tasks whose deadline is still in the
 * future are handled by the revised wakeup rule instead.
 */
947static void update_dl_entity(struct sched_dl_entity *dl_se)
948{
949 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
950 struct rq *rq = rq_of_dl_rq(dl_rq);
951
952 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
953 dl_entity_overflow(dl_se, rq_clock(rq))) {
954
955 if (unlikely(!dl_is_implicit(dl_se) &&
956 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
957 !is_dl_boosted(dl_se))) {
958 update_dl_revised_wakeup(dl_se, rq);
959 return;
960 }
961
962 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
963 dl_se->runtime = pi_of(dl_se)->dl_runtime;
964 }
965}
966
967static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
968{
969 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
970}
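/*
 * deadline - dl_deadline is the start of the current period, so this is
 * the start of the next one; for implicit-deadline tasks (dl_deadline ==
 * dl_period) it is simply the current absolute deadline.
 */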
971
/*
 * If the entity depleted all its runtime and we want it to sleep while
 * waiting for some new execution time to become available, set the
 * bandwidth replenishment timer to the replenishment instant and try to
 * activate it.
 *
 * The caller needs to know whether the timer was actually started, i.e.
 * whether the replenishment instant is in the future or in the past.
 */
982static int start_dl_timer(struct task_struct *p)
983{
984 struct sched_dl_entity *dl_se = &p->dl;
985 struct hrtimer *timer = &dl_se->dl_timer;
986 struct rq *rq = task_rq(p);
987 ktime_t now, act;
988 s64 delta;
989
990 lockdep_assert_rq_held(rq);
991
	/*
	 * We want the timer to fire at the start of the next period, but
	 * the deadline is expressed in rq_clock() time, not in the hrtimer
	 * base; translate it by adding the current offset between the two.
	 */
997 act = ns_to_ktime(dl_next_period(dl_se));
998 now = hrtimer_cb_get_time(timer);
999 delta = ktime_to_ns(now) - rq_clock(rq);
1000 act = ktime_add_ns(act, delta);
1001
1002
1003
1004
1005
1006
1007 if (ktime_us_delta(act, now) < 0)
1008 return 0;
1009
	/*
	 * Hold a reference on the task while the timer is armed, so the task
	 * cannot be freed before dl_task_timer() runs and drops it. If the
	 * timer is already queued, a callback (and its matching put) is
	 * already guaranteed, so don't take another reference.
	 */
1019 if (!hrtimer_is_queued(timer)) {
1020 get_task_struct(p);
1021 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1022 }
1023
1024 return 1;
1025}
1026
/*
 * Bandwidth enforcement timer callback. If we are here, we know the task
 * was throttled waiting for a runtime replenishment: replenish it, and if
 * the task is still queued, enqueue it back on the dl runqueue, possibly
 * preempting the currently running task.
 */
1040static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1041{
1042 struct sched_dl_entity *dl_se = container_of(timer,
1043 struct sched_dl_entity,
1044 dl_timer);
1045 struct task_struct *p = dl_task_of(dl_se);
1046 struct rq_flags rf;
1047 struct rq *rq;
1048
1049 rq = task_rq_lock(p, &rf);
1050
1051
1052
1053
1054
1055 if (!dl_task(p))
1056 goto unlock;
1057
1058
1059
1060
1061
1062 if (is_dl_boosted(dl_se))
1063 goto unlock;
1064
1065
1066
1067
1068
1069 if (!dl_se->dl_throttled)
1070 goto unlock;
1071
1072 sched_clock_tick();
1073 update_rq_clock(rq);
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089 if (!task_on_rq_queued(p)) {
1090 replenish_dl_entity(dl_se);
1091 goto unlock;
1092 }
1093
1094#ifdef CONFIG_SMP
1095 if (unlikely(!rq->online)) {
1096
1097
1098
1099
1100 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1101 rq = dl_task_offline_migration(rq, p);
1102 rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1103 update_rq_clock(rq);
1104
1105
1106
1107
1108
1109
1110 }
1111#endif
1112
1113 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1114 if (dl_task(rq->curr))
1115 check_preempt_curr_dl(rq, p, 0);
1116 else
1117 resched_curr(rq);
1118
1119#ifdef CONFIG_SMP
1120
1121
1122
1123
1124 if (has_pushable_dl_tasks(rq)) {
1125
1126
1127
1128
1129 rq_unpin_lock(rq, &rf);
1130 push_dl_task(rq);
1131 rq_repin_lock(rq, &rf);
1132 }
1133#endif
1134
1135unlock:
1136 task_rq_unlock(rq, p, &rf);
1137
1138
1139
1140
1141
1142 put_task_struct(p);
1143
1144 return HRTIMER_NORESTART;
1145}
1146
1147void init_dl_task_timer(struct sched_dl_entity *dl_se)
1148{
1149 struct hrtimer *timer = &dl_se->dl_timer;
1150
1151 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1152 timer->function = dl_task_timer;
1153}
1154
/*
 * On activation the CBS checks whether it can reuse the current runtime
 * and deadline, replenishing the task if its deadline is in the past. That
 * rule is correct only for implicit-deadline tasks. A constrained-deadline
 * task (dl_deadline < dl_period) can wake up after its deadline but before
 * the next period boundary; replenishing it immediately would let it run
 * with a bandwidth larger than its reserved one. In that window we throttle
 * the task and let the replenishment timer do the refill at the start of
 * the next period.
 */
1173static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1174{
1175 struct task_struct *p = dl_task_of(dl_se);
1176 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1177
1178 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1179 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1180 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1181 return;
1182 dl_se->dl_throttled = 1;
1183 if (dl_se->runtime > 0)
1184 dl_se->runtime = 0;
1185 }
1186}
1187
1188static
1189int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1190{
1191 return (dl_se->runtime <= 0);
1192}
1193
1194extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1195
/*
 * GRUB accounting rule: the runtime is not decreased as "dq = -dt" but as
 *
 *	dq = -max{ u / Umax, (1 - Uinact - Uextra) } dt
 *
 * where u is the task's utilization, Umax the maximum reclaimable
 * utilization, Uinact the (per-runqueue) inactive utilization (this_bw -
 * running_bw) and Uextra the (per-runqueue) extra reclaimable utilization.
 * All bandwidths are stored multiplied by 2^BW_SHIFT, and bw_ratio holds
 * 1/Umax multiplied by 2^RATIO_SHIFT, hence the shifts below.
 */
1215static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1216{
1217 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw;
1218 u64 u_act;
1219 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1220
	/*
	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)}
	 * directly, compare u_inact + extra_bw against BW_UNIT - u_act_min:
	 * u_inact + extra_bw can exceed BW_UNIT, and the straightforward
	 * subtraction would then underflow and give wrong results.
	 */
1229 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1230 u_act = u_act_min;
1231 else
1232 u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1233
1234 return (delta * u_act) >> BW_SHIFT;
1235}
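/*
 * Example: with u_inact == 0 and extra_bw equivalent to 0.6, u_act becomes
 * 0.4 and only 40% of the elapsed time is charged to the runtime, letting
 * the task reclaim bandwidth that is not in use by other deadline tasks.
 */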
1236
1237
1238
1239
1240
1241static void update_curr_dl(struct rq *rq)
1242{
1243 struct task_struct *curr = rq->curr;
1244 struct sched_dl_entity *dl_se = &curr->dl;
1245 u64 delta_exec, scaled_delta_exec;
1246 int cpu = cpu_of(rq);
1247 u64 now;
1248
1249 if (!dl_task(curr) || !on_dl_rq(dl_se))
1250 return;
1251
	/*
	 * The consumed budget is computed using the time as observed by
	 * schedulable tasks (rq_clock_task(), which excludes e.g. hardirq
	 * time), while deadlines use hard walltime (rq_clock()).
	 */
1260 now = rq_clock_task(rq);
1261 delta_exec = now - curr->se.exec_start;
1262 if (unlikely((s64)delta_exec <= 0)) {
1263 if (unlikely(dl_se->dl_yielded))
1264 goto throttle;
1265 return;
1266 }
1267
1268 schedstat_set(curr->se.statistics.exec_max,
1269 max(curr->se.statistics.exec_max, delta_exec));
1270
1271 curr->se.sum_exec_runtime += delta_exec;
1272 account_group_exec_runtime(curr, delta_exec);
1273
1274 curr->se.exec_start = now;
1275 cgroup_account_cputime(curr, delta_exec);
1276
1277 if (dl_entity_is_special(dl_se))
1278 return;
1279
1280
1281
1282
1283
1284
1285
1286
1287 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1288 scaled_delta_exec = grub_reclaim(delta_exec,
1289 rq,
1290 &curr->dl);
1291 } else {
1292 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1293 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1294
1295 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1296 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1297 }
1298
1299 dl_se->runtime -= scaled_delta_exec;
1300
1301throttle:
1302 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1303 dl_se->dl_throttled = 1;
1304
1305
1306 if (dl_runtime_exceeded(dl_se) &&
1307 (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1308 dl_se->dl_overrun = 1;
1309
1310 __dequeue_task_dl(rq, curr, 0);
1311 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1312 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1313
1314 if (!is_leftmost(curr, &rq->dl))
1315 resched_curr(rq);
1316 }
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329 if (rt_bandwidth_enabled()) {
1330 struct rt_rq *rt_rq = &rq->rt;
1331
1332 raw_spin_lock(&rt_rq->rt_runtime_lock);
1333
1334
1335
1336
1337
1338 if (sched_rt_bandwidth_account(rt_rq))
1339 rt_rq->rt_time += delta_exec;
1340 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1341 }
1342}
1343
1344static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1345{
1346 struct sched_dl_entity *dl_se = container_of(timer,
1347 struct sched_dl_entity,
1348 inactive_timer);
1349 struct task_struct *p = dl_task_of(dl_se);
1350 struct rq_flags rf;
1351 struct rq *rq;
1352
1353 rq = task_rq_lock(p, &rf);
1354
1355 sched_clock_tick();
1356 update_rq_clock(rq);
1357
1358 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1359 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1360
1361 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1362 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1363 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1364 dl_se->dl_non_contending = 0;
1365 }
1366
1367 raw_spin_lock(&dl_b->lock);
1368 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1369 raw_spin_unlock(&dl_b->lock);
1370 __dl_clear_params(p);
1371
1372 goto unlock;
1373 }
1374 if (dl_se->dl_non_contending == 0)
1375 goto unlock;
1376
1377 sub_running_bw(dl_se, &rq->dl);
1378 dl_se->dl_non_contending = 0;
1379unlock:
1380 task_rq_unlock(rq, p, &rf);
1381 put_task_struct(p);
1382
1383 return HRTIMER_NORESTART;
1384}
1385
1386void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1387{
1388 struct hrtimer *timer = &dl_se->inactive_timer;
1389
1390 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1391 timer->function = inactive_task_timer;
1392}
1393
1394#ifdef CONFIG_SMP
1395
1396static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1397{
1398 struct rq *rq = rq_of_dl_rq(dl_rq);
1399
1400 if (dl_rq->earliest_dl.curr == 0 ||
1401 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1402 if (dl_rq->earliest_dl.curr == 0)
1403 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1404 dl_rq->earliest_dl.curr = deadline;
1405 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1406 }
1407}
1408
1409static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1410{
1411 struct rq *rq = rq_of_dl_rq(dl_rq);
1412
1413
1414
1415
1416
1417 if (!dl_rq->dl_nr_running) {
1418 dl_rq->earliest_dl.curr = 0;
1419 dl_rq->earliest_dl.next = 0;
1420 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1421 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1422 } else {
1423 struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1424 struct sched_dl_entity *entry;
1425
1426 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1427 dl_rq->earliest_dl.curr = entry->deadline;
1428 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1429 }
1430}
1431
1432#else
1433
1434static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1435static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1436
1437#endif
1438
1439static inline
1440void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1441{
1442 int prio = dl_task_of(dl_se)->prio;
1443 u64 deadline = dl_se->deadline;
1444
1445 WARN_ON(!dl_prio(prio));
1446 dl_rq->dl_nr_running++;
1447 add_nr_running(rq_of_dl_rq(dl_rq), 1);
1448
1449 inc_dl_deadline(dl_rq, deadline);
1450 inc_dl_migration(dl_se, dl_rq);
1451}
1452
1453static inline
1454void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1455{
1456 int prio = dl_task_of(dl_se)->prio;
1457
1458 WARN_ON(!dl_prio(prio));
1459 WARN_ON(!dl_rq->dl_nr_running);
1460 dl_rq->dl_nr_running--;
1461 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1462
1463 dec_dl_deadline(dl_rq, dl_se->deadline);
1464 dec_dl_migration(dl_se, dl_rq);
1465}
1466
1467#define __node_2_dle(node) \
1468 rb_entry((node), struct sched_dl_entity, rb_node)
1469
1470static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1471{
1472 return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1473}
1474
1475static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1476{
1477 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1478
1479 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1480
1481 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1482
1483 inc_dl_tasks(dl_se, dl_rq);
1484}
1485
1486static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1487{
1488 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1489
1490 if (RB_EMPTY_NODE(&dl_se->rb_node))
1491 return;
1492
1493 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1494
1495 RB_CLEAR_NODE(&dl_se->rb_node);
1496
1497 dec_dl_tasks(dl_se, dl_rq);
1498}
1499
1500static void
1501enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1502{
1503 BUG_ON(on_dl_rq(dl_se));
1504
1505
1506
1507
1508
1509
1510 if (flags & ENQUEUE_WAKEUP) {
1511 task_contending(dl_se, flags);
1512 update_dl_entity(dl_se);
1513 } else if (flags & ENQUEUE_REPLENISH) {
1514 replenish_dl_entity(dl_se);
1515 } else if ((flags & ENQUEUE_RESTORE) &&
1516 dl_time_before(dl_se->deadline,
1517 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1518 setup_new_dl_entity(dl_se);
1519 }
1520
1521 __enqueue_dl_entity(dl_se);
1522}
1523
1524static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1525{
1526 __dequeue_dl_entity(dl_se);
1527}
1528
1529static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1530{
1531 if (is_dl_boosted(&p->dl)) {
		/*
		 * Because of delays in the detection of a runtime overrun,
		 * a thread may go to sleep on an rt-mutex with negative
		 * runtime and therefore be throttled. While waiting for the
		 * mutex it can also be boosted via PI, ending up throttled
		 * and boosted at the same time. In that case the boost wins
		 * over the throttling.
		 */
1544 if (p->dl.dl_throttled) {
1545
1546
1547
1548
1549
1550 hrtimer_try_to_cancel(&p->dl.dl_timer);
1551 p->dl.dl_throttled = 0;
1552 }
1553 } else if (!dl_prio(p->normal_prio)) {
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563 p->dl.dl_throttled = 0;
1564 BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
1565 return;
1566 }
1567
1568
1569
1570
1571
1572
1573
1574 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1575 dl_check_constrained_dl(&p->dl);
1576
1577 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1578 add_rq_bw(&p->dl, &rq->dl);
1579 add_running_bw(&p->dl, &rq->dl);
1580 }
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1595 if (flags & ENQUEUE_WAKEUP)
1596 task_contending(&p->dl, flags);
1597
1598 return;
1599 }
1600
1601 enqueue_dl_entity(&p->dl, flags);
1602
1603 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1604 enqueue_pushable_dl_task(rq, p);
1605}
1606
1607static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1608{
1609 dequeue_dl_entity(&p->dl);
1610 dequeue_pushable_dl_task(rq, p);
1611}
1612
1613static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1614{
1615 update_curr_dl(rq);
1616 __dequeue_task_dl(rq, p, flags);
1617
1618 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1619 sub_running_bw(&p->dl, &rq->dl);
1620 sub_rq_bw(&p->dl, &rq->dl);
1621 }
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632 if (flags & DEQUEUE_SLEEP)
1633 task_non_contending(p);
1634}
1635
/*
 * Yield semantics for -deadline tasks: give up the CPU until the next
 * instance, with a new runtime; the remaining budget of the current
 * instance is forfeited.
 */
1646static void yield_task_dl(struct rq *rq)
1647{
1648
1649
1650
1651
1652
1653
1654 rq->curr->dl.dl_yielded = 1;
1655
1656 update_rq_clock(rq);
1657 update_curr_dl(rq);
1658
1659
1660
1661
1662
1663 rq_clock_skip_update(rq);
1664}
1665
1666#ifdef CONFIG_SMP
1667
1668static int find_later_rq(struct task_struct *task);
1669
1670static int
1671select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1672{
1673 struct task_struct *curr;
1674 bool select_rq;
1675 struct rq *rq;
1676
1677 if (!(flags & WF_TTWU))
1678 goto out;
1679
1680 rq = cpu_rq(cpu);
1681
1682 rcu_read_lock();
1683 curr = READ_ONCE(rq->curr);
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694 select_rq = unlikely(dl_task(curr)) &&
1695 (curr->nr_cpus_allowed < 2 ||
1696 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1697 p->nr_cpus_allowed > 1;
1698
1699
1700
1701
1702
1703 if (static_branch_unlikely(&sched_asym_cpucapacity))
1704 select_rq |= !dl_task_fits_capacity(p, cpu);
1705
1706 if (select_rq) {
1707 int target = find_later_rq(p);
1708
1709 if (target != -1 &&
1710 (dl_time_before(p->dl.deadline,
1711 cpu_rq(target)->dl.earliest_dl.curr) ||
1712 (cpu_rq(target)->dl.dl_nr_running == 0)))
1713 cpu = target;
1714 }
1715 rcu_read_unlock();
1716
1717out:
1718 return cpu;
1719}
1720
1721static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1722{
1723 struct rq *rq;
1724
1725 if (READ_ONCE(p->__state) != TASK_WAKING)
1726 return;
1727
1728 rq = task_rq(p);
1729
1730
1731
1732
1733
1734 raw_spin_rq_lock(rq);
1735 if (p->dl.dl_non_contending) {
1736 sub_running_bw(&p->dl, &rq->dl);
1737 p->dl.dl_non_contending = 0;
1738
1739
1740
1741
1742
1743
1744
1745 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1746 put_task_struct(p);
1747 }
1748 sub_rq_bw(&p->dl, &rq->dl);
1749 raw_spin_rq_unlock(rq);
1750}
1751
1752static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1753{
1754
1755
1756
1757
1758 if (rq->curr->nr_cpus_allowed == 1 ||
1759 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1760 return;
1761
1762
1763
1764
1765
1766 if (p->nr_cpus_allowed != 1 &&
1767 cpudl_find(&rq->rd->cpudl, p, NULL))
1768 return;
1769
1770 resched_curr(rq);
1771}
1772
1773static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1774{
1775 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1776
1777
1778
1779
1780
1781
1782 rq_unpin_lock(rq, rf);
1783 pull_dl_task(rq);
1784 rq_repin_lock(rq, rf);
1785 }
1786
1787 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1788}
1789#endif
1790
1791
1792
1793
1794
1795static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1796 int flags)
1797{
1798 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1799 resched_curr(rq);
1800 return;
1801 }
1802
1803#ifdef CONFIG_SMP
1804
1805
1806
1807
1808 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1809 !test_tsk_need_resched(rq->curr))
1810 check_preempt_equal_dl(rq, p);
1811#endif
1812}
1813
1814#ifdef CONFIG_SCHED_HRTICK
1815static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1816{
1817 hrtick_start(rq, p->dl.runtime);
1818}
1819#else
1820static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1821{
1822}
1823#endif
1824
1825static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1826{
1827 p->se.exec_start = rq_clock_task(rq);
1828
1829
1830 dequeue_pushable_dl_task(rq, p);
1831
1832 if (!first)
1833 return;
1834
1835 if (hrtick_enabled_dl(rq))
1836 start_hrtick_dl(rq, p);
1837
1838 if (rq->curr->sched_class != &dl_sched_class)
1839 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1840
1841 deadline_queue_push_tasks(rq);
1842}
1843
1844static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1845 struct dl_rq *dl_rq)
1846{
1847 struct rb_node *left = rb_first_cached(&dl_rq->root);
1848
1849 if (!left)
1850 return NULL;
1851
1852 return rb_entry(left, struct sched_dl_entity, rb_node);
1853}
1854
1855static struct task_struct *pick_task_dl(struct rq *rq)
1856{
1857 struct sched_dl_entity *dl_se;
1858 struct dl_rq *dl_rq = &rq->dl;
1859 struct task_struct *p;
1860
1861 if (!sched_dl_runnable(rq))
1862 return NULL;
1863
1864 dl_se = pick_next_dl_entity(rq, dl_rq);
1865 BUG_ON(!dl_se);
1866 p = dl_task_of(dl_se);
1867
1868 return p;
1869}
1870
1871static struct task_struct *pick_next_task_dl(struct rq *rq)
1872{
1873 struct task_struct *p;
1874
1875 p = pick_task_dl(rq);
1876 if (p)
1877 set_next_task_dl(rq, p, true);
1878
1879 return p;
1880}
1881
1882static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1883{
1884 update_curr_dl(rq);
1885
1886 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1887 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1888 enqueue_pushable_dl_task(rq, p);
1889}
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1900{
1901 update_curr_dl(rq);
1902
1903 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1904
1905
1906
1907
1908
1909 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
1910 is_leftmost(p, &rq->dl))
1911 start_hrtick_dl(rq, p);
1912}
1913
1914static void task_fork_dl(struct task_struct *p)
1915{
1916
1917
1918
1919
1920}
1921
1922#ifdef CONFIG_SMP
1923
1924
1925#define DL_MAX_TRIES 3
1926
1927static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1928{
1929 if (!task_running(rq, p) &&
1930 cpumask_test_cpu(cpu, &p->cpus_mask))
1931 return 1;
1932 return 0;
1933}
1934
1935
1936
1937
1938
1939static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1940{
1941 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1942 struct task_struct *p = NULL;
1943
1944 if (!has_pushable_dl_tasks(rq))
1945 return NULL;
1946
1947next_node:
1948 if (next_node) {
1949 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1950
1951 if (pick_dl_task(rq, p, cpu))
1952 return p;
1953
1954 next_node = rb_next(next_node);
1955 goto next_node;
1956 }
1957
1958 return NULL;
1959}
1960
1961static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1962
1963static int find_later_rq(struct task_struct *task)
1964{
1965 struct sched_domain *sd;
1966 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1967 int this_cpu = smp_processor_id();
1968 int cpu = task_cpu(task);
1969
1970
1971 if (unlikely(!later_mask))
1972 return -1;
1973
1974 if (task->nr_cpus_allowed == 1)
1975 return -1;
1976
1977
1978
1979
1980
1981 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1982 return -1;
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996 if (cpumask_test_cpu(cpu, later_mask))
1997 return cpu;
1998
1999
2000
2001
2002 if (!cpumask_test_cpu(this_cpu, later_mask))
2003 this_cpu = -1;
2004
2005 rcu_read_lock();
2006 for_each_domain(cpu, sd) {
2007 if (sd->flags & SD_WAKE_AFFINE) {
2008 int best_cpu;
2009
2010
2011
2012
2013
2014 if (this_cpu != -1 &&
2015 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2016 rcu_read_unlock();
2017 return this_cpu;
2018 }
2019
2020 best_cpu = cpumask_any_and_distribute(later_mask,
2021 sched_domain_span(sd));
2022
2023
2024
2025
2026
2027
2028 if (best_cpu < nr_cpu_ids) {
2029 rcu_read_unlock();
2030 return best_cpu;
2031 }
2032 }
2033 }
2034 rcu_read_unlock();
2035
2036
2037
2038
2039
2040 if (this_cpu != -1)
2041 return this_cpu;
2042
2043 cpu = cpumask_any_distribute(later_mask);
2044 if (cpu < nr_cpu_ids)
2045 return cpu;
2046
2047 return -1;
2048}
2049
2050
2051static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2052{
2053 struct rq *later_rq = NULL;
2054 int tries;
2055 int cpu;
2056
2057 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2058 cpu = find_later_rq(task);
2059
2060 if ((cpu == -1) || (cpu == rq->cpu))
2061 break;
2062
2063 later_rq = cpu_rq(cpu);
2064
2065 if (later_rq->dl.dl_nr_running &&
2066 !dl_time_before(task->dl.deadline,
2067 later_rq->dl.earliest_dl.curr)) {
2068
2069
2070
2071
2072
2073 later_rq = NULL;
2074 break;
2075 }
2076
2077
2078 if (double_lock_balance(rq, later_rq)) {
2079 if (unlikely(task_rq(task) != rq ||
2080 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2081 task_running(rq, task) ||
2082 !dl_task(task) ||
2083 !task_on_rq_queued(task))) {
2084 double_unlock_balance(rq, later_rq);
2085 later_rq = NULL;
2086 break;
2087 }
2088 }
2089
2090
2091
2092
2093
2094
2095 if (!later_rq->dl.dl_nr_running ||
2096 dl_time_before(task->dl.deadline,
2097 later_rq->dl.earliest_dl.curr))
2098 break;
2099
2100
2101 double_unlock_balance(rq, later_rq);
2102 later_rq = NULL;
2103 }
2104
2105 return later_rq;
2106}
2107
2108static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2109{
2110 struct task_struct *p;
2111
2112 if (!has_pushable_dl_tasks(rq))
2113 return NULL;
2114
2115 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2116 struct task_struct, pushable_dl_tasks);
2117
2118 BUG_ON(rq->cpu != task_cpu(p));
2119 BUG_ON(task_current(rq, p));
2120 BUG_ON(p->nr_cpus_allowed <= 1);
2121
2122 BUG_ON(!task_on_rq_queued(p));
2123 BUG_ON(!dl_task(p));
2124
2125 return p;
2126}
2127
2128
2129
2130
2131
2132
2133static int push_dl_task(struct rq *rq)
2134{
2135 struct task_struct *next_task;
2136 struct rq *later_rq;
2137 int ret = 0;
2138
2139 if (!rq->dl.overloaded)
2140 return 0;
2141
2142 next_task = pick_next_pushable_dl_task(rq);
2143 if (!next_task)
2144 return 0;
2145
2146retry:
2147 if (is_migration_disabled(next_task))
2148 return 0;
2149
2150 if (WARN_ON(next_task == rq->curr))
2151 return 0;
2152
2153
2154
2155
2156
2157
2158 if (dl_task(rq->curr) &&
2159 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2160 rq->curr->nr_cpus_allowed > 1) {
2161 resched_curr(rq);
2162 return 0;
2163 }
2164
2165
2166 get_task_struct(next_task);
2167
2168
2169 later_rq = find_lock_later_rq(next_task, rq);
2170 if (!later_rq) {
2171 struct task_struct *task;
2172
2173
2174
2175
2176
2177
2178 task = pick_next_pushable_dl_task(rq);
2179 if (task == next_task) {
2180
2181
2182
2183
2184 goto out;
2185 }
2186
2187 if (!task)
2188
2189 goto out;
2190
2191 put_task_struct(next_task);
2192 next_task = task;
2193 goto retry;
2194 }
2195
2196 deactivate_task(rq, next_task, 0);
2197 set_task_cpu(next_task, later_rq->cpu);
2198
2199
2200
2201
2202
2203 update_rq_clock(later_rq);
2204 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2205 ret = 1;
2206
2207 resched_curr(later_rq);
2208
2209 double_unlock_balance(rq, later_rq);
2210
2211out:
2212 put_task_struct(next_task);
2213
2214 return ret;
2215}
2216
2217static void push_dl_tasks(struct rq *rq)
2218{
2219
2220 while (push_dl_task(rq))
2221 ;
2222}
2223
2224static void pull_dl_task(struct rq *this_rq)
2225{
2226 int this_cpu = this_rq->cpu, cpu;
2227 struct task_struct *p, *push_task;
2228 bool resched = false;
2229 struct rq *src_rq;
2230 u64 dmin = LONG_MAX;
2231
2232 if (likely(!dl_overloaded(this_rq)))
2233 return;
2234
2235
2236
2237
2238
2239 smp_rmb();
2240
2241 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2242 if (this_cpu == cpu)
2243 continue;
2244
2245 src_rq = cpu_rq(cpu);
2246
2247
2248
2249
2250
2251 if (this_rq->dl.dl_nr_running &&
2252 dl_time_before(this_rq->dl.earliest_dl.curr,
2253 src_rq->dl.earliest_dl.next))
2254 continue;
2255
2256
2257 push_task = NULL;
2258 double_lock_balance(this_rq, src_rq);
2259
2260
2261
2262
2263
2264 if (src_rq->dl.dl_nr_running <= 1)
2265 goto skip;
2266
2267 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2268
2269
2270
2271
2272
2273
2274 if (p && dl_time_before(p->dl.deadline, dmin) &&
2275 (!this_rq->dl.dl_nr_running ||
2276 dl_time_before(p->dl.deadline,
2277 this_rq->dl.earliest_dl.curr))) {
2278 WARN_ON(p == src_rq->curr);
2279 WARN_ON(!task_on_rq_queued(p));
2280
2281
2282
2283
2284
2285 if (dl_time_before(p->dl.deadline,
2286 src_rq->curr->dl.deadline))
2287 goto skip;
2288
2289 if (is_migration_disabled(p)) {
2290 push_task = get_push_task(src_rq);
2291 } else {
2292 deactivate_task(src_rq, p, 0);
2293 set_task_cpu(p, this_cpu);
2294 activate_task(this_rq, p, 0);
2295 dmin = p->dl.deadline;
2296 resched = true;
2297 }
2298
2299
2300 }
2301skip:
2302 double_unlock_balance(this_rq, src_rq);
2303
2304 if (push_task) {
2305 raw_spin_rq_unlock(this_rq);
2306 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2307 push_task, &src_rq->push_work);
2308 raw_spin_rq_lock(this_rq);
2309 }
2310 }
2311
2312 if (resched)
2313 resched_curr(this_rq);
2314}
2315
2316
2317
2318
2319
2320static void task_woken_dl(struct rq *rq, struct task_struct *p)
2321{
2322 if (!task_running(rq, p) &&
2323 !test_tsk_need_resched(rq->curr) &&
2324 p->nr_cpus_allowed > 1 &&
2325 dl_task(rq->curr) &&
2326 (rq->curr->nr_cpus_allowed < 2 ||
2327 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2328 push_dl_tasks(rq);
2329 }
2330}
2331
2332static void set_cpus_allowed_dl(struct task_struct *p,
2333 const struct cpumask *new_mask,
2334 u32 flags)
2335{
2336 struct root_domain *src_rd;
2337 struct rq *rq;
2338
2339 BUG_ON(!dl_task(p));
2340
2341 rq = task_rq(p);
2342 src_rd = rq->rd;
2343
2344
2345
2346
2347
2348
2349 if (!cpumask_intersects(src_rd->span, new_mask)) {
2350 struct dl_bw *src_dl_b;
2351
2352 src_dl_b = dl_bw_of(cpu_of(rq));
2353
2354
2355
2356
2357
2358 raw_spin_lock(&src_dl_b->lock);
2359 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2360 raw_spin_unlock(&src_dl_b->lock);
2361 }
2362
2363 set_cpus_allowed_common(p, new_mask, flags);
2364}
2365
2366
2367static void rq_online_dl(struct rq *rq)
2368{
2369 if (rq->dl.overloaded)
2370 dl_set_overload(rq);
2371
2372 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2373 if (rq->dl.dl_nr_running > 0)
2374 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2375}
2376
2377
2378static void rq_offline_dl(struct rq *rq)
2379{
2380 if (rq->dl.overloaded)
2381 dl_clear_overload(rq);
2382
2383 cpudl_clear(&rq->rd->cpudl, rq->cpu);
2384 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2385}
2386
2387void __init init_sched_dl_class(void)
2388{
2389 unsigned int i;
2390
2391 for_each_possible_cpu(i)
2392 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2393 GFP_KERNEL, cpu_to_node(i));
2394}
2395
2396void dl_add_task_root_domain(struct task_struct *p)
2397{
2398 struct rq_flags rf;
2399 struct rq *rq;
2400 struct dl_bw *dl_b;
2401
2402 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2403 if (!dl_task(p)) {
2404 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2405 return;
2406 }
2407
2408 rq = __task_rq_lock(p, &rf);
2409
2410 dl_b = &rq->rd->dl_bw;
2411 raw_spin_lock(&dl_b->lock);
2412
2413 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2414
2415 raw_spin_unlock(&dl_b->lock);
2416
2417 task_rq_unlock(rq, p, &rf);
2418}
2419
2420void dl_clear_root_domain(struct root_domain *rd)
2421{
2422 unsigned long flags;
2423
2424 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2425 rd->dl_bw.total_bw = 0;
2426 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2427}
2428
2429#endif
2430
2431static void switched_from_dl(struct rq *rq, struct task_struct *p)
2432{
	/*
	 * The task is leaving SCHED_DEADLINE: treat it like a blocking task
	 * and start the inactive timer via task_non_contending(), so its
	 * bandwidth is released from the active utilization only at its
	 * 0-lag time (or immediately, if that time has already passed).
	 */
2441 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2442 task_non_contending(p);
2443
2444 if (!task_on_rq_queued(p)) {
2445
2446
2447
2448
2449
2450
2451 if (p->dl.dl_non_contending)
2452 sub_running_bw(&p->dl, &rq->dl);
2453 sub_rq_bw(&p->dl, &rq->dl);
2454 }
2455
2456
2457
2458
2459
2460
2461 if (p->dl.dl_non_contending)
2462 p->dl.dl_non_contending = 0;
2463
2464
2465
2466
2467
2468
2469 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2470 return;
2471
2472 deadline_queue_pull_task(rq);
2473}
2474
2475
2476
2477
2478
2479static void switched_to_dl(struct rq *rq, struct task_struct *p)
2480{
2481 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2482 put_task_struct(p);
2483
2484
2485 if (!task_on_rq_queued(p)) {
2486 add_rq_bw(&p->dl, &rq->dl);
2487
2488 return;
2489 }
2490
2491 if (rq->curr != p) {
2492#ifdef CONFIG_SMP
2493 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2494 deadline_queue_push_tasks(rq);
2495#endif
2496 if (dl_task(rq->curr))
2497 check_preempt_curr_dl(rq, p, 0);
2498 else
2499 resched_curr(rq);
2500 } else {
2501 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2502 }
2503}
2504
2505
2506
2507
2508
2509static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2510 int oldprio)
2511{
2512 if (task_on_rq_queued(p) || task_current(rq, p)) {
2513#ifdef CONFIG_SMP
2514
2515
2516
2517
2518
2519
2520 if (!rq->dl.overloaded)
2521 deadline_queue_pull_task(rq);
2522
2523
2524
2525
2526
2527
2528 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2529 resched_curr(rq);
2530#else
2531
2532
2533
2534
2535
2536 resched_curr(rq);
2537#endif
2538 }
2539}
2540
2541DEFINE_SCHED_CLASS(dl) = {
2542
2543 .enqueue_task = enqueue_task_dl,
2544 .dequeue_task = dequeue_task_dl,
2545 .yield_task = yield_task_dl,
2546
2547 .check_preempt_curr = check_preempt_curr_dl,
2548
2549 .pick_next_task = pick_next_task_dl,
2550 .put_prev_task = put_prev_task_dl,
2551 .set_next_task = set_next_task_dl,
2552
2553#ifdef CONFIG_SMP
2554 .balance = balance_dl,
2555 .pick_task = pick_task_dl,
2556 .select_task_rq = select_task_rq_dl,
2557 .migrate_task_rq = migrate_task_rq_dl,
2558 .set_cpus_allowed = set_cpus_allowed_dl,
2559 .rq_online = rq_online_dl,
2560 .rq_offline = rq_offline_dl,
2561 .task_woken = task_woken_dl,
2562 .find_lock_rq = find_lock_later_rq,
2563#endif
2564
2565 .task_tick = task_tick_dl,
2566 .task_fork = task_fork_dl,
2567
2568 .prio_changed = prio_changed_dl,
2569 .switched_from = switched_from_dl,
2570 .switched_to = switched_to_dl,
2571
2572 .update_curr = update_curr_dl,
2573};
2574
2575
2576static u64 dl_generation;
2577
2578int sched_dl_global_validate(void)
2579{
2580 u64 runtime = global_rt_runtime();
2581 u64 period = global_rt_period();
2582 u64 new_bw = to_ratio(period, runtime);
2583 u64 gen = ++dl_generation;
2584 struct dl_bw *dl_b;
2585 int cpu, cpus, ret = 0;
2586 unsigned long flags;
2587
2588
2589
2590
2591
2592
2593 for_each_possible_cpu(cpu) {
2594 rcu_read_lock_sched();
2595
2596 if (dl_bw_visited(cpu, gen))
2597 goto next;
2598
2599 dl_b = dl_bw_of(cpu);
2600 cpus = dl_bw_cpus(cpu);
2601
2602 raw_spin_lock_irqsave(&dl_b->lock, flags);
2603 if (new_bw * cpus < dl_b->total_bw)
2604 ret = -EBUSY;
2605 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2606
2607next:
2608 rcu_read_unlock_sched();
2609
2610 if (ret)
2611 break;
2612 }
2613
2614 return ret;
2615}
2616
2617static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2618{
2619 if (global_rt_runtime() == RUNTIME_INF) {
2620 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2621 dl_rq->extra_bw = 1 << BW_SHIFT;
2622 } else {
2623 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2624 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2625 dl_rq->extra_bw = to_ratio(global_rt_period(),
2626 global_rt_runtime());
2627 }
2628}
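/*
 * With the default rt bandwidth (950000 us every 1000000 us), extra_bw is
 * roughly 0.95 in BW_SHIFT fixed point (~996147) and bw_ratio roughly
 * 1/0.95 in RATIO_SHIFT fixed point (~269).
 */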
2629
2630void sched_dl_do_global(void)
2631{
2632 u64 new_bw = -1;
2633 u64 gen = ++dl_generation;
2634 struct dl_bw *dl_b;
2635 int cpu;
2636 unsigned long flags;
2637
2638 def_dl_bandwidth.dl_period = global_rt_period();
2639 def_dl_bandwidth.dl_runtime = global_rt_runtime();
2640
2641 if (global_rt_runtime() != RUNTIME_INF)
2642 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2643
2644 for_each_possible_cpu(cpu) {
2645 rcu_read_lock_sched();
2646
2647 if (dl_bw_visited(cpu, gen)) {
2648 rcu_read_unlock_sched();
2649 continue;
2650 }
2651
2652 dl_b = dl_bw_of(cpu);
2653
2654 raw_spin_lock_irqsave(&dl_b->lock, flags);
2655 dl_b->bw = new_bw;
2656 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2657
2658 rcu_read_unlock_sched();
2659 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2660 }
2661}
2662
/*
 * Make sure that accepting a new deadline task (or changing the parameters
 * of an existing one) is consistent with the bandwidth constraints; if so,
 * also update the currently allocated bandwidth to reflect the new
 * situation.
 *
 * This function is called while holding p's rq->lock.
 */
2671int sched_dl_overflow(struct task_struct *p, int policy,
2672 const struct sched_attr *attr)
2673{
2674 u64 period = attr->sched_period ?: attr->sched_deadline;
2675 u64 runtime = attr->sched_runtime;
2676 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2677 int cpus, err = -1, cpu = task_cpu(p);
2678 struct dl_bw *dl_b = dl_bw_of(cpu);
2679 unsigned long cap;
2680
2681 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2682 return 0;
2683
2684
2685 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2686 return 0;
2687
2688
2689
2690
2691
2692
2693 raw_spin_lock(&dl_b->lock);
2694 cpus = dl_bw_cpus(cpu);
2695 cap = dl_bw_capacity(cpu);
2696
2697 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2698 !__dl_overflow(dl_b, cap, 0, new_bw)) {
2699 if (hrtimer_active(&p->dl.inactive_timer))
2700 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2701 __dl_add(dl_b, new_bw, cpus);
2702 err = 0;
2703 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2704 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2705
2706
2707
2708
2709
2710
2711
2712 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2713 __dl_add(dl_b, new_bw, cpus);
2714 dl_change_utilization(p, new_bw);
2715 err = 0;
2716 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2717
2718
2719
2720
2721
2722 err = 0;
2723 }
2724 raw_spin_unlock(&dl_b->lock);
2725
2726 return err;
2727}
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2738{
2739 struct sched_dl_entity *dl_se = &p->dl;
2740
2741 dl_se->dl_runtime = attr->sched_runtime;
2742 dl_se->dl_deadline = attr->sched_deadline;
2743 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2744 dl_se->flags = attr->sched_flags;
2745 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2746 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2747}
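/*
 * to_ratio(period, runtime) returns runtime/period in BW_SHIFT (2^20)
 * fixed point, so dl_bw is the reserved bandwidth and dl_density the
 * bandwidth relative to the relative deadline. For example, runtime 10 ms,
 * deadline 30 ms, period 100 ms gives dl_bw ~= 0.10 and dl_density ~= 0.33
 * (both times 2^20).
 */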
2748
2749void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2750{
2751 struct sched_dl_entity *dl_se = &p->dl;
2752
2753 attr->sched_priority = p->rt_priority;
2754 attr->sched_runtime = dl_se->dl_runtime;
2755 attr->sched_deadline = dl_se->dl_deadline;
2756 attr->sched_period = dl_se->dl_period;
2757 attr->sched_flags = dl_se->flags;
2758}
2759
/*
 * Default limits for the DL period: the upper bound guards against small
 * utilization tasks obtaining ridiculously long effective runtimes, the
 * lower bound against timer DoS.
 */
2765unsigned int sysctl_sched_dl_period_max = 1 << 22;
2766unsigned int sysctl_sched_dl_period_min = 100;
2767
/*
 * Validate the new parameters of a -deadline task: the deadline must be
 * non-zero and not smaller than the runtime, and the period must be zero
 * or not smaller than the deadline. The runtime must be above the internal
 * resolution (1 << DL_SCALE ns), deadline and period must be below 2^63 ns,
 * and the period must fall within the sysctl min/max window.
 */
2778bool __checkparam_dl(const struct sched_attr *attr)
2779{
2780 u64 period, max, min;
2781
2782
2783 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2784 return true;
2785
2786
2787 if (attr->sched_deadline == 0)
2788 return false;
2789
2790
2791
2792
2793
2794 if (attr->sched_runtime < (1ULL << DL_SCALE))
2795 return false;
2796
2797
2798
2799
2800
2801 if (attr->sched_deadline & (1ULL << 63) ||
2802 attr->sched_period & (1ULL << 63))
2803 return false;
2804
2805 period = attr->sched_period;
2806 if (!period)
2807 period = attr->sched_deadline;
2808
2809
2810 if (period < attr->sched_deadline ||
2811 attr->sched_deadline < attr->sched_runtime)
2812 return false;
2813
2814 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
2815 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
2816
2817 if (period < min || period > max)
2818 return false;
2819
2820 return true;
2821}
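/*
 * Example of parameters that pass: sched_runtime = 10 ms, sched_deadline =
 * 30 ms, sched_period = 100 ms. The runtime is above 1 << DL_SCALE ns,
 * runtime <= deadline <= period, no value has bit 63 set, and the period
 * is within the default [100 us, ~4.2 s] sysctl window.
 */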
2822
2823
2824
2825
2826void __dl_clear_params(struct task_struct *p)
2827{
2828 struct sched_dl_entity *dl_se = &p->dl;
2829
2830 dl_se->dl_runtime = 0;
2831 dl_se->dl_deadline = 0;
2832 dl_se->dl_period = 0;
2833 dl_se->flags = 0;
2834 dl_se->dl_bw = 0;
2835 dl_se->dl_density = 0;
2836
2837 dl_se->dl_throttled = 0;
2838 dl_se->dl_yielded = 0;
2839 dl_se->dl_non_contending = 0;
2840 dl_se->dl_overrun = 0;
2841
2842#ifdef CONFIG_RT_MUTEXES
2843 dl_se->pi_se = dl_se;
2844#endif
2845}
2846
2847bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2848{
2849 struct sched_dl_entity *dl_se = &p->dl;
2850
2851 if (dl_se->dl_runtime != attr->sched_runtime ||
2852 dl_se->dl_deadline != attr->sched_deadline ||
2853 dl_se->dl_period != attr->sched_period ||
2854 dl_se->flags != attr->sched_flags)
2855 return true;
2856
2857 return false;
2858}
2859
2860#ifdef CONFIG_SMP
2861int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2862{
2863 unsigned long flags, cap;
2864 unsigned int dest_cpu;
2865 struct dl_bw *dl_b;
2866 bool overflow;
2867 int ret;
2868
2869 dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2870
2871 rcu_read_lock_sched();
2872 dl_b = dl_bw_of(dest_cpu);
2873 raw_spin_lock_irqsave(&dl_b->lock, flags);
2874 cap = dl_bw_capacity(dest_cpu);
2875 overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
2876 if (overflow) {
2877 ret = -EBUSY;
2878 } else {
2879
2880
2881
2882
2883
2884
2885 int cpus = dl_bw_cpus(dest_cpu);
2886
2887 __dl_add(dl_b, p->dl.dl_bw, cpus);
2888 ret = 0;
2889 }
2890 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2891 rcu_read_unlock_sched();
2892
2893 return ret;
2894}
2895
2896int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2897 const struct cpumask *trial)
2898{
2899 int ret = 1, trial_cpus;
2900 struct dl_bw *cur_dl_b;
2901 unsigned long flags;
2902
2903 rcu_read_lock_sched();
2904 cur_dl_b = dl_bw_of(cpumask_any(cur));
2905 trial_cpus = cpumask_weight(trial);
2906
2907 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2908 if (cur_dl_b->bw != -1 &&
2909 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2910 ret = 0;
2911 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2912 rcu_read_unlock_sched();
2913
2914 return ret;
2915}
2916
2917bool dl_cpu_busy(unsigned int cpu)
2918{
2919 unsigned long flags, cap;
2920 struct dl_bw *dl_b;
2921 bool overflow;
2922
2923 rcu_read_lock_sched();
2924 dl_b = dl_bw_of(cpu);
2925 raw_spin_lock_irqsave(&dl_b->lock, flags);
2926 cap = dl_bw_capacity(cpu);
2927 overflow = __dl_overflow(dl_b, cap, 0, 0);
2928 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2929 rcu_read_unlock_sched();
2930
2931 return overflow;
2932}
2933#endif
2934
2935#ifdef CONFIG_SCHED_DEBUG
2936void print_dl_stats(struct seq_file *m, int cpu)
2937{
2938 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2939}
2940#endif
2941