/* SPDX-License-Identifier: GPL-2.0
 *
 * IO cost model based controller.
 *
 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
 * Copyright (C) 2019 Andy Newell <newella@fb.com>
 * Copyright (C) 2019 Facebook
 *
 * [The original introductory documentation comment describing the cost
 *  model, vtime, vrate and the weight hierarchy was lost in extraction
 *  and is not reproduced here.]
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/time64.h>
#include <linux/parser.h>
#include <linux/sched/signal.h>
#include <linux/blk-cgroup.h>
#include <asm/local.h>
#include <asm/local64.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk-wbt.h"

#ifdef CONFIG_TRACEPOINTS

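/*
 * Cgroup paths for tracepoints are rendered into this single shared
 * buffer, serialized by trace_iocg_path_lock.  Path rendering is
 * relatively expensive and is skipped entirely unless the matching
 * tracepoint is enabled.
 */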
#define TRACE_IOCG_PATH_LEN 1024
static DEFINE_SPINLOCK(trace_iocg_path_lock);
static char trace_iocg_path[TRACE_IOCG_PATH_LEN];

#define TRACE_IOCG_PATH(type, iocg, ...)					\
	do {									\
		unsigned long flags;						\
		if (trace_iocost_##type##_enabled()) {				\
			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
			trace_iocost_##type(iocg, trace_iocg_path,		\
					    ##__VA_ARGS__);			\
			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
		}								\
	} while (0)

#else	/* CONFIG_TRACEPOINTS */
#define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
#endif	/* CONFIG_TRACEPOINTS */

enum {
	MILLION			= 1000000,

	/* timer period is calculated from latency requirements, bound it */
	MIN_PERIOD		= USEC_PER_MSEC,
	MAX_PERIOD		= USEC_PER_SEC,

	MARGIN_MIN_PCT		= 10,
	MARGIN_LOW_PCT		= 20,
	MARGIN_TARGET_PCT	= 50,

	INUSE_ADJ_STEP_PCT	= 25,

	/* 1/TIMER_SLACK_PCT of the period is used as the timer slack */
	TIMER_SLACK_PCT		= 1,

	/* weight and hweight are in the fixed point WEIGHT_ONE base */
	WEIGHT_ONE		= 1 << 16,

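	/*
	 * vtime is the device-internal clock that IOs are charged
	 * against.  It needs high resolution as it prices individual
	 * IOs; one second of vtime at 100% vrate is 2^37 ticks, i.e.
	 * ~137 ticks per nanosecond (VTIME_PER_NSEC below), which keeps
	 * per-page costs well above rounding noise even on very fast
	 * devices while leaving ample headroom in the 64bit space.
	 */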
	VTIME_PER_SEC_SHIFT	= 37,
	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,
	VTIME_PER_NSEC		= VTIME_PER_SEC / NSEC_PER_SEC,

	/* bound vrate adjustments within two orders of magnitude */
	VRATE_MIN_PPM		= 10000,
	VRATE_MAX_PPM		= 100000000,

	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
	VRATE_CLAMP_ADJ_PCT	= 4,

	/* if IOs end up waiting for requests, issue less */
	RQ_WAIT_BUSY_PCT	= 5,

	/* unbusy hysteresis */
	UNBUSY_THR_PCT		= 75,

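	/*
	 * The effect of delay is indirect and non-linear and a huge
	 * amount of future debt can result in extended and excessive
	 * delays.  The thresholds below map accumulated debt, expressed
	 * as percents of one timer period (see iocg_kick_delay()),
	 * piecewise-linearly onto delay: no delay below 5 periods of
	 * debt (500%), MAX_DELAY at 250 periods (25000%), and
	 * MIN_DELAY..MAX_DELAY usecs interpolated in between.
	 */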
	MIN_DELAY_THR_PCT	= 500,
	MAX_DELAY_THR_PCT	= 25000,
	MIN_DELAY		= 250,
	MAX_DELAY		= 250 * USEC_PER_MSEC,

	/* halve debts if avg usage over 100ms is under 50% */
	DFGV_USAGE_PCT		= 50,
	DFGV_PERIOD		= 100 * USEC_PER_MSEC,

	/* don't let cmds which take a very long time pin lagging for too long */
	MAX_LAGGING_PERIODS	= 10,

	/* switch iff the conditions are met for longer than this */
	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,

	/*
	 * The size of the IO which the cost model considers a page.
	 * This is independent of the actual kernel page size.
	 */
	IOC_PAGE_SHIFT		= 12,
	IOC_PAGE_SIZE		= 1 << IOC_PAGE_SHIFT,
	IOC_SECT_TO_PAGE_SHIFT	= IOC_PAGE_SHIFT - SECTOR_SHIFT,

	/* if apart further than 16M, consider randio for linear model */
	LCOEF_RANDIO_PAGES	= 4096,
};

enum ioc_running {
	IOC_IDLE,
	IOC_RUNNING,
	IOC_STOP,
};

/* io.cost.qos controls including per-dev enable of the whole controller */
enum {
	QOS_ENABLE,
	QOS_CTRL,
	NR_QOS_CTRL_PARAMS,
};

/* io.cost.qos params */
enum {
	QOS_RPPM,
	QOS_RLAT,
	QOS_WPPM,
	QOS_WLAT,
	QOS_MIN,
	QOS_MAX,
	NR_QOS_PARAMS,
};

/* io.cost.model controls */
enum {
	COST_CTRL,
	COST_MODEL,
	NR_COST_CTRL_PARAMS,
};

/* builtin linear cost model coefficients */
enum {
	I_LCOEF_RBPS,
	I_LCOEF_RSEQIOPS,
	I_LCOEF_RRANDIOPS,
	I_LCOEF_WBPS,
	I_LCOEF_WSEQIOPS,
	I_LCOEF_WRANDIOPS,
	NR_I_LCOEFS,
};

enum {
	LCOEF_RPAGE,
	LCOEF_RSEQIO,
	LCOEF_RRANDIO,
	LCOEF_WPAGE,
	LCOEF_WSEQIO,
	LCOEF_WRANDIO,
	NR_LCOEFS,
};

enum {
	AUTOP_INVALID,
	AUTOP_HDD,
	AUTOP_SSD_QD1,
	AUTOP_SSD_DFL,
	AUTOP_SSD_FAST,
};

struct ioc_params {
	u32				qos[NR_QOS_PARAMS];
	u64				i_lcoefs[NR_I_LCOEFS];
	u64				lcoefs[NR_LCOEFS];
	u32				too_fast_vrate_pct;
	u32				too_slow_vrate_pct;
};

struct ioc_margins {
	s64				min;
	s64				low;
	s64				target;
};

struct ioc_missed {
	local_t				nr_met;
	local_t				nr_missed;
	u32				last_met;
	u32				last_missed;
};

struct ioc_pcpu_stat {
	struct ioc_missed		missed[2];

	local64_t			rq_wait_ns;
	u64				last_rq_wait_ns;
};

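/* per device */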
struct ioc {
	struct rq_qos			rqos;

	bool				enabled;

	struct ioc_params		params;
	struct ioc_margins		margins;
	u32				period_us;
	u32				timer_slack_ns;
	u64				vrate_min;
	u64				vrate_max;

	spinlock_t			lock;
	struct timer_list		timer;
	struct list_head		active_iocgs;
	struct ioc_pcpu_stat __percpu	*pcpu_stat;

	enum ioc_running		running;
	atomic64_t			vtime_rate;
	u64				vtime_base_rate;
	s64				vtime_err;

	seqcount_spinlock_t		period_seqcount;
	u64				period_at;
	u64				period_at_vtime;

	atomic64_t			cur_period;
	int				busy_level;

	bool				weights_updated;
	atomic_t			hweight_gen;

	/* debt forgiveness */
	u64				dfgv_period_at;
	u64				dfgv_period_rem;
	u64				dfgv_usage_us_sum;

	u64				autop_too_fast_at;
	u64				autop_too_slow_at;
	int				autop_idx;
	bool				user_qos_params:1;
	bool				user_cost_model:1;
};

struct iocg_pcpu_stat {
	local64_t			abs_vusage;
};

struct iocg_stat {
	u64				usage_us;
	u64				wait_us;
	u64				indebt_us;
	u64				indelay_us;
};

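/* per device-cgroup pair */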
struct ioc_gq {
	struct blkg_policy_data		pd;
	struct ioc			*ioc;

	u32				cfg_weight;
	u32				weight;
	u32				active;
	u32				inuse;

	u32				last_inuse;
	s64				saved_margin;

	sector_t			cursor;

	atomic64_t			vtime;
	atomic64_t			done_vtime;
	u64				abs_vdebt;

	/* current delay in effect and when it started */
	u64				delay;
	u64				delay_at;

	atomic64_t			active_period;
	struct list_head		active_list;

	/* see __propagate_weights() and current_hweight() for details */
	u64				child_active_sum;
	u64				child_inuse_sum;
	u64				child_adjusted_sum;
	int				hweight_gen;
	u32				hweight_active;
	u32				hweight_inuse;
	u32				hweight_donating;
	u32				hweight_after_donation;

	struct list_head		walk_list;
	struct list_head		surplus_list;

	struct wait_queue_head		waitq;
	struct hrtimer			waitq_timer;

	/* timestamp at the latest activation */
	u64				activated_at;

	/* statistics */
	struct iocg_pcpu_stat __percpu	*pcpu_stat;
	struct iocg_stat		local_stat;
	struct iocg_stat		desc_stat;
	struct iocg_stat		last_stat;
	u64				last_stat_abs_vusage;
	u64				usage_delta_us;
	u64				wait_since;
	u64				indebt_since;
	u64				indelay_since;

	/* this iocg's depth in the hierarchy and ancestors including self */
	int				level;
	struct ioc_gq			*ancestors[];
};

/* per cgroup */
struct ioc_cgrp {
	struct blkcg_policy_data	cpd;
	unsigned int			dfl_weight;
};

struct ioc_now {
	u64				now_ns;
	u64				now;
	u64				vnow;
	u64				vrate;
};

struct iocg_wait {
	struct wait_queue_entry		wait;
	struct bio			*bio;
	u64				abs_cost;
	bool				committed;
};

struct iocg_wake_ctx {
	struct ioc_gq			*iocg;
	u32				hw_inuse;
	s64				vbudget;
};

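/*
 * Default QoS and cost model parameters per device class, as detected
 * by ioc_autop_idx().  The linear coefficients are presumably derived
 * from benchmarking representative devices; they are only used until
 * the user configures io.cost.qos / io.cost.model explicitly.
 */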
static const struct ioc_params autop[] = {
	[AUTOP_HDD] = {
		.qos				= {
			[QOS_RLAT]		= 250000,
			[QOS_WLAT]		= 250000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		= 174019176,
			[I_LCOEF_RSEQIOPS]	= 41708,
			[I_LCOEF_RRANDIOPS]	= 370,
			[I_LCOEF_WBPS]		= 178075866,
			[I_LCOEF_WSEQIOPS]	= 42705,
			[I_LCOEF_WRANDIOPS]	= 378,
		},
	},
	[AUTOP_SSD_QD1] = {
		.qos				= {
			[QOS_RLAT]		= 25000,
			[QOS_WLAT]		= 25000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		= 245855193,
			[I_LCOEF_RSEQIOPS]	= 61575,
			[I_LCOEF_RRANDIOPS]	= 6946,
			[I_LCOEF_WBPS]		= 141365009,
			[I_LCOEF_WSEQIOPS]	= 33716,
			[I_LCOEF_WRANDIOPS]	= 26796,
		},
	},
	[AUTOP_SSD_DFL] = {
		.qos				= {
			[QOS_RLAT]		= 25000,
			[QOS_WLAT]		= 25000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		= 488636629,
			[I_LCOEF_RSEQIOPS]	= 8932,
			[I_LCOEF_RRANDIOPS]	= 8518,
			[I_LCOEF_WBPS]		= 427891549,
			[I_LCOEF_WSEQIOPS]	= 28755,
			[I_LCOEF_WRANDIOPS]	= 21940,
		},
		.too_fast_vrate_pct		= 500,
	},
	[AUTOP_SSD_FAST] = {
		.qos				= {
			[QOS_RLAT]		= 5000,
			[QOS_WLAT]		= 5000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		= 3102524156LLU,
			[I_LCOEF_RSEQIOPS]	= 724816,
			[I_LCOEF_RRANDIOPS]	= 778122,
			[I_LCOEF_WBPS]		= 1742780862LLU,
			[I_LCOEF_WSEQIOPS]	= 425702,
			[I_LCOEF_WRANDIOPS]	= 443193,
		},
		.too_slow_vrate_pct		= 10,
	},
};

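/*
 * vrate adjustment step sizes, in percent, indexed by |busy_level|.
 * Deeper saturation or idleness adjusts the base rate more
 * aggressively; see ioc_adjust_base_vrate().
 */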
static u32 vrate_adj_pct[] =
	{ 0, 0, 0, 0,
	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };

static struct blkcg_policy blkcg_policy_iocost;

/* accessors and helpers */
static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
{
	return container_of(rqos, struct ioc, rqos);
}

static struct ioc *q_to_ioc(struct request_queue *q)
{
	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
}

static const char *q_name(struct request_queue *q)
{
	if (blk_queue_registered(q))
		return kobject_name(q->kobj.parent);
	else
		return "<unknown>";
}

static const char __maybe_unused *ioc_name(struct ioc *ioc)
{
	return q_name(ioc->rqos.q);
}

static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
}

static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
{
	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
}

static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
{
	return pd_to_blkg(&iocg->pd);
}

static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
{
	return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
			    struct ioc_cgrp, cpd);
}

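/*
 * Scale @abs_cost to the inverse of @hw_inuse.  The lower the
 * hierarchical weight, the more expensive each IO; e.g. at
 * hw_inuse == WEIGHT_ONE / 2, an IO is charged twice its absolute
 * cost.  Must round up.  cost_to_abs_cost() below is the inverse.
 */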
static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
}

static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
}

static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
			    u64 abs_cost, u64 cost)
{
	struct iocg_pcpu_stat *gcs;

	bio->bi_iocost_cost = cost;
	atomic64_add(cost, &iocg->vtime);

	gcs = get_cpu_ptr(iocg->pcpu_stat);
	local64_add(abs_cost, &gcs->abs_vusage);
	put_cpu_ptr(gcs);
}

static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
{
	if (lock_ioc) {
		spin_lock_irqsave(&iocg->ioc->lock, *flags);
		spin_lock(&iocg->waitq.lock);
	} else {
		spin_lock_irqsave(&iocg->waitq.lock, *flags);
	}
}

static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
{
	if (unlock_ioc) {
		spin_unlock(&iocg->waitq.lock);
		spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
	} else {
		spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
	}
}

#define CREATE_TRACE_POINTS
#include <trace/events/iocost.h>

static void ioc_refresh_margins(struct ioc *ioc)
{
	struct ioc_margins *margins = &ioc->margins;
	u32 period_us = ioc->period_us;
	u64 vrate = ioc->vtime_base_rate;

	margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
	margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
	margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
}

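/*
 * period_us is derived from the latency targets: the tighter the
 * target, the shorter the period.  The multiplier grows as the allowed
 * miss fraction (ppm) shrinks, and the result is bound to
 * [MIN_PERIOD, MAX_PERIOD].
 */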
static void ioc_refresh_period_us(struct ioc *ioc)
{
	u32 ppm, lat, multi, period_us;

	lockdep_assert_held(&ioc->lock);

	/* pick the higher latency target */
	if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
		ppm = ioc->params.qos[QOS_RPPM];
		lat = ioc->params.qos[QOS_RLAT];
	} else {
		ppm = ioc->params.qos[QOS_WPPM];
		lat = ioc->params.qos[QOS_WLAT];
	}

	if (ppm)
		multi = max_t(u32, (MILLION - ppm) / 50000, 2);
	else
		multi = 2;
	period_us = multi * lat;
	period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);

	/* calculate dependent params */
	ioc->period_us = period_us;
	ioc->timer_slack_ns = div64_u64(
		(u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
		100);
	ioc_refresh_margins(ioc);
}

static int ioc_autop_idx(struct ioc *ioc)
{
	int idx = ioc->autop_idx;
	const struct ioc_params *p = &autop[idx];
	u32 vrate_pct;
	u64 now_ns;

	/* rotational? */
	if (!blk_queue_nonrot(ioc->rqos.q))
		return AUTOP_HDD;

	/* handle SATA SSDs w/ broken NCQ */
	if (blk_queue_depth(ioc->rqos.q) == 1)
		return AUTOP_SSD_QD1;

	/* use one of the normal ssd sets */
	if (idx < AUTOP_SSD_DFL)
		return AUTOP_SSD_DFL;

	/* if user is overriding anything, maintain what was there */
	if (ioc->user_qos_params || ioc->user_cost_model)
		return idx;

	/* step up/down based on the vrate */
	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
	now_ns = ktime_get_ns();

	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
		if (!ioc->autop_too_fast_at)
			ioc->autop_too_fast_at = now_ns;
		if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
			return idx + 1;
	} else {
		ioc->autop_too_fast_at = 0;
	}

	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
		if (!ioc->autop_too_slow_at)
			ioc->autop_too_slow_at = now_ns;
		if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
			return idx - 1;
	} else {
		ioc->autop_too_slow_at = 0;
	}

	return idx;
}

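/*
 * Take the following as input
 *
 *  @bps	maximum sequential throughput
 *  @seqiops	maximum sequential 4k iops
 *  @randiops	maximum random 4k iops
 *
 * and calculate the linear model cost coefficients:
 *
 *  *@page	per-page cost		1s / (@bps / IOC_PAGE_SIZE)
 *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
 *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
 */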
static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
			u64 *page, u64 *seqio, u64 *randio)
{
	u64 v;

	*page = *seqio = *randio = 0;

	if (bps)
		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
					   DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));

	if (seqiops) {
		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
		if (v > *page)
			*seqio = v - *page;
	}

	if (randiops) {
		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
		if (v > *page)
			*randio = v - *page;
	}
}

static void ioc_refresh_lcoefs(struct ioc *ioc)
{
	u64 *u = ioc->params.i_lcoefs;
	u64 *c = ioc->params.lcoefs;

	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
}

static bool ioc_refresh_params(struct ioc *ioc, bool force)
{
	const struct ioc_params *p;
	int idx;

	lockdep_assert_held(&ioc->lock);

	idx = ioc_autop_idx(ioc);
	p = &autop[idx];

	if (idx == ioc->autop_idx && !force)
		return false;

	if (idx != ioc->autop_idx)
		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);

	ioc->autop_idx = idx;
	ioc->autop_too_fast_at = 0;
	ioc->autop_too_slow_at = 0;

	if (!ioc->user_qos_params)
		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
	if (!ioc->user_cost_model)
		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));

	ioc_refresh_period_us(ioc);
	ioc_refresh_lcoefs(ioc);

	ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
					    VTIME_PER_USEC, MILLION);
	ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
				   VTIME_PER_USEC, MILLION);

	return true;
}

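/*
 * When an iocg accumulates too much vtime or gets deactivated, we
 * throw away some vtime, which lowers the overall device utilization.
 * As the exact amount being thrown away is known, compensate by
 * accelerating vrate so that the extra vtime generated in the current
 * period matches what got lost.
 */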
static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
{
	s64 pleft = ioc->period_at + ioc->period_us - now->now;
	s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
	s64 vcomp, vcomp_min, vcomp_max;

	lockdep_assert_held(&ioc->lock);

	/* we need some time left in this period */
	if (pleft <= 0)
		goto done;

	/*
	 * Calculate how much vrate should be adjusted to offset the error.
	 * Limit the amount of adjustment and deduct the adjusted amount from
	 * the error.
	 */
	vcomp = -div64_s64(ioc->vtime_err, pleft);
	vcomp_min = -(ioc->vtime_base_rate >> 1);
	vcomp_max = ioc->vtime_base_rate;
	vcomp = clamp(vcomp, vcomp_min, vcomp_max);

	ioc->vtime_err += vcomp * pleft;

	atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
done:
	/* bound how much error can accumulate */
	ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
}

static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
				  int nr_lagging, int nr_shortages,
				  int prev_busy_level, u32 *missed_ppm)
{
	u64 vrate = ioc->vtime_base_rate;
	u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;

	if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
		if (ioc->busy_level != prev_busy_level || nr_lagging)
			trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
						   missed_ppm, rq_wait_pct,
						   nr_lagging, nr_shortages);

		return;
	}

	/*
	 * If vrate is out of bounds, apply the clamp gradually as the
	 * bounds can change abruptly.  Otherwise, apply the busy_level
	 * based adjustment.
	 */
	if (vrate < vrate_min) {
		vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
		vrate = min(vrate, vrate_min);
	} else if (vrate > vrate_max) {
		vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
		vrate = max(vrate, vrate_max);
	} else {
		int idx = min_t(int, abs(ioc->busy_level),
				ARRAY_SIZE(vrate_adj_pct) - 1);
		u32 adj_pct = vrate_adj_pct[idx];

		if (ioc->busy_level > 0)
			adj_pct = 100 - adj_pct;
		else
			adj_pct = 100 + adj_pct;

		vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
			      vrate_min, vrate_max);
	}

	trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
				   nr_lagging, nr_shortages);

	ioc->vtime_base_rate = vrate;
	ioc_refresh_margins(ioc);
}

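/*
 * Take a snapshot of the current [v]time and vrate.  vnow is derived
 * from the last period boundary; the seqcount retry loop guarantees a
 * consistent (period_at, period_at_vtime) pair without holding
 * ioc->lock.
 */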
static void ioc_now(struct ioc *ioc, struct ioc_now *now)
{
	unsigned seq;

	now->now_ns = ktime_get();
	now->now = ktime_to_us(now->now_ns);
	now->vrate = atomic64_read(&ioc->vtime_rate);

	do {
		seq = read_seqcount_begin(&ioc->period_seqcount);
		now->vnow = ioc->period_at_vtime +
			(now->now - ioc->period_at) * now->vrate;
	} while (read_seqcount_retry(&ioc->period_seqcount, seq));
}

static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
{
	WARN_ON_ONCE(ioc->running != IOC_RUNNING);

	write_seqcount_begin(&ioc->period_seqcount);
	ioc->period_at = now->now;
	ioc->period_at_vtime = now->vnow;
	write_seqcount_end(&ioc->period_seqcount);

	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
	add_timer(&ioc->timer);
}

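/*
 * Update @iocg's `active` and `inuse` to @active and @inuse, update
 * level weight sums and propagate upwards accordingly.  If @save, the
 * current margin is saved to be used as reference for later inuse
 * in-period adjustments.
 */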
static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
				bool save, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	int lvl;

	lockdep_assert_held(&ioc->lock);

	/*
	 * For an active inner node, inuse is solely determined by the
	 * inuse to active ratio of its children.  A leaf's inuse can't
	 * be zero or exceed its active weight.
	 */
	if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
		inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
					   iocg->child_active_sum);
	} else {
		inuse = clamp_t(u32, inuse, 1, active);
	}

	iocg->last_inuse = iocg->inuse;
	if (save)
		iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);

	if (active == iocg->active && inuse == iocg->inuse)
		return;

	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];
		u32 parent_active = 0, parent_inuse = 0;

		/* update the level sums */
		parent->child_active_sum += (s32)(active - child->active);
		parent->child_inuse_sum += (s32)(inuse - child->inuse);

		child->active = active;
		child->inuse = inuse;

		/*
		 * The delta between inuse and active sums indicates that
		 * much of weight is being given away.  Parent's inuse
		 * and active should reflect the ratio.
		 */
		if (parent->child_active_sum) {
			parent_active = parent->weight;
			parent_inuse = DIV64_U64_ROUND_UP(
				parent_active * parent->child_inuse_sum,
				parent->child_active_sum);
		}

		/* do we need to keep walking up? */
		if (parent_active == parent->active &&
		    parent_inuse == parent->inuse)
			break;

		active = parent_active;
		inuse = parent_inuse;
	}

	ioc->weights_updated = true;
}

static void commit_weights(struct ioc *ioc)
{
	lockdep_assert_held(&ioc->lock);

	if (ioc->weights_updated) {
		/* paired with rmb in current_hweight(), see below */
		smp_wmb();
		atomic_inc(&ioc->hweight_gen);
		ioc->weights_updated = false;
	}
}

static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
			      bool save, struct ioc_now *now)
{
	__propagate_weights(iocg, active, inuse, save, now);
	commit_weights(iocg->ioc);
}

static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
{
	struct ioc *ioc = iocg->ioc;
	int lvl;
	u32 hwa, hwi;
	int ioc_gen;

	/* hot path - if uptodate, use the cached value */
	ioc_gen = atomic_read(&ioc->hweight_gen);
	if (ioc_gen == iocg->hweight_gen)
		goto out;

	/*
	 * Paired with wmb in commit_weights().  If we saw the updated
	 * hweight_gen, all the weight updates are visible.
	 */
	smp_rmb();

	hwa = hwi = WEIGHT_ONE;
	for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];
		u64 active_sum = READ_ONCE(parent->child_active_sum);
		u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
		u32 active = READ_ONCE(child->active);
		u32 inuse = READ_ONCE(child->inuse);

		/* we can race with deactivations and either may read as zero */
		if (!active_sum || !inuse_sum)
			continue;

		active_sum = max_t(u64, active, active_sum);
		hwa = div64_u64((u64)hwa * active, active_sum);

		inuse_sum = max_t(u64, inuse, inuse_sum);
		hwi = div64_u64((u64)hwi * inuse, inuse_sum);
	}

	iocg->hweight_active = max_t(u32, hwa, 1);
	iocg->hweight_inuse = max_t(u32, hwi, 1);
	iocg->hweight_gen = ioc_gen;
out:
	if (hw_activep)
		*hw_activep = iocg->hweight_active;
	if (hw_inusep)
		*hw_inusep = iocg->hweight_inuse;
}

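/*
 * Calculate the hweight_inuse @iocg would get if its inuse were raised
 * all the way up to its active weight, with all ancestor sums adjusted
 * accordingly.  Used as the upper bound when deciding how much an iocg
 * may donate.
 */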
static u32 current_hweight_max(struct ioc_gq *iocg)
{
	u32 hwm = WEIGHT_ONE;
	u32 inuse = iocg->active;
	u64 child_inuse_sum;
	int lvl;

	lockdep_assert_held(&iocg->ioc->lock);

	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];

		child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
		hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
		inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
					   parent->child_active_sum);
	}

	return max_t(u32, hwm, 1);
}

static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
	u32 weight;

	lockdep_assert_held(&ioc->lock);

	weight = iocg->cfg_weight ?: iocc->dfl_weight;
	if (weight != iocg->weight && iocg->active)
		propagate_weights(iocg, weight, iocg->inuse, true, now);
	iocg->weight = weight;
}

static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	u64 last_period, cur_period;
	u64 vtime, vtarget;
	int i;

	/*
	 * If seems to be already active, just update the stamp to tell
	 * the timer that we're still active.  We don't mind occassional
	 * races in this path.
	 */
	if (!list_empty(&iocg->active_list)) {
		ioc_now(ioc, now);
		cur_period = atomic64_read(&ioc->cur_period);
		if (atomic64_read(&iocg->active_period) != cur_period)
			atomic64_set(&iocg->active_period, cur_period);
		return true;
	}

	/* racy check on internal node IOs, treat as root level IOs */
	if (iocg->child_active_sum)
		return false;

	spin_lock_irq(&ioc->lock);

	ioc_now(ioc, now);

	/* update period */
	cur_period = atomic64_read(&ioc->cur_period);
	last_period = atomic64_read(&iocg->active_period);
	atomic64_set(&iocg->active_period, cur_period);

	/* already activated or breaking leaf-only constraint? */
	if (!list_empty(&iocg->active_list))
		goto succeed_unlock;
	for (i = iocg->level - 1; i > 0; i--)
		if (!list_empty(&iocg->ancestors[i]->active_list))
			goto fail_unlock;

	if (iocg->child_active_sum)
		goto fail_unlock;

	/*
	 * Always start with the target budget.  On deactivation, we
	 * throw away anything above it.
	 */
	vtarget = now->vnow - ioc->margins.target;
	vtime = atomic64_read(&iocg->vtime);

	atomic64_add(vtarget - vtime, &iocg->vtime);
	atomic64_add(vtarget - vtime, &iocg->done_vtime);
	vtime = vtarget;

	/*
	 * Activate, propagate weight and start period timer if not
	 * running.  Reset hweight_gen to avoid accidental match from
	 * wrapping.
	 */
	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
	list_add(&iocg->active_list, &ioc->active_iocgs);

	propagate_weights(iocg, iocg->weight,
			  iocg->last_inuse ?: iocg->weight, true, now);

	TRACE_IOCG_PATH(iocg_activate, iocg, now,
			last_period, cur_period, vtime);

	iocg->activated_at = now->now;

	if (ioc->running == IOC_IDLE) {
		ioc->running = IOC_RUNNING;
		ioc->dfgv_period_at = now->now;
		ioc->dfgv_period_rem = 0;
		ioc_start_period(ioc, now);
	}

succeed_unlock:
	spin_unlock_irq(&ioc->lock);
	return true;

fail_unlock:
	spin_unlock_irq(&ioc->lock);
	return false;
}

static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
	u64 tdelta, delay, new_delay;
	s64 vover, vover_pct;
	u32 hwa;

	lockdep_assert_held(&iocg->waitq.lock);

	/* calculate the current delay in effect - 1/2 every second */
	tdelta = now->now - iocg->delay_at;
	if (iocg->delay)
		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
	else
		delay = 0;

	/* calculate the new delay from the debt amount */
	current_hweight(iocg, &hwa, NULL);
	vover = atomic64_read(&iocg->vtime) +
		abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
	vover_pct = div64_s64(100 * vover,
			      ioc->period_us * ioc->vtime_base_rate);

	if (vover_pct <= MIN_DELAY_THR_PCT)
		new_delay = 0;
	else if (vover_pct >= MAX_DELAY_THR_PCT)
		new_delay = MAX_DELAY;
	else
		new_delay = MIN_DELAY +
			div_u64((MAX_DELAY - MIN_DELAY) *
				(vover_pct - MIN_DELAY_THR_PCT),
				MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);

	/* pick the higher one and apply */
	if (new_delay > delay) {
		iocg->delay = new_delay;
		iocg->delay_at = now->now;
		delay = new_delay;
	}

	if (delay >= MIN_DELAY) {
		if (!iocg->indelay_since)
			iocg->indelay_since = now->now;
		blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
		return true;
	} else {
		if (iocg->indelay_since) {
			iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
			iocg->indelay_since = 0;
		}
		iocg->delay = 0;
		blkcg_clear_delay(blkg);
		return false;
	}
}

static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
			    struct ioc_now *now)
{
	struct iocg_pcpu_stat *gcs;

	lockdep_assert_held(&iocg->ioc->lock);
	lockdep_assert_held(&iocg->waitq.lock);
	WARN_ON_ONCE(list_empty(&iocg->active_list));

	/*
	 * Once in debt, debt handling owns inuse.  @iocg stays at the
	 * minimum inuse until the debt is paid off.
	 */
	if (!iocg->abs_vdebt && abs_cost) {
		iocg->indebt_since = now->now;
		propagate_weights(iocg, iocg->active, 0, false, now);
	}

	iocg->abs_vdebt += abs_cost;

	gcs = get_cpu_ptr(iocg->pcpu_stat);
	local64_add(abs_cost, &gcs->abs_vusage);
	put_cpu_ptr(gcs);
}

static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
			  struct ioc_now *now)
{
	lockdep_assert_held(&iocg->ioc->lock);
	lockdep_assert_held(&iocg->waitq.lock);

	/* make sure that nobody messed with @iocg */
	WARN_ON_ONCE(list_empty(&iocg->active_list));
	WARN_ON_ONCE(iocg->inuse > 1);

	iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);

	/* if debt is paid in full, restore inuse */
	if (!iocg->abs_vdebt) {
		iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
		iocg->indebt_since = 0;

		propagate_weights(iocg, iocg->active, iocg->last_inuse,
				  false, now);
	}
}

static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
			int flags, void *key)
{
	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
	struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);

	ctx->vbudget -= cost;

	if (ctx->vbudget < 0)
		return -1;

	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
	wait->committed = true;

	/*
	 * autoremove_wake_function() removes the wait entry only when
	 * it actually changed the task state.  We want the wait always
	 * removed.  Remove explicitly and use default_wake_function().
	 */
	default_wake_function(wq_entry, mode, flags, key);
	list_del_init_careful(&wq_entry->entry);
	return 0;
}

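/*
 * Calculate the accumulated budget, pay debt if @pay_debt and wake up
 * waiters accordingly.  When @pay_debt is %true, the caller must be
 * holding ioc->lock in addition to iocg->waitq.lock.
 */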
static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
			    struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct iocg_wake_ctx ctx = { .iocg = iocg };
	u64 vshortage, expires, oexpires;
	s64 vbudget;
	u32 hwa;

	lockdep_assert_held(&iocg->waitq.lock);

	current_hweight(iocg, &hwa, NULL);
	vbudget = now->vnow - atomic64_read(&iocg->vtime);

	/* pay off debt */
	if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
		u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
		u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
		u64 vpay = abs_cost_to_cost(abs_vpay, hwa);

		lockdep_assert_held(&ioc->lock);

		atomic64_add(vpay, &iocg->vtime);
		atomic64_add(vpay, &iocg->done_vtime);
		iocg_pay_debt(iocg, abs_vpay, now);
		vbudget -= vpay;
	}

	if (iocg->abs_vdebt || iocg->delay)
		iocg_kick_delay(iocg, now);

	/*
	 * Debt can still be outstanding if we haven't paid all yet or
	 * the caller raced and called without @pay_debt.  Shouldn't
	 * wake up waiters in such cases.
	 */
	if (iocg->abs_vdebt) {
		s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
		vbudget = min_t(s64, 0, vbudget - vdebt);
	}

	/*
	 * Wake up the ones which are due and see how much vtime we'll
	 * need to pay off the debt and wake up all the waiters.
	 */
	ctx.vbudget = vbudget;
	current_hweight(iocg, NULL, &ctx.hw_inuse);

	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);

	if (!waitqueue_active(&iocg->waitq)) {
		if (iocg->wait_since) {
			iocg->local_stat.wait_us += now->now - iocg->wait_since;
			iocg->wait_since = 0;
		}
		return;
	}

	if (!iocg->wait_since)
		iocg->wait_since = now->now;

	if (WARN_ON_ONCE(ctx.vbudget >= 0))
		return;

	/* determine next wakeup, add a timer margin to guarantee chunking */
	vshortage = -ctx.vbudget;
	expires = now->now_ns +
		DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
		NSEC_PER_USEC;
	expires += ioc->timer_slack_ns;

	/* if already active and close enough, don't bother */
	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
	if (hrtimer_is_queued(&iocg->waitq_timer) &&
	    abs(oexpires - expires) <= ioc->timer_slack_ns)
		return;

	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
			       ioc->timer_slack_ns, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
{
	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
	bool pay_debt = READ_ONCE(iocg->abs_vdebt);
	struct ioc_now now;
	unsigned long flags;

	ioc_now(iocg->ioc, &now);

	iocg_lock(iocg, pay_debt, &flags);
	iocg_kick_waitq(iocg, pay_debt, &now);
	iocg_unlock(iocg, pay_debt, &flags);

	return HRTIMER_NORESTART;
}

static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
{
	u32 nr_met[2] = { };
	u32 nr_missed[2] = { };
	u64 rq_wait_ns = 0;
	int cpu, rw;

	for_each_online_cpu(cpu) {
		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
		u64 this_rq_wait_ns;

		for (rw = READ; rw <= WRITE; rw++) {
			u32 this_met = local_read(&stat->missed[rw].nr_met);
			u32 this_missed = local_read(&stat->missed[rw].nr_missed);

			nr_met[rw] += this_met - stat->missed[rw].last_met;
			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
			stat->missed[rw].last_met = this_met;
			stat->missed[rw].last_missed = this_missed;
		}

		this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
		stat->last_rq_wait_ns = this_rq_wait_ns;
	}

	for (rw = READ; rw <= WRITE; rw++) {
		if (nr_met[rw] + nr_missed[rw])
			missed_ppm_ar[rw] =
				DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
						   nr_met[rw] + nr_missed[rw]);
		else
			missed_ppm_ar[rw] = 0;
	}

	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
				   ioc->period_us * NSEC_PER_USEC);
}

/* was iocg idle this period? */
static bool iocg_is_idle(struct ioc_gq *iocg)
{
	struct ioc *ioc = iocg->ioc;

	/* did something get issued this period? */
	if (atomic64_read(&iocg->active_period) ==
	    atomic64_read(&ioc->cur_period))
		return false;

	/* is something in flight? */
	if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
		return false;

	return true;
}

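/*
 * Call this function on the target leaf @iocg's to build pre-order
 * traversal list of all the ancestors in @inner_walk.  The inner nodes
 * are linked through walk_list and the ones which weren't visited yet
 * before this call are appended.
 */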
static void iocg_build_inner_walk(struct ioc_gq *iocg,
				  struct list_head *inner_walk)
{
	int lvl;

	WARN_ON_ONCE(!list_empty(&iocg->walk_list));

	/* find the first ancestor which hasn't been visited yet */
	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		if (!list_empty(&iocg->ancestors[lvl]->walk_list))
			break;
	}

	/* walk down and visit the inner nodes to get pre-order traversal */
	while (++lvl <= iocg->level - 1) {
		struct ioc_gq *inner = iocg->ancestors[lvl];

		list_add_tail(&inner->walk_list, inner_walk);
	}
}

/* collect per-cpu counters and propagate the deltas to the parent */
static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct iocg_stat new_stat;
	u64 abs_vusage = 0;
	u64 vusage_delta;
	int cpu;

	lockdep_assert_held(&iocg->ioc->lock);

	/* collect per-cpu counters */
	for_each_possible_cpu(cpu) {
		abs_vusage += local64_read(
				per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
	}
	vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
	iocg->last_stat_abs_vusage = abs_vusage;

	iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
	iocg->local_stat.usage_us += iocg->usage_delta_us;

	new_stat.usage_us =
		iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
	new_stat.wait_us =
		iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
	new_stat.indebt_us =
		iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
	new_stat.indelay_us =
		iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;

	/* propagate the deltas to the parent */
	if (iocg->level > 0) {
		struct iocg_stat *parent_stat =
			&iocg->ancestors[iocg->level - 1]->desc_stat;

		parent_stat->usage_us +=
			new_stat.usage_us - iocg->last_stat.usage_us;
		parent_stat->wait_us +=
			new_stat.wait_us - iocg->last_stat.wait_us;
		parent_stat->indebt_us +=
			new_stat.indebt_us - iocg->last_stat.indebt_us;
		parent_stat->indelay_us +=
			new_stat.indelay_us - iocg->last_stat.indelay_us;
	}

	iocg->last_stat = new_stat;
}

/* get stat counters ready for reading on all active iocgs */
static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
{
	LIST_HEAD(inner_walk);
	struct ioc_gq *iocg, *tiocg;

	/* flush leaves and build inner node walk list */
	list_for_each_entry(iocg, target_iocgs, active_list) {
		iocg_flush_stat_one(iocg, now);
		iocg_build_inner_walk(iocg, &inner_walk);
	}

	/* keep flushing upwards by walking the inner list backwards */
	list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
		iocg_flush_stat_one(iocg, now);
		list_del_init(&iocg->walk_list);
	}
}

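/*
 * Determine what @iocg's hweight_inuse should be after donating unused
 * capacity.  @hwm is the upper bound and used to signal no donation.
 * This function also throws away @iocg's excess budget.
 */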
static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
				  u32 usage, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	u64 vtime = atomic64_read(&iocg->vtime);
	s64 excess, delta, target, new_hwi;

	/* debt handling owns inuse for debtors */
	if (iocg->abs_vdebt)
		return 1;

	/* see whether minimum margin requirement is met */
	if (waitqueue_active(&iocg->waitq) ||
	    time_after64(vtime, now->vnow - ioc->margins.min))
		return hwm;

	/* throw away excess above target */
	excess = now->vnow - vtime - ioc->margins.target;
	if (excess > 0) {
		atomic64_add(excess, &iocg->vtime);
		atomic64_add(excess, &iocg->done_vtime);
		vtime += excess;
		ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
	}

	/*
	 * Let's say the distance between iocg's and device's vtimes as
	 * a fraction of the period duration is delta.  Assuming that
	 * the iocg will consume the usage determined above, we want to
	 * determine new_hwi so that delta equals MARGIN_TARGET at the
	 * end of the next period.
	 */
	delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
			  now->vnow - ioc->period_at_vtime);
	target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
	new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);

	return clamp_t(s64, new_hwi, 1, hwm);
}

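/*
 * For work-conserving behavior, iocgs which aren't using up their full
 * weights donate the surplus so that other active iocgs can run
 * faster.  Each surplus leaf's hweight_after_donation has already been
 * determined by the caller; here the inner nodes' donating sums are
 * accumulated bottom-up and a global donation rate (gamma) is derived
 * at the root, then applied top-down so that every non-donating iocg's
 * hweight_inuse scales uniformly while each donor ends up with its
 * requested post-donation hweight.
 */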
static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
{
	LIST_HEAD(over_hwa);
	LIST_HEAD(inner_walk);
	struct ioc_gq *iocg, *tiocg, *root_iocg;
	u32 after_sum, over_sum, over_target, gamma;

	/*
	 * The sum of hweight_after_donation's can't exceed WEIGHT_ONE.
	 * Calculate the delta and scale down the donors which went over
	 * their active hweights proportionally.
	 */
	after_sum = 0;
	over_sum = 0;
	list_for_each_entry(iocg, surpluses, surplus_list) {
		u32 hwa;

		current_hweight(iocg, &hwa, NULL);
		after_sum += iocg->hweight_after_donation;

		if (iocg->hweight_after_donation > hwa) {
			over_sum += iocg->hweight_after_donation;
			list_add(&iocg->walk_list, &over_hwa);
		}
	}

	if (after_sum >= WEIGHT_ONE) {
		u32 over_delta = after_sum - (WEIGHT_ONE - 1);
		WARN_ON_ONCE(over_sum <= over_delta);
		over_target = over_sum - over_delta;
	} else {
		over_target = 0;
	}

	list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
		if (over_target)
			iocg->hweight_after_donation =
				div_u64((u64)iocg->hweight_after_donation *
					over_target, over_sum);
		list_del_init(&iocg->walk_list);
	}

	/* build pre-order inner node walk list and prepare for donation */
	list_for_each_entry(iocg, surpluses, surplus_list) {
		iocg_build_inner_walk(iocg, &inner_walk);
	}

	root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
	WARN_ON_ONCE(root_iocg->level > 0);

	list_for_each_entry(iocg, &inner_walk, walk_list) {
		iocg->child_adjusted_sum = 0;
		iocg->hweight_donating = 0;
		iocg->hweight_after_donation = 0;
	}

	/* propagate the donating and after weights up the hierarchy */
	list_for_each_entry(iocg, surpluses, surplus_list) {
		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];

		parent->hweight_donating += iocg->hweight_donating;
		parent->hweight_after_donation += iocg->hweight_after_donation;
	}

	list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
		if (iocg->level > 0) {
			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];

			parent->hweight_donating += iocg->hweight_donating;
			parent->hweight_after_donation += iocg->hweight_after_donation;
		}
	}

	/*
	 * Calculate inner hwa's (b) and make sure the donation values
	 * are within the accepted ranges.
	 */
	list_for_each_entry(iocg, &inner_walk, walk_list) {
		if (iocg->level) {
			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];

			iocg->hweight_active = DIV64_U64_ROUND_UP(
				(u64)parent->hweight_active * iocg->active,
				parent->child_active_sum);
		}

		iocg->hweight_donating = min(iocg->hweight_donating,
					     iocg->hweight_active);
		iocg->hweight_after_donation = min(iocg->hweight_after_donation,
						   iocg->hweight_donating - 1);
		if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
				 iocg->hweight_donating <= 1 ||
				 iocg->hweight_after_donation == 0)) {
			pr_warn("iocg: invalid donation weights in ");
			pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
			pr_cont(": active=%u donating=%u after=%u\n",
				iocg->hweight_active, iocg->hweight_donating,
				iocg->hweight_after_donation);
		}
	}

	/* calculate the global donation rate */
	gamma = DIV_ROUND_UP(
		(WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
		WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));

	/*
	 * Calculate adjusted hwi, child_adjusted_sum and inuse for the
	 * inner nodes.
	 */
	list_for_each_entry(iocg, &inner_walk, walk_list) {
		struct ioc_gq *parent;
		u32 inuse, wpt, wptp;
		u64 st, sf;

		if (iocg->level == 0) {
			iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
				iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
				WEIGHT_ONE - iocg->hweight_after_donation);
			continue;
		}

		parent = iocg->ancestors[iocg->level - 1];

		iocg->hweight_inuse = DIV64_U64_ROUND_UP(
			(u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
			WEIGHT_ONE) + iocg->hweight_after_donation;

		inuse = DIV64_U64_ROUND_UP(
			(u64)parent->child_adjusted_sum * iocg->hweight_inuse,
			parent->hweight_inuse);

		st = DIV64_U64_ROUND_UP(
			iocg->child_active_sum * iocg->hweight_donating,
			iocg->hweight_active);
		sf = iocg->child_active_sum - st;
		wpt = DIV64_U64_ROUND_UP(
			(u64)iocg->active * iocg->hweight_donating,
			iocg->hweight_active);
		wptp = DIV64_U64_ROUND_UP(
			(u64)inuse * iocg->hweight_after_donation,
			iocg->hweight_inuse);

		iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
	}

	/*
	 * All inner nodes now have ->hweight_inuse and
	 * ->child_adjusted_sum set.  Update the donating leaves.
	 */
	list_for_each_entry(iocg, surpluses, surplus_list) {
		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
		u32 inuse;

		/* skip iocgs which made it onto the list but became debtors */
		if (iocg->abs_vdebt) {
			WARN_ON_ONCE(iocg->inuse > 1);
			continue;
		}

		inuse = DIV64_U64_ROUND_UP(
			parent->child_adjusted_sum * iocg->hweight_after_donation,
			parent->hweight_inuse);

		TRACE_IOCG_PATH(inuse_transfer, iocg, now,
				iocg->inuse, inuse,
				iocg->hweight_inuse,
				iocg->hweight_after_donation);

		__propagate_weights(iocg, iocg->active, inuse, true, now);
	}

	/* walk list should be dissolved after use */
	list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
		list_del_init(&iocg->walk_list);
}

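/*
 * A low weight iocg can amass a large amount of debt, e.g. when
 * anonymous memory gets reclaimed aggressively.  The debt is worked
 * off naturally while the device is saturated, but if the device stays
 * sufficiently idle (overall usage below DFGV_USAGE_PCT across a
 * DFGV_PERIOD), the debt is no longer protecting anything and gets
 * halved once per elapsed forgiveness period instead.
 */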
static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
			      struct ioc_now *now)
{
	struct ioc_gq *iocg;
	u64 dur, usage_pct, nr_cycles;

	/* if no debtor, reset the cycle */
	if (!nr_debtors) {
		ioc->dfgv_period_at = now->now;
		ioc->dfgv_period_rem = 0;
		ioc->dfgv_usage_us_sum = 0;
		return;
	}

	/*
	 * Debtors can pass through a lot of writes choking the device
	 * and we don't want to be forgiving debts while the device is
	 * struggling from write bursts.  If we're missing latency
	 * targets, consider the device fully utilized.
	 */
	if (ioc->busy_level > 0)
		usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);

	ioc->dfgv_usage_us_sum += usage_us_sum;
	if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
		return;

	/*
	 * At least DFGV_PERIOD has passed since the last period.
	 * Calculate the average usage and reset the period counters.
	 */
	dur = now->now - ioc->dfgv_period_at;
	usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);

	ioc->dfgv_period_at = now->now;
	ioc->dfgv_usage_us_sum = 0;

	/* if was too busy, reset everything */
	if (usage_pct > DFGV_USAGE_PCT) {
		ioc->dfgv_period_rem = 0;
		return;
	}

	/*
	 * Usage is lower than threshold.  Let's forgive some debts.
	 * Debt forgiveness runs off of the usual timer but the period
	 * may be elongated if the device is idle.  Calculate the number
	 * of forgiveness periods which have passed, carrying over the
	 * remainder into the next cycle.
	 */
	nr_cycles = dur + ioc->dfgv_period_rem;
	ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);

	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
		u64 __maybe_unused old_debt, __maybe_unused old_delay;

		if (!iocg->abs_vdebt && !iocg->delay)
			continue;

		spin_lock(&iocg->waitq.lock);

		old_debt = iocg->abs_vdebt;
		old_delay = iocg->delay;

		if (iocg->abs_vdebt)
			iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
		if (iocg->delay)
			iocg->delay = iocg->delay >> nr_cycles ?: 1;

		iocg_kick_waitq(iocg, true, now);

		TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
				old_debt, iocg->abs_vdebt,
				old_delay, iocg->delay);

		spin_unlock(&iocg->waitq.lock);
	}
}

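/*
 * Check the active iocgs' state to avoid oversleeping and deactivate
 * idle iocgs.
 *
 * Since waiters determine the sleep durations based on the vrate they
 * saw at the time of sleep, if vrate has increased, some waiters could
 * be sleeping for too long.  Wake up tardy waiters which should have
 * woken up in the last period and expire idle iocgs.
 */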
static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
{
	int nr_debtors = 0;
	struct ioc_gq *iocg, *tiocg;

	list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
		if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
		    !iocg->delay && !iocg_is_idle(iocg))
			continue;

		spin_lock(&iocg->waitq.lock);

		/* flush wait and indebt stat deltas */
		if (iocg->wait_since) {
			iocg->local_stat.wait_us += now->now - iocg->wait_since;
			iocg->wait_since = now->now;
		}
		if (iocg->indebt_since) {
			iocg->local_stat.indebt_us +=
				now->now - iocg->indebt_since;
			iocg->indebt_since = now->now;
		}
		if (iocg->indelay_since) {
			iocg->local_stat.indelay_us +=
				now->now - iocg->indelay_since;
			iocg->indelay_since = now->now;
		}

		if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
		    iocg->delay) {
			/* might be oversleeping vtime / hweight changes, kick */
			iocg_kick_waitq(iocg, true, now);
			if (iocg->abs_vdebt || iocg->delay)
				nr_debtors++;
		} else if (iocg_is_idle(iocg)) {
			/* no waiter and idle, deactivate */
			u64 vtime = atomic64_read(&iocg->vtime);
			s64 excess;

			/*
			 * @iocg has been inactive for a full duration
			 * and will have a high budget.  Account
			 * anything above target as error and throw
			 * away.
			 */
			excess = now->vnow - vtime - ioc->margins.target;
			if (excess > 0) {
				u32 old_hwi;

				current_hweight(iocg, NULL, &old_hwi);
				ioc->vtime_err -= div64_u64(excess * old_hwi,
							    WEIGHT_ONE);
			}

			TRACE_IOCG_PATH(iocg_idle, iocg, now,
					atomic64_read(&iocg->active_period),
					atomic64_read(&ioc->cur_period), vtime);
			__propagate_weights(iocg, 0, 0, false, now);
			list_del_init(&iocg->active_list);
		}

		spin_unlock(&iocg->waitq.lock);
	}

	commit_weights(ioc);
	return nr_debtors;
}

static void ioc_timer_fn(struct timer_list *timer)
{
	struct ioc *ioc = container_of(timer, struct ioc, timer);
	struct ioc_gq *iocg, *tiocg;
	struct ioc_now now;
	LIST_HEAD(surpluses);
	int nr_debtors, nr_shortages = 0, nr_lagging = 0;
	u64 usage_us_sum = 0;
	u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
	u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
	u32 missed_ppm[2], rq_wait_pct;
	u64 period_vtime;
	int prev_busy_level;

	/* how were the latencies during the period? */
	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);

	/* take care of active iocgs */
	spin_lock_irq(&ioc->lock);

	ioc_now(ioc, &now);

	period_vtime = now.vnow - ioc->period_at_vtime;
	if (WARN_ON_ONCE(!period_vtime)) {
		spin_unlock_irq(&ioc->lock);
		return;
	}

	nr_debtors = ioc_check_iocgs(ioc, &now);

	/*
	 * Wait and indebt stats are flushed above and the donation
	 * calculation below needs updated usage stats.  Let's bring
	 * stats up-to-date.
	 */
	iocg_flush_stat(&ioc->active_iocgs, &now);

	/* calc usage and see whether some weights need to be moved around */
	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
		u64 vdone, vtime, usage_us;
		u32 hw_active, hw_inuse;

		/*
		 * Collect unused and wind vtime closer to vnow to
		 * prevent iocgs from accumulating a large amount of
		 * budget.
		 */
		vdone = atomic64_read(&iocg->done_vtime);
		vtime = atomic64_read(&iocg->vtime);
		current_hweight(iocg, &hw_active, &hw_inuse);

		/*
		 * Latency QoS detection doesn't account for IOs which
		 * are in-flight for longer than a period.  Detect them
		 * by comparing vdone against period start.  If lagging
		 * behind device vtime, the IO is unlikely to make it
		 * back in time - count as lagging.
		 */
		if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
		    time_after64(vtime, vdone) &&
		    time_after64(vtime, now.vnow -
				 MAX_LAGGING_PERIODS * period_vtime) &&
		    time_before64(vdone, now.vnow - period_vtime))
			nr_lagging++;

		/*
		 * Determine absolute usage factoring in in-flight IOs
		 * to avoid high-latency completions appearing as idle.
		 */
		usage_us = iocg->usage_delta_us;
		usage_us_sum += usage_us;

		/* see whether there's surplus vtime */
		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
		if (hw_inuse < hw_active ||
		    (!waitqueue_active(&iocg->waitq) &&
		     time_before64(vtime, now.vnow - ioc->margins.low))) {
			u32 hwa, old_hwi, hwm, new_hwi, usage;
			u64 usage_dur;

			if (vdone != vtime) {
				u64 inflight_us = DIV64_U64_ROUND_UP(
					cost_to_abs_cost(vtime - vdone, hw_inuse),
					ioc->vtime_base_rate);

				usage_us = max(usage_us, inflight_us);
			}

			/* convert to hweight based usage ratio */
			if (time_after64(iocg->activated_at, ioc->period_at))
				usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
			else
				usage_dur = max_t(u64, now.now - ioc->period_at, 1);

			usage = clamp_t(u32,
				DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
						   usage_dur),
				1, WEIGHT_ONE);

			/*
			 * Already donating or accumulated enough to start.
			 * Determine the donation amount.
			 */
			current_hweight(iocg, &hwa, &old_hwi);
			hwm = current_hweight_max(iocg);
			new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
							 usage, &now);
			if (new_hwi < hwm) {
				iocg->hweight_donating = hwa;
				iocg->hweight_after_donation = new_hwi;
				list_add(&iocg->surplus_list, &surpluses);
			} else {
				TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
						iocg->inuse, iocg->active,
						iocg->hweight_inuse, new_hwi);

				__propagate_weights(iocg, iocg->active,
						    iocg->active, true, &now);
				nr_shortages++;
			}
		} else {
			/* genuinely short on vtime */
			nr_shortages++;
		}
	}

	if (!list_empty(&surpluses) && nr_shortages)
		transfer_surpluses(&surpluses, &now);

	commit_weights(ioc);

	/* surplus list should be dissolved after use */
	list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
		list_del_init(&iocg->surplus_list);

	/*
	 * If q is getting clogged or we're missing too much, we're
	 * issuing too much IO and should lower vtime rate.  If we're
	 * not missing and experiencing shortages but not surpluses,
	 * we're too quiet and should increase vtime rate.
	 */
	prev_busy_level = ioc->busy_level;
	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
	    missed_ppm[READ] > ppm_rthr ||
	    missed_ppm[WRITE] > ppm_wthr) {
		/* clearly missing QoS targets, slow down vrate */
		ioc->busy_level = max(ioc->busy_level, 0);
		ioc->busy_level++;
	} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
		/* QoS targets are being met with >25% margin */
		if (nr_shortages) {
			/*
			 * We're throttling while the device has spare
			 * capacity.  If vrate was being slowed down,
			 * stop.
			 */
			ioc->busy_level = min(ioc->busy_level, 0);

			/*
			 * If there are IOs spanning multiple periods,
			 * wait them out before pushing the device
			 * harder.
			 */
			if (!nr_lagging)
				ioc->busy_level--;
		} else {
			/*
			 * Nobody is being throttled and the users
			 * aren't saturating the device.  Nothing to do.
			 */
			ioc->busy_level = 0;
		}
	} else {
		/* inside the hysterisis margin, we're good */
		ioc->busy_level = 0;
	}

	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);

	ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
			      prev_busy_level, missed_ppm);

	ioc_refresh_params(ioc, false);

	ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);

	/*
	 * This period is done.  Move onto the next one.  If nothing's
	 * going on with the device, stop the timer.
	 */
	atomic64_inc(&ioc->cur_period);

	if (ioc->running != IOC_STOP) {
		if (!list_empty(&ioc->active_iocgs)) {
			ioc_start_period(ioc, &now);
		} else {
			ioc->busy_level = 0;
			ioc->vtime_err = 0;
			ioc->running = IOC_IDLE;
		}

		ioc_refresh_vrate(ioc, &now);
	}

	spin_unlock_irq(&ioc->lock);
}

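/*
 * If the iocg is short on budget but its inuse is below active, there
 * may be donated weight to reclaim.  Bump inuse in INUSE_ADJ_STEP_PCT
 * steps, repricing the IO at each step, until the IO fits in the
 * budget or inuse reaches active.
 */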
static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
				      u64 abs_cost, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct ioc_margins *margins = &ioc->margins;
	u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
	u32 hwi, adj_step;
	s64 margin;
	u64 cost, new_inuse;

	current_hweight(iocg, NULL, &hwi);
	old_hwi = hwi;
	cost = abs_cost_to_cost(abs_cost, hwi);
	margin = now->vnow - vtime - cost;

	/* debt handling owns inuse for debtors */
	if (iocg->abs_vdebt)
		return cost;

	/*
	 * We only increase inuse during period and do so if the margin
	 * has deteriorated since the previous adjustment.
	 */
	if (margin >= iocg->saved_margin || margin >= margins->low ||
	    iocg->inuse == iocg->active)
		return cost;

	spin_lock_irq(&ioc->lock);

	/* we own inuse only when @iocg is in the normal active state */
	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
		spin_unlock_irq(&ioc->lock);
		return cost;
	}

	/* bump up inuse till @abs_cost fits in the existing budget */
	new_inuse = iocg->inuse;
	adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
	do {
		new_inuse = new_inuse + adj_step;
		propagate_weights(iocg, iocg->active, new_inuse, true, now);
		current_hweight(iocg, NULL, &hwi);
		cost = abs_cost_to_cost(abs_cost, hwi);
	} while (time_after64(vtime + cost, now->vnow) &&
		 iocg->inuse != iocg->active);

	spin_unlock_irq(&ioc->lock);

	TRACE_IOCG_PATH(inuse_adjust, iocg, now,
			old_inuse, iocg->inuse, old_hwi, hwi);

	return cost;
}

static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
				    bool is_merge, u64 *costp)
{
	struct ioc *ioc = iocg->ioc;
	u64 coef_seqio, coef_randio, coef_page;
	u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
	u64 seek_pages = 0;
	u64 cost = 0;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		coef_seqio	= ioc->params.lcoefs[LCOEF_RSEQIO];
		coef_randio	= ioc->params.lcoefs[LCOEF_RRANDIO];
		coef_page	= ioc->params.lcoefs[LCOEF_RPAGE];
		break;
	case REQ_OP_WRITE:
		coef_seqio	= ioc->params.lcoefs[LCOEF_WSEQIO];
		coef_randio	= ioc->params.lcoefs[LCOEF_WRANDIO];
		coef_page	= ioc->params.lcoefs[LCOEF_WPAGE];
		break;
	default:
		goto out;
	}

	if (iocg->cursor) {
		seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
		seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
	}

	if (!is_merge) {
		if (seek_pages > LCOEF_RANDIO_PAGES) {
			cost += coef_randio;
		} else {
			cost += coef_seqio;
		}
	}
	cost += pages * coef_page;
out:
	*costp = cost;
}

static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
{
	u64 cost;

	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
	return cost;
}

static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
					 u64 *costp)
{
	unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;

	switch (req_op(rq)) {
	case REQ_OP_READ:
		*costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
		break;
	case REQ_OP_WRITE:
		*costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
		break;
	default:
		*costp = 0;
	}
}

static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
{
	u64 cost;

	calc_size_vtime_cost_builtin(rq, ioc, &cost);
	return cost;
}

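/*
 * Throttle entry point: price the bio and try to charge it against the
 * current budget.  On shortage, either incur debt (root-level or
 * fatally-signaled issuers which can't be put to sleep) or go to sleep
 * on the iocg's waitq until iocg_wake_fn() commits the bio.
 */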
2550static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2551{
2552 struct blkcg_gq *blkg = bio->bi_blkg;
2553 struct ioc *ioc = rqos_to_ioc(rqos);
2554 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2555 struct ioc_now now;
2556 struct iocg_wait wait;
2557 u64 abs_cost, cost, vtime;
2558 bool use_debt, ioc_locked;
2559 unsigned long flags;
2560
2561
2562 if (!ioc->enabled || !iocg || !iocg->level)
2563 return;
2564
2565
2566 abs_cost = calc_vtime_cost(bio, iocg, false);
2567 if (!abs_cost)
2568 return;
2569
2570 if (!iocg_activate(iocg, &now))
2571 return;
2572
2573 iocg->cursor = bio_end_sector(bio);
2574 vtime = atomic64_read(&iocg->vtime);
2575 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2576
2577
2578
2579
2580
2581
2582 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2583 time_before_eq64(vtime + cost, now.vnow)) {
2584 iocg_commit_bio(iocg, bio, abs_cost, cost);
2585 return;
2586 }
2587
2588
2589
2590
2591
2592
2593
2594
2595 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2596 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2597retry_lock:
2598 iocg_lock(iocg, ioc_locked, &flags);
2599
2600
2601
2602
2603
2604
2605
2606
2607 if (unlikely(list_empty(&iocg->active_list))) {
2608 iocg_unlock(iocg, ioc_locked, &flags);
2609 iocg_commit_bio(iocg, bio, abs_cost, cost);
2610 return;
2611 }
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630 if (use_debt) {
2631 iocg_incur_debt(iocg, abs_cost, &now);
2632 if (iocg_kick_delay(iocg, &now))
2633 blkcg_schedule_throttle(rqos->q,
2634 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2635 iocg_unlock(iocg, ioc_locked, &flags);
2636 return;
2637 }
2638
2639
2640 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2641 if (!ioc_locked) {
2642 iocg_unlock(iocg, false, &flags);
2643 ioc_locked = true;
2644 goto retry_lock;
2645 }
2646 propagate_weights(iocg, iocg->active, iocg->active, true,
2647 &now);
2648 }
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2664 wait.wait.private = current;
2665 wait.bio = bio;
2666 wait.abs_cost = abs_cost;
2667 wait.committed = false;
2668
2669 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2670 iocg_kick_waitq(iocg, ioc_locked, &now);
2671
2672 iocg_unlock(iocg, ioc_locked, &flags);
2673
2674 while (true) {
2675 set_current_state(TASK_UNINTERRUPTIBLE);
2676 if (wait.committed)
2677 break;
2678 io_schedule();
2679 }
2680
2681
2682 finish_wait(&iocg->waitq, &wait.wait);
2683}
2684
static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
			   struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
	struct ioc *ioc = rqos_to_ioc(rqos);
	sector_t bio_end = bio_end_sector(bio);
	struct ioc_now now;
	u64 vtime, abs_cost, cost;
	unsigned long flags;

	/* bypass if disabled, still initializing, or for the root cgroup */
	if (!ioc->enabled || !iocg || !iocg->level)
		return;

	abs_cost = calc_vtime_cost(bio, iocg, true);
	if (!abs_cost)
		return;

	ioc_now(ioc, &now);

	vtime = atomic64_read(&iocg->vtime);
	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);

	/* update cursor if backmerging into the request at the cursor */
	if (blk_rq_pos(rq) < bio_end &&
	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
		iocg->cursor = bio_end;

	/*
	 * Charge if there's enough vtime budget and the existing request
	 * has cost assigned.
	 */
	if (rq->bio && rq->bio->bi_iocost_cost &&
	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
		iocg_commit_bio(iocg, bio, abs_cost, cost);
		return;
	}

	/*
	 * Otherwise, account it as debt if @iocg is online, which it
	 * should be for the vast majority of cases.  See debt handling
	 * in ioc_rqos_throttle() for details.
	 */
	spin_lock_irqsave(&ioc->lock, flags);
	spin_lock(&iocg->waitq.lock);

	if (likely(!list_empty(&iocg->active_list))) {
		iocg_incur_debt(iocg, abs_cost, &now);
		if (iocg_kick_delay(iocg, &now))
			blkcg_schedule_throttle(rqos->q,
					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
	} else {
		iocg_commit_bio(iocg, bio, abs_cost, cost);
	}

	spin_unlock(&iocg->waitq.lock);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

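/*
 * Bio completion: credit the cost that was charged to @bio back to
 * done_vtime so that charged and completed work can be compared.
 */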
static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);

	if (iocg && bio->bi_iocost_cost)
		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
}

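/*
 * Request completion: collect per-cpu latency statistics.  A request is
 * counted as having met its latency target when its on-queue time,
 * minus the size-proportional cost, is within the configured rlat/wlat.
 */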
static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
{
	struct ioc *ioc = rqos_to_ioc(rqos);
	struct ioc_pcpu_stat *ccs;
	u64 on_q_ns, rq_wait_ns, size_nsec;
	int pidx, rw;

	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
		return;

	switch (req_op(rq) & REQ_OP_MASK) {
	case REQ_OP_READ:
		pidx = QOS_RLAT;
		rw = READ;
		break;
	case REQ_OP_WRITE:
		pidx = QOS_WLAT;
		rw = WRITE;
		break;
	default:
		return;
	}

	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);

	ccs = get_cpu_ptr(ioc->pcpu_stat);

	if (on_q_ns <= size_nsec ||
	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
		local_inc(&ccs->missed[rw].nr_met);
	else
		local_inc(&ccs->missed[rw].nr_missed);

	local64_add(rq_wait_ns, &ccs->rq_wait_ns);

	put_cpu_ptr(ccs);
}

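/* queue depth changed - re-derive the parameters under ioc->lock */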
static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	spin_lock_irq(&ioc->lock);
	ioc_refresh_params(ioc, false);
	spin_unlock_irq(&ioc->lock);
}

static void ioc_rqos_exit(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);

	spin_lock_irq(&ioc->lock);
	ioc->running = IOC_STOP;
	spin_unlock_irq(&ioc->lock);

	del_timer_sync(&ioc->timer);
	free_percpu(ioc->pcpu_stat);
	kfree(ioc);
}

static struct rq_qos_ops ioc_rqos_ops = {
	.throttle = ioc_rqos_throttle,
	.merge = ioc_rqos_merge,
	.done_bio = ioc_rqos_done_bio,
	.done = ioc_rqos_done,
	.queue_depth_changed = ioc_rqos_queue_depth_changed,
	.exit = ioc_rqos_exit,
};

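/*
 * Allocate and attach an ioc to @q.  Called lazily from the
 * io.cost.qos and io.cost.model write paths when the device doesn't
 * have an ioc yet.
 */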
static int blk_iocost_init(struct request_queue *q)
{
	struct ioc *ioc;
	struct rq_qos *rqos;
	int i, cpu, ret;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
	if (!ioc->pcpu_stat) {
		kfree(ioc);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);

		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
			local_set(&ccs->missed[i].nr_met, 0);
			local_set(&ccs->missed[i].nr_missed, 0);
		}
		local64_set(&ccs->rq_wait_ns, 0);
	}

	rqos = &ioc->rqos;
	rqos->id = RQ_QOS_COST;
	rqos->ops = &ioc_rqos_ops;
	rqos->q = q;

	spin_lock_init(&ioc->lock);
	timer_setup(&ioc->timer, ioc_timer_fn, 0);
	INIT_LIST_HEAD(&ioc->active_iocgs);

	ioc->running = IOC_IDLE;
	ioc->vtime_base_rate = VTIME_PER_USEC;
	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
	ioc->period_at = ktime_to_us(ktime_get());
	atomic64_set(&ioc->cur_period, 0);
	atomic_set(&ioc->hweight_gen, 0);

	spin_lock_irq(&ioc->lock);
	ioc->autop_idx = AUTOP_INVALID;
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	/*
	 * rqos must be added before activation to allow ioc_pd_init() to
	 * look up the ioc from q.  This means that the rqos methods may
	 * get called before policy activation completes.  Work around it
	 * by skipping bios which don't have an iocg.
	 */
	rq_qos_add(q, rqos);
	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
	if (ret) {
		rq_qos_del(q, rqos);
		free_percpu(ioc->pcpu_stat);
		kfree(ioc);
		return ret;
	}
	return 0;
}

static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
{
	struct ioc_cgrp *iocc;

	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
	if (!iocc)
		return NULL;

	iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
	return &iocc->cpd;
}

static void ioc_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(container_of(cpd, struct ioc_cgrp, cpd));
}

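/*
 * Allocate the per-cgroup-per-device state.  @ancestors is sized to
 * hold every iocg from the root (level 0) down to this cgroup so that
 * hweight propagation can walk the hierarchy without chasing pointers.
 */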
static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	int levels = blkcg->css.cgroup->level + 1;
	struct ioc_gq *iocg;

	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
	if (!iocg)
		return NULL;

	iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
	if (!iocg->pcpu_stat) {
		kfree(iocg);
		return NULL;
	}

	return &iocg->pd;
}

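/*
 * Initialize a freshly allocated iocg: start its vtime clocks at the
 * current vnow, populate the ancestor table and propagate the initial
 * weight.
 */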
static void ioc_pd_init(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
	struct ioc *ioc = q_to_ioc(blkg->q);
	struct ioc_now now;
	struct blkcg_gq *tblkg;
	unsigned long flags;

	ioc_now(ioc, &now);

	iocg->ioc = ioc;
	atomic64_set(&iocg->vtime, now.vnow);
	atomic64_set(&iocg->done_vtime, now.vnow);
	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
	INIT_LIST_HEAD(&iocg->active_list);
	INIT_LIST_HEAD(&iocg->walk_list);
	INIT_LIST_HEAD(&iocg->surplus_list);
	iocg->hweight_active = WEIGHT_ONE;
	iocg->hweight_inuse = WEIGHT_ONE;

	init_waitqueue_head(&iocg->waitq);
	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	iocg->waitq_timer.function = iocg_waitq_timer_fn;

	iocg->level = blkg->blkcg->css.cgroup->level;

	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);

		iocg->ancestors[tiocg->level] = tiocg;
	}

	spin_lock_irqsave(&ioc->lock, flags);
	weight_updated(iocg, &now);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

static void ioc_pd_free(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	unsigned long flags;

	if (ioc) {
		spin_lock_irqsave(&ioc->lock, flags);

		if (!list_empty(&iocg->active_list)) {
			struct ioc_now now;

			ioc_now(ioc, &now);
			propagate_weights(iocg, 0, 0, false, &now);
			list_del_init(&iocg->active_list);
		}

		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));

		spin_unlock_irqrestore(&ioc->lock, flags);

		hrtimer_cancel(&iocg->waitq_timer);
	}
	free_percpu(iocg->pcpu_stat);
	kfree(iocg);
}

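/*
 * Emit this iocg's cost.* keys for io.stat.  The device vrate is
 * reported on the root cgroup only; debug keys require
 * blkcg_debug_stats.
 */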
static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	size_t pos = 0;

	if (!ioc->enabled)
		return 0;

	if (iocg->level == 0) {
		unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
			ioc->vtime_base_rate * 10000,
			VTIME_PER_USEC);

		pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
				 vp10k / 100, vp10k % 100);
	}

	pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
			 iocg->last_stat.usage_us);

	if (blkcg_debug_stats)
		pos += scnprintf(buf + pos, size - pos,
				 " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
				 iocg->last_stat.wait_us,
				 iocg->last_stat.indebt_us,
				 iocg->last_stat.indelay_us);

	return pos;
}

static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			     int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc_gq *iocg = pd_to_iocg(pd);

	if (dname && iocg->cfg_weight)
		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
	return 0;
}

static int ioc_weight_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);

	seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

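/*
 * Parse writes to io.weight.  Two forms are accepted, mirroring what
 * ioc_weight_show() prints:
 *
 *   "default $WEIGHT" or "$WEIGHT"	- set the cgroup's default weight
 *   "$MAJ:$MIN $WEIGHT"		- set a per-device override
 *
 * e.g. `echo "default 100" > io.weight` or `echo "8:16 200" > io.weight`.
 * Weights are scaled by WEIGHT_ONE internally for fixed-point math.
 */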
static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
	struct blkg_conf_ctx ctx;
	struct ioc_now now;
	struct ioc_gq *iocg;
	u32 v;
	int ret;

	if (!strchr(buf, ':')) {
		struct blkcg_gq *blkg;

		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
			return -EINVAL;

		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			return -EINVAL;

		spin_lock_irq(&blkcg->lock);
		iocc->dfl_weight = v * WEIGHT_ONE;
		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
			struct ioc_gq *iocg = blkg_to_iocg(blkg);

			if (iocg) {
				spin_lock(&iocg->ioc->lock);
				ioc_now(iocg->ioc, &now);
				weight_updated(iocg, &now);
				spin_unlock(&iocg->ioc->lock);
			}
		}
		spin_unlock_irq(&blkcg->lock);

		return nbytes;
	}

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
	if (ret)
		return ret;

	iocg = blkg_to_iocg(ctx.blkg);

	if (!strncmp(ctx.body, "default", 7)) {
		v = 0;
	} else {
		if (!sscanf(ctx.body, "%u", &v))
			goto einval;
		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			goto einval;
	}

	spin_lock(&iocg->ioc->lock);
	iocg->cfg_weight = v * WEIGHT_ONE;
	ioc_now(iocg->ioc, &now);
	weight_updated(iocg, &now);
	spin_unlock(&iocg->ioc->lock);

	blkg_conf_finish(&ctx);
	return nbytes;

einval:
	blkg_conf_finish(&ctx);
	return -EINVAL;
}

static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;

	if (!dname)
		return 0;

	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
		   ioc->params.qos[QOS_RPPM] / 10000,
		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
		   ioc->params.qos[QOS_RLAT],
		   ioc->params.qos[QOS_WPPM] / 10000,
		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
		   ioc->params.qos[QOS_WLAT],
		   ioc->params.qos[QOS_MIN] / 10000,
		   ioc->params.qos[QOS_MIN] % 10000 / 100,
		   ioc->params.qos[QOS_MAX] / 10000,
		   ioc->params.qos[QOS_MAX] % 10000 / 100);
	return 0;
}

static int ioc_qos_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t qos_ctrl_tokens = {
	{ QOS_ENABLE,		"enable=%u"	},
	{ QOS_CTRL,		"ctrl=%s"	},
	{ NR_QOS_CTRL_PARAMS,	NULL		},
};

static const match_table_t qos_tokens = {
	{ QOS_RPPM,		"rpct=%s"	},
	{ QOS_RLAT,		"rlat=%u"	},
	{ QOS_WPPM,		"wpct=%s"	},
	{ QOS_WLAT,		"wlat=%u"	},
	{ QOS_MIN,		"min=%s"	},
	{ QOS_MAX,		"max=%s"	},
	{ NR_QOS_PARAMS,	NULL		},
};

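/*
 * Parse writes to io.cost.qos.  The accepted line matches what
 * ioc_qos_prfill() emits, e.g. (illustrative values):
 *
 *   echo "8:16 enable=1 ctrl=user rpct=95.00 rlat=5000 wpct=95.00 \
 *         wlat=5000 min=50.00 max=150.00" > io.cost.qos
 *
 * rpct/wpct are latency percentiles parsed as two-decimal fixed point;
 * min/max are vrate bounds in percent, clamped to [VRATE_MIN_PPM,
 * VRATE_MAX_PPM].  Writing any QoS parameter switches ctrl to "user".
 */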
static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
			     size_t nbytes, loff_t off)
{
	struct block_device *bdev;
	struct ioc *ioc;
	u32 qos[NR_QOS_PARAMS];
	bool enable, user;
	char *p;
	int ret;

	bdev = blkcg_conf_open_bdev(&input);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	ioc = q_to_ioc(bdev->bd_disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(bdev->bd_disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(bdev->bd_disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(qos, ioc->params.qos, sizeof(qos));
	enable = ioc->enabled;
	user = ioc->user_qos_params;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		s64 v;

		if (!*p)
			continue;

		switch (match_token(p, qos_ctrl_tokens, args)) {
		case QOS_ENABLE:
			match_u64(&args[0], &v);
			enable = v;
			continue;
		case QOS_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		}

		tok = match_token(p, qos_tokens, args);
		switch (tok) {
		case QOS_RPPM:
		case QOS_WPPM:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0 || v > 10000)
				goto einval;
			qos[tok] = v * 100;
			break;
		case QOS_RLAT:
		case QOS_WLAT:
			if (match_u64(&args[0], &v))
				goto einval;
			qos[tok] = v;
			break;
		case QOS_MIN:
		case QOS_MAX:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0)
				goto einval;
			qos[tok] = clamp_t(s64, v * 100,
					   VRATE_MIN_PPM, VRATE_MAX_PPM);
			break;
		default:
			goto einval;
		}
		user = true;
	}

	if (qos[QOS_MIN] > qos[QOS_MAX])
		goto einval;

	spin_lock_irq(&ioc->lock);

	if (enable) {
		blk_stat_enable_accounting(ioc->rqos.q);
		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = true;
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = false;
	}

	if (user) {
		memcpy(ioc->params.qos, qos, sizeof(qos));
		ioc->user_qos_params = true;
	} else {
		ioc->user_qos_params = false;
	}

	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	blkdev_put_no_open(bdev);
	return nbytes;
einval:
	ret = -EINVAL;
err:
	blkdev_put_no_open(bdev);
	return ret;
}

static u64 ioc_cost_model_prfill(struct seq_file *sf,
				 struct blkg_policy_data *pd, int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;
	u64 *u = ioc->params.i_lcoefs;

	if (!dname)
		return 0;

	seq_printf(sf, "%s ctrl=%s model=linear "
		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
		   dname, ioc->user_cost_model ? "user" : "auto",
		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
	return 0;
}

static int ioc_cost_model_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t cost_ctrl_tokens = {
	{ COST_CTRL,		"ctrl=%s"	},
	{ COST_MODEL,		"model=%s"	},
	{ NR_COST_CTRL_PARAMS,	NULL		},
};

static const match_table_t i_lcoef_tokens = {
	{ I_LCOEF_RBPS,		"rbps=%u"	},
	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
	{ I_LCOEF_WBPS,		"wbps=%u"	},
	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
	{ NR_I_LCOEFS,		NULL		},
};

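/*
 * Parse writes to io.cost.model.  Only the linear model is supported;
 * the six coefficients mirror ioc_cost_model_prfill()'s output, e.g.
 * (illustrative values):
 *
 *   echo "8:16 ctrl=user model=linear rbps=2706339840 rseqiops=89698 \
 *         rrandiops=110036 wbps=1063126016 wseqiops=135560 \
 *         wrandiops=130734" > io.cost.model
 *
 * Setting any coefficient switches the cost model ctrl to "user".
 */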
static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
				    size_t nbytes, loff_t off)
{
	struct block_device *bdev;
	struct ioc *ioc;
	u64 u[NR_I_LCOEFS];
	bool user;
	char *p;
	int ret;

	bdev = blkcg_conf_open_bdev(&input);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	ioc = q_to_ioc(bdev->bd_disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(bdev->bd_disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(bdev->bd_disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
	user = ioc->user_cost_model;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		u64 v;

		if (!*p)
			continue;

		switch (match_token(p, cost_ctrl_tokens, args)) {
		case COST_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		case COST_MODEL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (strcmp(buf, "linear"))
				goto einval;
			continue;
		}

		tok = match_token(p, i_lcoef_tokens, args);
		if (tok == NR_I_LCOEFS)
			goto einval;
		if (match_u64(&args[0], &v))
			goto einval;
		u[tok] = v;
		user = true;
	}

	spin_lock_irq(&ioc->lock);
	if (user) {
		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
		ioc->user_cost_model = true;
	} else {
		ioc->user_cost_model = false;
	}
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	blkdev_put_no_open(bdev);
	return nbytes;

einval:
	ret = -EINVAL;
err:
	blkdev_put_no_open(bdev);
	return ret;
}

static struct cftype ioc_files[] = {
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = ioc_weight_show,
		.write = ioc_weight_write,
	},
	{
		.name = "cost.qos",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_qos_show,
		.write = ioc_qos_write,
	},
	{
		.name = "cost.model",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_cost_model_show,
		.write = ioc_cost_model_write,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iocost = {
	.dfl_cftypes	= ioc_files,
	.cpd_alloc_fn	= ioc_cpd_alloc,
	.cpd_free_fn	= ioc_cpd_free,
	.pd_alloc_fn	= ioc_pd_alloc,
	.pd_init_fn	= ioc_pd_init,
	.pd_free_fn	= ioc_pd_free,
	.pd_stat_fn	= ioc_pd_stat,
};

static int __init ioc_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iocost);
}

static void __exit ioc_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iocost);
}

module_init(ioc_init);
module_exit(ioc_exit);