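/*
 * Common Block IO controller cgroup interface.
 */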
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}
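
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */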
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}
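
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */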
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* invoke per-policy init */
		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that we
	 * may not be holding queue_lock and thus are not sure whether
	 * @blkg from blkg_tree has already been removed or not, so we
	 * can't update hint to the lookup result.  Leave it to the caller.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q)
		return blkg;

	return NULL;
}
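
/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */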
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
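
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */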
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q,
					     struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* lookup and update hint on success, see __blkg_lookup() for details */
	blkg = __blkg_lookup(blkcg, q);
	if (blkg) {
		rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto out_free;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		blkg = ERR_PTR(-EINVAL);
		goto out_free;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			blkg = ERR_PTR(-ENOMEM);
			goto out_put;
		}
	}
	blkg = new_blkg;

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);
	}
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	blkg = ERR_PTR(ret);
out_put:
	css_put(&blkcg->css);
out_free:
	blkg_free(new_blkg);
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}
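
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */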
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	/*
	 * root blkg is destroyed.  Just clear the pointer since root_rl
	 * does not take reference on root blkg.
	 */
	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in RCU manner.  But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid.  For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
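
/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */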
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
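
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */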
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
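
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */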
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
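
/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */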
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
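
/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */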
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);
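
/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */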
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
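
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */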
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid looking too closely at race conditions.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
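
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */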
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};
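
/**
 * blkcg_css_offline - cgroup css_offline callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 */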
static void blkcg_css_offline(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
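
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */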
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}
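
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */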
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}
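
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */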
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}
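
/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */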
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name		= "blkio",
	.css_alloc	= blkcg_css_alloc,
	.css_offline	= blkcg_css_offline,
	.css_free	= blkcg_css_free,
	.can_attach	= blkcg_can_attach,
	.subsys_id	= blkio_subsys_id,
	.base_cftypes	= blkcg_files,
	.module		= THIS_MODULE,

	/*
	 * blkio subsystem is utterly broken in terms of hierarchy support.
	 * It treats all cgroups equally regardless of where they're
	 * located in the hierarchy - all cgroups are treated as if they're
	 * right below the root.  Fix it and remove the flag.
	 */
	.broken_hierarchy	= true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
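
/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * while they're being allocated and installed.  This function also
 * guarantees that the root blkg exists and associates it with @q->root_rl
 * before returning.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */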
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
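
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */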
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
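
/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Returns 0 on success and -errno on
 * failure.
 */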
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
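
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */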
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);