/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <trace/events/writeback.h>

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Try to keep balance_dirty_pages() call intervals higher than this many
 * pages by raising pause time to max_pause when falls below it.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in
 * jiffies: a write to any device will reset it.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

unsigned long global_dirty_limit;

/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a
 * smaller share.
 *
 * We use page writeout completions because we are interested in getting rid
 * of dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these
 * events, because demand can/will vary over time. The length of this period
 * itself is measured in page writeback completions.
 */
static struct fprop_global writeout_completions;

static void writeout_period(unsigned long t);
/* Timer for aging of writeout_completions */
static struct timer_list writeout_period_timer =
		TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0);
static unsigned long writeout_period_time = 0;

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions
 * will reflect changes in current behaviour (in case of a BDI going idle
 * for example).
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

/*
 * Work out how much memory is "dirtyable", ie. can safely back dirty page
 * cache pages without endangering page reclaim.
 *
 * That amount is essentially the number of free plus reclaimable pages,
 * minus the dirty_balance_reserve pages that are kept back so the zone
 * watermarks can be met without requiring writeback.  The user-visible
 * vm.dirty_ratio / vm.dirty_background_ratio knobs are then applied on top
 * of this base value, either globally or per zone.
 *
 * Highmem is special: its free pages are only counted as globally dirtyable
 * when vm_highmem_is_dirtyable is set, so that the amount of dirty memory
 * stays reasonable relative to the reliably usable (lowmem) memory.
 */
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES) +
		     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
	}
	/*
	 * Unreclaimable memory (kernel memory or anonymous memory without
	 * swap) can bring down the dirtyable pages below the zone's dirty
	 * balance reserve and the above calculation will underflow.  We
	 * still want to add in nodes which are below threshold (negative
	 * values), but make sure the total never underflows.
	 */
	if ((long)x < 0)
		x = 0;

	/*
	 * Make sure that the number of highmem pages is never larger than
	 * the number of the total dirtyable memory.  This can only occur in
	 * very strange VM situations but we want to make sure that this
	 * does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Returns the global number of pages potentially available for dirty
 * page cache.  This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
	x -= min(x, dirty_balance_reserve);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 * - vm.dirty_ratio             or  vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long uninitialized_var(available_memory);
	struct task_struct *tsk;

	if (!vm_dirty_bytes || !dirty_background_bytes)
		available_memory = global_dirtyable_memory();

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		dirty = (vm_dirty_ratio * available_memory) / 100;

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
	trace_global_dirty_state(background, dirty);
}
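
/*
 * Illustrative example (not part of the original source): with the default
 * vm.dirty_ratio = 20 and vm.dirty_background_ratio = 10, a machine whose
 * global_dirtyable_memory() comes to 1,000,000 pages (~3.8 GB with 4 KB
 * pages) would get
 *
 *	dirty      = 20 * 1000000 / 100 = 200000 pages  (~780 MB)
 *	background = 10 * 1000000 / 100 = 100000 pages  (~390 MB)
 *
 * and a PF_LESS_THROTTLE or real-time task would see both limits lifted by
 * 1/4, ie. 250000 and 125000 pages.  The memory size is made up; only the
 * formulas come from global_dirty_limits() above.
 */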

/**
 * zone_dirtyable_memory - number of dirtyable pages in a zone
 * @zone: the zone
 *
 * Returns the zone's number of pages potentially available for dirty
 * page cache.  This is the base value for the per-zone dirty limits.
 */
static unsigned long zone_dirtyable_memory(struct zone *zone)
{
	/*
	 * The effective global number of dirtyable pages may exclude
	 * highmem as a big-picture measure to keep the ratio between
	 * dirty memory and lowmem reasonable.
	 *
	 * But this function is purely about the individual zone and a
	 * highmem zone can hold its share of dirty memory, so we don't
	 * care about vm_highmem_is_dirtyable here.
	 */
	unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
		zone_reclaimable_pages(zone);

	/* don't allow this to underflow */
	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
	return nr_pages;
}

/**
 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
 * @zone: the zone
 *
 * Returns the maximum number of dirty pages allowed in a zone, based
 * on the zone's dirtyable memory.
 */
static unsigned long zone_dirty_limit(struct zone *zone)
{
	unsigned long zone_memory = zone_dirtyable_memory(zone);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			zone_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * zone_memory / 100;

	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
		dirty += dirty / 4;

	return dirty;
}

/**
 * zone_dirty_ok - tells whether a zone is within its dirty limits
 * @zone: the zone to check
 *
 * Returns %true when the dirty pages in @zone are within the zone's
 * dirty limit, %false if the limit is exceeded.
 */
bool zone_dirty_ok(struct zone *zone)
{
	unsigned long limit = zone_dirty_limit(zone);

	return zone_page_state(zone, NR_FILE_DIRTY) +
	       zone_page_state(zone, NR_UNSTABLE_NFS) +
	       zone_page_state(zone, NR_WRITEBACK) <= limit;
}
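
/*
 * Usage note (added for clarity, not in the original file): zone_dirty_ok()
 * is meant for the page allocator.  When a page cache page is allocated with
 * __GFP_WRITE, get_page_from_freelist() can skip zones that already exceed
 * their share of dirty pages, so dirty memory is spread across zones roughly
 * in proportion to each zone's dirtyable memory instead of piling up in the
 * lowest zone.
 */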

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}

static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has the special meaning "timer is not running", avoid it */
	if (!cur_time)
		return 1;
	return cur_time;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__inc_bdi_stat(bdi, BDI_WRITTEN);
	__fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
			       bdi->max_prop_frac);
	/* First event after period switching was turned off? */
	if (!unlikely(writeout_period_time)) {
		/*
		 * We can race with other __bdi_writeout_inc calls here but
		 * it does not cause any harm since the resulting time when
		 * the timer will fire and what is in writeout_period_time
		 * will be roughly the same.
		 */
		writeout_period_time = wp_next_time(jiffies);
		mod_timer(&writeout_period_timer, writeout_period_time);
	}
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	fprop_fraction_percpu(&writeout_completions, &bdi->completions,
				numerator, denominator);
}

/*
 * On idle system, we can be called long after we scheduled because we use
 * deferred timers so count with missed periods.
 */
static void writeout_period(unsigned long t)
{
	int miss_periods = (jiffies - writeout_period_time) /
						 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&writeout_completions, miss_periods + 1)) {
		writeout_period_time = wp_next_time(writeout_period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&writeout_period_timer, writeout_period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		writeout_period_time = 0;
	}
}
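
/*
 * Illustrative note (not part of the original file): the floating
 * proportions in lib/flex_proportions.c age by halving the accumulated
 * completion counts once per VM_COMPLETIONS_PERIOD_LEN (3s) period.  So if
 * a device stops completing writeback, its share of the global completions
 * roughly halves every 3 seconds, and once every BDI's fraction has decayed
 * to zero the period timer above is allowed to stop.  The precise decay is
 * implemented by fprop_new_period(); only the 3s period length is defined
 * in this file.
 */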

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all registered
 * backing devices, which, for obvious reasons, can not exceed 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

static unsigned long hard_dirty_limit(unsigned long thresh)
{
	return max(thresh, global_dirty_limit);
}

/**
 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
 * @bdi: the backing_dev_info to query
 * @dirty: global dirty limit in pages
 *
 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages
 * under control. For example, when the device is completely stalled due to
 * some error conditions, or when there are 1000 dd tasks writing to a slow
 * 10MB/s USB key.  In the other normal situations, it acts more gently by
 * throttling the tasks more (rather than completely blocking them) when the
 * bdi dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to
 * prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The bdi's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
{
	u64 bdi_dirty;
	long numerator, denominator;

	/*
	 * Calculate this BDI's share of the dirty ratio.
	 */
	bdi_writeout_fraction(bdi, &numerator, &denominator);

	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
	bdi_dirty *= numerator;
	do_div(bdi_dirty, denominator);

	bdi_dirty += (dirty * bdi->min_ratio) / 100;
	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
		bdi_dirty = dirty * bdi->max_ratio / 100;

	return bdi_dirty;
}
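
/*
 * Illustrative example (not part of the original file): with a global dirty
 * limit of 200000 pages, no min/max ratios configured (bdi_min_ratio == 0,
 * bdi->min_ratio == 0, bdi->max_ratio == 100) and a device that recently did
 * 3/4 of all writeout completions (numerator/denominator == 3/4),
 * bdi_dirty_limit() returns
 *
 *	200000 * (100 - 0) / 100 * 3 / 4 = 150000 pages
 *
 * while a device responsible for the remaining 1/4 of completions would get
 * 50000 pages.  The proportions are made up for the example; only the
 * formula comes from the function above.
 */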

/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages to be balanced around the global/bdi setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
 *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 * The global control line is a 3rd order polynomial in the number of dirty
 * pages: pos_ratio is 2.0 at the freerun ceiling, crosses 1.0 at the global
 * setpoint half way between freerun and the hard dirty limit, and drops to
 * 0 at the limit.  This gives a gentle slope around the setpoint and sharp
 * corrections near the two ends.
 *
 * (o) bdi control line
 *
 * The bdi control line is a straight line through the bdi setpoint whose
 * span depends on both the bdi's dirty threshold and its write bandwidth,
 * so that fast devices are allowed to hold more dirty pages while idle or
 * stalled devices are throttled quickly.  The global and bdi factors are
 * combined by multiplication, and a bdi reserve area (below bdi_thresh/2)
 * boosts pos_ratio so the device is kept busy even when the global pool is
 * nearly exhausted.
 */
static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
					unsigned long thresh,
					unsigned long bg_thresh,
					unsigned long dirty,
					unsigned long bdi_thresh,
					unsigned long bdi_dirty)
{
	unsigned long write_bw = bdi->avg_write_bandwidth;
	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
	unsigned long limit = hard_dirty_limit(thresh);
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long bdi_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	if (unlikely(dirty >= limit))
		return 0;

	/*
	 * global setpoint
	 *
	 *                           setpoint - dirty 3
	 *        f(dirty) := 1.0 + (----------------)
	 *                           limit - setpoint
	 *
	 * it's a 3rd order polynomial that subjects to
	 *
	 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
	 * (2) f(setpoint) = 1.0 => the dirty control line at the setpoint
	 * (3) f(limit)    = 0   => the hard limit
	 */
	setpoint = (freerun + limit) / 2;
	x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
		    limit - setpoint + 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	/*
	 * We have computed the basic pos_ratio above based on the global
	 * situation. If the bdi is over/under its share of dirty pages, we
	 * want to scale pos_ratio further down/up along a linear bdi control
	 * line through the bdi setpoint.
	 */
	if (unlikely(bdi_thresh > thresh))
		bdi_thresh = thresh;
	/*
	 * It's very possible that bdi_thresh is close to 0 not because the
	 * device is slow, but that it has remained inactive for long time.
	 * Honour such devices a reasonable good (hopefully IO efficient)
	 * threshold, so that the occasional writes won't be blocked and
	 * active writes can rampup the threshold quickly.
	 */
	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
	/*
	 * scale the global setpoint to the bdi's:
	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
	 */
	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
	bdi_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in the single bdi case as indicated by
	 * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in the JBOD
	 * case:
	 *
	 *        bdi_thresh                    thresh - bdi_thresh
	 * span = --------- * (8 * write_bw) + ------------------- * bdi_thresh
	 *          thresh                            thresh
	 */
	span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = bdi_setpoint + span;

	if (bdi_dirty < x_intercept - span / 4) {
		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
				    x_intercept - bdi_setpoint + 1);
	} else
		pos_ratio /= 4;

	/*
	 * bdi reserve area, safeguard against dirty pool underrun and disk
	 * idle. It may push the desired control point of global dirty pages
	 * higher than setpoint.
	 */
	x_intercept = bdi_thresh / 2;
	if (bdi_dirty < x_intercept) {
		if (bdi_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
		else
			pos_ratio *= 8;
	}

	return pos_ratio;
}
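
/*
 * Illustrative example (not part of the original file): take freerun =
 * 150000 pages and limit = 200000, so setpoint = 175000.  With dirty =
 * 162500 the global term is
 *
 *	x = (175000 - 162500) / (200000 - 175000) = 0.5
 *	pos_ratio = 1.0 + 0.5^3 = 1.125
 *
 * ie. tasks may dirty 12.5% faster than the base dirty_ratelimit, pulling
 * the dirty count back up towards the setpoint.  At dirty = 187500 the same
 * formula gives 1.0 - 0.5^3 = 0.875, slowing the dirtiers down.  In the code
 * these values are fixed point, scaled by 1 << RATELIMIT_CALC_SHIFT.  All
 * page counts are made up for the example.
 */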

static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
				       unsigned long elapsed,
				       unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = bdi->avg_write_bandwidth;
	unsigned long old = bdi->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 */
	bw = written - bdi->written_stamp;
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		do_div(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)bdi->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	bdi->write_bandwidth = bw;
	bdi->avg_write_bandwidth = avg;
}
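
/*
 * Illustrative example (not part of the original file): with HZ = 1000 the
 * smoothing period is roundup_pow_of_two(3000) = 4096 jiffies.  Suppose the
 * stored write_bandwidth corresponds to 100 MB/s and a 200ms sample
 * (elapsed = 200) measures 150 MB/s.  Then
 *
 *	write_bandwidth = (150 * 200 + 100 * (4096 - 200)) / 4096 ~= 102.4
 *
 * so a single fast sample only nudges the estimate, and avg_write_bandwidth
 * additionally moves by at most 1/8 of the difference.  The MB/s figures are
 * invented; the formula is the one in the comment above.
 */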

/*
 * The global dirtyable memory and dirty threshold could be suddenly knocked
 * down by a large amount (eg. on the startup of KVM in a swapless system).
 * This may throw the system into deep dirty exceeded state and throttle
 * heavy/light dirtiers alike. To retain good responsiveness, maintain
 * global_dirty_limit for tracking slowly down to the knocked down dirty
 * threshold.
 */
static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
{
	unsigned long limit = global_dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because
	 * thresh may drop below dirty. This is exactly the reason to
	 * introduce global_dirty_limit which is guaranteed to lie above the
	 * dirty pages.
	 */
	thresh = max(thresh, dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	global_dirty_limit = limit;
}

static void global_update_bandwidth(unsigned long thresh,
				    unsigned long dirty,
				    unsigned long now)
{
	static DEFINE_SPINLOCK(dirty_lock);
	static unsigned long update_time;

	/*
	 * check locklessly first to optimize away locking for most of the time
	 */
	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dirty_lock);
	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(thresh, dirty);
		update_time = now;
	}
	spin_unlock(&dirty_lock);
}

/*
 * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal bdi tasks will be curbed at or below it in long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
				       unsigned long thresh,
				       unsigned long bg_thresh,
				       unsigned long dirty,
				       unsigned long bdi_thresh,
				       unsigned long bdi_dirty,
				       unsigned long dirtied,
				       unsigned long elapsed)
{
	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
	unsigned long limit = hard_dirty_limit(thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = bdi->avg_write_bandwidth;
	unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long pos_ratio;
	unsigned long step;
	unsigned long x;

	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;

	pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
				       bdi_thresh, bdi_dirty);
	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the
	 * bdi's dirty_rate will be measured as N * task_ratelimit. So the
	 * balanced rate that would exactly match the write bandwidth is
	 *
	 *	balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could simply take dirty_ratelimit = balanced_dirty_ratelimit and
	 * be done, but both the estimated dirty_rate and write_bw fluctuate.
	 * So instead track balanced_dirty_ratelimit in small steps, and only
	 * step when task_ratelimit agrees with the direction of the move.
	 * This filters out singular points and keeps dirty_ratelimit much
	 * more stable.
	 */
	step = 0;
	if (dirty < setpoint) {
		x = min(bdi->balanced_dirty_ratelimit,
			 min(balanced_dirty_ratelimit, task_ratelimit));
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max(bdi->balanced_dirty_ratelimit,
			 max(balanced_dirty_ratelimit, task_ratelimit));
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	step >>= dirty_ratelimit / (2 * step + 1);
	/*
	 * Limit the tracking speed to avoid overshooting.
	 */
	step = (step + 7) / 8;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
}
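
/*
 * Illustrative example (not part of the original file): suppose a disk
 * writes out 20000 pages/s (write_bw), four tasks are currently throttled at
 * task_ratelimit = 10000 pages/s each, and the measured dirty_rate over the
 * last interval is therefore about 40000 pages/s.  Then
 *
 *	balanced_dirty_ratelimit = 10000 * 20000 / 40000 = 5000 pages/s
 *
 * which is write_bw / 4, ie. each of the four tasks ends up being allowed to
 * dirty a quarter of what the disk can clean.  All numbers are invented;
 * only the formula is taken from bdi_update_dirty_ratelimit() above.
 */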

void __bdi_update_bandwidth(struct backing_dev_info *bdi,
			    unsigned long thresh,
			    unsigned long bg_thresh,
			    unsigned long dirty,
			    unsigned long bdi_thresh,
			    unsigned long bdi_dirty,
			    unsigned long start_time)
{
	unsigned long now = jiffies;
	unsigned long elapsed = now - bdi->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized.
	 * (at least 1s idle time between two flusher runs)
	 */
	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
		goto snapshot;

	if (thresh) {
		global_update_bandwidth(thresh, dirty, now);
		bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
					   bdi_thresh, bdi_dirty,
					   dirtied, elapsed);
	}
	bdi_update_write_bandwidth(bdi, elapsed, written);

snapshot:
	bdi->dirtied_stamp = dirtied;
	bdi->written_stamp = written;
	bdi->bw_time_stamp = now;
}

static void bdi_update_bandwidth(struct backing_dev_info *bdi,
				 unsigned long thresh,
				 unsigned long bg_thresh,
				 unsigned long dirty,
				 unsigned long bdi_thresh,
				 unsigned long bdi_dirty,
				 unsigned long start_time)
{
	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
		return;
	spin_lock(&bdi->wb.list_lock);
	__bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
			       bdi_thresh, bdi_dirty, start_time);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the
 * expensive global_page_state() too often. So scale it near-sqrt to the
 * safety margin (the number of pages we may dirty without exceeding the
 * dirty limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}

static long bdi_max_pause(struct backing_dev_info *bdi,
			  unsigned long bdi_dirty)
{
	long bw = bdi->avg_write_bandwidth;
	long t;

	/*
	 * Limit pause time for small memory systems. If sleeping for too long
	 * time, a small pool of dirty/writeback pages may go empty and the
	 * disk go idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

	return min_t(long, t, MAX_PAUSE);
}
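
/*
 * Illustrative example (not part of the original file): with HZ = 1000 the
 * divisor roundup_pow_of_two(1 + HZ / 8) is 128, so a device averaging
 * 25600 pages/s can clean about 200 pages in one 1/128s slice.  If only
 * 1000 dirty+writeback pages are outstanding on that bdi, bdi_max_pause()
 * returns 1000 / (1 + 200) + 1 = 5 jiffies, well below the global MAX_PAUSE
 * of HZ/5, so the pool cannot drain dry while the task sleeps.  The
 * bandwidth and page counts are made up for the example.
 */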
1121
1122static long bdi_min_pause(struct backing_dev_info *bdi,
1123 long max_pause,
1124 unsigned long task_ratelimit,
1125 unsigned long dirty_ratelimit,
1126 int *nr_dirtied_pause)
1127{
1128 long hi = ilog2(bdi->avg_write_bandwidth);
1129 long lo = ilog2(bdi->dirty_ratelimit);
1130 long t;
1131 long pause;
1132 int pages;
1133
1134
1135 t = max(1, HZ / 100);
1136
1137
1138
1139
1140
1141
1142
1143 if (hi > lo)
1144 t += (hi - lo) * (10 * HZ) / 1024;
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164 t = min(t, 1 + max_pause / 2);
1165 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175 if (pages < DIRTY_POLL_THRESH) {
1176 t = max_pause;
1177 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1178 if (pages > DIRTY_POLL_THRESH) {
1179 pages = DIRTY_POLL_THRESH;
1180 t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1181 }
1182 }
1183
1184 pause = HZ * pages / (task_ratelimit + 1);
1185 if (pause > max_pause) {
1186 t = max_pause;
1187 pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1188 }
1189
1190 *nr_dirtied_pause = pages;
1191
1192
1193
1194 return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1195}
1196
1197
1198
1199
1200
1201
1202
1203
1204static void balance_dirty_pages(struct address_space *mapping,
1205 unsigned long pages_dirtied)
1206{
1207 unsigned long nr_reclaimable;
1208 unsigned long bdi_reclaimable;
1209 unsigned long nr_dirty;
1210 unsigned long bdi_dirty;
1211 unsigned long freerun;
1212 unsigned long background_thresh;
1213 unsigned long dirty_thresh;
1214 unsigned long bdi_thresh;
1215 long period;
1216 long pause;
1217 long max_pause;
1218 long min_pause;
1219 int nr_dirtied_pause;
1220 bool dirty_exceeded = false;
1221 unsigned long task_ratelimit;
1222 unsigned long dirty_ratelimit;
1223 unsigned long pos_ratio;
1224 struct backing_dev_info *bdi = mapping->backing_dev_info;
1225 unsigned long start_time = jiffies;
1226
1227 for (;;) {
1228 unsigned long now = jiffies;
1229
1230
1231
1232
1233
1234
1235
1236 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
1237 global_page_state(NR_UNSTABLE_NFS);
1238 nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
1239
1240 global_dirty_limits(&background_thresh, &dirty_thresh);
1241
1242
1243
1244
1245
1246
1247 freerun = dirty_freerun_ceiling(dirty_thresh,
1248 background_thresh);
1249 if (nr_dirty <= freerun) {
1250 current->dirty_paused_when = now;
1251 current->nr_dirtied = 0;
1252 current->nr_dirtied_pause =
1253 dirty_poll_interval(nr_dirty, dirty_thresh);
1254 break;
1255 }
1256
1257 if (unlikely(!writeback_in_progress(bdi)))
1258 bdi_start_background_writeback(bdi);
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273 bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285 if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
1286 bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
1287 bdi_dirty = bdi_reclaimable +
1288 bdi_stat_sum(bdi, BDI_WRITEBACK);
1289 } else {
1290 bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
1291 bdi_dirty = bdi_reclaimable +
1292 bdi_stat(bdi, BDI_WRITEBACK);
1293 }
1294
1295 dirty_exceeded = (bdi_dirty > bdi_thresh) &&
1296 (nr_dirty > dirty_thresh);
1297 if (dirty_exceeded && !bdi->dirty_exceeded)
1298 bdi->dirty_exceeded = 1;
1299
1300 bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
1301 nr_dirty, bdi_thresh, bdi_dirty,
1302 start_time);
1303
1304 dirty_ratelimit = bdi->dirty_ratelimit;
1305 pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
1306 background_thresh, nr_dirty,
1307 bdi_thresh, bdi_dirty);
1308 task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
1309 RATELIMIT_CALC_SHIFT;
1310 max_pause = bdi_max_pause(bdi, bdi_dirty);
1311 min_pause = bdi_min_pause(bdi, max_pause,
1312 task_ratelimit, dirty_ratelimit,
1313 &nr_dirtied_pause);
1314
1315 if (unlikely(task_ratelimit == 0)) {
1316 period = max_pause;
1317 pause = max_pause;
1318 goto pause;
1319 }
1320 period = HZ * pages_dirtied / task_ratelimit;
1321 pause = period;
1322 if (current->dirty_paused_when)
1323 pause -= now - current->dirty_paused_when;
1324
1325
1326
1327
1328
1329
1330
1331 if (pause < min_pause) {
1332 trace_balance_dirty_pages(bdi,
1333 dirty_thresh,
1334 background_thresh,
1335 nr_dirty,
1336 bdi_thresh,
1337 bdi_dirty,
1338 dirty_ratelimit,
1339 task_ratelimit,
1340 pages_dirtied,
1341 period,
1342 min(pause, 0L),
1343 start_time);
1344 if (pause < -HZ) {
1345 current->dirty_paused_when = now;
1346 current->nr_dirtied = 0;
1347 } else if (period) {
1348 current->dirty_paused_when += period;
1349 current->nr_dirtied = 0;
1350 } else if (current->nr_dirtied_pause <= pages_dirtied)
1351 current->nr_dirtied_pause += pages_dirtied;
1352 break;
1353 }
1354 if (unlikely(pause > max_pause)) {
1355
1356 now += min(pause - max_pause, max_pause);
1357 pause = max_pause;
1358 }
1359
1360pause:
1361 trace_balance_dirty_pages(bdi,
1362 dirty_thresh,
1363 background_thresh,
1364 nr_dirty,
1365 bdi_thresh,
1366 bdi_dirty,
1367 dirty_ratelimit,
1368 task_ratelimit,
1369 pages_dirtied,
1370 period,
1371 pause,
1372 start_time);
1373 __set_current_state(TASK_KILLABLE);
1374 io_schedule_timeout(pause);
1375
1376 current->dirty_paused_when = now + pause;
1377 current->nr_dirtied = 0;
1378 current->nr_dirtied_pause = nr_dirtied_pause;
1379
1380
1381
1382
1383
1384 if (task_ratelimit)
1385 break;
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397 if (bdi_dirty <= bdi_stat_error(bdi))
1398 break;
1399
1400 if (fatal_signal_pending(current))
1401 break;
1402 }
1403
1404 if (!dirty_exceeded && bdi->dirty_exceeded)
1405 bdi->dirty_exceeded = 0;
1406
1407 if (writeback_in_progress(bdi))
1408 return;
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418 if (laptop_mode)
1419 return;
1420
1421 if (nr_reclaimable > background_thresh)
1422 bdi_start_background_writeback(bdi);
1423}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

static DEFINE_PER_CPU(int, bdp_ratelimits);

/*
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case. If every task exits right after dirtying
 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() would never be
 * called and dirty throttling would never kick in.  So the pages that
 * short-lived tasks "leaked" this way are accumulated per CPU here and
 * charged to the next dirtying task on the same CPU.
 */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping: address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, getting the global dirty state is expensive, so
 * try to avoid calling it too often (ratelimiting).  But once we're over the
 * dirty memory limit we decrease the ratelimiting by a lot, to prevent
 * individual processes from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ratelimit;
	int *p;

	if (!bdi_cap_account_dirty(bdi))
		return;

	ratelimit = current->nr_dirtied_pause;
	if (bdi->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));

	preempt_disable();
	/*
	 * This prevents one CPU to accumulate too many dirtied pages without
	 * calling into balance_dirty_pages(), which can happen when there are
	 * 1000+ tasks, all of them start dirtying pages at exactly the same
	 * time, hence all honoured too large initial task->nr_dirtied_pause.
	 */
	p = &__get_cpu_var(bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
	 * the dirty throttling and livelocking other long-run dirtiers.
	 */
	p = &__get_cpu_var(dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		unsigned long nr_pages_dirtied;
		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
		*p -= nr_pages_dirtied;
		current->nr_dirtied += nr_pages_dirtied;
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		balance_dirty_pages(mapping, current->nr_dirtied);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
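
/*
 * Usage sketch (added for clarity, not part of the original file): a typical
 * caller is the buffered write path, which dirties one page at a time and
 * then lets dirty balancing decide whether to throttle, roughly:
 *
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied,
 *				  page, fsdata);
 *	...
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * as done per copied page in generic_perform_write().  Most of the time the
 * call returns immediately because current->nr_dirtied is still below
 * current->nr_dirtied_pause; only once the task has dirtied that many pages
 * does it drop into balance_dirty_pages() and possibly sleep.
 */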
1511
1512void throttle_vm_writeout(gfp_t gfp_mask)
1513{
1514 unsigned long background_thresh;
1515 unsigned long dirty_thresh;
1516
1517 for ( ; ; ) {
1518 global_dirty_limits(&background_thresh, &dirty_thresh);
1519 dirty_thresh = hard_dirty_limit(dirty_thresh);
1520
1521
1522
1523
1524
1525 dirty_thresh += dirty_thresh / 10;
1526
1527 if (global_page_state(NR_UNSTABLE_NFS) +
1528 global_page_state(NR_WRITEBACK) <= dirty_thresh)
1529 break;
1530 congestion_wait(BLK_RW_ASYNC, HZ/10);
1531
1532
1533
1534
1535
1536
1537 if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
1538 break;
1539 }
1540}
1541
1542
1543
1544
1545int dirty_writeback_centisecs_handler(ctl_table *table, int write,
1546 void __user *buffer, size_t *length, loff_t *ppos)
1547{
1548 proc_dointvec(table, write, buffer, length, ppos);
1549 return 0;
1550}
1551
1552#ifdef CONFIG_BLOCK
1553void laptop_mode_timer_fn(unsigned long data)
1554{
1555 struct request_queue *q = (struct request_queue *)data;
1556 int nr_pages = global_page_state(NR_FILE_DIRTY) +
1557 global_page_state(NR_UNSTABLE_NFS);
1558
1559
1560
1561
1562
1563 if (bdi_has_dirty_io(&q->backing_dev_info))
1564 bdi_start_writeback(&q->backing_dev_info, nr_pages,
1565 WB_REASON_LAPTOP_TIMER);
1566}
1567
1568
1569
1570
1571
1572
1573void laptop_io_completion(struct backing_dev_info *info)
1574{
1575 mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
1576}
1577
1578
1579
1580
1581
1582
1583void laptop_sync_completion(void)
1584{
1585 struct backing_dev_info *bdi;
1586
1587 rcu_read_lock();
1588
1589 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
1590 del_timer(&bdi->laptop_mode_wb_timer);
1591
1592 rcu_read_unlock();
1593}
1594#endif
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607void writeback_set_ratelimit(void)
1608{
1609 unsigned long background_thresh;
1610 unsigned long dirty_thresh;
1611 global_dirty_limits(&background_thresh, &dirty_thresh);
1612 global_dirty_limit = dirty_thresh;
1613 ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
1614 if (ratelimit_pages < 16)
1615 ratelimit_pages = 16;
1616}
1617
1618static int __cpuinit
1619ratelimit_handler(struct notifier_block *self, unsigned long action,
1620 void *hcpu)
1621{
1622
1623 switch (action & ~CPU_TASKS_FROZEN) {
1624 case CPU_ONLINE:
1625 case CPU_DEAD:
1626 writeback_set_ratelimit();
1627 return NOTIFY_OK;
1628 default:
1629 return NOTIFY_DONE;
1630 }
1631}
1632
1633static struct notifier_block __cpuinitdata ratelimit_nb = {
1634 .notifier_call = ratelimit_handler,
1635 .next = NULL,
1636};
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656void __init page_writeback_init(void)
1657{
1658 writeback_set_ratelimit();
1659 register_cpu_notifier(&ratelimit_nb);
1660
1661 fprop_global_init(&writeout_completions);
1662}

/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use the
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
/*
 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
	unsigned long tagged;

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
		cond_resched();
		/* We check 'start' to handle wrapping when end == ~0UL */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722int write_cache_pages(struct address_space *mapping,
1723 struct writeback_control *wbc, writepage_t writepage,
1724 void *data)
1725{
1726 int ret = 0;
1727 int done = 0;
1728 struct pagevec pvec;
1729 int nr_pages;
1730 pgoff_t uninitialized_var(writeback_index);
1731 pgoff_t index;
1732 pgoff_t end;
1733 pgoff_t done_index;
1734 int cycled;
1735 int range_whole = 0;
1736 int tag;
1737
1738 pagevec_init(&pvec, 0);
1739 if (wbc->range_cyclic) {
1740 writeback_index = mapping->writeback_index;
1741 index = writeback_index;
1742 if (index == 0)
1743 cycled = 1;
1744 else
1745 cycled = 0;
1746 end = -1;
1747 } else {
1748 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1749 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1750 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1751 range_whole = 1;
1752 cycled = 1;
1753 }
1754 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1755 tag = PAGECACHE_TAG_TOWRITE;
1756 else
1757 tag = PAGECACHE_TAG_DIRTY;
1758retry:
1759 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1760 tag_pages_for_writeback(mapping, index, end);
1761 done_index = index;
1762 while (!done && (index <= end)) {
1763 int i;
1764
1765 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1766 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1767 if (nr_pages == 0)
1768 break;
1769
1770 for (i = 0; i < nr_pages; i++) {
1771 struct page *page = pvec.pages[i];
1772
1773
1774
1775
1776
1777
1778
1779
1780 if (page->index > end) {
1781
1782
1783
1784
1785 done = 1;
1786 break;
1787 }
1788
1789 done_index = page->index;
1790
1791 lock_page(page);
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801 if (unlikely(page->mapping != mapping)) {
1802continue_unlock:
1803 unlock_page(page);
1804 continue;
1805 }
1806
1807 if (!PageDirty(page)) {
1808
1809 goto continue_unlock;
1810 }
1811
1812 if (PageWriteback(page)) {
1813 if (wbc->sync_mode != WB_SYNC_NONE)
1814 wait_on_page_writeback(page);
1815 else
1816 goto continue_unlock;
1817 }
1818
1819 BUG_ON(PageWriteback(page));
1820 if (!clear_page_dirty_for_io(page))
1821 goto continue_unlock;
1822
1823 trace_wbc_writepage(wbc, mapping->backing_dev_info);
1824 ret = (*writepage)(page, wbc, data);
1825 if (unlikely(ret)) {
1826 if (ret == AOP_WRITEPAGE_ACTIVATE) {
1827 unlock_page(page);
1828 ret = 0;
1829 } else {
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839 done_index = page->index + 1;
1840 done = 1;
1841 break;
1842 }
1843 }
1844
1845
1846
1847
1848
1849
1850
1851 if (--wbc->nr_to_write <= 0 &&
1852 wbc->sync_mode == WB_SYNC_NONE) {
1853 done = 1;
1854 break;
1855 }
1856 }
1857 pagevec_release(&pvec);
1858 cond_resched();
1859 }
1860 if (!cycled && !done) {
1861
1862
1863
1864
1865
1866 cycled = 1;
1867 index = 0;
1868 end = writeback_index - 1;
1869 goto retry;
1870 }
1871 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1872 mapping->writeback_index = done_index;
1873
1874 return ret;
1875}
1876EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
	blk_finish_plug(&plug);
	return ret;
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	return ret;
}
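
/*
 * Usage sketch (added for clarity, not part of the original file): callers
 * such as __filemap_fdatawrite_range() drive do_writepages() with a
 * writeback_control describing what to write, roughly:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= start,
 *		.range_end	= end,
 *	};
 *
 *	ret = do_writepages(mapping, &wbc);
 *
 * Filesystems that implement ->writepages() get the whole range in one call;
 * everyone else falls back to generic_writepages(), ie. write_cache_pages()
 * invoking ->writepage() per dirty page under a block plug.
 */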
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939int write_one_page(struct page *page, int wait)
1940{
1941 struct address_space *mapping = page->mapping;
1942 int ret = 0;
1943 struct writeback_control wbc = {
1944 .sync_mode = WB_SYNC_ALL,
1945 .nr_to_write = 1,
1946 };
1947
1948 BUG_ON(!PageLocked(page));
1949
1950 if (wait)
1951 wait_on_page_writeback(page);
1952
1953 if (clear_page_dirty_for_io(page)) {
1954 page_cache_get(page);
1955 ret = mapping->a_ops->writepage(page, &wbc);
1956 if (ret == 0 && wait) {
1957 wait_on_page_writeback(page);
1958 if (PageError(page))
1959 ret = -EIO;
1960 }
1961 page_cache_release(page);
1962 } else {
1963 unlock_page(page);
1964 }
1965 return ret;
1966}
1967EXPORT_SYMBOL(write_one_page);
1968
1969
1970
1971
1972int __set_page_dirty_no_writeback(struct page *page)
1973{
1974 if (!PageDirty(page))
1975 return !TestSetPageDirty(page);
1976 return 0;
1977}
1978
1979
1980
1981
1982
1983void account_page_dirtied(struct page *page, struct address_space *mapping)
1984{
1985 if (mapping_cap_account_dirty(mapping)) {
1986 __inc_zone_page_state(page, NR_FILE_DIRTY);
1987 __inc_zone_page_state(page, NR_DIRTIED);
1988 __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
1989 __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
1990 task_io_account_write(PAGE_CACHE_SIZE);
1991 current->nr_dirtied++;
1992 this_cpu_inc(bdp_ratelimits);
1993 }
1994}
1995EXPORT_SYMBOL(account_page_dirtied);
1996
1997
1998
1999
2000
2001
2002void account_page_writeback(struct page *page)
2003{
2004 inc_zone_page_state(page, NR_WRITEBACK);
2005}
2006EXPORT_SYMBOL(account_page_writeback);
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023int __set_page_dirty_nobuffers(struct page *page)
2024{
2025 if (!TestSetPageDirty(page)) {
2026 struct address_space *mapping = page_mapping(page);
2027 struct address_space *mapping2;
2028
2029 if (!mapping)
2030 return 1;
2031
2032 spin_lock_irq(&mapping->tree_lock);
2033 mapping2 = page_mapping(page);
2034 if (mapping2) {
2035 BUG_ON(mapping2 != mapping);
2036 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
2037 account_page_dirtied(page, mapping);
2038 radix_tree_tag_set(&mapping->page_tree,
2039 page_index(page), PAGECACHE_TAG_DIRTY);
2040 }
2041 spin_unlock_irq(&mapping->tree_lock);
2042 if (mapping->host) {
2043
2044 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2045 }
2046 return 1;
2047 }
2048 return 0;
2049}
2050EXPORT_SYMBOL(__set_page_dirty_nobuffers);
2051
2052
2053
2054
2055
2056
2057
2058
2059void account_page_redirty(struct page *page)
2060{
2061 struct address_space *mapping = page->mapping;
2062 if (mapping && mapping_cap_account_dirty(mapping)) {
2063 current->nr_dirtied--;
2064 dec_zone_page_state(page, NR_DIRTIED);
2065 dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
2066 }
2067}
2068EXPORT_SYMBOL(account_page_redirty);
2069
2070
2071
2072
2073
2074
2075int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
2076{
2077 wbc->pages_skipped++;
2078 account_page_redirty(page);
2079 return __set_page_dirty_nobuffers(page);
2080}
2081EXPORT_SYMBOL(redirty_page_for_writepage);
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094int set_page_dirty(struct page *page)
2095{
2096 struct address_space *mapping = page_mapping(page);
2097
2098 if (likely(mapping)) {
2099 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110 ClearPageReclaim(page);
2111#ifdef CONFIG_BLOCK
2112 if (!spd)
2113 spd = __set_page_dirty_buffers;
2114#endif
2115 return (*spd)(page);
2116 }
2117 if (!PageDirty(page)) {
2118 if (!TestSetPageDirty(page))
2119 return 1;
2120 }
2121 return 0;
2122}
2123EXPORT_SYMBOL(set_page_dirty);
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135int set_page_dirty_lock(struct page *page)
2136{
2137 int ret;
2138
2139 lock_page(page);
2140 ret = set_page_dirty(page);
2141 unlock_page(page);
2142 return ret;
2143}
2144EXPORT_SYMBOL(set_page_dirty_lock);
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160int clear_page_dirty_for_io(struct page *page)
2161{
2162 struct address_space *mapping = page_mapping(page);
2163
2164 BUG_ON(!PageLocked(page));
2165
2166 if (mapping && mapping_cap_account_dirty(mapping)) {
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192 if (page_mkclean(page))
2193 set_page_dirty(page);
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204 if (TestClearPageDirty(page)) {
2205 dec_zone_page_state(page, NR_FILE_DIRTY);
2206 dec_bdi_stat(mapping->backing_dev_info,
2207 BDI_RECLAIMABLE);
2208 return 1;
2209 }
2210 return 0;
2211 }
2212 return TestClearPageDirty(page);
2213}
2214EXPORT_SYMBOL(clear_page_dirty_for_io);
2215
2216int test_clear_page_writeback(struct page *page)
2217{
2218 struct address_space *mapping = page_mapping(page);
2219 int ret;
2220
2221 if (mapping) {
2222 struct backing_dev_info *bdi = mapping->backing_dev_info;
2223 unsigned long flags;
2224
2225 spin_lock_irqsave(&mapping->tree_lock, flags);
2226 ret = TestClearPageWriteback(page);
2227 if (ret) {
2228 radix_tree_tag_clear(&mapping->page_tree,
2229 page_index(page),
2230 PAGECACHE_TAG_WRITEBACK);
2231 if (bdi_cap_account_writeback(bdi)) {
2232 __dec_bdi_stat(bdi, BDI_WRITEBACK);
2233 __bdi_writeout_inc(bdi);
2234 }
2235 }
2236 spin_unlock_irqrestore(&mapping->tree_lock, flags);
2237 } else {
2238 ret = TestClearPageWriteback(page);
2239 }
2240 if (ret) {
2241 dec_zone_page_state(page, NR_WRITEBACK);
2242 inc_zone_page_state(page, NR_WRITTEN);
2243 }
2244 return ret;
2245}
2246
2247int test_set_page_writeback(struct page *page)
2248{
2249 struct address_space *mapping = page_mapping(page);
2250 int ret;
2251
2252 if (mapping) {
2253 struct backing_dev_info *bdi = mapping->backing_dev_info;
2254 unsigned long flags;
2255
2256 spin_lock_irqsave(&mapping->tree_lock, flags);
2257 ret = TestSetPageWriteback(page);
2258 if (!ret) {
2259 radix_tree_tag_set(&mapping->page_tree,
2260 page_index(page),
2261 PAGECACHE_TAG_WRITEBACK);
2262 if (bdi_cap_account_writeback(bdi))
2263 __inc_bdi_stat(bdi, BDI_WRITEBACK);
2264 }
2265 if (!PageDirty(page))
2266 radix_tree_tag_clear(&mapping->page_tree,
2267 page_index(page),
2268 PAGECACHE_TAG_DIRTY);
2269 radix_tree_tag_clear(&mapping->page_tree,
2270 page_index(page),
2271 PAGECACHE_TAG_TOWRITE);
2272 spin_unlock_irqrestore(&mapping->tree_lock, flags);
2273 } else {
2274 ret = TestSetPageWriteback(page);
2275 }
2276 if (!ret)
2277 account_page_writeback(page);
2278 return ret;
2279
2280}
2281EXPORT_SYMBOL(test_set_page_writeback);
2282
2283
2284
2285
2286
2287int mapping_tagged(struct address_space *mapping, int tag)
2288{
2289 return radix_tree_tagged(&mapping->page_tree, tag);
2290}
2291EXPORT_SYMBOL(mapping_tagged);
2292