/*
 * mm/page-writeback.c
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * The maximum number of pages to write out in a single pdflush pass, so
 * that we don't hold I_SYNC against an inode for enormous amounts of time.
 * The dirty limits are also re-evaluated each time this many pages have
 * been written.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in
 * jiffies: a full sync is triggered after this time elapses without any
 * disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

/*
 * Scale the writeback cache size proportional to the relative writeout speed.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a
 * smaller share.
 *
 * We use page writeout completions because we are interested in getting rid
 * of dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these
 * events, because demand can/will vary over time. The length of this period
 * itself is measured in page writeback completions.
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	if (vm_dirty_bytes)
		dirty_total = vm_dirty_bytes / PAGE_SIZE;
	else
		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
				100;
	return 2 + ilog2(dirty_total - 1);
}
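
/*
 * Worked example (illustrative, not from the original source): with 4KiB
 * pages and vm_dirty_bytes set to 400MiB, dirty_total is 102400 pages, so
 * calc_period_shift() returns 2 + ilog2(102399) = 2 + 16 = 18; period/2 =
 * 2^17 = 131072 is indeed roundup_pow_of_two(102400).
 */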

/*
 * update the period when the dirty threshold changes.
 */
static void update_completion_period(void)
{
	int shift = calc_period_shift();
	prop_change_shift(&vm_completions, shift);
	prop_change_shift(&vm_dirties, shift);
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		update_completion_period();
		vm_dirty_bytes = 0;
	}
	return ret;
}


int dirty_bytes_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		update_completion_period();
		vm_dirty_ratio = 0;
	}
	return ret;
}
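
/*
 * Illustrative note (not part of the original source): the four sysctl
 * handlers above keep the ratio- and bytes-based controls mutually
 * exclusive; writing one of them zeroes its counterpart, e.g.:
 *
 *	# echo 536870912 > /proc/sys/vm/dirty_bytes
 *	# cat /proc/sys/vm/dirty_ratio		-> now reads 0
 *	# echo 20 > /proc/sys/vm/dirty_ratio
 *	# cat /proc/sys/vm/dirty_bytes		-> now reads 0
 */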

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
			      bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (bdi_cap_writeback_dirty(bdi)) {
		prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
	} else {
		*numerator = 0;
		*denominator = 1;
	}
}

/*
 * Clip the earned share of dirty pages to that which is actually available.
 * This avoids exceeding the total dirty_limit when the floating averages
 * fluctuate too quickly.
 */
static void
clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
{
	long avail_dirty;

	avail_dirty = dirty -
		(global_page_state(NR_FILE_DIRTY) +
		 global_page_state(NR_WRITEBACK) +
		 global_page_state(NR_UNSTABLE_NFS) +
		 global_page_state(NR_WRITEBACK_TEMP));

	if (avail_dirty < 0)
		avail_dirty = 0;

	avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
		bdi_stat(bdi, BDI_WRITEBACK);

	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}

static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}

/*
 * scale the dirty limit
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 *
 * where p_{t} is the fraction of recently dirtied pages attributed to this
 * task, so that aggressive dirtiers get a lower effective limit than
 * occasional ones.
 */
static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
{
	long numerator, denominator;
	long dirty = *pdirty;
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;
	if (dirty < *pdirty/2)
		dirty = *pdirty/2;

	*pdirty = dirty;
}
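
/*
 * Worked example (illustrative): with a limit of *pdirty = 1000 pages and a
 * task that owns half of the recently dirtied pages (numerator/denominator
 * = 1/2), inv = (1000 >> 3) * 1/2 = 62, so the task's limit becomes
 * 1000 - 62 = 938 pages.  A task that dirtied everything (fraction 1) would
 * get 1000 - 125 = 875, and the clamp ensures the result never drops below
 * *pdirty/2 = 500.
 */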

/*
 * Per-bdi minimum/maximum dirty ratios.
 */
static DEFINE_SPINLOCK(bdi_lock);	/* protects bdi_min_ratio */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&bdi_lock, flags);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_irqrestore(&bdi_lock, flags);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	unsigned long flags;
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_irqsave(&bdi_lock, flags);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_irqrestore(&bdi_lock, flags);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);
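
/*
 * Usage sketch (illustrative, assuming a registered bdi): a driver could
 * reserve 10% of the global dirty threshold for its device and cap its
 * share at 50%:
 *
 *	err = bdi_set_min_ratio(bdi, 10);
 *	if (!err)
 *		err = bdi_set_max_ratio(bdi, 50);
 *
 * The min_ratio/max_ratio attributes under /sys/class/bdi/<dev>/ are
 * backed by these two functions.
 */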

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting
 * rather excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES) + global_lru_pages();

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

void
get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
		 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long available_memory = determine_dirtyable_memory();
	struct task_struct *tsk;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else {
		int dirty_ratio;

		dirty_ratio = vm_dirty_ratio;
		if (dirty_ratio < 5)
			dirty_ratio = 5;
		dirty = (dirty_ratio * available_memory) / 100;
	}

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;

	if (bdi) {
		u64 bdi_dirty;
		long numerator, denominator;

		/*
		 * Calculate this BDI's share of the dirty ratio.
		 */
		bdi_writeout_fraction(bdi, &numerator, &denominator);

		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
		bdi_dirty *= numerator;
		do_div(bdi_dirty, denominator);
		bdi_dirty += (dirty * bdi->min_ratio) / 100;
		if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
			bdi_dirty = dirty * bdi->max_ratio / 100;

		*pbdi_dirty = bdi_dirty;
		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
		task_dirty_limit(current, pbdi_dirty);
	}
}
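
/*
 * Worked example (illustrative): with 1,000,000 dirtyable pages,
 * vm_dirty_ratio = 20 and dirty_background_ratio = 10, get_dirty_limits()
 * yields dirty = 200,000 and background = 100,000 pages.  A bdi with the
 * default min_ratio = 0 that has earned a 25% writeout-completion share
 * then gets bdi_dirty = 200,000 * 25% = 50,000 pages, before the clipping
 * and per-task corrections above are applied.
 */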

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		get_dirty_limits(&background_thresh, &dirty_thresh,
				&bdi_thresh, bdi);

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <
				(background_thresh + dirty_thresh) / 2)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (bdi_nr_reclaimable) {
			writeback_inodes(&wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			get_dirty_limits(&background_thresh, &dirty_thresh,
				       &bdi_thresh, bdi);
		}

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else if (bdi_nr_reclaimable) {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;
		if (pages_written >= write_chunk)
			break;		/* We've done our duty */

		congestion_wait(WRITE, HZ/10);
	}

	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
			bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
			(!laptop_mode && (global_page_state(NR_FILE_DIRTY)
					  + global_page_state(NR_UNSTABLE_NFS)
					  > background_thresh)))
		pdflush_operation(background_writeout, 0);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
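
/*
 * Illustrative note (sketch): most callers use the one-page wrapper from
 * <linux/writeback.h>, which expands to the function above:
 *
 *	balance_dirty_pages_ratelimited(mapping);
 *		-> balance_dirty_pages_ratelimited_nr(mapping, 1);
 *
 * e.g. generic_perform_write() calls it once per dirtied page, so a heavy
 * writer enters balance_dirty_pages() at most once per ratelimit_pages
 * pages dirtied on a given CPU.
 */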

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(WRITE, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.range_cyclic	= 1,
	};

	for ( ; ; ) {
		unsigned long background_thresh;
		unsigned long dirty_thresh;

		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
		if (global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) < background_thresh
				&& min_pages <= 0)
			break;
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			if (wbc.encountered_congestion || wbc.more_io)
				congestion_wait(WRITE, HZ/10);
			else
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched, -1 if
 * every pdflush thread was busy.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	return pdflush_operation(background_writeout, nr_pages);
}
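
/*
 * Usage sketch (illustrative): callers that want background help cleaning
 * memory, such as page reclaim or the sync path, use:
 *
 *	wakeup_pdflush(1024);	-- ask pdflush to write ~1024 pages
 *	wakeup_pdflush(0);	-- write back everything currently dirty
 */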

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
		.range_cyclic	= 1,
	};

	sync_supers();

	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
	start_jif = jiffies;
	next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
	nr_to_write = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion || wbc.more_io)
				congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, jiffies +
			msecs_to_jiffies(dirty_writeback_interval * 10));
	else
		del_timer(&wb_timer);
	return 0;
}

static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced.  The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - it is usually the output of a fast
 * disk anyway.
 */
void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
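
/*
 * Worked example (illustrative): with 4GiB of RAM (vm_total_pages ~= 1048576
 * at 4KiB pages) and 4 online CPUs, ratelimit_pages = 1048576 / (4 * 32) =
 * 8192, which the four-megabyte cap then reduces to (4096 * 1024) / 4096 =
 * 1024 pages.
 */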

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits: arm the periodic
 * kupdate timer, compute the dirty-page rate limit, register a CPU hotplug
 * notifier to keep it up to date, and initialise the floating proportions
 * used to apportion the dirty limit between BDIs and tasks.
 */
void __init page_writeback_init(void)
{
	int shift;

	mod_timer(&wb_timer,
		  jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	long nr_to_write = wbc->nr_to_write;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
retry:
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			      PAGECACHE_TAG_DIRTY,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index + 1;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done = 1;
					break;
				}
			}

			if (nr_to_write > 0) {
				nr_to_write--;
				if (nr_to_write == 0 &&
				    wbc->sync_mode == WB_SYNC_NONE) {
					/*
					 * We stop writing back only if we are
					 * not doing integrity sync. In case of
					 * integrity sync we have to keep going
					 * because someone may be concurrently
					 * dirtying pages, and we might have
					 * synced a lot of newly appeared dirty
					 * pages, but have not synced all of the
					 * old dirty pages.
					 */
					done = 1;
					break;
				}
			}

			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (!wbc->no_nrwrite_index_update) {
		if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
			mapping->writeback_index = done_index;
		wbc->nr_to_write = nr_to_write;
	}

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);
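
/*
 * Illustrative sketch (hypothetical filesystem code, not part of this
 * file): a filesystem can build its ->writepages() on top of
 * write_cache_pages() and pass private state through the opaque data
 * pointer:
 *
 *	static int myfs_writepage_cb(struct page *page,
 *			struct writeback_control *wbc, void *data)
 *	{
 *		struct myfs_wb_state *state = data;
 *
 *		return myfs_write_page(page, wbc, state);
 *	}
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct myfs_wb_state state = { .mapping = mapping };
 *
 *		return write_cache_pages(mapping, wbc, myfs_writepage_cb,
 *					 &state);
 *	}
 *
 * __writepage()/generic_writepages() below are exactly this pattern, with
 * the address_space itself as the private data.
 */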

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	return write_cache_pages(mapping, wbc, __writepage, mapping);
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	wbc->for_writepages = 1;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	wbc->for_writepages = 0;
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
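
/*
 * Usage sketch (illustrative): a caller that needs one locked page written
 * synchronously, e.g. a simple filesystem flushing a directory page:
 *
 *	lock_page(page);
 *	... modify page contents ...
 *	err = write_one_page(page, 1);	-- page comes back unlocked
 */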

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

/*
 * Helper function for set_page_dirty family.
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
	if (mapping_cap_account_dirty(mapping)) {
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
		task_dirty_inc(current);
		task_io_account_write(PAGE_CACHE_SIZE);
	}
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page_nosync(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);
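
/*
 * Illustrative example (sketch): drivers that DMA into user pages obtained
 * via get_user_pages() do not hold the page lock when the transfer
 * completes, so they must use set_page_dirty_lock() rather than plain
 * set_page_dirty():
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		page_cache_release(pages[i]);
 *	}
 */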

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	ClearPageReclaim(page);
	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point.  We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion. See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret)
		dec_zone_page_state(page, NR_WRITEBACK);
	return ret;
}

int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		inc_zone_page_state(page, NR_WRITEBACK);
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	int ret;
	rcu_read_lock();
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);