/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * ridiculously long periods of time.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 5;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 10;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in
 * jiffies: a full sync is triggered after this time elapses without any
 * disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

static void background_writeout(unsigned long _min_pages);

/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a
 * smaller share.
 *
 * We use page writeout completions because we are interested in getting rid
 * of dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these
 * events, because demand can/will vary over time. The length of this period
 * itself is measured in page writeback completions.
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / 100;
	return 2 + ilog2(dirty_total - 1);
}

/*
 * update the period when the dirty threshold changes.
 */
int dirty_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		int shift = calc_period_shift();
		prop_change_shift(&vm_completions, shift);
		prop_change_shift(&vm_dirties, shift);
	}
	return ret;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
			      bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

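/*
 * Count a page dirtied by the current task towards the floating per-task
 * dirty proportion that task_dirty_limit() uses to scale the dirty limit.
 */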
static inline void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (bdi_cap_writeback_dirty(bdi)) {
		prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
	} else {
		*numerator = 0;
		*denominator = 1;
	}
}

/*
 * Clip the earned share of dirty pages to that which is actually available.
 * This avoids exceeding the total dirty_limit when the floating averages
 * fluctuate too quickly.
 */
static void
clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
{
	long avail_dirty;

	avail_dirty = dirty -
		(global_page_state(NR_FILE_DIRTY) +
		 global_page_state(NR_WRITEBACK) +
		 global_page_state(NR_UNSTABLE_NFS) +
		 global_page_state(NR_WRITEBACK_TEMP));

	if (avail_dirty < 0)
		avail_dirty = 0;

	avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
		bdi_stat(bdi, BDI_WRITEBACK);

	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}

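/*
 * Fraction of recently dirtied pages that were dirtied by this task,
 * as tracked by task_dirty_inc().
 */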
static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}

/*
 * scale the dirty limit
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 *
 * Tasks that dirty pages aggressively get a lower effective limit, so that
 * light dirtiers are not throttled on behalf of heavy ones.  The limit is
 * never reduced below half of the original value.
 */
static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
{
	long numerator, denominator;
	long dirty = *pdirty;
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;
	if (dirty < *pdirty/2)
		dirty = *pdirty/2;

	*pdirty = dirty;
}

/*
 * Per-BDI minimum and maximum shares of the dirty limit, adjusted via
 * bdi_set_min_ratio() and bdi_set_max_ratio().
 */
static DEFINE_SPINLOCK(bdi_lock);
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&bdi_lock, flags);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_irqrestore(&bdi_lock, flags);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	unsigned long flags;
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_irqsave(&bdi_lock, flags);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_irqrestore(&bdi_lock, flags);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES)
			+ zone_page_state(z, NR_INACTIVE)
			+ zone_page_state(z, NR_ACTIVE);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES)
		+ global_page_state(NR_INACTIVE)
		+ global_page_state(NR_ACTIVE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

void
get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
		 struct backing_dev_info *bdi)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	long background;
	long dirty;
	unsigned long available_memory = determine_dirtyable_memory();
	struct task_struct *tsk;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio < 5)
		dirty_ratio = 5;

	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	background = (background_ratio * available_memory) / 100;
	dirty = (dirty_ratio * available_memory) / 100;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;

	if (bdi) {
		u64 bdi_dirty;
		long numerator, denominator;

		/*
		 * Calculate this BDI's share of the dirty ratio.
		 */
		bdi_writeout_fraction(bdi, &numerator, &denominator);

		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
		bdi_dirty *= numerator;
		do_div(bdi_dirty, denominator);
		bdi_dirty += (dirty * bdi->min_ratio) / 100;
		if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
			bdi_dirty = dirty * bdi->max_ratio / 100;

		*pbdi_dirty = bdi_dirty;
		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
		task_dirty_limit(current, pbdi_dirty);
	}
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	long background_thresh;
	long dirty_thresh;
	long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		get_dirty_limits(&background_thresh, &dirty_thresh,
				&bdi_thresh, bdi);

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <
				(background_thresh + dirty_thresh) / 2)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (bdi_nr_reclaimable) {
			writeback_inodes(&wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			get_dirty_limits(&background_thresh, &dirty_thresh,
				       &bdi_thresh, bdi);
		}

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else if (bdi_nr_reclaimable) {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;
		if (pages_written >= write_chunk)
			break;		/* We've done our duty */

		congestion_wait(WRITE, HZ/10);
	}

	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
			bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
			(!laptop_mode && (global_page_state(NR_FILE_DIRTY)
					   + global_page_state(NR_UNSTABLE_NFS)
					   > background_thresh)))
		pdflush_operation(background_writeout, 0);
}

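/*
 * Helper for page-fault paths: dirty the page and, if it belongs to a
 * mapping, run the ratelimited dirty-page balancing.
 */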
void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, getting the global dirty state is expensive, so try
 * to avoid doing it too often (ratelimiting).  But once we're over the dirty
 * memory limit we decrease the ratelimiting by a lot, to prevent individual
 * processes from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);

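/*
 * Used by page reclaim to slow allocators down when too many pages are
 * already under writeback, instead of piling up even more I/O.
 */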
void throttle_vm_writeout(gfp_t gfp_mask)
{
	long background_thresh;
	long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
				break;
		congestion_wait(WRITE, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.range_cyclic	= 1,
	};

	for ( ; ; ) {
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
		if (global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) < background_thresh
				&& min_pages <= 0)
			break;
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			if (wbc.encountered_congestion || wbc.more_io)
				congestion_wait(WRITE, HZ/10);
			else
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched, negative
 * if no thread was available.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
		.range_cyclic	= 1,
	};

	sync_supers();

	oldest_jif = jiffies - dirty_expire_interval;
	start_jif = jiffies;
	next_jif = start_jif + dirty_writeback_interval;
	nr_to_write = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion || wbc.more_io)
				congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	else
		del_timer(&wb_timer);
	return 0;
}

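/*
 * Timer callback: dispatch a pdflush thread to run wb_kupdate().  If no
 * thread is available, retry in one second.
 */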
static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ);
}

static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive) global dirty
 * accounting too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high either, because it also controls
 * the amount of memory which each balance_dirty_pages() caller may have to
 * write back in one go.
 */
void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages.
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	int shift;

	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
retry:
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			      PAGECACHE_TAG_DIRTY,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index + 1;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);

			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file.
					 */
					done = 1;
					break;
				}
			}

			if (wbc->nr_to_write > 0) {
				wbc->nr_to_write--;
				if (wbc->nr_to_write == 0 &&
				    wbc->sync_mode == WB_SYNC_NONE) {
					/*
					 * We stop writing back only if we are
					 * not doing integrity sync. In case of
					 * integrity sync we have to keep going
					 * because someone may be concurrently
					 * dirtying pages, and we might have
					 * synced a lot of newly appeared dirty
					 * pages, but have not synced all of the
					 * old dirty pages.
					 */
					done = 1;
					break;
				}
			}

			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	return write_cache_pages(mapping, wbc, __writepage, mapping);
}

EXPORT_SYMBOL(generic_writepages);

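/*
 * Write back a mapping's dirty pages, preferring the filesystem's own
 * ->writepages() implementation and falling back to generic_writepages().
 */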
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	wbc->for_writepages = 1;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	wbc->for_writepages = 0;
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			if (mapping_cap_account_dirty(mapping)) {
				__inc_zone_page_state(page, NR_FILE_DIRTY);
				__inc_bdi_stat(mapping->backing_dev_info,
						BDI_RECLAIMABLE);
				task_io_account_write(PAGE_CACHE_SIZE);
			}
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
static int __set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}

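/*
 * Dirty a page and charge it to the current task's per-task dirty
 * proportion, so that task_dirty_limit() can throttle heavy dirtiers.
 */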
int set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty(page);
	if (ret)
		task_dirty_inc(current);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page_nosync(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	ClearPageReclaim(page);
	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point.  We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion. See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

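/*
 * Clear a page's writeback flag and, on success, drop the matching
 * radix-tree tag and the per-BDI and per-zone writeback accounting.
 */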
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret)
		dec_zone_page_state(page, NR_WRITEBACK);
	return ret;
}

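/*
 * Mark a page as under writeback: set its writeback flag, tag it in the
 * radix tree, clear its dirty tag and update the writeback statistics.
 */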
int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		inc_zone_page_state(page, NR_WRITEBACK);
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	int ret;
	rcu_read_lock();
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);