/*
 *  linux/mm/vmscan.c
 *
 *  Page reclaim: scanning of the LRU lists, writeback and swap-out of
 *  pages, shrinking of registered slab caches, and the kswapd per-node
 *  background reclaim threads.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>

#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"
50
struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/*
	 * This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once.
	 */
	int swap_cluster_max;

	int swappiness;

	int all_unreclaimable;

	int order;

	/* Which cgroup do we reclaim from */
	struct mem_cgroup *mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;

	/* Pluggable isolate pages callback */
	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
			unsigned long *scanned, int order, int mode,
			struct zone *z, struct mem_cgroup *mem_cont,
			int active, int file);
};
96
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);		\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
126
/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
135
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
#else
#define scanning_global_lru(sc)	(1)
#endif

static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
						  struct scan_control *sc)
{
	if (!scanning_global_lru(sc))
		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);

	return &zone->reclaim_stat;
}

static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
				   enum lru_list lru)
{
	if (!scanning_global_lru(sc))
		return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);

	return zone_page_state(zone, NR_LRU_BASE + lru);
}
159
/*
 * Add a shrinker callback to be called from the vm.
 */
void register_shrinker(struct shrinker *shrinker)
{
	shrinker->nr = 0;
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);
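/*
 * Illustrative sketch (not part of this file): a filesystem or driver cache
 * would typically register a shrinker roughly like the one below.  The
 * my_cache_* names are hypothetical; the contract assumed here is the one
 * shrink_slab() relies on below: when called with nr_to_scan == 0 the
 * callback only reports how many objects are freeable, otherwise it frees
 * up to nr_to_scan objects and returns the remaining count, or -1 if it
 * cannot make progress under this gfp_mask.
 *
 *	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
 *	{
 *		if (nr_to_scan)
 *			my_cache_prune(nr_to_scan, gfp_mask);
 *		return my_cache_count_objects();
 *	}
 *
 *	static struct shrinker my_cache_shrinker = {
 *		.shrink	= my_cache_shrink,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&my_cache_shrinker);		// at init
 *	unregister_shrinker(&my_cache_shrinker);	// at teardown
 */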
183
#define SHRINK_BATCH 128

/*
 * Call the shrink functions to age shrinkable caches.
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
204unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
205 unsigned long lru_pages)
206{
207 struct shrinker *shrinker;
208 unsigned long ret = 0;
209
210 if (scanned == 0)
211 scanned = SWAP_CLUSTER_MAX;
212
213 if (!down_read_trylock(&shrinker_rwsem))
214 return 1;
215
216 list_for_each_entry(shrinker, &shrinker_list, list) {
217 unsigned long long delta;
218 unsigned long total_scan;
219 unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
220
221 delta = (4 * scanned) / shrinker->seeks;
222 delta *= max_pass;
223 do_div(delta, lru_pages + 1);
224 shrinker->nr += delta;
225 if (shrinker->nr < 0) {
226 printk(KERN_ERR "shrink_slab: %pF negative objects to "
227 "delete nr=%ld\n",
228 shrinker->shrink, shrinker->nr);
229 shrinker->nr = max_pass;
230 }
231
232
233
234
235
236
237 if (shrinker->nr > max_pass * 2)
238 shrinker->nr = max_pass * 2;
239
240 total_scan = shrinker->nr;
241 shrinker->nr = 0;
242
243 while (total_scan >= SHRINK_BATCH) {
244 long this_scan = SHRINK_BATCH;
245 int shrink_ret;
246 int nr_before;
247
248 nr_before = (*shrinker->shrink)(0, gfp_mask);
249 shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
250 if (shrink_ret == -1)
251 break;
252 if (shrink_ret < nr_before)
253 ret += nr_before - shrink_ret;
254 count_vm_events(SLABS_SCANNED, this_scan);
255 total_scan -= this_scan;
256
257 cond_resched();
258 }
259
260 shrinker->nr += total_scan;
261 }
262 up_read(&shrinker_rwsem);
263 return ret;
264}
265
/*
 * Called without lock on the page's mapping, but with the lock on the
 * list it is on, i.e. with zone->lru_lock held.
 */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}
286
287static inline int is_page_cache_freeable(struct page *page)
288{
289 return page_count(page) - !!page_has_private(page) == 2;
290}
291
292static int may_write_to_queue(struct backing_dev_info *bdi)
293{
294 if (current->flags & PF_SWAPWRITE)
295 return 1;
296 if (!bdi_write_congested(bdi))
297 return 1;
298 if (bdi == current->backing_dev_info)
299 return 1;
300 return 0;
301}
302
/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * has __GFP_FS.
 */
315static void handle_write_error(struct address_space *mapping,
316 struct page *page, int error)
317{
318 lock_page(page);
319 if (page_mapping(page) == mapping)
320 mapping_set_error(mapping, error);
321 unlock_page(page);
322}
323
/* Request for sync pageout. */
enum pageout_io {
	PAGEOUT_IO_ASYNC,
	PAGEOUT_IO_SYNC,
};

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
346static pageout_t pageout(struct page *page, struct address_space *mapping,
347 enum pageout_io sync_writeback)
348{
	/*
	 * Called for dirty, freeable pagecache/swapcache pages.  Several
	 * conditions can prevent writeback here:
	 *
	 * - the page is still referenced by someone other than the page
	 *   cache (and optional buffer heads), so it is not freeable;
	 * - the page has no mapping: anonymous pages not yet in the swap
	 *   cache cannot be written, and orphaned buffer pages are only
	 *   reclaimed if their buffers can be dropped;
	 * - the mapping provides no ->writepage, in which case the page
	 *   is re-activated;
	 * - the backing queue is congested and this context may not block
	 *   on it, in which case the page is kept for a later pass.
	 */
366 if (!is_page_cache_freeable(page))
367 return PAGE_KEEP;
368 if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
373 if (page_has_private(page)) {
374 if (try_to_free_buffers(page)) {
375 ClearPageDirty(page);
376 printk("%s: orphaned page\n", __func__);
377 return PAGE_CLEAN;
378 }
379 }
380 return PAGE_KEEP;
381 }
382 if (mapping->a_ops->writepage == NULL)
383 return PAGE_ACTIVATE;
384 if (!may_write_to_queue(mapping->backing_dev_info))
385 return PAGE_KEEP;
386
387 if (clear_page_dirty_for_io(page)) {
388 int res;
389 struct writeback_control wbc = {
390 .sync_mode = WB_SYNC_NONE,
391 .nr_to_write = SWAP_CLUSTER_MAX,
392 .range_start = 0,
393 .range_end = LLONG_MAX,
394 .nonblocking = 1,
395 .for_reclaim = 1,
396 };
397
398 SetPageReclaim(page);
399 res = mapping->a_ops->writepage(page, &wbc);
400 if (res < 0)
401 handle_write_error(mapping, page, res);
402 if (res == AOP_WRITEPAGE_ACTIVATE) {
403 ClearPageReclaim(page);
404 return PAGE_ACTIVATE;
405 }
		/*
		 * Wait on writeback if requested to. This happens when
		 * direct reclaiming a large contiguous area and the
		 * first attempt to free a range of pages fails.
		 */
412 if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
413 wait_on_page_writeback(page);
414
415 if (!PageWriteback(page)) {
416
417 ClearPageReclaim(page);
418 }
419 inc_zone_page_state(page, NR_VMSCAN_WRITE);
420 return PAGE_SUCCESS;
421 }
422
423 return PAGE_CLEAN;
424}
425
/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
430static int __remove_mapping(struct address_space *mapping, struct page *page)
431{
432 BUG_ON(!PageLocked(page));
433 BUG_ON(mapping != page_mapping(page));
434
435 spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * arise and allows the page to be discarded only when it is both
	 * unreferenced and clean: page_freeze_refs() fails if anyone besides
	 * the caller and the page cache still holds a reference, and a page
	 * dirtied after the freeze is caught by the PageDirty check below
	 * and unfrozen again.
	 */
461 if (!page_freeze_refs(page, 2))
462 goto cannot_free;
463
464 if (unlikely(PageDirty(page))) {
465 page_unfreeze_refs(page, 2);
466 goto cannot_free;
467 }
468
469 if (PageSwapCache(page)) {
470 swp_entry_t swap = { .val = page_private(page) };
471 __delete_from_swap_cache(page);
472 spin_unlock_irq(&mapping->tree_lock);
473 mem_cgroup_uncharge_swapcache(page, swap);
474 swap_free(swap);
475 } else {
476 __remove_from_page_cache(page);
477 spin_unlock_irq(&mapping->tree_lock);
478 mem_cgroup_uncharge_cache_page(page);
479 }
480
481 return 1;
482
483cannot_free:
484 spin_unlock_irq(&mapping->tree_lock);
485 return 0;
486}
487
/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
494int remove_mapping(struct address_space *mapping, struct page *page)
495{
496 if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
502 page_unfreeze_refs(page, 1);
503 return 1;
504 }
505 return 0;
506}
507
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
517#ifdef CONFIG_UNEVICTABLE_LRU
518void putback_lru_page(struct page *page)
519{
520 int lru;
521 int active = !!TestClearPageActive(page);
522 int was_unevictable = PageUnevictable(page);
523
524 VM_BUG_ON(PageLRU(page));
525
526redo:
527 ClearPageUnevictable(page);
528
529 if (page_evictable(page, NULL)) {
530
531
532
533
534
535
536 lru = active + page_is_file_cache(page);
537 lru_cache_add_lru(page, lru);
538 } else {
539
540
541
542
543 lru = LRU_UNEVICTABLE;
544 add_page_to_unevictable_list(page);
545 }
546
547
548
549
550
551
552 if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
553 if (!isolate_lru_page(page)) {
554 put_page(page);
555 goto redo;
556 }
557
558
559
560
561 }
562
563 if (was_unevictable && lru != LRU_UNEVICTABLE)
564 count_vm_event(UNEVICTABLE_PGRESCUED);
565 else if (!was_unevictable && lru == LRU_UNEVICTABLE)
566 count_vm_event(UNEVICTABLE_PGCULLED);
567
568 put_page(page);
569}
570
571#else
572
573void putback_lru_page(struct page *page)
574{
575 int lru;
576 VM_BUG_ON(PageLRU(page));
577
578 lru = !!TestClearPageActive(page) + page_is_file_cache(page);
579 lru_cache_add_lru(page, lru);
580 put_page(page);
581}
582#endif
583
/*
 * shrink_page_list() returns the number of reclaimed pages
 */
588static unsigned long shrink_page_list(struct list_head *page_list,
589 struct scan_control *sc,
590 enum pageout_io sync_writeback)
591{
592 LIST_HEAD(ret_pages);
593 struct pagevec freed_pvec;
594 int pgactivate = 0;
595 unsigned long nr_reclaimed = 0;
596
597 cond_resched();
598
599 pagevec_init(&freed_pvec, 1);
600 while (!list_empty(page_list)) {
601 struct address_space *mapping;
602 struct page *page;
603 int may_enter_fs;
604 int referenced;
605
606 cond_resched();
607
608 page = lru_to_page(page_list);
609 list_del(&page->lru);
610
611 if (!trylock_page(page))
612 goto keep;
613
614 VM_BUG_ON(PageActive(page));
615
616 sc->nr_scanned++;
617
618 if (unlikely(!page_evictable(page, NULL)))
619 goto cull_mlocked;
620
621 if (!sc->may_unmap && page_mapped(page))
622 goto keep_locked;
623
624
625 if (page_mapped(page) || PageSwapCache(page))
626 sc->nr_scanned++;
627
628 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
629 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
630
631 if (PageWriteback(page)) {
			/*
			 * Synchronous reclaim is performed in two passes,
			 * first an asynchronous pass over the list to
			 * start parallel writeback, and a second synchronous
			 * pass to wait for the IO to complete.  Wait here
			 * for any page for which writeback has already
			 * started.
			 */
640 if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
641 wait_on_page_writeback(page);
642 else
643 goto keep_locked;
644 }
645
646 referenced = page_referenced(page, 1, sc->mem_cgroup);
647
648 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
649 referenced && page_mapping_inuse(page))
650 goto activate_locked;
651
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
656 if (PageAnon(page) && !PageSwapCache(page)) {
657 if (!(sc->gfp_mask & __GFP_IO))
658 goto keep_locked;
659 if (!add_to_swap(page))
660 goto activate_locked;
661 may_enter_fs = 1;
662 }
663
664 mapping = page_mapping(page);
665
		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
670 if (page_mapped(page) && mapping) {
671 switch (try_to_unmap(page, 0)) {
672 case SWAP_FAIL:
673 goto activate_locked;
674 case SWAP_AGAIN:
675 goto keep_locked;
676 case SWAP_MLOCK:
677 goto cull_mlocked;
678 case SWAP_SUCCESS:
679 ;
680 }
681 }
682
683 if (PageDirty(page)) {
684 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
685 goto keep_locked;
686 if (!may_enter_fs)
687 goto keep_locked;
688 if (!sc->may_writepage)
689 goto keep_locked;
690
691
692 switch (pageout(page, mapping, sync_writeback)) {
693 case PAGE_KEEP:
694 goto keep_locked;
695 case PAGE_ACTIVATE:
696 goto activate_locked;
697 case PAGE_SUCCESS:
698 if (PageWriteback(page) || PageDirty(page))
699 goto keep;
			/*
			 * A synchronous write - probably a ramdisk.  Go
			 * ahead and try to reclaim the page.
			 */
704 if (!trylock_page(page))
705 goto keep;
706 if (PageDirty(page) || PageWriteback(page))
707 goto keep_locked;
708 mapping = page_mapping(page);
709 case PAGE_CLEAN:
710 ;
711 }
712 }
713
		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is
		 * actually clean (all its buffers are clean).  This happens
		 * if the buffers were written out directly, with submit_bh().
		 * ext3 will do this, as will the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers
		 * here and if that worked, and the page is no longer mapped
		 * into process address space (page_count == 1) it can be
		 * freed.  Otherwise, leave the page on the LRU so it is
		 * swappable.
		 */
735 if (page_has_private(page)) {
736 if (!try_to_release_page(page, sc->gfp_mask))
737 goto activate_locked;
738 if (!mapping && page_count(page) == 1) {
739 unlock_page(page);
740 if (put_page_testzero(page))
741 goto free_it;
742 else {
743
744
745
746
747
748
749
750 nr_reclaimed++;
751 continue;
752 }
753 }
754 }
755
756 if (!mapping || !__remove_mapping(mapping, page))
757 goto keep_locked;
		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
766 __clear_page_locked(page);
767free_it:
768 nr_reclaimed++;
769 if (!pagevec_add(&freed_pvec, page)) {
770 __pagevec_free(&freed_pvec);
771 pagevec_reinit(&freed_pvec);
772 }
773 continue;
774
775cull_mlocked:
776 if (PageSwapCache(page))
777 try_to_free_swap(page);
778 unlock_page(page);
779 putback_lru_page(page);
780 continue;
781
782activate_locked:
783
784 if (PageSwapCache(page) && vm_swap_full())
785 try_to_free_swap(page);
786 VM_BUG_ON(PageActive(page));
787 SetPageActive(page);
788 pgactivate++;
789keep_locked:
790 unlock_page(page);
791keep:
792 list_add(&page->lru, &ret_pages);
793 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
794 }
795 list_splice(&ret_pages, page_list);
796 if (pagevec_count(&freed_pvec))
797 __pagevec_free(&freed_pvec);
798 count_vm_events(PGACTIVATE, pgactivate);
799 return nr_reclaimed;
800}
801
/* LRU Isolation modes. */
#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
817int __isolate_lru_page(struct page *page, int mode, int file)
818{
819 int ret = -EINVAL;
820
821
822 if (!PageLRU(page))
823 return ret;
824
825
826
827
828
829
830 if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
831 return ret;
832
833 if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
834 return ret;
835
836
837
838
839
840
841 if (PageUnevictable(page))
842 return ret;
843
844 ret = -EBUSY;
845
846 if (likely(get_page_unless_zero(page))) {
847
848
849
850
851
852 ClearPageLRU(page);
853 ret = 0;
854 mem_cgroup_del_lru(page);
855 }
856
857 return ret;
858}
859
/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 * @order:	The caller's attempted allocation order
 * @mode:	One of the LRU isolation modes
 * @file:	True [1] if isolating file [!anon] pages
 *
 * returns how many pages were moved onto *@dst.
 */
880static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
881 struct list_head *src, struct list_head *dst,
882 unsigned long *scanned, int order, int mode, int file)
883{
884 unsigned long nr_taken = 0;
885 unsigned long scan;
886
887 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
888 struct page *page;
889 unsigned long pfn;
890 unsigned long end_pfn;
891 unsigned long page_pfn;
892 int zone_id;
893
894 page = lru_to_page(src);
895 prefetchw_prev_lru_page(page, src, flags);
896
897 VM_BUG_ON(!PageLRU(page));
898
899 switch (__isolate_lru_page(page, mode, file)) {
900 case 0:
901 list_move(&page->lru, dst);
902 nr_taken++;
903 break;
904
905 case -EBUSY:
906
907 list_move(&page->lru, src);
908 continue;
909
910 default:
911 BUG();
912 }
913
914 if (!order)
915 continue;
916
917
918
919
920
921
922
923
924
925
926 zone_id = page_zone_id(page);
927 page_pfn = page_to_pfn(page);
928 pfn = page_pfn & ~((1 << order) - 1);
929 end_pfn = pfn + (1 << order);
930 for (; pfn < end_pfn; pfn++) {
931 struct page *cursor_page;
932
933
934 if (unlikely(pfn == page_pfn))
935 continue;
936
937
938 if (unlikely(!pfn_valid_within(pfn)))
939 break;
940
941 cursor_page = pfn_to_page(pfn);
942
943
944 if (unlikely(page_zone_id(cursor_page) != zone_id))
945 continue;
946 switch (__isolate_lru_page(cursor_page, mode, file)) {
947 case 0:
948 list_move(&cursor_page->lru, dst);
949 nr_taken++;
950 scan++;
951 break;
952
953 case -EBUSY:
954
955 list_move(&cursor_page->lru, src);
956 default:
957 break;
958 }
959 }
960 }
961
962 *scanned = scan;
963 return nr_taken;
964}
965
966static unsigned long isolate_pages_global(unsigned long nr,
967 struct list_head *dst,
968 unsigned long *scanned, int order,
969 int mode, struct zone *z,
970 struct mem_cgroup *mem_cont,
971 int active, int file)
972{
973 int lru = LRU_BASE;
974 if (active)
975 lru += LRU_ACTIVE;
976 if (file)
977 lru += LRU_FILE;
978 return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
979 mode, !!file);
980}
981
/*
 * clear_active_flags() is a helper for shrink_inactive_list(), clearing
 * any active bits from the pages in the list.
 */
986static unsigned long clear_active_flags(struct list_head *page_list,
987 unsigned int *count)
988{
989 int nr_active = 0;
990 int lru;
991 struct page *page;
992
993 list_for_each_entry(page, page_list, lru) {
994 lru = page_is_file_cache(page);
995 if (PageActive(page)) {
996 lru += LRU_ACTIVE;
997 ClearPageActive(page);
998 nr_active++;
999 }
1000 count[lru]++;
1001 }
1002
1003 return nr_active;
1004}
1005
/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
1031int isolate_lru_page(struct page *page)
1032{
1033 int ret = -EBUSY;
1034
1035 if (PageLRU(page)) {
1036 struct zone *zone = page_zone(page);
1037
1038 spin_lock_irq(&zone->lru_lock);
1039 if (PageLRU(page) && get_page_unless_zero(page)) {
1040 int lru = page_lru(page);
1041 ret = 0;
1042 ClearPageLRU(page);
1043
1044 del_page_from_lru_list(zone, page, lru);
1045 }
1046 spin_unlock_irq(&zone->lru_lock);
1047 }
1048 return ret;
1049}
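/*
 * Illustrative usage (not part of this file): callers of isolate_lru_page()
 * typically hold their own reference on the page and hand it back via
 * putback_lru_page() once they are done, e.g.
 *
 *	get_page(page);
 *	if (!isolate_lru_page(page)) {
 *		... operate on the page while it is off the LRU ...
 *		putback_lru_page(page);	  // also drops the isolation ref
 *	}
 *	put_page(page);
 */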
1050
/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages.
 */
1055static unsigned long shrink_inactive_list(unsigned long max_scan,
1056 struct zone *zone, struct scan_control *sc,
1057 int priority, int file)
1058{
1059 LIST_HEAD(page_list);
1060 struct pagevec pvec;
1061 unsigned long nr_scanned = 0;
1062 unsigned long nr_reclaimed = 0;
1063 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1064
1065 pagevec_init(&pvec, 1);
1066
1067 lru_add_drain();
1068 spin_lock_irq(&zone->lru_lock);
1069 do {
1070 struct page *page;
1071 unsigned long nr_taken;
1072 unsigned long nr_scan;
1073 unsigned long nr_freed;
1074 unsigned long nr_active;
1075 unsigned int count[NR_LRU_LISTS] = { 0, };
1076 int mode = ISOLATE_INACTIVE;
		/*
		 * If we need a large contiguous chunk of memory, or have
		 * trouble getting a small set of contiguous pages, we
		 * will reclaim both active and inactive pages.
		 *
		 * We use the same threshold as pageout congestion_wait below.
		 */
1085 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1086 mode = ISOLATE_BOTH;
1087 else if (sc->order && priority < DEF_PRIORITY - 2)
1088 mode = ISOLATE_BOTH;
1089
1090 nr_taken = sc->isolate_pages(sc->swap_cluster_max,
1091 &page_list, &nr_scan, sc->order, mode,
1092 zone, sc->mem_cgroup, 0, file);
1093 nr_active = clear_active_flags(&page_list, count);
1094 __count_vm_events(PGDEACTIVATE, nr_active);
1095
1096 __mod_zone_page_state(zone, NR_ACTIVE_FILE,
1097 -count[LRU_ACTIVE_FILE]);
1098 __mod_zone_page_state(zone, NR_INACTIVE_FILE,
1099 -count[LRU_INACTIVE_FILE]);
1100 __mod_zone_page_state(zone, NR_ACTIVE_ANON,
1101 -count[LRU_ACTIVE_ANON]);
1102 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1103 -count[LRU_INACTIVE_ANON]);
1104
1105 if (scanning_global_lru(sc))
1106 zone->pages_scanned += nr_scan;
1107
1108 reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
1109 reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
1110 reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
1111 reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
1112
1113 spin_unlock_irq(&zone->lru_lock);
1114
1115 nr_scanned += nr_scan;
1116 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
		/*
		 * If we are direct reclaiming for contiguous pages and we do
		 * not reclaim everything in the list, try again and wait
		 * for IO to complete. This will stall high-order allocations
		 * but that should be acceptable to the caller.
		 */
1124 if (nr_freed < nr_taken && !current_is_kswapd() &&
1125 sc->order > PAGE_ALLOC_COSTLY_ORDER) {
1126 congestion_wait(WRITE, HZ/10);
1127
1128
1129
1130
1131
1132 nr_active = clear_active_flags(&page_list, count);
1133 count_vm_events(PGDEACTIVATE, nr_active);
1134
1135 nr_freed += shrink_page_list(&page_list, sc,
1136 PAGEOUT_IO_SYNC);
1137 }
1138
1139 nr_reclaimed += nr_freed;
1140 local_irq_disable();
1141 if (current_is_kswapd()) {
1142 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
1143 __count_vm_events(KSWAPD_STEAL, nr_freed);
1144 } else if (scanning_global_lru(sc))
1145 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
1146
1147 __count_zone_vm_events(PGSTEAL, zone, nr_freed);
1148
1149 if (nr_taken == 0)
1150 goto done;
1151
1152 spin_lock(&zone->lru_lock);
1153
1154
1155
1156 while (!list_empty(&page_list)) {
1157 int lru;
1158 page = lru_to_page(&page_list);
1159 VM_BUG_ON(PageLRU(page));
1160 list_del(&page->lru);
1161 if (unlikely(!page_evictable(page, NULL))) {
1162 spin_unlock_irq(&zone->lru_lock);
1163 putback_lru_page(page);
1164 spin_lock_irq(&zone->lru_lock);
1165 continue;
1166 }
1167 SetPageLRU(page);
1168 lru = page_lru(page);
1169 add_page_to_lru_list(zone, page, lru);
1170 if (PageActive(page)) {
1171 int file = !!page_is_file_cache(page);
1172 reclaim_stat->recent_rotated[file]++;
1173 }
1174 if (!pagevec_add(&pvec, page)) {
1175 spin_unlock_irq(&zone->lru_lock);
1176 __pagevec_release(&pvec);
1177 spin_lock_irq(&zone->lru_lock);
1178 }
1179 }
1180 } while (nr_scanned < max_scan);
1181 spin_unlock(&zone->lru_lock);
1182done:
1183 local_irq_enable();
1184 pagevec_release(&pvec);
1185 return nr_reclaimed;
1186}
1187
/*
 * We are about to scan this zone at a certain priority level.  If that
 * priority level is smaller (ie: more urgent) than the previous priority,
 * then note that priority level within the zone.  This is done so that when
 * the next process comes in to scan this zone, it will immediately start out
 * at this priority level rather than having to build up its own scanning
 * priority.
 */
1196static inline void note_zone_scanning_priority(struct zone *zone, int priority)
1197{
1198 if (priority < zone->prev_priority)
1199 zone->prev_priority = priority;
1200}
1201
/*
 * shrink_active_list() moves pages from the active list to the inactive
 * list, making them candidates for reclaim.
 *
 * Referenced pages are not moved back to the active list here; instead the
 * number of referenced pages is fed into reclaim_stat->recent_rotated so
 * that get_scan_ratio() can lower the pressure on this LRU next time.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
1221static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1222 struct scan_control *sc, int priority, int file)
1223{
1224 unsigned long pgmoved;
1225 int pgdeactivate = 0;
1226 unsigned long pgscanned;
1227 LIST_HEAD(l_hold);
1228 LIST_HEAD(l_inactive);
1229 struct page *page;
1230 struct pagevec pvec;
1231 enum lru_list lru;
1232 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1233
1234 lru_add_drain();
1235 spin_lock_irq(&zone->lru_lock);
1236 pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
1237 ISOLATE_ACTIVE, zone,
1238 sc->mem_cgroup, 1, file);
1239
1240
1241
1242
1243 if (scanning_global_lru(sc)) {
1244 zone->pages_scanned += pgscanned;
1245 }
1246 reclaim_stat->recent_scanned[!!file] += pgmoved;
1247
1248 if (file)
1249 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
1250 else
1251 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
1252 spin_unlock_irq(&zone->lru_lock);
1253
1254 pgmoved = 0;
1255 while (!list_empty(&l_hold)) {
1256 cond_resched();
1257 page = lru_to_page(&l_hold);
1258 list_del(&page->lru);
1259
1260 if (unlikely(!page_evictable(page, NULL))) {
1261 putback_lru_page(page);
1262 continue;
1263 }
1264
1265
1266 if (page_mapping_inuse(page) &&
1267 page_referenced(page, 0, sc->mem_cgroup))
1268 pgmoved++;
1269
1270 list_add(&page->lru, &l_inactive);
1271 }
1272
1273
1274
1275
1276 pagevec_init(&pvec, 1);
1277 lru = LRU_BASE + file * LRU_FILE;
1278
1279 spin_lock_irq(&zone->lru_lock);
1280
1281
1282
1283
1284
1285
1286 reclaim_stat->recent_rotated[!!file] += pgmoved;
1287
1288 pgmoved = 0;
1289 while (!list_empty(&l_inactive)) {
1290 page = lru_to_page(&l_inactive);
1291 prefetchw_prev_lru_page(page, &l_inactive, flags);
1292 VM_BUG_ON(PageLRU(page));
1293 SetPageLRU(page);
1294 VM_BUG_ON(!PageActive(page));
1295 ClearPageActive(page);
1296
1297 list_move(&page->lru, &zone->lru[lru].list);
1298 mem_cgroup_add_lru_list(page, lru);
1299 pgmoved++;
1300 if (!pagevec_add(&pvec, page)) {
1301 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1302 spin_unlock_irq(&zone->lru_lock);
1303 pgdeactivate += pgmoved;
1304 pgmoved = 0;
1305 if (buffer_heads_over_limit)
1306 pagevec_strip(&pvec);
1307 __pagevec_release(&pvec);
1308 spin_lock_irq(&zone->lru_lock);
1309 }
1310 }
1311 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1312 pgdeactivate += pgmoved;
1313 __count_zone_vm_events(PGREFILL, zone, pgscanned);
1314 __count_vm_events(PGDEACTIVATE, pgdeactivate);
1315 spin_unlock_irq(&zone->lru_lock);
1316 if (buffer_heads_over_limit)
1317 pagevec_strip(&pvec);
1318 pagevec_release(&pvec);
1319}
1320
1321static int inactive_anon_is_low_global(struct zone *zone)
1322{
1323 unsigned long active, inactive;
1324
1325 active = zone_page_state(zone, NR_ACTIVE_ANON);
1326 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1327
1328 if (inactive * zone->inactive_ratio < active)
1329 return 1;
1330
1331 return 0;
1332}
1333
/**
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @zone: zone to check
 * @sc:   scan control of this context
 *
 * Returns true if the zone does not have enough inactive anon pages,
 * meaning some active anon pages need to be deactivated.
 */
1342static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1343{
1344 int low;
1345
1346 if (scanning_global_lru(sc))
1347 low = inactive_anon_is_low_global(zone);
1348 else
1349 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1350 return low;
1351}
1352
1353static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1354 struct zone *zone, struct scan_control *sc, int priority)
1355{
1356 int file = is_file_lru(lru);
1357
1358 if (lru == LRU_ACTIVE_FILE) {
1359 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1360 return 0;
1361 }
1362
1363 if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
1364 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1365 return 0;
1366 }
1367 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1368}
1369
/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the pages scanned that we did rotate back
 * onto the active list instead of evict.
 *
 * percent[0] specifies how much pressure to put on ram/swap backed
 * memory, while percent[1] determines pressure on the file LRUs.
 */
1379static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1380 unsigned long *percent)
1381{
1382 unsigned long anon, file, free;
1383 unsigned long anon_prio, file_prio;
1384 unsigned long ap, fp;
1385 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1386
1387
1388 if (!sc->may_swap || (nr_swap_pages <= 0)) {
1389 percent[0] = 0;
1390 percent[1] = 100;
1391 return;
1392 }
1393
1394 anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
1395 zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
1396 file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
1397 zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
1398
1399 if (scanning_global_lru(sc)) {
1400 free = zone_page_state(zone, NR_FREE_PAGES);
1401
1402
1403 if (unlikely(file + free <= zone->pages_high)) {
1404 percent[0] = 100;
1405 percent[1] = 0;
1406 return;
1407 }
1408 }
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1422 spin_lock_irq(&zone->lru_lock);
1423 reclaim_stat->recent_scanned[0] /= 2;
1424 reclaim_stat->recent_rotated[0] /= 2;
1425 spin_unlock_irq(&zone->lru_lock);
1426 }
1427
1428 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1429 spin_lock_irq(&zone->lru_lock);
1430 reclaim_stat->recent_scanned[1] /= 2;
1431 reclaim_stat->recent_rotated[1] /= 2;
1432 spin_unlock_irq(&zone->lru_lock);
1433 }
1434
1435
1436
1437
1438
1439 anon_prio = sc->swappiness;
1440 file_prio = 200 - sc->swappiness;
1441
1442
1443
1444
1445
1446
1447 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1448 ap /= reclaim_stat->recent_rotated[0] + 1;
1449
1450 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1451 fp /= reclaim_stat->recent_rotated[1] + 1;
1452
1453
1454 percent[0] = 100 * ap / (ap + fp + 1);
1455 percent[1] = 100 - percent[0];
1456}
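/*
 * Worked example for get_scan_ratio() (illustrative numbers only, not taken
 * from a real workload): with the default swappiness of 60, anon_prio = 60
 * and file_prio = 140.  Assume recent_scanned[0] = recent_scanned[1] = 1000,
 * recent_rotated[0] = 999 (nearly every scanned anon page was re-activated)
 * and recent_rotated[1] = 100.  With integer arithmetic:
 *
 *	ap = 61 * 1001 / 1000		=   61
 *	fp = 141 * 1001 / 101		= 1397
 *	percent[0] = 100 * 61 / 1459	=    4
 *	percent[1] = 100 - 4		=   96
 *
 * i.e. nearly all pressure goes to the file LRUs, because rotating anon
 * pages back onto the active list showed they are still in use.
 */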
1457
/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
1462static void shrink_zone(int priority, struct zone *zone,
1463 struct scan_control *sc)
1464{
1465 unsigned long nr[NR_LRU_LISTS];
1466 unsigned long nr_to_scan;
1467 unsigned long percent[2];
1468 enum lru_list l;
1469 unsigned long nr_reclaimed = sc->nr_reclaimed;
1470 unsigned long swap_cluster_max = sc->swap_cluster_max;
1471
1472 get_scan_ratio(zone, sc, percent);
1473
1474 for_each_evictable_lru(l) {
1475 int file = is_file_lru(l);
1476 unsigned long scan;
1477
1478 scan = zone_nr_pages(zone, sc, l);
1479 if (priority) {
1480 scan >>= priority;
1481 scan = (scan * percent[file]) / 100;
1482 }
1483 if (scanning_global_lru(sc)) {
1484 zone->lru[l].nr_scan += scan;
1485 nr[l] = zone->lru[l].nr_scan;
1486 if (nr[l] >= swap_cluster_max)
1487 zone->lru[l].nr_scan = 0;
1488 else
1489 nr[l] = 0;
1490 } else
1491 nr[l] = scan;
1492 }
1493
1494 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1495 nr[LRU_INACTIVE_FILE]) {
1496 for_each_evictable_lru(l) {
1497 if (nr[l]) {
1498 nr_to_scan = min(nr[l], swap_cluster_max);
1499 nr[l] -= nr_to_scan;
1500
1501 nr_reclaimed += shrink_list(l, nr_to_scan,
1502 zone, sc, priority);
1503 }
1504 }
1505
1506
1507
1508
1509
1510
1511
1512
1513 if (nr_reclaimed > swap_cluster_max &&
1514 priority < DEF_PRIORITY && !current_is_kswapd())
1515 break;
1516 }
1517
1518 sc->nr_reclaimed = nr_reclaimed;
1519
1520
1521
1522
1523
1524 if (inactive_anon_is_low(zone, sc))
1525 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1526
1527 throttle_vm_writeout(sc->gfp_mask);
1528}
1529
/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * If a zone is deemed to be full of pinned pages then it is skipped, except
 * during the initial (DEF_PRIORITY) pass.
 */
1544static void shrink_zones(int priority, struct zonelist *zonelist,
1545 struct scan_control *sc)
1546{
1547 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1548 struct zoneref *z;
1549 struct zone *zone;
1550
1551 sc->all_unreclaimable = 1;
1552 for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
1553 sc->nodemask) {
1554 if (!populated_zone(zone))
1555 continue;
1556
1557
1558
1559
1560 if (scanning_global_lru(sc)) {
1561 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1562 continue;
1563 note_zone_scanning_priority(zone, priority);
1564
1565 if (zone_is_all_unreclaimable(zone) &&
1566 priority != DEF_PRIORITY)
1567 continue;
1568 sc->all_unreclaimable = 0;
1569 } else {
1570
1571
1572
1573
1574 sc->all_unreclaimable = 0;
1575 mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
1576 priority);
1577 }
1578
1579 shrink_zone(priority, zone, sc);
1580 }
1581}
1582
/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 *
 * returns:	0, if no pages reclaimed
 *		else, the number of pages reclaimed
 */
1599static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1600 struct scan_control *sc)
1601{
1602 int priority;
1603 unsigned long ret = 0;
1604 unsigned long total_scanned = 0;
1605 struct reclaim_state *reclaim_state = current->reclaim_state;
1606 unsigned long lru_pages = 0;
1607 struct zoneref *z;
1608 struct zone *zone;
1609 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1610
1611 delayacct_freepages_start();
1612
1613 if (scanning_global_lru(sc))
1614 count_vm_event(ALLOCSTALL);
1615
1616
1617
1618 if (scanning_global_lru(sc)) {
1619 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1620
1621 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1622 continue;
1623
1624 lru_pages += zone_lru_pages(zone);
1625 }
1626 }
1627
1628 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1629 sc->nr_scanned = 0;
1630 if (!priority)
1631 disable_swap_token();
1632 shrink_zones(priority, zonelist, sc);
1633
1634
1635
1636
1637 if (scanning_global_lru(sc)) {
1638 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1639 if (reclaim_state) {
1640 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
1641 reclaim_state->reclaimed_slab = 0;
1642 }
1643 }
1644 total_scanned += sc->nr_scanned;
1645 if (sc->nr_reclaimed >= sc->swap_cluster_max) {
1646 ret = sc->nr_reclaimed;
1647 goto out;
1648 }
1649
1650
1651
1652
1653
1654
1655
1656
1657 if (total_scanned > sc->swap_cluster_max +
1658 sc->swap_cluster_max / 2) {
1659 wakeup_pdflush(laptop_mode ? 0 : total_scanned);
1660 sc->may_writepage = 1;
1661 }
1662
1663
1664 if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
1665 congestion_wait(WRITE, HZ/10);
1666 }
1667
1668 if (!sc->all_unreclaimable && scanning_global_lru(sc))
1669 ret = sc->nr_reclaimed;
1670out:
1671
1672
1673
1674
1675
1676
1677
1678 if (priority < 0)
1679 priority = 0;
1680
1681 if (scanning_global_lru(sc)) {
1682 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1683
1684 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1685 continue;
1686
1687 zone->prev_priority = priority;
1688 }
1689 } else
1690 mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
1691
1692 delayacct_freepages_end();
1693
1694 return ret;
1695}
1696
1697unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1698 gfp_t gfp_mask, nodemask_t *nodemask)
1699{
1700 struct scan_control sc = {
1701 .gfp_mask = gfp_mask,
1702 .may_writepage = !laptop_mode,
1703 .swap_cluster_max = SWAP_CLUSTER_MAX,
1704 .may_unmap = 1,
1705 .may_swap = 1,
1706 .swappiness = vm_swappiness,
1707 .order = order,
1708 .mem_cgroup = NULL,
1709 .isolate_pages = isolate_pages_global,
1710 .nodemask = nodemask,
1711 };
1712
1713 return do_try_to_free_pages(zonelist, &sc);
1714}
1715
1716#ifdef CONFIG_CGROUP_MEM_RES_CTLR
1717
1718unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1719 gfp_t gfp_mask,
1720 bool noswap,
1721 unsigned int swappiness)
1722{
1723 struct scan_control sc = {
1724 .may_writepage = !laptop_mode,
1725 .may_unmap = 1,
1726 .may_swap = !noswap,
1727 .swap_cluster_max = SWAP_CLUSTER_MAX,
1728 .swappiness = swappiness,
1729 .order = 0,
1730 .mem_cgroup = mem_cont,
1731 .isolate_pages = mem_cgroup_isolate_pages,
1732 .nodemask = NULL,
1733 };
1734 struct zonelist *zonelist;
1735
1736 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1737 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1738 zonelist = NODE_DATA(numa_node_id())->node_zonelists;
1739 return do_try_to_free_pages(zonelist, &sc);
1740}
1741#endif
1742
/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone
 * as dead and from now on, only perform a short scan.  Basically we're
 * polling the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is
 * balanced across the zones.
 */
1764static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1765{
1766 int all_zones_ok;
1767 int priority;
1768 int i;
1769 unsigned long total_scanned;
1770 struct reclaim_state *reclaim_state = current->reclaim_state;
1771 struct scan_control sc = {
1772 .gfp_mask = GFP_KERNEL,
1773 .may_unmap = 1,
1774 .may_swap = 1,
1775 .swap_cluster_max = SWAP_CLUSTER_MAX,
1776 .swappiness = vm_swappiness,
1777 .order = order,
1778 .mem_cgroup = NULL,
1779 .isolate_pages = isolate_pages_global,
1780 };
1781
1782
1783
1784
1785 int temp_priority[MAX_NR_ZONES];
1786
1787loop_again:
1788 total_scanned = 0;
1789 sc.nr_reclaimed = 0;
1790 sc.may_writepage = !laptop_mode;
1791 count_vm_event(PAGEOUTRUN);
1792
1793 for (i = 0; i < pgdat->nr_zones; i++)
1794 temp_priority[i] = DEF_PRIORITY;
1795
1796 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1797 int end_zone = 0;
1798 unsigned long lru_pages = 0;
1799
1800
1801 if (!priority)
1802 disable_swap_token();
1803
1804 all_zones_ok = 1;
1805
1806
1807
1808
1809
1810 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1811 struct zone *zone = pgdat->node_zones + i;
1812
1813 if (!populated_zone(zone))
1814 continue;
1815
1816 if (zone_is_all_unreclaimable(zone) &&
1817 priority != DEF_PRIORITY)
1818 continue;
1819
1820
1821
1822
1823
1824 if (inactive_anon_is_low(zone, &sc))
1825 shrink_active_list(SWAP_CLUSTER_MAX, zone,
1826 &sc, priority, 0);
1827
1828 if (!zone_watermark_ok(zone, order, zone->pages_high,
1829 0, 0)) {
1830 end_zone = i;
1831 break;
1832 }
1833 }
1834 if (i < 0)
1835 goto out;
1836
1837 for (i = 0; i <= end_zone; i++) {
1838 struct zone *zone = pgdat->node_zones + i;
1839
1840 lru_pages += zone_lru_pages(zone);
1841 }
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852 for (i = 0; i <= end_zone; i++) {
1853 struct zone *zone = pgdat->node_zones + i;
1854 int nr_slab;
1855
1856 if (!populated_zone(zone))
1857 continue;
1858
1859 if (zone_is_all_unreclaimable(zone) &&
1860 priority != DEF_PRIORITY)
1861 continue;
1862
1863 if (!zone_watermark_ok(zone, order, zone->pages_high,
1864 end_zone, 0))
1865 all_zones_ok = 0;
1866 temp_priority[i] = priority;
1867 sc.nr_scanned = 0;
1868 note_zone_scanning_priority(zone, priority);
1869
1870
1871
1872
1873 if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
1874 end_zone, 0))
1875 shrink_zone(priority, zone, &sc);
1876 reclaim_state->reclaimed_slab = 0;
1877 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1878 lru_pages);
1879 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
1880 total_scanned += sc.nr_scanned;
1881 if (zone_is_all_unreclaimable(zone))
1882 continue;
1883 if (nr_slab == 0 && zone->pages_scanned >=
1884 (zone_lru_pages(zone) * 6))
1885 zone_set_flag(zone,
1886 ZONE_ALL_UNRECLAIMABLE);
1887
1888
1889
1890
1891
1892 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
1893 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
1894 sc.may_writepage = 1;
1895 }
1896 if (all_zones_ok)
1897 break;
1898
1899
1900
1901
1902 if (total_scanned && priority < DEF_PRIORITY - 2)
1903 congestion_wait(WRITE, HZ/10);
1904
1905
1906
1907
1908
1909
1910
1911 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
1912 break;
1913 }
1914out:
1915
1916
1917
1918
1919
1920 for (i = 0; i < pgdat->nr_zones; i++) {
1921 struct zone *zone = pgdat->node_zones + i;
1922
1923 zone->prev_priority = temp_priority[i];
1924 }
1925 if (!all_zones_ok) {
1926 cond_resched();
1927
1928 try_to_freeze();
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
1945 order = sc.order = 0;
1946
1947 goto loop_again;
1948 }
1949
1950 return sc.nr_reclaimed;
1951}
1952
/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
1966static int kswapd(void *p)
1967{
1968 unsigned long order;
1969 pg_data_t *pgdat = (pg_data_t*)p;
1970 struct task_struct *tsk = current;
1971 DEFINE_WAIT(wait);
1972 struct reclaim_state reclaim_state = {
1973 .reclaimed_slab = 0,
1974 };
1975 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1976
1977 lockdep_set_current_reclaim_state(GFP_KERNEL);
1978
1979 if (!cpumask_empty(cpumask))
1980 set_cpus_allowed_ptr(tsk, cpumask);
1981 current->reclaim_state = &reclaim_state;
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1996 set_freezable();
1997
1998 order = 0;
1999 for ( ; ; ) {
2000 unsigned long new_order;
2001
2002 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2003 new_order = pgdat->kswapd_max_order;
2004 pgdat->kswapd_max_order = 0;
2005 if (order < new_order) {
2006
2007
2008
2009
2010 order = new_order;
2011 } else {
2012 if (!freezing(current))
2013 schedule();
2014
2015 order = pgdat->kswapd_max_order;
2016 }
2017 finish_wait(&pgdat->kswapd_wait, &wait);
2018
2019 if (!try_to_freeze()) {
2020
2021
2022
2023 balance_pgdat(pgdat, order);
2024 }
2025 }
2026 return 0;
2027}
2028
/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
2032void wakeup_kswapd(struct zone *zone, int order)
2033{
2034 pg_data_t *pgdat;
2035
2036 if (!populated_zone(zone))
2037 return;
2038
2039 pgdat = zone->zone_pgdat;
2040 if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
2041 return;
2042 if (pgdat->kswapd_max_order < order)
2043 pgdat->kswapd_max_order = order;
2044 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2045 return;
2046 if (!waitqueue_active(&pgdat->kswapd_wait))
2047 return;
2048 wake_up_interruptible(&pgdat->kswapd_wait);
2049}
2050
2051unsigned long global_lru_pages(void)
2052{
2053 return global_page_state(NR_ACTIVE_ANON)
2054 + global_page_state(NR_ACTIVE_FILE)
2055 + global_page_state(NR_INACTIVE_ANON)
2056 + global_page_state(NR_INACTIVE_FILE);
2057}
2058
2059#ifdef CONFIG_PM
2060
2061
2062
2063
2064
2065
2066static void shrink_all_zones(unsigned long nr_pages, int prio,
2067 int pass, struct scan_control *sc)
2068{
2069 struct zone *zone;
2070 unsigned long nr_reclaimed = 0;
2071
2072 for_each_populated_zone(zone) {
2073 enum lru_list l;
2074
2075 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
2076 continue;
2077
2078 for_each_evictable_lru(l) {
2079 enum zone_stat_item ls = NR_LRU_BASE + l;
2080 unsigned long lru_pages = zone_page_state(zone, ls);
2081
2082
2083 if (pass == 0 && (l == LRU_ACTIVE_ANON ||
2084 l == LRU_ACTIVE_FILE))
2085 continue;
2086
2087 zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
2088 if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
2089 unsigned long nr_to_scan;
2090
2091 zone->lru[l].nr_scan = 0;
2092 nr_to_scan = min(nr_pages, lru_pages);
2093 nr_reclaimed += shrink_list(l, nr_to_scan, zone,
2094 sc, prio);
2095 if (nr_reclaimed >= nr_pages) {
2096 sc->nr_reclaimed += nr_reclaimed;
2097 return;
2098 }
2099 }
2100 }
2101 }
2102 sc->nr_reclaimed += nr_reclaimed;
2103}
2104
/*
 * Try to free `nr_pages' of memory, system-wide, and return the number of
 * freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
2113unsigned long shrink_all_memory(unsigned long nr_pages)
2114{
2115 unsigned long lru_pages, nr_slab;
2116 int pass;
2117 struct reclaim_state reclaim_state;
2118 struct scan_control sc = {
2119 .gfp_mask = GFP_KERNEL,
2120 .may_unmap = 0,
2121 .may_writepage = 1,
2122 .isolate_pages = isolate_pages_global,
2123 .nr_reclaimed = 0,
2124 };
2125
2126 current->reclaim_state = &reclaim_state;
2127
2128 lru_pages = global_lru_pages();
2129 nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
2130
2131 while (nr_slab >= lru_pages) {
2132 reclaim_state.reclaimed_slab = 0;
2133 shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
2134 if (!reclaim_state.reclaimed_slab)
2135 break;
2136
2137 sc.nr_reclaimed += reclaim_state.reclaimed_slab;
2138 if (sc.nr_reclaimed >= nr_pages)
2139 goto out;
2140
2141 nr_slab -= reclaim_state.reclaimed_slab;
2142 }
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152 for (pass = 0; pass < 5; pass++) {
2153 int prio;
2154
2155
2156 if (pass > 2)
2157 sc.may_unmap = 1;
2158
2159 for (prio = DEF_PRIORITY; prio >= 0; prio--) {
2160 unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
2161
2162 sc.nr_scanned = 0;
2163 sc.swap_cluster_max = nr_to_scan;
2164 shrink_all_zones(nr_to_scan, prio, pass, &sc);
2165 if (sc.nr_reclaimed >= nr_pages)
2166 goto out;
2167
2168 reclaim_state.reclaimed_slab = 0;
2169 shrink_slab(sc.nr_scanned, sc.gfp_mask,
2170 global_lru_pages());
2171 sc.nr_reclaimed += reclaim_state.reclaimed_slab;
2172 if (sc.nr_reclaimed >= nr_pages)
2173 goto out;
2174
2175 if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
2176 congestion_wait(WRITE, HZ / 10);
2177 }
2178 }
2179
2180
2181
2182
2183
2184 if (!sc.nr_reclaimed) {
2185 do {
2186 reclaim_state.reclaimed_slab = 0;
2187 shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
2188 sc.nr_reclaimed += reclaim_state.reclaimed_slab;
2189 } while (sc.nr_reclaimed < nr_pages &&
2190 reclaim_state.reclaimed_slab > 0);
2191 }
2192
2193
2194out:
2195 current->reclaim_state = NULL;
2196
2197 return sc.nr_reclaimed;
2198}
2199#endif
2200
/*
 * It's optimal to keep kswapds on the same CPUs as their memory, but
 * not required for correctness.  So if the last cpu in a node goes
 * away, we get changed to run anywhere: as the first one comes back,
 * restore their cpu bindings.
 */
2205static int __devinit cpu_callback(struct notifier_block *nfb,
2206 unsigned long action, void *hcpu)
2207{
2208 int nid;
2209
2210 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2211 for_each_node_state(nid, N_HIGH_MEMORY) {
2212 pg_data_t *pgdat = NODE_DATA(nid);
2213 const struct cpumask *mask;
2214
2215 mask = cpumask_of_node(pgdat->node_id);
2216
2217 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2218
2219 set_cpus_allowed_ptr(pgdat->kswapd, mask);
2220 }
2221 }
2222 return NOTIFY_OK;
2223}
2224
/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
2229int kswapd_run(int nid)
2230{
2231 pg_data_t *pgdat = NODE_DATA(nid);
2232 int ret = 0;
2233
2234 if (pgdat->kswapd)
2235 return 0;
2236
2237 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2238 if (IS_ERR(pgdat->kswapd)) {
2239
2240 BUG_ON(system_state == SYSTEM_BOOTING);
2241 printk("Failed to start kswapd on node %d\n",nid);
2242 ret = -1;
2243 }
2244 return ret;
2245}
2246
2247static int __init kswapd_init(void)
2248{
2249 int nid;
2250
2251 swap_setup();
2252 for_each_node_state(nid, N_HIGH_MEMORY)
2253 kswapd_run(nid);
2254 hotcpu_notifier(cpu_callback, 0);
2255 return 0;
2256}
2257
2258module_init(kswapd_init)
2259
2260#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;
2268
#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
2273
/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node that are scanned in one zone_reclaim pass; a priority of 4
 * means roughly 1/16th of the zone's LRU pages are considered.
 */
#define ZONE_RECLAIM_PRIORITY 4
2280
/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;
2286
/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;
2292
2293static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
2294{
2295 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
2296 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
2297 zone_page_state(zone, NR_ACTIVE_FILE);
2298
2299
2300
2301
2302
2303
2304 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
2305}
2306
2307
2308static long zone_pagecache_reclaimable(struct zone *zone)
2309{
2310 long nr_pagecache_reclaimable;
2311 long delta = 0;
2312
	/*
	 * If RECLAIM_SWAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and zone_unmapped_file_pages() provides
	 * a better estimate.
	 */
2319 if (zone_reclaim_mode & RECLAIM_SWAP)
2320 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
2321 else
2322 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
2323
2324
2325 if (!(zone_reclaim_mode & RECLAIM_WRITE))
2326 delta += zone_page_state(zone, NR_FILE_DIRTY);
2327
2328
2329 if (unlikely(delta > nr_pagecache_reclaimable))
2330 delta = nr_pagecache_reclaimable;
2331
2332 return nr_pagecache_reclaimable - delta;
2333}
2334
/*
 * Try to free up some pages from this zone through reclaim.
 */
2338static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2339{
2340
2341 const unsigned long nr_pages = 1 << order;
2342 struct task_struct *p = current;
2343 struct reclaim_state reclaim_state;
2344 int priority;
2345 struct scan_control sc = {
2346 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2347 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2348 .may_swap = 1,
2349 .swap_cluster_max = max_t(unsigned long, nr_pages,
2350 SWAP_CLUSTER_MAX),
2351 .gfp_mask = gfp_mask,
2352 .swappiness = vm_swappiness,
2353 .order = order,
2354 .isolate_pages = isolate_pages_global,
2355 };
2356 unsigned long slab_reclaimable;
2357
2358 disable_swap_token();
2359 cond_resched();
2360
2361
2362
2363
2364
2365 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
2366 reclaim_state.reclaimed_slab = 0;
2367 p->reclaim_state = &reclaim_state;
2368
2369 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
2370
2371
2372
2373
2374 priority = ZONE_RECLAIM_PRIORITY;
2375 do {
2376 note_zone_scanning_priority(zone, priority);
2377 shrink_zone(priority, zone, &sc);
2378 priority--;
2379 } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
2380 }
2381
2382 slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2383 if (slab_reclaimable > zone->min_slab_pages) {
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394 while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
2395 zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
2396 slab_reclaimable - nr_pages)
2397 ;
2398
2399
2400
2401
2402
2403 sc.nr_reclaimed += slab_reclaimable -
2404 zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2405 }
2406
2407 p->reclaim_state = NULL;
2408 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
2409 return sc.nr_reclaimed >= nr_pages;
2410}
2411
2412int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2413{
2414 int node_id;
2415 int ret;
2416
	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated. So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
2427 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
2428 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
2429 return 0;
2430
2431 if (zone_is_all_unreclaimable(zone))
2432 return 0;
2433
2434
2435
2436
2437 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2438 return 0;
2439
2440
2441
2442
2443
2444
2445
2446 node_id = zone_to_nid(zone);
2447 if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2448 return 0;
2449
2450 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2451 return 0;
2452 ret = __zone_reclaim(zone, gfp_mask, order);
2453 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2454
2455 if (!ret)
2456 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
2457
2458 return ret;
2459}
2460#endif
2461
2462#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 * @vma: the VMA in which the page is or will be mapped, may be NULL
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.  When vma argument is !NULL, page_evictable()
 * will check the vma's VM_LOCKED status, not the page's.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
2477int page_evictable(struct page *page, struct vm_area_struct *vma)
2478{
2479
2480 if (mapping_unevictable(page_mapping(page)))
2481 return 0;
2482
2483 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
2484 return 0;
2485
2486 return 1;
2487}
2488
/**
 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
 * @page: page to check evictability and move to appropriate lru list
 * @zone: zone page is in
 *
 * Checks page for evictability and moves the page to the appropriate
 * zone lru list.
 *
 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
 * have PageUnevictable set.
 */
2500static void check_move_unevictable_page(struct page *page, struct zone *zone)
2501{
2502 VM_BUG_ON(PageActive(page));
2503
2504retry:
2505 ClearPageUnevictable(page);
2506 if (page_evictable(page, NULL)) {
2507 enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
2508
2509 __dec_zone_state(zone, NR_UNEVICTABLE);
2510 list_move(&page->lru, &zone->lru[l].list);
2511 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
2512 __inc_zone_state(zone, NR_INACTIVE_ANON + l);
2513 __count_vm_event(UNEVICTABLE_PGRESCUED);
2514 } else {
2515
2516
2517
2518 SetPageUnevictable(page);
2519 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
2520 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
2521 if (page_evictable(page, NULL))
2522 goto retry;
2523 }
2524}
2525
/**
 * scan_mapping_unevictable_pages - scan an address space for evictable/unevictable pages
 * @mapping: struct address_space to scan for evictable pages
 *
 * Scan all pages in mapping.  Check unevictable pages for
 * evictability and move them to the appropriate zone lru list.
 */
2533void scan_mapping_unevictable_pages(struct address_space *mapping)
2534{
2535 pgoff_t next = 0;
2536 pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
2537 PAGE_CACHE_SHIFT;
2538 struct zone *zone;
2539 struct pagevec pvec;
2540
2541 if (mapping->nrpages == 0)
2542 return;
2543
2544 pagevec_init(&pvec, 0);
2545 while (next < end &&
2546 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
2547 int i;
2548 int pg_scanned = 0;
2549
2550 zone = NULL;
2551
2552 for (i = 0; i < pagevec_count(&pvec); i++) {
2553 struct page *page = pvec.pages[i];
2554 pgoff_t page_index = page->index;
2555 struct zone *pagezone = page_zone(page);
2556
2557 pg_scanned++;
2558 if (page_index > next)
2559 next = page_index;
2560 next++;
2561
2562 if (pagezone != zone) {
2563 if (zone)
2564 spin_unlock_irq(&zone->lru_lock);
2565 zone = pagezone;
2566 spin_lock_irq(&zone->lru_lock);
2567 }
2568
2569 if (PageLRU(page) && PageUnevictable(page))
2570 check_move_unevictable_page(page, zone);
2571 }
2572 if (zone)
2573 spin_unlock_irq(&zone->lru_lock);
2574 pagevec_release(&pvec);
2575
2576 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
2577 }
2578
2579}
2580
/**
 * scan_zone_unevictable_pages - check unevictable list for evictable pages
 * @zone - zone of which to scan the unevictable list
 *
 * Scan @zone's unevictable LRU lists to check for pages that have become
 * evictable.  Move those that have to @zone's inactive list where they
 * become candidates for reclaim, unless shrink_inactive_list() decides
 * to reactivate them.  Pages that are still unevictable are rotated
 * back onto @zone's unevictable list.
 */
#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
2592static void scan_zone_unevictable_pages(struct zone *zone)
2593{
2594 struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
2595 unsigned long scan;
2596 unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
2597
2598 while (nr_to_scan > 0) {
2599 unsigned long batch_size = min(nr_to_scan,
2600 SCAN_UNEVICTABLE_BATCH_SIZE);
2601
2602 spin_lock_irq(&zone->lru_lock);
2603 for (scan = 0; scan < batch_size; scan++) {
2604 struct page *page = lru_to_page(l_unevictable);
2605
2606 if (!trylock_page(page))
2607 continue;
2608
2609 prefetchw_prev_lru_page(page, l_unevictable, flags);
2610
2611 if (likely(PageLRU(page) && PageUnevictable(page)))
2612 check_move_unevictable_page(page, zone);
2613
2614 unlock_page(page);
2615 }
2616 spin_unlock_irq(&zone->lru_lock);
2617
2618 nr_to_scan -= batch_size;
2619 }
2620}
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634static void scan_all_zones_unevictable_pages(void)
2635{
2636 struct zone *zone;
2637
2638 for_each_zone(zone) {
2639 scan_zone_unevictable_pages(zone);
2640 }
2641}
2642
2643
2644
2645
2646
2647unsigned long scan_unevictable_pages;
2648
2649int scan_unevictable_handler(struct ctl_table *table, int write,
2650 struct file *file, void __user *buffer,
2651 size_t *length, loff_t *ppos)
2652{
2653 proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
2654
2655 if (write && *(unsigned long *)table->data)
2656 scan_all_zones_unevictable_pages();
2657
2658 scan_unevictable_pages = 0;
2659 return 0;
2660}
2661
2662
2663
2664
2665
2666
2667static ssize_t read_scan_unevictable_node(struct sys_device *dev,
2668 struct sysdev_attribute *attr,
2669 char *buf)
2670{
2671 return sprintf(buf, "0\n");
2672}
2673
2674static ssize_t write_scan_unevictable_node(struct sys_device *dev,
2675 struct sysdev_attribute *attr,
2676 const char *buf, size_t count)
2677{
2678 struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
2679 struct zone *zone;
2680 unsigned long res;
2681 unsigned long req = strict_strtoul(buf, 10, &res);
2682
2683 if (!req)
2684 return 1;
2685
2686 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2687 if (!populated_zone(zone))
2688 continue;
2689 scan_zone_unevictable_pages(zone);
2690 }
2691 return 1;
2692}
2693
2694
2695static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
2696 read_scan_unevictable_node,
2697 write_scan_unevictable_node);
2698
2699int scan_unevictable_register_node(struct node *node)
2700{
2701 return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
2702}
2703
2704void scan_unevictable_unregister_node(struct node *node)
2705{
2706 sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
2707}
2708
2709#endif
2710