/*
 *  linux/mm/vmscan.c
 *
 *  Page reclaim: scanning of the LRU lists, writeback of dirty pages,
 *  shrinking of slab caches, and the kswapd background reclaim thread.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>

#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	/* Set by the hibernation code to reclaim as much as possible */
	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Allow pageout() to write back dirty pages? */
	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* Allocation order that triggered this reclaim */
	int order;

	/* Scan (total_size >> priority) pages at once */
	int priority;

	/*
	 * The memory cgroup that hit its limit and is the primary
	 * target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}
#endif

static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	if (!mem_cgroup_disabled())
		return mem_cgroup_get_lru_size(lruvec, lru);

	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
}

/*
 * Add a shrinker callback to be called from the vm.
 */
void register_shrinker(struct shrinker *shrinker)
{
	atomic_long_set(&shrinker->nr_in_batch, 0);
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);

static inline int do_shrinker_shrink(struct shrinker *shrinker,
				     struct shrink_control *sc,
				     unsigned long nr_to_scan)
{
	sc->nr_to_scan = nr_to_scan;
	return (*shrinker->shrink)(shrinker, sc);
}

/* Default number of objects asked of each ->shrink() call */
#define SHRINK_BATCH 128

/*
 * Call the shrink functions to age shrinkable caches.
 *
 * Here we assume it costs one seek to replace an LRU page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the LRU and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * 'lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
207unsigned long shrink_slab(struct shrink_control *shrink,
208 unsigned long nr_pages_scanned,
209 unsigned long lru_pages)
210{
211 struct shrinker *shrinker;
212 unsigned long ret = 0;
213
214 if (nr_pages_scanned == 0)
215 nr_pages_scanned = SWAP_CLUSTER_MAX;
216
	if (!down_read_trylock(&shrinker_rwsem)) {
		/* Assume we'll be able to shrink next time */
		ret = 1;
		goto out;
	}
222
223 list_for_each_entry(shrinker, &shrinker_list, list) {
224 unsigned long long delta;
225 long total_scan;
226 long max_pass;
227 int shrink_ret = 0;
228 long nr;
229 long new_nr;
230 long batch_size = shrinker->batch ? shrinker->batch
231 : SHRINK_BATCH;
232
233 max_pass = do_shrinker_shrink(shrinker, shrink, 0);
234 if (max_pass <= 0)
235 continue;
236
237
238
239
240
241
242 nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
243
244 total_scan = nr;
245 delta = (4 * nr_pages_scanned) / shrinker->seeks;
246 delta *= max_pass;
247 do_div(delta, lru_pages + 1);
248 total_scan += delta;
249 if (total_scan < 0) {
250 printk(KERN_ERR "shrink_slab: %pF negative objects to "
251 "delete nr=%ld\n",
252 shrinker->shrink, total_scan);
253 total_scan = max_pass;
254 }
255
256
257
258
259
260
261
262
263
264
265
266
267
268 if (delta < max_pass / 4)
269 total_scan = min(total_scan, max_pass / 2);
270
271
272
273
274
275
276 if (total_scan > max_pass * 2)
277 total_scan = max_pass * 2;
278
279 trace_mm_shrink_slab_start(shrinker, shrink, nr,
280 nr_pages_scanned, lru_pages,
281 max_pass, delta, total_scan);
282
283 while (total_scan >= batch_size) {
284 int nr_before;
285
286 nr_before = do_shrinker_shrink(shrinker, shrink, 0);
287 shrink_ret = do_shrinker_shrink(shrinker, shrink,
288 batch_size);
289 if (shrink_ret == -1)
290 break;
291 if (shrink_ret < nr_before)
292 ret += nr_before - shrink_ret;
293 count_vm_events(SLABS_SCANNED, batch_size);
294 total_scan -= batch_size;
295
296 cond_resched();
297 }
298
299
300
301
302
303
304 if (total_scan > 0)
305 new_nr = atomic_long_add_return(total_scan,
306 &shrinker->nr_in_batch);
307 else
308 new_nr = atomic_long_read(&shrinker->nr_in_batch);
309
310 trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
311 }
312 up_read(&shrinker_rwsem);
313out:
314 cond_resched();
315 return ret;
316}
317
static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}
327
328static int may_write_to_queue(struct backing_dev_info *bdi,
329 struct scan_control *sc)
330{
331 if (current->flags & PF_SWAPWRITE)
332 return 1;
333 if (!bdi_write_congested(bdi))
334 return 1;
335 if (bdi == current->backing_dev_info)
336 return 1;
337 return 0;
338}
339
/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * will allocate the page.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}
360
/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
377static pageout_t pageout(struct page *page, struct address_space *mapping,
378 struct scan_control *sc)
379{
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396 if (!is_page_cache_freeable(page))
397 return PAGE_KEEP;
398 if (!mapping) {
399
400
401
402
403 if (page_has_private(page)) {
404 if (try_to_free_buffers(page)) {
405 ClearPageDirty(page);
406 printk("%s: orphaned page\n", __func__);
407 return PAGE_CLEAN;
408 }
409 }
410 return PAGE_KEEP;
411 }
412 if (mapping->a_ops->writepage == NULL)
413 return PAGE_ACTIVATE;
414 if (!may_write_to_queue(mapping->backing_dev_info, sc))
415 return PAGE_KEEP;
416
417 if (clear_page_dirty_for_io(page)) {
418 int res;
419 struct writeback_control wbc = {
420 .sync_mode = WB_SYNC_NONE,
421 .nr_to_write = SWAP_CLUSTER_MAX,
422 .range_start = 0,
423 .range_end = LLONG_MAX,
424 .for_reclaim = 1,
425 };
426
427 SetPageReclaim(page);
428 res = mapping->a_ops->writepage(page, &wbc);
429 if (res < 0)
430 handle_write_error(mapping, page, res);
431 if (res == AOP_WRITEPAGE_ACTIVATE) {
432 ClearPageReclaim(page);
433 return PAGE_ACTIVATE;
434 }
435
436 if (!PageWriteback(page)) {
437
438 ClearPageReclaim(page);
439 }
440 trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
441 inc_zone_page_state(page, NR_VMSCAN_WRITE);
442 return PAGE_SUCCESS;
443 }
444
445 return PAGE_CLEAN;
446}
447
/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
452static int __remove_mapping(struct address_space *mapping, struct page *page)
453{
454 BUG_ON(!PageLocked(page));
455 BUG_ON(mapping != page_mapping(page));
456
457 spin_lock_irq(&mapping->tree_lock);
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483 if (!page_freeze_refs(page, 2))
484 goto cannot_free;
485
486 if (unlikely(PageDirty(page))) {
487 page_unfreeze_refs(page, 2);
488 goto cannot_free;
489 }
490
491 if (PageSwapCache(page)) {
492 swp_entry_t swap = { .val = page_private(page) };
493 __delete_from_swap_cache(page);
494 spin_unlock_irq(&mapping->tree_lock);
495 swapcache_free(swap, page);
496 } else {
497 void (*freepage)(struct page *);
498
499 freepage = mapping->a_ops->freepage;
500
501 __delete_from_page_cache(page);
502 spin_unlock_irq(&mapping->tree_lock);
503 mem_cgroup_uncharge_cache_page(page);
504
505 if (freepage != NULL)
506 freepage(page);
507 }
508
509 return 1;
510
511cannot_free:
512 spin_unlock_irq(&mapping->tree_lock);
513 return 0;
514}
515
/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
522int remove_mapping(struct address_space *mapping, struct page *page)
523{
524 if (__remove_mapping(mapping, page)) {
525
526
527
528
529
530 page_unfreeze_refs(page, 1);
531 return 1;
532 }
533 return 0;
534}
535
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
545void putback_lru_page(struct page *page)
546{
547 int lru;
548 int active = !!TestClearPageActive(page);
549 int was_unevictable = PageUnevictable(page);
550
551 VM_BUG_ON(PageLRU(page));
552
553redo:
554 ClearPageUnevictable(page);
555
556 if (page_evictable(page)) {
557
558
559
560
561
562
563 lru = active + page_lru_base_type(page);
564 lru_cache_add_lru(page, lru);
565 } else {
566
567
568
569
570 lru = LRU_UNEVICTABLE;
571 add_page_to_unevictable_list(page);
572
573
574
575
576
577
578
579
580
581
582 smp_mb();
583 }
584
585
586
587
588
589
590 if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
591 if (!isolate_lru_page(page)) {
592 put_page(page);
593 goto redo;
594 }
595
596
597
598
599 }
600
601 if (was_unevictable && lru != LRU_UNEVICTABLE)
602 count_vm_event(UNEVICTABLE_PGRESCUED);
603 else if (!was_unevictable && lru == LRU_UNEVICTABLE)
604 count_vm_event(UNEVICTABLE_PGCULLED);
605
606 put_page(page);
607}
608
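/*
 * Classification of a page's recent references, as returned by
 * page_check_references(): reclaim it, reclaim it only if it is clean,
 * keep it on the inactive list, or promote it back to the active list.
 */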
609enum page_references {
610 PAGEREF_RECLAIM,
611 PAGEREF_RECLAIM_CLEAN,
612 PAGEREF_KEEP,
613 PAGEREF_ACTIVATE,
614};
615
616static enum page_references page_check_references(struct page *page,
617 struct scan_control *sc)
618{
619 int referenced_ptes, referenced_page;
620 unsigned long vm_flags;
621
622 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
623 &vm_flags);
624 referenced_page = TestClearPageReferenced(page);
625
626
627
628
629
630 if (vm_flags & VM_LOCKED)
631 return PAGEREF_RECLAIM;
632
633 if (referenced_ptes) {
634 if (PageSwapBacked(page))
635 return PAGEREF_ACTIVATE;
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650 SetPageReferenced(page);
651
652 if (referenced_page || referenced_ptes > 1)
653 return PAGEREF_ACTIVATE;
654
655
656
657
658 if (vm_flags & VM_EXEC)
659 return PAGEREF_ACTIVATE;
660
661 return PAGEREF_KEEP;
662 }
663
664
665 if (referenced_page && !PageSwapBacked(page))
666 return PAGEREF_RECLAIM_CLEAN;
667
668 return PAGEREF_RECLAIM;
669}
670
/*
 * shrink_page_list() returns the number of reclaimed pages.
 */
674static unsigned long shrink_page_list(struct list_head *page_list,
675 struct zone *zone,
676 struct scan_control *sc,
677 enum ttu_flags ttu_flags,
678 unsigned long *ret_nr_dirty,
679 unsigned long *ret_nr_writeback,
680 bool force_reclaim)
681{
682 LIST_HEAD(ret_pages);
683 LIST_HEAD(free_pages);
684 int pgactivate = 0;
685 unsigned long nr_dirty = 0;
686 unsigned long nr_congested = 0;
687 unsigned long nr_reclaimed = 0;
688 unsigned long nr_writeback = 0;
689
690 cond_resched();
691
692 mem_cgroup_uncharge_start();
693 while (!list_empty(page_list)) {
694 struct address_space *mapping;
695 struct page *page;
696 int may_enter_fs;
697 enum page_references references = PAGEREF_RECLAIM_CLEAN;
698
699 cond_resched();
700
701 page = lru_to_page(page_list);
702 list_del(&page->lru);
703
704 if (!trylock_page(page))
705 goto keep;
706
707 VM_BUG_ON(PageActive(page));
708 VM_BUG_ON(page_zone(page) != zone);
709
710 sc->nr_scanned++;
711
712 if (unlikely(!page_evictable(page)))
713 goto cull_mlocked;
714
715 if (!sc->may_unmap && page_mapped(page))
716 goto keep_locked;
717
718
719 if (page_mapped(page) || PageSwapCache(page))
720 sc->nr_scanned++;
721
722 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
723 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
724
725 if (PageWriteback(page)) {
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
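			/*
			 * The page is under writeback.  For global reclaim,
			 * or if the caller cannot do IO, or if this is the
			 * first time we encounter the page under writeback,
			 * mark it PageReclaim so it is rotated back to us
			 * when writeback completes, and skip it.  Otherwise
			 * (memcg reclaim, IO allowed, already PageReclaim)
			 * wait for writeback to finish to avoid a premature
			 * memcg OOM.
			 */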
743 if (global_reclaim(sc) ||
744 !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
745
746
747
748
749
750
751
752
753
754
755
756 SetPageReclaim(page);
757 nr_writeback++;
758 goto keep_locked;
759 }
760 wait_on_page_writeback(page);
761 }
762
763 if (!force_reclaim)
764 references = page_check_references(page, sc);
765
766 switch (references) {
767 case PAGEREF_ACTIVATE:
768 goto activate_locked;
769 case PAGEREF_KEEP:
770 goto keep_locked;
771 case PAGEREF_RECLAIM:
772 case PAGEREF_RECLAIM_CLEAN:
773 ;
774 }
775
776
777
778
779
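		/*
		 * Anonymous process memory has no backing store.
		 * Try to allocate it some swap space here.
		 */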
780 if (PageAnon(page) && !PageSwapCache(page)) {
781 if (!(sc->gfp_mask & __GFP_IO))
782 goto keep_locked;
783 if (!add_to_swap(page))
784 goto activate_locked;
785 may_enter_fs = 1;
786 }
787
788 mapping = page_mapping(page);
789
790
791
792
793
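		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */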
794 if (page_mapped(page) && mapping) {
795 switch (try_to_unmap(page, ttu_flags)) {
796 case SWAP_FAIL:
797 goto activate_locked;
798 case SWAP_AGAIN:
799 goto keep_locked;
800 case SWAP_MLOCK:
801 goto cull_mlocked;
802 case SWAP_SUCCESS:
803 ;
804 }
805 }
806
807 if (PageDirty(page)) {
808 nr_dirty++;
809
810
811
812
813
814
815 if (page_is_file_cache(page) &&
816 (!current_is_kswapd() ||
817 sc->priority >= DEF_PRIORITY - 2)) {
818
819
820
821
822
823
824 inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
825 SetPageReclaim(page);
826
827 goto keep_locked;
828 }
829
830 if (references == PAGEREF_RECLAIM_CLEAN)
831 goto keep_locked;
832 if (!may_enter_fs)
833 goto keep_locked;
834 if (!sc->may_writepage)
835 goto keep_locked;
836
837
838 switch (pageout(page, mapping, sc)) {
839 case PAGE_KEEP:
840 nr_congested++;
841 goto keep_locked;
842 case PAGE_ACTIVATE:
843 goto activate_locked;
844 case PAGE_SUCCESS:
845 if (PageWriteback(page))
846 goto keep;
847 if (PageDirty(page))
848 goto keep;
849
850
851
852
853
854 if (!trylock_page(page))
855 goto keep;
856 if (PageDirty(page) || PageWriteback(page))
857 goto keep_locked;
858 mapping = page_mapping(page);
859 case PAGE_CLEAN:
860 ;
861 }
862 }
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885 if (page_has_private(page)) {
886 if (!try_to_release_page(page, sc->gfp_mask))
887 goto activate_locked;
888 if (!mapping && page_count(page) == 1) {
889 unlock_page(page);
890 if (put_page_testzero(page))
891 goto free_it;
892 else {
893
894
895
896
897
898
899
900 nr_reclaimed++;
901 continue;
902 }
903 }
904 }
905
906 if (!mapping || !__remove_mapping(mapping, page))
907 goto keep_locked;
908
909
910
911
912
913
914
915
916 __clear_page_locked(page);
917free_it:
918 nr_reclaimed++;
919
920
921
922
923
924 list_add(&page->lru, &free_pages);
925 continue;
926
927cull_mlocked:
928 if (PageSwapCache(page))
929 try_to_free_swap(page);
930 unlock_page(page);
931 putback_lru_page(page);
932 continue;
933
934activate_locked:
935
936 if (PageSwapCache(page) && vm_swap_full())
937 try_to_free_swap(page);
938 VM_BUG_ON(PageActive(page));
939 SetPageActive(page);
940 pgactivate++;
941keep_locked:
942 unlock_page(page);
943keep:
944 list_add(&page->lru, &ret_pages);
945 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
946 }
947
948
949
950
951
952
953
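	/*
	 * Tag a zone as congested if all the dirty pages encountered were
	 * backed by a congested BDI.  In that case reclaimers should just
	 * back off and wait for congestion to clear, because further
	 * reclaim will hit the same problem.
	 */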
954 if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
955 zone_set_flag(zone, ZONE_CONGESTED);
956
957 free_hot_cold_page_list(&free_pages, 1);
958
959 list_splice(&ret_pages, page_list);
960 count_vm_events(PGACTIVATE, pgactivate);
961 mem_cgroup_uncharge_end();
962 *ret_nr_dirty += nr_dirty;
963 *ret_nr_writeback += nr_writeback;
964 return nr_reclaimed;
965}
966
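/*
 * Reclaim clean, unmapped file pages from @page_list directly, bypassing
 * the usual LRU scan.  Pages that could not be freed are left on
 * @page_list; the number of reclaimed pages is returned.
 */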
967unsigned long reclaim_clean_pages_from_list(struct zone *zone,
968 struct list_head *page_list)
969{
970 struct scan_control sc = {
971 .gfp_mask = GFP_KERNEL,
972 .priority = DEF_PRIORITY,
973 .may_unmap = 1,
974 };
975 unsigned long ret, dummy1, dummy2;
976 struct page *page, *next;
977 LIST_HEAD(clean_pages);
978
979 list_for_each_entry_safe(page, next, page_list, lru) {
980 if (page_is_file_cache(page) && !PageDirty(page)) {
981 ClearPageActive(page);
982 list_move(&page->lru, &clean_pages);
983 }
984 }
985
986 ret = shrink_page_list(&clean_pages, zone, &sc,
987 TTU_UNMAP|TTU_IGNORE_ACCESS,
988 &dummy1, &dummy2, true);
989 list_splice(&clean_pages, page_list);
990 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
991 return ret;
992}
993
/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes
 *
 * returns 0 on success, -ve errno on failure.
 */
1004int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1005{
1006 int ret = -EINVAL;
1007
1008
1009 if (!PageLRU(page))
1010 return ret;
1011
1012
1013 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1014 return ret;
1015
1016 ret = -EBUSY;
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029 if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
1030
1031 if (PageWriteback(page))
1032 return ret;
1033
1034 if (PageDirty(page)) {
1035 struct address_space *mapping;
1036
1037
1038 if (mode & ISOLATE_CLEAN)
1039 return ret;
1040
1041
1042
1043
1044
1045
1046 mapping = page_mapping(page);
1047 if (mapping && !mapping->a_ops->migratepage)
1048 return ret;
1049 }
1050 }
1051
1052 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1053 return ret;
1054
1055 if (likely(get_page_unless_zero(page))) {
1056
1057
1058
1059
1060
1061 ClearPageLRU(page);
1062 ret = 0;
1063 }
1064
1065 return ret;
1066}
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1089 struct lruvec *lruvec, struct list_head *dst,
1090 unsigned long *nr_scanned, struct scan_control *sc,
1091 isolate_mode_t mode, enum lru_list lru)
1092{
1093 struct list_head *src = &lruvec->lists[lru];
1094 unsigned long nr_taken = 0;
1095 unsigned long scan;
1096
1097 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
1098 struct page *page;
1099 int nr_pages;
1100
1101 page = lru_to_page(src);
1102 prefetchw_prev_lru_page(page, src, flags);
1103
1104 VM_BUG_ON(!PageLRU(page));
1105
1106 switch (__isolate_lru_page(page, mode)) {
1107 case 0:
1108 nr_pages = hpage_nr_pages(page);
1109 mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
1110 list_move(&page->lru, dst);
1111 nr_taken += nr_pages;
1112 break;
1113
1114 case -EBUSY:
1115
1116 list_move(&page->lru, src);
1117 continue;
1118
1119 default:
1120 BUG();
1121 }
1122 }
1123
1124 *nr_scanned = scan;
1125 trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
1126 nr_taken, mode, is_file_lru(lru));
1127 return nr_taken;
1128}
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155int isolate_lru_page(struct page *page)
1156{
1157 int ret = -EBUSY;
1158
1159 VM_BUG_ON(!page_count(page));
1160
1161 if (PageLRU(page)) {
1162 struct zone *zone = page_zone(page);
1163 struct lruvec *lruvec;
1164
1165 spin_lock_irq(&zone->lru_lock);
1166 lruvec = mem_cgroup_page_lruvec(page, zone);
1167 if (PageLRU(page)) {
1168 int lru = page_lru(page);
1169 get_page(page);
1170 ClearPageLRU(page);
1171 del_page_from_lru_list(page, lruvec, lru);
1172 ret = 0;
1173 }
1174 spin_unlock_irq(&zone->lru_lock);
1175 }
1176 return ret;
1177}
1178
1179
1180
1181
1182
1183
1184
1185
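/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled.  When there are a massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */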
1186static int too_many_isolated(struct zone *zone, int file,
1187 struct scan_control *sc)
1188{
1189 unsigned long inactive, isolated;
1190
1191 if (current_is_kswapd())
1192 return 0;
1193
1194 if (!global_reclaim(sc))
1195 return 0;
1196
1197 if (file) {
1198 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1199 isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1200 } else {
1201 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1202 isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1203 }
1204
1205
1206
1207
1208
1209
1210 if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
1211 inactive >>= 3;
1212
1213 return isolated > inactive;
1214}
1215
1216static noinline_for_stack void
1217putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1218{
1219 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1220 struct zone *zone = lruvec_zone(lruvec);
1221 LIST_HEAD(pages_to_free);
1222
1223
1224
1225
1226 while (!list_empty(page_list)) {
1227 struct page *page = lru_to_page(page_list);
1228 int lru;
1229
1230 VM_BUG_ON(PageLRU(page));
1231 list_del(&page->lru);
1232 if (unlikely(!page_evictable(page))) {
1233 spin_unlock_irq(&zone->lru_lock);
1234 putback_lru_page(page);
1235 spin_lock_irq(&zone->lru_lock);
1236 continue;
1237 }
1238
1239 lruvec = mem_cgroup_page_lruvec(page, zone);
1240
1241 SetPageLRU(page);
1242 lru = page_lru(page);
1243 add_page_to_lru_list(page, lruvec, lru);
1244
1245 if (is_active_lru(lru)) {
1246 int file = is_file_lru(lru);
1247 int numpages = hpage_nr_pages(page);
1248 reclaim_stat->recent_rotated[file] += numpages;
1249 }
1250 if (put_page_testzero(page)) {
1251 __ClearPageLRU(page);
1252 __ClearPageActive(page);
1253 del_page_from_lru_list(page, lruvec, lru);
1254
1255 if (unlikely(PageCompound(page))) {
1256 spin_unlock_irq(&zone->lru_lock);
1257 (*get_compound_page_dtor(page))(page);
1258 spin_lock_irq(&zone->lru_lock);
1259 } else
1260 list_add(&page->lru, &pages_to_free);
1261 }
1262 }
1263
1264
1265
1266
1267 list_splice(&pages_to_free, page_list);
1268}
1269
1270
1271
1272
1273
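/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the
 * number of reclaimed pages.
 */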
1274static noinline_for_stack unsigned long
1275shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1276 struct scan_control *sc, enum lru_list lru)
1277{
1278 LIST_HEAD(page_list);
1279 unsigned long nr_scanned;
1280 unsigned long nr_reclaimed = 0;
1281 unsigned long nr_taken;
1282 unsigned long nr_dirty = 0;
1283 unsigned long nr_writeback = 0;
1284 isolate_mode_t isolate_mode = 0;
1285 int file = is_file_lru(lru);
1286 struct zone *zone = lruvec_zone(lruvec);
1287 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1288
1289 while (unlikely(too_many_isolated(zone, file, sc))) {
1290 congestion_wait(BLK_RW_ASYNC, HZ/10);
1291
1292
1293 if (fatal_signal_pending(current))
1294 return SWAP_CLUSTER_MAX;
1295 }
1296
1297 lru_add_drain();
1298
1299 if (!sc->may_unmap)
1300 isolate_mode |= ISOLATE_UNMAPPED;
1301 if (!sc->may_writepage)
1302 isolate_mode |= ISOLATE_CLEAN;
1303
1304 spin_lock_irq(&zone->lru_lock);
1305
1306 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1307 &nr_scanned, sc, isolate_mode, lru);
1308
1309 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1310 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1311
1312 if (global_reclaim(sc)) {
1313 zone->pages_scanned += nr_scanned;
1314 if (current_is_kswapd())
1315 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
1316 else
1317 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
1318 }
1319 spin_unlock_irq(&zone->lru_lock);
1320
1321 if (nr_taken == 0)
1322 return 0;
1323
1324 nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
1325 &nr_dirty, &nr_writeback, false);
1326
1327 spin_lock_irq(&zone->lru_lock);
1328
1329 reclaim_stat->recent_scanned[file] += nr_taken;
1330
1331 if (global_reclaim(sc)) {
1332 if (current_is_kswapd())
1333 __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
1334 nr_reclaimed);
1335 else
1336 __count_zone_vm_events(PGSTEAL_DIRECT, zone,
1337 nr_reclaimed);
1338 }
1339
1340 putback_inactive_pages(lruvec, &page_list);
1341
1342 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1343
1344 spin_unlock_irq(&zone->lru_lock);
1345
1346 free_hot_cold_page_list(&page_list, 1);
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371 if (nr_writeback && nr_writeback >=
1372 (nr_taken >> (DEF_PRIORITY - sc->priority)))
1373 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
1374
1375 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1376 zone_idx(zone),
1377 nr_scanned, nr_reclaimed,
1378 sc->priority,
1379 trace_shrink_flags(file));
1380 return nr_reclaimed;
1381}
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
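/*
 * Move pages from @list back onto the @lru list of @lruvec while holding
 * zone->lru_lock.  Pages whose refcount drops to zero here are collected
 * on @pages_to_free so the caller can release them after dropping the lock.
 */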
1401static void move_active_pages_to_lru(struct lruvec *lruvec,
1402 struct list_head *list,
1403 struct list_head *pages_to_free,
1404 enum lru_list lru)
1405{
1406 struct zone *zone = lruvec_zone(lruvec);
1407 unsigned long pgmoved = 0;
1408 struct page *page;
1409 int nr_pages;
1410
1411 while (!list_empty(list)) {
1412 page = lru_to_page(list);
1413 lruvec = mem_cgroup_page_lruvec(page, zone);
1414
1415 VM_BUG_ON(PageLRU(page));
1416 SetPageLRU(page);
1417
1418 nr_pages = hpage_nr_pages(page);
1419 mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
1420 list_move(&page->lru, &lruvec->lists[lru]);
1421 pgmoved += nr_pages;
1422
1423 if (put_page_testzero(page)) {
1424 __ClearPageLRU(page);
1425 __ClearPageActive(page);
1426 del_page_from_lru_list(page, lruvec, lru);
1427
1428 if (unlikely(PageCompound(page))) {
1429 spin_unlock_irq(&zone->lru_lock);
1430 (*get_compound_page_dtor(page))(page);
1431 spin_lock_irq(&zone->lru_lock);
1432 } else
1433 list_add(&page->lru, pages_to_free);
1434 }
1435 }
1436 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1437 if (!is_active_lru(lru))
1438 __count_vm_events(PGDEACTIVATE, pgmoved);
1439}
1440
1441static void shrink_active_list(unsigned long nr_to_scan,
1442 struct lruvec *lruvec,
1443 struct scan_control *sc,
1444 enum lru_list lru)
1445{
1446 unsigned long nr_taken;
1447 unsigned long nr_scanned;
1448 unsigned long vm_flags;
1449 LIST_HEAD(l_hold);
1450 LIST_HEAD(l_active);
1451 LIST_HEAD(l_inactive);
1452 struct page *page;
1453 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1454 unsigned long nr_rotated = 0;
1455 isolate_mode_t isolate_mode = 0;
1456 int file = is_file_lru(lru);
1457 struct zone *zone = lruvec_zone(lruvec);
1458
1459 lru_add_drain();
1460
1461 if (!sc->may_unmap)
1462 isolate_mode |= ISOLATE_UNMAPPED;
1463 if (!sc->may_writepage)
1464 isolate_mode |= ISOLATE_CLEAN;
1465
1466 spin_lock_irq(&zone->lru_lock);
1467
1468 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1469 &nr_scanned, sc, isolate_mode, lru);
1470 if (global_reclaim(sc))
1471 zone->pages_scanned += nr_scanned;
1472
1473 reclaim_stat->recent_scanned[file] += nr_taken;
1474
1475 __count_zone_vm_events(PGREFILL, zone, nr_scanned);
1476 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1477 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1478 spin_unlock_irq(&zone->lru_lock);
1479
1480 while (!list_empty(&l_hold)) {
1481 cond_resched();
1482 page = lru_to_page(&l_hold);
1483 list_del(&page->lru);
1484
1485 if (unlikely(!page_evictable(page))) {
1486 putback_lru_page(page);
1487 continue;
1488 }
1489
1490 if (unlikely(buffer_heads_over_limit)) {
1491 if (page_has_private(page) && trylock_page(page)) {
1492 if (page_has_private(page))
1493 try_to_release_page(page, 0);
1494 unlock_page(page);
1495 }
1496 }
1497
1498 if (page_referenced(page, 0, sc->target_mem_cgroup,
1499 &vm_flags)) {
1500 nr_rotated += hpage_nr_pages(page);
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1511 list_add(&page->lru, &l_active);
1512 continue;
1513 }
1514 }
1515
1516 ClearPageActive(page);
1517 list_add(&page->lru, &l_inactive);
1518 }
1519
1520
1521
1522
1523 spin_lock_irq(&zone->lru_lock);
1524
1525
1526
1527
1528
1529
1530 reclaim_stat->recent_rotated[file] += nr_rotated;
1531
1532 move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1533 move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
1534 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1535 spin_unlock_irq(&zone->lru_lock);
1536
1537 free_hot_cold_page_list(&l_hold, 1);
1538}
1539
1540#ifdef CONFIG_SWAP
1541static int inactive_anon_is_low_global(struct zone *zone)
1542{
1543 unsigned long active, inactive;
1544
1545 active = zone_page_state(zone, NR_ACTIVE_ANON);
1546 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1547
1548 if (inactive * zone->inactive_ratio < active)
1549 return 1;
1550
1551 return 0;
1552}
1553
1554
1555
1556
1557
1558
1559
1560
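/**
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @lruvec: LRU vector to check
 *
 * Returns true if the zone does not have enough inactive anon pages,
 * meaning some active anon pages need to be deactivated.
 */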
1561static int inactive_anon_is_low(struct lruvec *lruvec)
1562{
1563
1564
1565
1566
1567 if (!total_swap_pages)
1568 return 0;
1569
1570 if (!mem_cgroup_disabled())
1571 return mem_cgroup_inactive_anon_is_low(lruvec);
1572
1573 return inactive_anon_is_low_global(lruvec_zone(lruvec));
1574}
1575#else
1576static inline int inactive_anon_is_low(struct lruvec *lruvec)
1577{
1578 return 0;
1579}
1580#endif
1581
1582static int inactive_file_is_low_global(struct zone *zone)
1583{
1584 unsigned long active, inactive;
1585
1586 active = zone_page_state(zone, NR_ACTIVE_FILE);
1587 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1588
1589 return (active > inactive);
1590}
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
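/**
 * inactive_file_is_low - check if file pages need to be deactivated
 * @lruvec: LRU vector to check
 *
 * When the system is doing streaming IO, memory pressure here ensures that
 * active file pages get deactivated until no more than half of all file
 * pages are on the active file LRU, so that used-once streaming pages end
 * up on the inactive list where they can be reclaimed without disturbing
 * the working set.
 */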
1606static int inactive_file_is_low(struct lruvec *lruvec)
1607{
1608 if (!mem_cgroup_disabled())
1609 return mem_cgroup_inactive_file_is_low(lruvec);
1610
1611 return inactive_file_is_low_global(lruvec_zone(lruvec));
1612}
1613
1614static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
1615{
1616 if (is_file_lru(lru))
1617 return inactive_file_is_low(lruvec);
1618 else
1619 return inactive_anon_is_low(lruvec);
1620}
1621
1622static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1623 struct lruvec *lruvec, struct scan_control *sc)
1624{
1625 if (is_active_lru(lru)) {
1626 if (inactive_list_is_low(lruvec, lru))
1627 shrink_active_list(nr_to_scan, lruvec, sc, lru);
1628 return 0;
1629 }
1630
1631 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
1632}
1633
1634static int vmscan_swappiness(struct scan_control *sc)
1635{
1636 if (global_reclaim(sc))
1637 return vm_swappiness;
1638 return mem_cgroup_swappiness(sc->target_mem_cgroup);
1639}
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
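/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative pressure on each list is derived from the
 * fraction of recently scanned pages that were rotated back onto the
 * active list instead of being evicted, weighted by swappiness.
 *
 * @nr is filled with the number of pages to scan from each evictable LRU.
 */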
1650static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
1651 unsigned long *nr)
1652{
1653 unsigned long anon, file, free;
1654 unsigned long anon_prio, file_prio;
1655 unsigned long ap, fp;
1656 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1657 u64 fraction[2], denominator;
1658 enum lru_list lru;
1659 int noswap = 0;
1660 bool force_scan = false;
1661 struct zone *zone = lruvec_zone(lruvec);
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673 if (current_is_kswapd() && zone->all_unreclaimable)
1674 force_scan = true;
1675 if (!global_reclaim(sc))
1676 force_scan = true;
1677
1678
1679 if (!sc->may_swap || (nr_swap_pages <= 0)) {
1680 noswap = 1;
1681 fraction[0] = 0;
1682 fraction[1] = 1;
1683 denominator = 1;
1684 goto out;
1685 }
1686
1687 anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
1688 get_lru_size(lruvec, LRU_INACTIVE_ANON);
1689 file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
1690 get_lru_size(lruvec, LRU_INACTIVE_FILE);
1691
1692 if (global_reclaim(sc)) {
1693 free = zone_page_state(zone, NR_FREE_PAGES);
1694 if (unlikely(file + free <= high_wmark_pages(zone))) {
1695
1696
1697
1698
1699 fraction[0] = 1;
1700 fraction[1] = 0;
1701 denominator = 1;
1702 goto out;
1703 } else if (!inactive_file_is_low_global(zone)) {
1704
1705
1706
1707
1708 fraction[0] = 0;
1709 fraction[1] = 1;
1710 denominator = 1;
1711 goto out;
1712 }
1713 }
1714
1715
1716
1717
1718
1719 anon_prio = vmscan_swappiness(sc);
1720 file_prio = 200 - anon_prio;
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733 spin_lock_irq(&zone->lru_lock);
1734 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1735 reclaim_stat->recent_scanned[0] /= 2;
1736 reclaim_stat->recent_rotated[0] /= 2;
1737 }
1738
1739 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1740 reclaim_stat->recent_scanned[1] /= 2;
1741 reclaim_stat->recent_rotated[1] /= 2;
1742 }
1743
1744
1745
1746
1747
1748
1749 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
1750 ap /= reclaim_stat->recent_rotated[0] + 1;
1751
1752 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
1753 fp /= reclaim_stat->recent_rotated[1] + 1;
1754 spin_unlock_irq(&zone->lru_lock);
1755
1756 fraction[0] = ap;
1757 fraction[1] = fp;
1758 denominator = ap + fp + 1;
1759out:
1760 for_each_evictable_lru(lru) {
1761 int file = is_file_lru(lru);
1762 unsigned long scan;
1763
1764 scan = get_lru_size(lruvec, lru);
1765 if (sc->priority || noswap || !vmscan_swappiness(sc)) {
1766 scan >>= sc->priority;
1767 if (!scan && force_scan)
1768 scan = SWAP_CLUSTER_MAX;
1769 scan = div64_u64(scan * fraction[file], denominator);
1770 }
1771 nr[lru] = scan;
1772 }
1773}
1774
1775
1776static bool in_reclaim_compaction(struct scan_control *sc)
1777{
1778 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
1779 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
1780 sc->priority < DEF_PRIORITY - 2))
1781 return true;
1782
1783 return false;
1784}
1785
1786
1787
1788
1789
1790
1791
1792
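/*
 * Reclaim/compaction is used for high-order allocation requests.  It reclaims
 * order-0 pages before compacting the zone.  should_continue_reclaim() returns
 * true if more pages should be reclaimed so that when the page allocator calls
 * compaction it will have enough free pages to succeed.  It gives up earlier
 * than that if there is difficulty reclaiming pages.
 */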
1793static inline bool should_continue_reclaim(struct lruvec *lruvec,
1794 unsigned long nr_reclaimed,
1795 unsigned long nr_scanned,
1796 struct scan_control *sc)
1797{
1798 unsigned long pages_for_compaction;
1799 unsigned long inactive_lru_pages;
1800
1801
1802 if (!in_reclaim_compaction(sc))
1803 return false;
1804
1805
1806 if (sc->gfp_mask & __GFP_REPEAT) {
1807
1808
1809
1810
1811
1812
1813 if (!nr_reclaimed && !nr_scanned)
1814 return false;
1815 } else {
1816
1817
1818
1819
1820
1821
1822
1823
1824 if (!nr_reclaimed)
1825 return false;
1826 }
1827
1828
1829
1830
1831
1832 pages_for_compaction = (2UL << sc->order);
1833 inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
1834 if (nr_swap_pages > 0)
1835 inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
1836 if (sc->nr_reclaimed < pages_for_compaction &&
1837 inactive_lru_pages > pages_for_compaction)
1838 return true;
1839
1840
1841 switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
1842 case COMPACT_PARTIAL:
1843 case COMPACT_CONTINUE:
1844 return false;
1845 default:
1846 return true;
1847 }
1848}
1849
1850
1851
1852
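/*
 * Shrink the LRU lists of a single lruvec (one memcg's slice of a zone),
 * restarting while should_continue_reclaim() says more order-0 pages are
 * needed for reclaim/compaction.
 */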
1853static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
1854{
1855 unsigned long nr[NR_LRU_LISTS];
1856 unsigned long nr_to_scan;
1857 enum lru_list lru;
1858 unsigned long nr_reclaimed, nr_scanned;
1859 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1860 struct blk_plug plug;
1861
1862restart:
1863 nr_reclaimed = 0;
1864 nr_scanned = sc->nr_scanned;
1865 get_scan_count(lruvec, sc, nr);
1866
1867 blk_start_plug(&plug);
1868 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1869 nr[LRU_INACTIVE_FILE]) {
1870 for_each_evictable_lru(lru) {
1871 if (nr[lru]) {
1872 nr_to_scan = min_t(unsigned long,
1873 nr[lru], SWAP_CLUSTER_MAX);
1874 nr[lru] -= nr_to_scan;
1875
1876 nr_reclaimed += shrink_list(lru, nr_to_scan,
1877 lruvec, sc);
1878 }
1879 }
1880
1881
1882
1883
1884
1885
1886
1887
1888 if (nr_reclaimed >= nr_to_reclaim &&
1889 sc->priority < DEF_PRIORITY)
1890 break;
1891 }
1892 blk_finish_plug(&plug);
1893 sc->nr_reclaimed += nr_reclaimed;
1894
1895
1896
1897
1898
1899 if (inactive_anon_is_low(lruvec))
1900 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
1901 sc, LRU_ACTIVE_ANON);
1902
1903
1904 if (should_continue_reclaim(lruvec, nr_reclaimed,
1905 sc->nr_scanned - nr_scanned, sc))
1906 goto restart;
1907
1908 throttle_vm_writeout(sc->gfp_mask);
1909}
1910
1911static void shrink_zone(struct zone *zone, struct scan_control *sc)
1912{
1913 struct mem_cgroup *root = sc->target_mem_cgroup;
1914 struct mem_cgroup_reclaim_cookie reclaim = {
1915 .zone = zone,
1916 .priority = sc->priority,
1917 };
1918 struct mem_cgroup *memcg;
1919
1920 memcg = mem_cgroup_iter(root, NULL, &reclaim);
1921 do {
1922 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
1923
1924 shrink_lruvec(lruvec, sc);
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936 if (!global_reclaim(sc)) {
1937 mem_cgroup_iter_break(root, memcg);
1938 break;
1939 }
1940 memcg = mem_cgroup_iter(root, memcg, &reclaim);
1941 } while (memcg);
1942}
1943
1944
1945static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
1946{
1947 unsigned long balance_gap, watermark;
1948 bool watermark_ok;
1949
1950
1951 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
1952 return false;
1953
1954
1955
1956
1957
1958
1959
1960 balance_gap = min(low_wmark_pages(zone),
1961 (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
1962 KSWAPD_ZONE_BALANCE_GAP_RATIO);
1963 watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
1964 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
1965
1966
1967
1968
1969
1970 if (compaction_deferred(zone, sc->order))
1971 return watermark_ok;
1972
1973
1974 if (!compaction_suitable(zone, sc->order))
1975 return false;
1976
1977 return watermark_ok;
1978}
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
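/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 *
 * Returns true if reclaim of a zone was aborted because compaction is ready
 * to begin; the caller may then want to retry the allocation rather than
 * reclaim further.
 */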
2001static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2002{
2003 struct zoneref *z;
2004 struct zone *zone;
2005 unsigned long nr_soft_reclaimed;
2006 unsigned long nr_soft_scanned;
2007 bool aborted_reclaim = false;
2008
2009
2010
2011
2012
2013
2014 if (buffer_heads_over_limit)
2015 sc->gfp_mask |= __GFP_HIGHMEM;
2016
2017 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2018 gfp_zone(sc->gfp_mask), sc->nodemask) {
2019 if (!populated_zone(zone))
2020 continue;
2021
2022
2023
2024
2025 if (global_reclaim(sc)) {
2026 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2027 continue;
2028 if (zone->all_unreclaimable &&
2029 sc->priority != DEF_PRIORITY)
2030 continue;
2031 if (IS_ENABLED(CONFIG_COMPACTION)) {
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041 if (compaction_ready(zone, sc)) {
2042 aborted_reclaim = true;
2043 continue;
2044 }
2045 }
2046
2047
2048
2049
2050
2051
2052 nr_soft_scanned = 0;
2053 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2054 sc->order, sc->gfp_mask,
2055 &nr_soft_scanned);
2056 sc->nr_reclaimed += nr_soft_reclaimed;
2057 sc->nr_scanned += nr_soft_scanned;
2058
2059 }
2060
2061 shrink_zone(zone, sc);
2062 }
2063
2064 return aborted_reclaim;
2065}
2066
2067static bool zone_reclaimable(struct zone *zone)
2068{
2069 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
2070}
2071
2072
2073static bool all_unreclaimable(struct zonelist *zonelist,
2074 struct scan_control *sc)
2075{
2076 struct zoneref *z;
2077 struct zone *zone;
2078
2079 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2080 gfp_zone(sc->gfp_mask), sc->nodemask) {
2081 if (!populated_zone(zone))
2082 continue;
2083 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2084 continue;
2085 if (!zone->all_unreclaimable)
2086 return false;
2087 }
2088
2089 return true;
2090}
2091
/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick the writeback threads and take explicit
 * naps in the hope that some of these pages can be written.  But if the
 * allocating task holds filesystem locks which prevent writeout this might not
 * work, and the allocation attempt will fail.
 *
 * returns:	0, if no pages reclaimed
 *		else, the number of pages reclaimed
 */
2108static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2109 struct scan_control *sc,
2110 struct shrink_control *shrink)
2111{
2112 unsigned long total_scanned = 0;
2113 struct reclaim_state *reclaim_state = current->reclaim_state;
2114 struct zoneref *z;
2115 struct zone *zone;
2116 unsigned long writeback_threshold;
2117 bool aborted_reclaim;
2118
2119 delayacct_freepages_start();
2120
2121 if (global_reclaim(sc))
2122 count_vm_event(ALLOCSTALL);
2123
2124 do {
2125 sc->nr_scanned = 0;
2126 aborted_reclaim = shrink_zones(zonelist, sc);
2127
2128
2129
2130
2131
2132 if (global_reclaim(sc)) {
2133 unsigned long lru_pages = 0;
2134 for_each_zone_zonelist(zone, z, zonelist,
2135 gfp_zone(sc->gfp_mask)) {
2136 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2137 continue;
2138
2139 lru_pages += zone_reclaimable_pages(zone);
2140 }
2141
2142 shrink_slab(shrink, sc->nr_scanned, lru_pages);
2143 if (reclaim_state) {
2144 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2145 reclaim_state->reclaimed_slab = 0;
2146 }
2147 }
2148 total_scanned += sc->nr_scanned;
2149 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2150 goto out;
2151
2152
2153
2154
2155
2156
2157
2158
2159 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2160 if (total_scanned > writeback_threshold) {
2161 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2162 WB_REASON_TRY_TO_FREE_PAGES);
2163 sc->may_writepage = 1;
2164 }
2165
2166
2167 if (!sc->hibernation_mode && sc->nr_scanned &&
2168 sc->priority < DEF_PRIORITY - 2) {
2169 struct zone *preferred_zone;
2170
2171 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
2172 &cpuset_current_mems_allowed,
2173 &preferred_zone);
2174 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
2175 }
2176 } while (--sc->priority >= 0);
2177
2178out:
2179 delayacct_freepages_end();
2180
2181 if (sc->nr_reclaimed)
2182 return sc->nr_reclaimed;
2183
2184
2185
2186
2187
2188
2189 if (oom_killer_disabled)
2190 return 0;
2191
2192
2193 if (aborted_reclaim)
2194 return 1;
2195
2196
2197 if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
2198 return 1;
2199
2200 return 0;
2201}
2202
2203static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2204{
2205 struct zone *zone;
2206 unsigned long pfmemalloc_reserve = 0;
2207 unsigned long free_pages = 0;
2208 int i;
2209 bool wmark_ok;
2210
2211 for (i = 0; i <= ZONE_NORMAL; i++) {
2212 zone = &pgdat->node_zones[i];
2213 pfmemalloc_reserve += min_wmark_pages(zone);
2214 free_pages += zone_page_state(zone, NR_FREE_PAGES);
2215 }
2216
2217 wmark_ok = free_pages > pfmemalloc_reserve / 2;
2218
2219
2220 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
2221 pgdat->classzone_idx = min(pgdat->classzone_idx,
2222 (enum zone_type)ZONE_NORMAL);
2223 wake_up_interruptible(&pgdat->kswapd_wait);
2224 }
2225
2226 return wmark_ok;
2227}
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
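/*
 * Throttle direct reclaimers if backing storage is backed by the network
 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
 * depleted.  kswapd will continue to make progress and wake the processes
 * when the low watermark is reached.
 *
 * Returns true if a fatal signal was delivered during throttling.  If this
 * happens, the page allocator should not consider triggering the OOM killer.
 */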
2238static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2239 nodemask_t *nodemask)
2240{
2241 struct zone *zone;
2242 int high_zoneidx = gfp_zone(gfp_mask);
2243 pg_data_t *pgdat;
2244
2245
2246
2247
2248
2249
2250
2251
2252 if (current->flags & PF_KTHREAD)
2253 goto out;
2254
2255
2256
2257
2258
2259 if (fatal_signal_pending(current))
2260 goto out;
2261
2262
2263 first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
2264 pgdat = zone->zone_pgdat;
2265 if (pfmemalloc_watermark_ok(pgdat))
2266 goto out;
2267
2268
2269 count_vm_event(PGSCAN_DIRECT_THROTTLE);
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279 if (!(gfp_mask & __GFP_FS)) {
2280 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
2281 pfmemalloc_watermark_ok(pgdat), HZ);
2282
2283 goto check_pending;
2284 }
2285
2286
2287 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
2288 pfmemalloc_watermark_ok(pgdat));
2289
2290check_pending:
2291 if (fatal_signal_pending(current))
2292 return true;
2293
2294out:
2295 return false;
2296}
2297
2298unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2299 gfp_t gfp_mask, nodemask_t *nodemask)
2300{
2301 unsigned long nr_reclaimed;
2302 struct scan_control sc = {
2303 .gfp_mask = gfp_mask,
2304 .may_writepage = !laptop_mode,
2305 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2306 .may_unmap = 1,
2307 .may_swap = 1,
2308 .order = order,
2309 .priority = DEF_PRIORITY,
2310 .target_mem_cgroup = NULL,
2311 .nodemask = nodemask,
2312 };
2313 struct shrink_control shrink = {
2314 .gfp_mask = sc.gfp_mask,
2315 };
2316
2317
2318
2319
2320
2321
2322 if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
2323 return 1;
2324
2325 trace_mm_vmscan_direct_reclaim_begin(order,
2326 sc.may_writepage,
2327 gfp_mask);
2328
2329 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2330
2331 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2332
2333 return nr_reclaimed;
2334}
2335
2336#ifdef CONFIG_MEMCG
2337
2338unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
2339 gfp_t gfp_mask, bool noswap,
2340 struct zone *zone,
2341 unsigned long *nr_scanned)
2342{
2343 struct scan_control sc = {
2344 .nr_scanned = 0,
2345 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2346 .may_writepage = !laptop_mode,
2347 .may_unmap = 1,
2348 .may_swap = !noswap,
2349 .order = 0,
2350 .priority = 0,
2351 .target_mem_cgroup = memcg,
2352 };
2353 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2354
2355 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2356 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2357
2358 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
2359 sc.may_writepage,
2360 sc.gfp_mask);
2361
2362
2363
2364
2365
2366
2367
2368
2369 shrink_lruvec(lruvec, &sc);
2370
2371 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2372
2373 *nr_scanned = sc.nr_scanned;
2374 return sc.nr_reclaimed;
2375}
2376
2377unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
2378 gfp_t gfp_mask,
2379 bool noswap)
2380{
2381 struct zonelist *zonelist;
2382 unsigned long nr_reclaimed;
2383 int nid;
2384 struct scan_control sc = {
2385 .may_writepage = !laptop_mode,
2386 .may_unmap = 1,
2387 .may_swap = !noswap,
2388 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2389 .order = 0,
2390 .priority = DEF_PRIORITY,
2391 .target_mem_cgroup = memcg,
2392 .nodemask = NULL,
2393 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2394 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2395 };
2396 struct shrink_control shrink = {
2397 .gfp_mask = sc.gfp_mask,
2398 };
2399
2400
2401
2402
2403
2404
2405 nid = mem_cgroup_select_victim_node(memcg);
2406
2407 zonelist = NODE_DATA(nid)->node_zonelists;
2408
2409 trace_mm_vmscan_memcg_reclaim_begin(0,
2410 sc.may_writepage,
2411 sc.gfp_mask);
2412
2413 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2414
2415 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2416
2417 return nr_reclaimed;
2418}
2419#endif
2420
2421static void age_active_anon(struct zone *zone, struct scan_control *sc)
2422{
2423 struct mem_cgroup *memcg;
2424
2425 if (!total_swap_pages)
2426 return;
2427
2428 memcg = mem_cgroup_iter(NULL, NULL, NULL);
2429 do {
2430 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2431
2432 if (inactive_anon_is_low(lruvec))
2433 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2434 sc, LRU_ACTIVE_ANON);
2435
2436 memcg = mem_cgroup_iter(NULL, memcg, NULL);
2437 } while (memcg);
2438}
2439
2440static bool zone_balanced(struct zone *zone, int order,
2441 unsigned long balance_gap, int classzone_idx)
2442{
2443 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
2444 balance_gap, classzone_idx, 0))
2445 return false;
2446
2447 if (IS_ENABLED(CONFIG_COMPACTION) && order &&
2448 !compaction_suitable(zone, order))
2449 return false;
2450
2451 return true;
2452}
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
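/*
 * pgdat_balanced() is used when checking if a node is balanced.
 *
 * For order-0, all zones must be balanced!
 *
 * For high-order allocations only zones that meet watermarks and are in a
 * zone allowed by the caller's classzone_idx are added to balanced_pages.
 * The total of balanced pages must be at least 25% of the zones allowed by
 * classzone_idx for the node to be considered balanced.  Forcing all zones
 * to be balanced for high-order allocations would cause excessive reclaim
 * when there are too many pages to be reclaimed.
 */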
2474static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
2475{
2476 unsigned long present_pages = 0;
2477 unsigned long balanced_pages = 0;
2478 int i;
2479
2480
2481 for (i = 0; i <= classzone_idx; i++) {
2482 struct zone *zone = pgdat->node_zones + i;
2483
2484 if (!populated_zone(zone))
2485 continue;
2486
2487 present_pages += zone->present_pages;
2488
2489
2490
2491
2492
2493
2494
2495
2496 if (zone->all_unreclaimable) {
2497 balanced_pages += zone->present_pages;
2498 continue;
2499 }
2500
2501 if (zone_balanced(zone, order, 0, i))
2502 balanced_pages += zone->present_pages;
2503 else if (!order)
2504 return false;
2505 }
2506
2507 if (order)
2508 return balanced_pages >= (present_pages >> 2);
2509 else
2510 return true;
2511}
2512
2513
2514
2515
2516
2517
2518
2519static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
2520 int classzone_idx)
2521{
2522
2523 if (remaining)
2524 return false;
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535 if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
2536 wake_up(&pgdat->pfmemalloc_wait);
2537 return false;
2538 }
2539
2540 return pgdat_balanced(pgdat, order, classzone_idx);
2541}
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
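/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at high_wmark_pages(zone).
 *
 * Returns the final order kswapd was reclaiming at.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  We detect the case where all pages in a
 * zone have been scanned with no reclaim success and mark that zone as
 * "all unreclaimable".
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have already been scanned and which already have free_pages
 * at high_wmark_pages(zone).
 */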
2564static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2565 int *classzone_idx)
2566{
2567 struct zone *unbalanced_zone;
2568 int i;
2569 int end_zone = 0;
2570 unsigned long total_scanned;
2571 struct reclaim_state *reclaim_state = current->reclaim_state;
2572 unsigned long nr_soft_reclaimed;
2573 unsigned long nr_soft_scanned;
2574 struct scan_control sc = {
2575 .gfp_mask = GFP_KERNEL,
2576 .may_unmap = 1,
2577 .may_swap = 1,
2578
2579
2580
2581
2582 .nr_to_reclaim = ULONG_MAX,
2583 .order = order,
2584 .target_mem_cgroup = NULL,
2585 };
2586 struct shrink_control shrink = {
2587 .gfp_mask = sc.gfp_mask,
2588 };
2589loop_again:
2590 total_scanned = 0;
2591 sc.priority = DEF_PRIORITY;
2592 sc.nr_reclaimed = 0;
2593 sc.may_writepage = !laptop_mode;
2594 count_vm_event(PAGEOUTRUN);
2595
2596 do {
2597 unsigned long lru_pages = 0;
2598 int has_under_min_watermark_zone = 0;
2599
2600 unbalanced_zone = NULL;
2601
2602
2603
2604
2605
2606 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2607 struct zone *zone = pgdat->node_zones + i;
2608
2609 if (!populated_zone(zone))
2610 continue;
2611
2612 if (zone->all_unreclaimable &&
2613 sc.priority != DEF_PRIORITY)
2614 continue;
2615
2616
2617
2618
2619
2620 age_active_anon(zone, &sc);
2621
2622
2623
2624
2625
2626
2627
2628 if (buffer_heads_over_limit && is_highmem_idx(i)) {
2629 end_zone = i;
2630 break;
2631 }
2632
2633 if (!zone_balanced(zone, order, 0, 0)) {
2634 end_zone = i;
2635 break;
2636 } else {
2637
2638 zone_clear_flag(zone, ZONE_CONGESTED);
2639 }
2640 }
2641 if (i < 0)
2642 goto out;
2643
2644 for (i = 0; i <= end_zone; i++) {
2645 struct zone *zone = pgdat->node_zones + i;
2646
2647 lru_pages += zone_reclaimable_pages(zone);
2648 }
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659 for (i = 0; i <= end_zone; i++) {
2660 struct zone *zone = pgdat->node_zones + i;
2661 int nr_slab, testorder;
2662 unsigned long balance_gap;
2663
2664 if (!populated_zone(zone))
2665 continue;
2666
2667 if (zone->all_unreclaimable &&
2668 sc.priority != DEF_PRIORITY)
2669 continue;
2670
2671 sc.nr_scanned = 0;
2672
2673 nr_soft_scanned = 0;
2674
2675
2676
2677 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2678 order, sc.gfp_mask,
2679 &nr_soft_scanned);
2680 sc.nr_reclaimed += nr_soft_reclaimed;
2681 total_scanned += nr_soft_scanned;
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691 balance_gap = min(low_wmark_pages(zone),
2692 (zone->present_pages +
2693 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2694 KSWAPD_ZONE_BALANCE_GAP_RATIO);
2695
2696
2697
2698
2699
2700
2701
2702 testorder = order;
2703 if (IS_ENABLED(CONFIG_COMPACTION) && order &&
2704 compaction_suitable(zone, order) !=
2705 COMPACT_SKIPPED)
2706 testorder = 0;
2707
2708 if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
2709 !zone_balanced(zone, testorder,
2710 balance_gap, end_zone)) {
2711 shrink_zone(zone, &sc);
2712
2713 reclaim_state->reclaimed_slab = 0;
2714 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
2715 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2716 total_scanned += sc.nr_scanned;
2717
2718 if (nr_slab == 0 && !zone_reclaimable(zone))
2719 zone->all_unreclaimable = 1;
2720 }
2721
2722
2723
2724
2725
2726
2727 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2728 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
2729 sc.may_writepage = 1;
2730
2731 if (zone->all_unreclaimable) {
2732 if (end_zone && end_zone == i)
2733 end_zone--;
2734 continue;
2735 }
2736
2737 if (!zone_balanced(zone, testorder, 0, end_zone)) {
2738 unbalanced_zone = zone;
2739
2740
2741
2742
2743
2744 if (!zone_watermark_ok_safe(zone, order,
2745 min_wmark_pages(zone), end_zone, 0))
2746 has_under_min_watermark_zone = 1;
2747 } else {
2748
2749
2750
2751
2752
2753
2754
2755 zone_clear_flag(zone, ZONE_CONGESTED);
2756 }
2757
2758 }
2759
2760
2761
2762
2763
2764
2765 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
2766 pfmemalloc_watermark_ok(pgdat))
2767 wake_up(&pgdat->pfmemalloc_wait);
2768
2769 if (pgdat_balanced(pgdat, order, *classzone_idx))
2770 break;
2771
2772
2773
2774
2775 if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
2776 if (has_under_min_watermark_zone)
2777 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2778 else if (unbalanced_zone)
2779 wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
2780 }
2781
2782
2783
2784
2785
2786
2787
2788 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
2789 break;
2790 } while (--sc.priority >= 0);
2791out:
2792
2793 if (!pgdat_balanced(pgdat, order, *classzone_idx)) {
2794 cond_resched();
2795
2796 try_to_freeze();
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2813 order = sc.order = 0;
2814
2815 goto loop_again;
2816 }
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826 if (order) {
2827 int zones_need_compaction = 1;
2828
2829 for (i = 0; i <= end_zone; i++) {
2830 struct zone *zone = pgdat->node_zones + i;
2831
2832 if (!populated_zone(zone))
2833 continue;
2834
2835
2836 if (zone_watermark_ok(zone, order,
2837 low_wmark_pages(zone), *classzone_idx, 0))
2838 zones_need_compaction = 0;
2839 }
2840
2841 if (zones_need_compaction)
2842 compact_pgdat(pgdat, order);
2843 }
2844
2845
2846
2847
2848
2849
2850
2851 *classzone_idx = end_zone;
2852 return order;
2853}
2854
2855static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
2856{
2857 long remaining = 0;
2858 DEFINE_WAIT(wait);
2859
2860 if (freezing(current) || kthread_should_stop())
2861 return;
2862
2863 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2864
2865
2866 if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
2867 remaining = schedule_timeout(HZ/10);
2868 finish_wait(&pgdat->kswapd_wait, &wait);
2869 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2870 }
2871
2872
2873
2874
2875
2876 if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
2877 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
2888
2889
2890
2891
2892
2893
2894
2895 reset_isolation_suitable(pgdat);
2896
2897 if (!kthread_should_stop())
2898 schedule();
2899
2900 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
2901 } else {
2902 if (remaining)
2903 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2904 else
2905 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2906 }
2907 finish_wait(&pgdat->kswapd_wait, &wait);
2908}
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
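/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up.  This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */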
static int kswapd(void *p)
{
	unsigned long order, new_order;
	unsigned balanced_order;
	int classzone_idx, new_classzone_idx;
	int balanced_classzone_idx;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;

	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	lockdep_set_current_reclaim_state(GFP_KERNEL);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	order = new_order = 0;
	balanced_order = 0;
	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
	balanced_classzone_idx = classzone_idx;
	for ( ; ; ) {
		bool ret;

		/*
		 * If the last balance_pgdat was unsuccessful it's unlikely a
		 * new request of a similar or higher order will succeed soon
		 * so consider going to sleep on the basis we reclaimed at
		 */
		if (balanced_classzone_idx >= new_classzone_idx &&
					balanced_order == new_order) {
			new_order = pgdat->kswapd_max_order;
			new_classzone_idx = pgdat->classzone_idx;
			pgdat->kswapd_max_order = 0;
			pgdat->classzone_idx = pgdat->nr_zones - 1;
		}

		if (order < new_order || classzone_idx > new_classzone_idx) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation or has tighter zone constraints
			 */
			order = new_order;
			classzone_idx = new_classzone_idx;
		} else {
			kswapd_try_to_sleep(pgdat, balanced_order,
						balanced_classzone_idx);
			order = pgdat->kswapd_max_order;
			classzone_idx = pgdat->classzone_idx;
			new_order = order;
			new_classzone_idx = classzone_idx;
			pgdat->kswapd_max_order = 0;
			pgdat->classzone_idx = pgdat->nr_zones - 1;
		}

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * We can speed up thawing tasks if we don't call balance_pgdat
		 * after returning from the refrigerator
		 */
		if (!ret) {
			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
			balanced_classzone_idx = classzone_idx;
			balanced_order = balance_pgdat(pgdat, order,
						&balanced_classzone_idx);
		}
	}

	current->reclaim_state = NULL;
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;
	pgdat = zone->zone_pgdat;
	if (pgdat->kswapd_max_order < order) {
		pgdat->kswapd_max_order = order;
		pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
	}
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
		return;

	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
	wake_up_interruptible(&pgdat->kswapd_wait);
}
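
/*
 * Illustrative caller sketch (editor's addition, names approximate): the
 * page allocator's slow path wakes kswapd for each usable zone before
 * retrying, along these lines:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
 *		wakeup_kswapd(zone, order, zone_idx(preferred_zone));
 */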

/*
 * The reclaimable count would be mostly accurate.
 * The less reclaimable pages may be
 * - mlocked pages, which will be moved to unevictable list when encountered
 * - mapped pages, which may require several travels to be reclaimed
 * - dirty pages, which is not "instantly" reclaimable
 */
unsigned long global_reclaimable_pages(void)
{
	int nr;

	nr = global_page_state(NR_ACTIVE_FILE) +
	     global_page_state(NR_INACTIVE_FILE);

	if (nr_swap_pages > 0)
		nr += global_page_state(NR_ACTIVE_ANON) +
		      global_page_state(NR_INACTIVE_ANON);

	return nr;
}

unsigned long zone_reclaimable_pages(struct zone *zone)
{
	int nr;

	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
	     zone_page_state(zone, NR_INACTIVE_FILE);

	if (nr_swap_pages > 0)
		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
		      zone_page_state(zone, NR_INACTIVE_ANON);

	return nr;
}

#ifdef CONFIG_HIBERNATION
/*
 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number
 * of freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.may_swap = 1,
		.may_unmap = 1,
		.may_writepage = 1,
		.nr_to_reclaim = nr_to_reclaim,
		.hibernation_mode = 1,
		.order = 0,
		.priority = DEF_PRIORITY,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
	struct task_struct *p = current;
	unsigned long nr_reclaimed;

	p->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(sc.gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

	p->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	p->flags &= ~PF_MEMALLOC;

	return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */
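
/*
 * Illustrative caller sketch (editor's addition, hypothetical variable
 * names): the hibernation core invokes shrink_all_memory() when it must
 * make room for the suspend image, roughly:
 *
 *	to_free = image_pages_needed - free_pages_available;
 *	freed = shrink_all_memory(to_free);
 */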

/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		pr_err("Failed to start kswapd on node %d\n", nid);
		/* take the error code before clearing the stale pointer */
		ret = PTR_ERR(pgdat->kswapd);
		pgdat->kswapd = NULL;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined.  Caller
 * must hold lock_memory_hotplug().
 */
void kswapd_stop(int nid)
{
	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;

	if (kswapd) {
		kthread_stop(kswapd);
		NODE_DATA(nid)->kswapd = NULL;
	}
}

static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_MEMORY)
		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim() when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
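
/*
 * Illustrative usage (editor's addition): zone_reclaim_mode is a bitmask
 * exposed through /proc/sys/vm/zone_reclaim_mode, so for example
 *
 *	echo 3 > /proc/sys/vm/zone_reclaim_mode
 *
 * enables RECLAIM_ZONE | RECLAIM_WRITE (reclaim, and write out dirty pages
 * while doing so).
 */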

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
		zone_page_state(zone, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static long zone_pagecache_reclaimable(struct zone *zone)
{
	long nr_pagecache_reclaimable;
	long delta = 0;

	/*
	 * If RECLAIM_SWAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and zone_unmapped_file_pages() provides
	 * a better estimate
	 */
	if (zone_reclaim_mode & RECLAIM_SWAP)
		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(zone_reclaim_mode & RECLAIM_WRITE))
		delta += zone_page_state(zone, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}
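
/*
 * Worked example (editor's addition, hypothetical numbers): with both
 * RECLAIM_SWAP and RECLAIM_WRITE clear, a zone holding 1000 unmapped file
 * LRU pages of which 300 are dirty is credited with 1000 - 300 = 700
 * reclaimable page cache pages.
 */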

/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.may_swap = 1,
		.nr_to_reclaim = max_t(unsigned long, nr_pages,
				       SWAP_CLUSTER_MAX),
		.gfp_mask = gfp_mask,
		.order = order,
		.priority = ZONE_RECLAIM_PRIORITY,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};
	unsigned long nr_slab_pages0, nr_slab_pages1;

	cond_resched();

	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink zone with increasing
		 * priorities until we have enough memory freed.
		 */
		do {
			shrink_zone(zone, &sc);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}

	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (nr_slab_pages0 > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * by the same nr_pages that we used for reclaiming unmapped
		 * pages.
		 *
		 * Note that shrink_slab will free memory on all zones and may
		 * take a long time.
		 */
		for (;;) {
			unsigned long lru_pages = zone_reclaimable_pages(zone);

			/* No reclaimable slab or very low memory pressure */
			if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
				break;

			/* Freed enough memory */
			nr_slab_pages1 = zone_page_state(zone,
							NR_SLAB_RECLAIMABLE);
			if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
				break;
		}

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
		 */
		nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
		if (nr_slab_pages1 < nr_slab_pages0)
			sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	lockdep_clear_current_reclaim_state();
	return sc.nr_reclaimed >= nr_pages;
}

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be marked
	 * dirty and reclaim would then spend time on writeback. Only
	 * scan when at least one of the two limits is exceeded;
	 * otherwise report the zone as full so the caller falls back
	 * to other zones.
	 */
	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
		return ZONE_RECLAIM_FULL;

	if (zone->all_unreclaimable)
		return ZONE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return ZONE_RECLAIM_NOSCAN;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return ZONE_RECLAIM_NOSCAN;

	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return ZONE_RECLAIM_NOSCAN;

	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
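
/*
 * Editor's note (values recalled from mm/internal.h of this era, so treat
 * as an assumption): ZONE_RECLAIM_NOSCAN is -2 (reclaim was not attempted),
 * ZONE_RECLAIM_FULL is -1 (nothing left that is reclaimable), 0 means a
 * scan freed nothing, and 1 means enough pages were freed.
 */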
#endif /* CONFIG_NUMA */

/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 */
int page_evictable(struct page *page)
{
	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
}

#ifdef CONFIG_SHMEM
/**
 * check_move_unevictable_pages - check pages for evictability and move to
 * appropriate zone lru list
 * @pages:	array of pages to check
 * @nr_pages:	number of pages to check
 *
 * Checks pages for evictability and moves them to the appropriate lru list.
 *
 * This function is only used for SysV IPC SHM_UNLOCK.
 */
void check_move_unevictable_pages(struct page **pages, int nr_pages)
{
	struct lruvec *lruvec;
	struct zone *zone = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		pgscanned++;
		pagezone = page_zone(page);
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		lruvec = mem_cgroup_page_lruvec(page, zone);

		if (!PageLRU(page) || !PageUnevictable(page))
			continue;

		if (page_evictable(page)) {
			enum lru_list lru = page_lru_base_type(page);

			VM_BUG_ON(PageActive(page));
			ClearPageUnevictable(page);
			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
			add_page_to_lru_list(page, lruvec, lru);
			pgrescued++;
		}
	}

	if (zone) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		spin_unlock_irq(&zone->lru_lock);
	}
}
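
/*
 * Illustrative caller sketch (editor's addition, simplified): the SHM_UNLOCK
 * path in shmem walks the mapping in pagevec-sized batches and feeds each
 * batch to the helper above, roughly:
 *
 *	pvec.nr = find_get_pages(mapping, index, PAGEVEC_SIZE, pvec.pages);
 *	check_move_unevictable_pages(pvec.pages, pvec.nr);
 *	pagevec_release(&pvec);
 */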
#endif /* CONFIG_SHMEM */

static void warn_scan_unevictable_pages(void)
{
	printk_once(KERN_WARNING
		    "%s: The scan_unevictable_pages sysctl/node-interface has been "
		    "disabled for lack of a legitimate use case. If you have "
		    "one, please send an email to linux-mm@kvack.org.\n",
		    current->comm);
}

/*
 * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
unsigned long scan_unevictable_pages;

int scan_unevictable_handler(struct ctl_table *table, int write,
			     void __user *buffer,
			     size_t *length, loff_t *ppos)
{
	warn_scan_unevictable_pages();
	proc_doulongvec_minmax(table, write, buffer, length, ppos);
	scan_unevictable_pages = 0;
	return 0;
}

#ifdef CONFIG_NUMA
/*
 * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
static ssize_t read_scan_unevictable_node(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	warn_scan_unevictable_pages();
	return sprintf(buf, "0\n");
}

static ssize_t write_scan_unevictable_node(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	warn_scan_unevictable_pages();
	return 1;
}

static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
		   read_scan_unevictable_node,
		   write_scan_unevictable_node);

int scan_unevictable_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
}

void scan_unevictable_unregister_node(struct node *node)
{
	device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
}
#endif /* CONFIG_NUMA */