/*
 *  linux/mm/vmscan.c
 *
 *  Page reclaim: scanning of the active/inactive LRU lists, writeback of
 *  dirty pages, the per-node kswapd daemon, suspend-time shrinking and
 *  NUMA zone reclaim.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Can mapping->writepage() be called during reclaim? */
	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/*
	 * This context's SWAP_CLUSTER_MAX: how many pages one scanning
	 * pass tries to reclaim before returning.
	 */
	int swap_cluster_max;

	/* Swappiness to use for this reclaim (0..100) */
	int swappiness;

	/* Set when every zone scanned was found to be unreclaimable */
	int all_unreclaimable;

	/* Allocation order that triggered this reclaim */
	int order;

	/* Which cgroup do we reclaim from; NULL means the global LRU */
	struct mem_cgroup *mem_cgroup;

	/* Pluggable isolate-pages callback (global LRU or per-cgroup) */
	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
			unsigned long *scanned, int order, int mode,
			struct zone *z, struct mem_cgroup *mem_cont,
			int active);
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/* A scan is "global" when it is not constrained to a memory cgroup. */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#define scan_global_lru(sc)	(!(sc)->mem_cgroup)
#else
#define scan_global_lru(sc)	(1)
#endif

/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	shrinker->nr = 0;
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);

#define SHRINK_BATCH 128

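/*
 * shrink_slab() - call the registered shrinkers to age shrinkable caches.
 *
 * @scanned is the number of LRU pages just scanned and @lru_pages the total
 * number of LRU pages in play; each cache is asked to shed entries in
 * roughly the same proportion, in batches of SHRINK_BATCH.
 *
 * Returns the number of slab objects which we shrunk.
 */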
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__func__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimate number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrink)(0, gfp_mask);
			shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}

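/* Called without lock on whether page is mapped, so answer is unstable */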
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * The page cache and an optional buffer_head are the only remaining
	 * holders of the page besides the caller's reference.
	 */
	return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  The page's
 * mapping is marked with the error so that a subsequent fsync() on the file
 * will see it.
 *
 * The caller holds a reference on the page; lock it to pin page->mapping
 * while the error is recorded.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* Request for sync pageout. */
enum pageout_io {
	PAGEOUT_IO_ASYNC,
	PAGEOUT_IO_SYNC,
};

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

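/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage() and tells the caller what to do with the page
 * based on the result.
 */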
static pageout_t pageout(struct page *page, struct address_space *mapping,
						enum pageout_io sync_writeback)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().
	 *
	 * If this process is currently writing to this page's queue, we
	 * can perform writeback even if that will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		/*
		 * Wait on writeback if requested to. This happens when
		 * direct reclaiming a large contiguous area and the
		 * first attempt to free a range of pages fails.
		 */
		if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
			wait_on_page_writeback(page);

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
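/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */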
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The freezing of the refcount provides the
	 * equivalent of an smp_rmb between the page_count and page->flags
	 * loads.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
	} else {
		__remove_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

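/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */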
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}

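/*
 * shrink_page_list() returns the number of reclaimed pages
 */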
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc,
					enum pageout_io sync_writeback)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			/*
			 * Synchronous reclaim is performed in two passes,
			 * first an asynchronous pass over the list to
			 * start parallel writeback, and a second synchronous
			 * pass to wait for the IO to complete.  Wait here
			 * for any page for which writeback has already
			 * started.
			 */
			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
				wait_on_page_writeback(page);
			else
				goto keep_locked;
		}

		referenced = page_referenced(page, 1, sc->mem_cgroup);
		/* In active use or really unfreeable?  Activate it. */
		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
					referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page))
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sync_writeback)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		unlock_page(page);
free_it:
		nr_reclaimed++;
		if (!pagevec_add(&freed_pvec, page)) {
			__pagevec_free(&freed_pvec);
			pagevec_reinit(&freed_pvec);
		}
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_free(&freed_pvec);
	count_vm_events(PGACTIVATE, pgactivate);
	return nr_reclaimed;
}

/* LRU Isolation modes. */
#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */

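/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */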
int __isolate_lru_page(struct page *page, int mode)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
		return ret;

	ret = -EBUSY;
	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}

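/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 * @order:	The caller's attempted allocation order
 * @mode:	One of the LRU isolation modes
 *
 * returns how many pages were moved onto *@dst.
 */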
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned, int order, int mode)
{
	unsigned long nr_taken = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		unsigned long pfn;
		unsigned long end_pfn;
		unsigned long page_pfn;
		int zone_id;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode)) {
		case 0:
			list_move(&page->lru, dst);
			nr_taken++;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}

		if (!order)
			continue;

		/*
		 * Attempt to take all pages in the order aligned region
		 * surrounding the tag page.  Only take those pages of
		 * the same active state as that tag page.  We may safely
		 * round the target page pfn down to the requested order,
		 * as the pfn of the first page in the block may not be
		 * aligned.
		 */
		zone_id = page_zone_id(page);
		page_pfn = page_to_pfn(page);
		pfn = page_pfn & ~((1 << order) - 1);
		end_pfn = pfn + (1 << order);
		for (; pfn < end_pfn; pfn++) {
			struct page *cursor_page;

			/* The target page is in the block, ignore it. */
			if (unlikely(pfn == page_pfn))
				continue;

			/* Avoid holes within the zone. */
			if (unlikely(!pfn_valid_within(pfn)))
				break;

			cursor_page = pfn_to_page(pfn);
			/* Check that we have not crossed a zone boundary. */
			if (unlikely(page_zone_id(cursor_page) != zone_id))
				continue;
			switch (__isolate_lru_page(cursor_page, mode)) {
			case 0:
				list_move(&cursor_page->lru, dst);
				nr_taken++;
				scan++;
				break;

			case -EBUSY:
				/* else it is being freed elsewhere */
				list_move(&cursor_page->lru, src);
			default:
				break;
			}
		}
	}

	*scanned = scan;
	return nr_taken;
}

813
814static unsigned long isolate_pages_global(unsigned long nr,
815 struct list_head *dst,
816 unsigned long *scanned, int order,
817 int mode, struct zone *z,
818 struct mem_cgroup *mem_cont,
819 int active)
820{
821 if (active)
822 return isolate_lru_pages(nr, &z->active_list, dst,
823 scanned, order, mode);
824 else
825 return isolate_lru_pages(nr, &z->inactive_list, dst,
826 scanned, order, mode);
827}
828
829
830
831
832
833static unsigned long clear_active_flags(struct list_head *page_list)
834{
835 int nr_active = 0;
836 struct page *page;
837
838 list_for_each_entry(page, page_list, lru)
839 if (PageActive(page)) {
840 ClearPageActive(page);
841 nr_active++;
842 }
843
844 return nr_active;
845}
846
847
848
849
850
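/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */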
static unsigned long shrink_inactive_list(unsigned long max_scan,
				struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	do {
		struct page *page;
		unsigned long nr_taken;
		unsigned long nr_scan;
		unsigned long nr_freed;
		unsigned long nr_active;

		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
			     &page_list, &nr_scan, sc->order,
			     (sc->order > PAGE_ALLOC_COSTLY_ORDER) ?
					     ISOLATE_BOTH : ISOLATE_INACTIVE,
				zone, sc->mem_cgroup, 0);
		nr_active = clear_active_flags(&page_list);
		__count_vm_events(PGDEACTIVATE, nr_active);

		__mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
		__mod_zone_page_state(zone, NR_INACTIVE,
						-(nr_taken - nr_active));
		if (scan_global_lru(sc))
			zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		nr_scanned += nr_scan;
		nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);

		/*
		 * If we are direct reclaiming for contiguous pages and we do
		 * not reclaim everything in the list, try again and wait
		 * for IO to complete. This will stall high-order allocations
		 * but that should be acceptable to the caller
		 */
		if (nr_freed < nr_taken && !current_is_kswapd() &&
					sc->order > PAGE_ALLOC_COSTLY_ORDER) {
			congestion_wait(WRITE, HZ/10);

			/*
			 * The attempt at page out may have made some
			 * of the pages active, mark them inactive again.
			 */
			nr_active = clear_active_flags(&page_list);
			count_vm_events(PGDEACTIVATE, nr_active);

			nr_freed += shrink_page_list(&page_list, sc,
							PAGEOUT_IO_SYNC);
		}

		nr_reclaimed += nr_freed;
		local_irq_disable();
		if (current_is_kswapd()) {
			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
			__count_vm_events(KSWAPD_STEAL, nr_freed);
		} else if (scan_global_lru(sc))
			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);

		__count_zone_vm_events(PGSTEAL, zone, nr_freed);

		if (nr_taken == 0)
			goto done;

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			VM_BUG_ON(PageLRU(page));
			SetPageLRU(page);
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	} while (nr_scanned < max_scan);
	spin_unlock(&zone->lru_lock);
done:
	local_irq_enable();
	pagevec_release(&pvec);
	return nr_reclaimed;
}

/*
 * We are about to scan this zone at a certain priority level.  If that priority
 * level is smaller (ie: more urgent) than the previous priority, then note
 * that priority level within the zone.  This is done so that when the next
 * process comes in to scan this zone, it will immediately start out at this
 * priority level rather than having to build up its own scanning priority.
 * Here, this priority affects only the reclaim-mapped threshold.
 */
static inline void note_zone_scanning_priority(struct zone *zone, int priority)
{
	if (priority < zone->prev_priority)
		zone->prev_priority = priority;
}

static inline int zone_is_near_oom(struct zone *zone)
{
	return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
				+ zone_page_state(zone, NR_INACTIVE))*3;
}

/*
 * Determine whether we should try to reclaim mapped pages.
 */
static int calc_reclaim_mapped(struct scan_control *sc, struct zone *zone,
				int priority)
{
	long mapped_ratio;
	long distress;
	long swap_tendency;
	long imbalance;
	int reclaim_mapped = 0;
	int prev_priority;

	if (scan_global_lru(sc) && zone_is_near_oom(zone))
		return 1;
	/*
	 * `distress' is a measure of how much trouble we're having
	 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
	 */
	if (scan_global_lru(sc))
		prev_priority = zone->prev_priority;
	else
		prev_priority = mem_cgroup_get_reclaim_priority(sc->mem_cgroup);

	distress = 100 >> min(prev_priority, priority);

	/*
	 * The point of this algorithm is to decide when to start
	 * reclaiming mapped memory instead of just pagecache.  Work out
	 * how much memory is mapped.
	 */
	if (scan_global_lru(sc))
		mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
				global_page_state(NR_ANON_PAGES)) * 100) /
					vm_total_pages;
	else
		mapped_ratio = mem_cgroup_calc_mapped_ratio(sc->mem_cgroup);

	/*
	 * Now decide how much we really want to unmap some pages.  The
	 * mapped ratio is downgraded - just because there's a lot of
	 * mapped memory doesn't necessarily mean that page reclaim
	 * isn't succeeding.
	 *
	 * The distress ratio is important - we don't want to start
	 * going oom.
	 *
	 * A 100% value of vm_swappiness overrides this algorithm
	 * altogether.
	 */
	swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;

	/*
	 * If there's huge imbalance between active and inactive
	 * (think active 100 times larger than inactive) we should
	 * become more permissive, or the system will take too much
	 * cpu before it starts swapping during memory pressure.
	 * Distress is about avoiding early-oom, this is about
	 * making swappiness graceful despite setting it to low
	 * values.
	 *
	 * Avoid div by zero with nr_inactive+1, and max resulting
	 * value is vm_total_pages.
	 */
	if (scan_global_lru(sc)) {
		imbalance  = zone_page_state(zone, NR_ACTIVE);
		imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
	} else
		imbalance = mem_cgroup_reclaim_imbalance(sc->mem_cgroup);

	/*
	 * Reduce the effect of imbalance if swappiness is low,
	 * this means for a swappiness very low, the imbalance
	 * must be much higher than 100 for this logic to make
	 * the difference.
	 *
	 * Max temporary value is vm_total_pages*100.
	 */
	imbalance *= (vm_swappiness + 1);
	imbalance /= 100;

	/*
	 * If not much of the ram is mapped, make the imbalance
	 * less relevant: refill the inactive list with mapped pages
	 * only in presence of a high ratio of mapped pages.
	 *
	 * Max temporary value is vm_total_pages*100.
	 */
	imbalance *= mapped_ratio;
	imbalance /= 100;

	/* apply imbalance feedback to swap_tendency */
	swap_tendency += imbalance;

	/*
	 * Now use this metric to decide whether to start moving mapped
	 * memory onto the inactive list.
	 */
	if (swap_tendency >= 100)
		reclaim_mapped = 1;

	return reclaim_mapped;
}
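/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 */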
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
				struct scan_control *sc, int priority)
{
	unsigned long pgmoved;
	int pgdeactivate = 0;
	unsigned long pgscanned;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;

	if (sc->may_swap)
		reclaim_mapped = calc_reclaim_mapped(sc, zone, priority);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
					ISOLATE_ACTIVE, zone,
					sc->mem_cgroup, 1);
	/*
	 * zone->pages_scanned is used to detect zone's oom;
	 * mem_cgroup remembers nr_scan by itself.
	 */
	if (scan_global_lru(sc))
		zone->pages_scanned += pgscanned;

	__mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0, sc->mem_cgroup)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		VM_BUG_ON(!PageActive(page));
		ClearPageActive(page);

		list_move(&page->lru, &zone->inactive_list);
		mem_cgroup_move_lists(page, false);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		VM_BUG_ON(!PageActive(page));

		list_move(&page->lru, &zone->active_list);
		mem_cgroup_move_lists(page, true);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);

	__count_zone_vm_events(PGREFILL, zone, pgscanned);
	__count_vm_events(PGDEACTIVATE, pgdeactivate);
	spin_unlock_irq(&zone->lru_lock);

	pagevec_release(&pvec);
}
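/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */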
static unsigned long shrink_zone(int priority, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;
	unsigned long nr_to_scan;
	unsigned long nr_reclaimed = 0;

	if (scan_global_lru(sc)) {
		/*
		 * Add one to nr_to_scan just to make sure that the kernel
		 * will slowly sift through the active list.
		 */
		zone->nr_scan_active +=
			(zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
		nr_active = zone->nr_scan_active;
		zone->nr_scan_inactive +=
			(zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
		nr_inactive = zone->nr_scan_inactive;
		if (nr_inactive >= sc->swap_cluster_max)
			zone->nr_scan_inactive = 0;
		else
			nr_inactive = 0;

		if (nr_active >= sc->swap_cluster_max)
			zone->nr_scan_active = 0;
		else
			nr_active = 0;
	} else {
		/*
		 * This reclaim occurs not because of zone memory shortage but
		 * because the memory controller hit its limit.
		 * So, don't modify the zone reclaim related data.
		 */
		nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup,
					zone, priority);

		nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup,
					zone, priority);
	}

	while (nr_active || nr_inactive) {
		if (nr_active) {
			nr_to_scan = min(nr_active,
					(unsigned long)sc->swap_cluster_max);
			nr_active -= nr_to_scan;
			shrink_active_list(nr_to_scan, zone, sc, priority);
		}

		if (nr_inactive) {
			nr_to_scan = min(nr_inactive,
					(unsigned long)sc->swap_cluster_max);
			nr_inactive -= nr_to_scan;
			nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
								sc);
		}
	}

	throttle_vm_writeout(sc->gfp_mask);
	return nr_reclaimed;
}
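/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */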
static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
					struct scan_control *sc)
{
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
	unsigned long nr_reclaimed = 0;
	struct zoneref *z;
	struct zone *zone;

	sc->all_unreclaimable = 1;
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		if (!populated_zone(zone))
			continue;
		/*
		 * Take care that memory controller reclaiming has small
		 * influence on the global LRU.
		 */
		if (scan_global_lru(sc)) {
			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;
			note_zone_scanning_priority(zone, priority);

			if (zone_is_all_unreclaimable(zone) &&
						priority != DEF_PRIORITY)
				continue;	/* Let kswapd poll it */
			sc->all_unreclaimable = 0;
		} else {
			/*
			 * Ignore cpuset limitation here. We just want to reduce
			 * # of used pages by us regardless of memory shortage.
			 */
			sc->all_unreclaimable = 0;
			mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
							priority);
		}

		nr_reclaimed += shrink_zone(priority, zone, sc);
	}

	return nr_reclaimed;
}
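/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 *
 * returns:	0, if no pages reclaimed
 *		else, the number of pages reclaimed
 */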
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					struct scan_control *sc)
{
	int priority;
	unsigned long ret = 0;
	unsigned long total_scanned = 0;
	unsigned long nr_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long lru_pages = 0;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);

	delayacct_freepages_start();

	if (scan_global_lru(sc))
		count_vm_event(ALLOCSTALL);
	/*
	 * mem_cgroup will not do shrink_slab.
	 */
	if (scan_global_lru(sc)) {
		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {

			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;

			lru_pages += zone_page_state(zone, NR_ACTIVE)
					+ zone_page_state(zone, NR_INACTIVE);
		}
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc->nr_scanned = 0;
		if (!priority)
			disable_swap_token();
		nr_reclaimed += shrink_zones(priority, zonelist, sc);
		/*
		 * Don't shrink slabs when reclaiming memory from
		 * over limit cgroups
		 */
		if (scan_global_lru(sc)) {
			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
			if (reclaim_state) {
				nr_reclaimed += reclaim_state->reclaimed_slab;
				reclaim_state->reclaimed_slab = 0;
			}
		}
		total_scanned += sc->nr_scanned;
		if (nr_reclaimed >= sc->swap_cluster_max) {
			ret = nr_reclaimed;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.   But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > sc->swap_cluster_max +
					sc->swap_cluster_max / 2) {
			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
			sc->may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait(WRITE, HZ/10);
	}
	/* top priority shrink_zones still had more to do? don't OOM, then */
	if (!sc->all_unreclaimable && scan_global_lru(sc))
		ret = nr_reclaimed;
out:
	/*
	 * Now that we've scanned all the zones at this priority level, note
	 * that level within the zone so that the next thread which performs
	 * scanning of this zone will immediately start out at this priority
	 * level.  This affects only the decision whether or not to bring
	 * mapped pages onto the inactive list.
	 */
	if (priority < 0)
		priority = 0;

	if (scan_global_lru(sc)) {
		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {

			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;

			zone->prev_priority = priority;
		}
	} else
		mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);

	delayacct_freepages_end();

	return ret;
}

unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
								gfp_t gfp_mask)
{
	struct scan_control sc = {
		.gfp_mask = gfp_mask,
		.may_writepage = !laptop_mode,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.may_swap = 1,
		.swappiness = vm_swappiness,
		.order = order,
		.mem_cgroup = NULL,
		.isolate_pages = isolate_pages_global,
	};

	return do_try_to_free_pages(zonelist, &sc);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR

unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
						gfp_t gfp_mask)
{
	struct scan_control sc = {
		.may_writepage = !laptop_mode,
		.may_swap = 1,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.swappiness = vm_swappiness,
		.order = 0,
		.mem_cgroup = mem_cont,
		.isolate_pages = mem_cgroup_isolate_pages,
	};
	struct zonelist *zonelist;

	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
	return do_try_to_free_pages(zonelist, &sc);
}
#endif
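/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */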
static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
{
	int all_zones_ok;
	int priority;
	int i;
	unsigned long total_scanned;
	unsigned long nr_reclaimed;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_swap = 1,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.swappiness = vm_swappiness,
		.order = order,
		.mem_cgroup = NULL,
		.isolate_pages = isolate_pages_global,
	};
	/*
	 * temp_priority is used to remember the scanning priority at which
	 * this zone was successfully refilled to free_pages == pages_high.
	 */
	int temp_priority[MAX_NR_ZONES];

loop_again:
	total_scanned = 0;
	nr_reclaimed = 0;
	sc.may_writepage = !laptop_mode;
	count_vm_event(PAGEOUTRUN);

	for (i = 0; i < pgdat->nr_zones; i++)
		temp_priority[i] = DEF_PRIORITY;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		/*
		 * Scan in the highmem->dma direction for the highest
		 * zone which needs scanning
		 */
		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			if (zone_is_all_unreclaimable(zone) &&
			    priority != DEF_PRIORITY)
				continue;

			if (!zone_watermark_ok(zone, order, zone->pages_high,
					       0, 0)) {
				end_zone = i;
				break;
			}
		}
		if (i < 0)
			goto out;

		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone_page_state(zone, NR_ACTIVE)
					+ zone_page_state(zone, NR_INACTIVE);
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;

			if (!populated_zone(zone))
				continue;

			if (zone_is_all_unreclaimable(zone) &&
					priority != DEF_PRIORITY)
				continue;

			if (!zone_watermark_ok(zone, order, zone->pages_high,
					       end_zone, 0))
				all_zones_ok = 0;
			temp_priority[i] = priority;
			sc.nr_scanned = 0;
			note_zone_scanning_priority(zone, priority);
			/*
			 * We put equal pressure on every zone, unless one
			 * zone has way too many pages free already.
			 */
			if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
						end_zone, 0))
				nr_reclaimed += shrink_zone(priority, zone, &sc);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			nr_reclaimed += reclaim_state->reclaimed_slab;
			total_scanned += sc.nr_scanned;
			if (zone_is_all_unreclaimable(zone))
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
				(zone_page_state(zone, NR_ACTIVE)
				+ zone_page_state(zone, NR_INACTIVE)) * 6)
					zone_set_flag(zone,
						      ZONE_ALL_UNRECLAIMABLE);
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > nr_reclaimed + nr_reclaimed / 2)
				sc.may_writepage = 1;
		}
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait(WRITE, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators. It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if (nr_reclaimed >= SWAP_CLUSTER_MAX)
			break;
	}
out:
	/*
	 * Note within each zone the priority level at which this zone was
	 * brought into a happy state.  So that the next thread which scans
	 * this zone will start out at that priority level.
	 */
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = temp_priority[i];
	}
	if (!all_zones_ok) {
		cond_resched();

		try_to_freeze();

		goto loop_again;
	}

	return nr_reclaimed;
}
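/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up.  This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */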
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	node_to_cpumask_ptr(cpumask, pgdat->node_id);

	if (!cpus_empty(*cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			if (!freezing(current))
				schedule();

			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		if (!try_to_freeze()) {
			/* We can speed up thawing tasks if we don't call
			 * balance_pgdat after returning from the refrigerator
			 */
			balance_pgdat(pgdat, order);
		}
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}

#ifdef CONFIG_PM
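/*
 * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
 * from LRU lists system-wide, for the given pass and priority, and returns
 * the number of reclaimed pages.
 *
 * For pass > 3 we also try to shrink the LRU lists that contain a few pages.
 */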
static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
				      int pass, struct scan_control *sc)
{
	struct zone *zone;
	unsigned long nr_to_scan, ret = 0;

	for_each_zone(zone) {

		if (!populated_zone(zone))
			continue;

		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
			continue;

		/* For pass = 0 we don't shrink the active list */
		if (pass > 0) {
			zone->nr_scan_active +=
				(zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
			if (zone->nr_scan_active >= nr_pages || pass > 3) {
				zone->nr_scan_active = 0;
				nr_to_scan = min(nr_pages,
					zone_page_state(zone, NR_ACTIVE));
				shrink_active_list(nr_to_scan, zone, sc, prio);
			}
		}

		zone->nr_scan_inactive +=
			(zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
		if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
			zone->nr_scan_inactive = 0;
			nr_to_scan = min(nr_pages,
				zone_page_state(zone, NR_INACTIVE));
			ret += shrink_inactive_list(nr_to_scan, zone, sc);
			if (ret >= nr_pages)
				return ret;
		}
	}

	return ret;
}

static unsigned long count_lru_pages(void)
{
	return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
}
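/*
 * Try to free `nr_pages' of memory, system-wide, and return the number of
 * freed pages.  Used by the hibernation code to make room for the suspend
 * image: first shed reclaimable slab, then walk the LRU lists with
 * progressively more aggressive passes.
 */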
unsigned long shrink_all_memory(unsigned long nr_pages)
{
	unsigned long lru_pages, nr_slab;
	unsigned long ret = 0;
	int pass;
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_swap = 0,
		.swap_cluster_max = nr_pages,
		.may_writepage = 1,
		.swappiness = vm_swappiness,
		.isolate_pages = isolate_pages_global,
	};

	current->reclaim_state = &reclaim_state;

	lru_pages = count_lru_pages();
	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
	/* If slab caches are huge, it's better to hit them first */
	while (nr_slab >= lru_pages) {
		reclaim_state.reclaimed_slab = 0;
		shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
		if (!reclaim_state.reclaimed_slab)
			break;

		ret += reclaim_state.reclaimed_slab;
		if (ret >= nr_pages)
			goto out;

		nr_slab -= reclaim_state.reclaimed_slab;
	}

	/*
	 * We try to shrink LRUs in 5 passes:
	 * 0 = Reclaim from inactive_list only
	 * 1 = Reclaim from active list but don't reclaim mapped
	 * 2 = 2nd pass of type 1
	 * 3 = Reclaim mapped (normal reclaim)
	 * 4 = 2nd pass of type 3
	 */
	for (pass = 0; pass < 5; pass++) {
		int prio;

		/* Force reclaiming mapped pages in the passes #3 and #4 */
		if (pass > 2) {
			sc.may_swap = 1;
			sc.swappiness = 100;
		}

		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
			unsigned long nr_to_scan = nr_pages - ret;

			sc.nr_scanned = 0;
			ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
			if (ret >= nr_pages)
				goto out;

			reclaim_state.reclaimed_slab = 0;
			shrink_slab(sc.nr_scanned, sc.gfp_mask,
					count_lru_pages());
			ret += reclaim_state.reclaimed_slab;
			if (ret >= nr_pages)
				goto out;

			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
				congestion_wait(WRITE, HZ / 10);
		}
	}

	/*
	 * If ret = 0, we could not shrink LRUs, but there may be something
	 * in slab caches
	 */
	if (!ret) {
		do {
			reclaim_state.reclaimed_slab = 0;
			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
			ret += reclaim_state.reclaimed_slab;
		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
	}

out:
	current->reclaim_state = NULL;

	return ret;
}
#endif

/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_HIGH_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			node_to_cpumask_ptr(mask, pgdat->node_id);

			if (any_online_cpu(*mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		printk("Failed to start kswapd on node %d\n", nid);
		ret = -1;
	}
	return ret;
}

static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_HIGH_MEMORY)
		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

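/*
 * Try to free up some pages from this zone through reclaim.
 */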
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	int priority;
	unsigned long nr_reclaimed = 0;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.swap_cluster_max = max_t(unsigned long, nr_pages,
					SWAP_CLUSTER_MAX),
		.gfp_mask = gfp_mask,
		.swappiness = vm_swappiness,
		.isolate_pages = isolate_pages_global,
	};
	unsigned long slab_reclaimable;

	disable_swap_token();
	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_page_state(zone, NR_FILE_PAGES) -
		zone_page_state(zone, NR_FILE_MAPPED) >
		zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink zone with increasing
		 * priorities until we have enough memory freed.
		 */
		priority = ZONE_RECLAIM_PRIORITY;
		do {
			note_zone_scanning_priority(zone, priority);
			nr_reclaimed += shrink_zone(priority, zone, &sc);
			priority--;
		} while (priority >= 0 && nr_reclaimed < nr_pages);
	}

	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (slab_reclaimable > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * below the zone limit above. The pages will be freed if they
		 * are in any zone and we are not holding the zone lru lock.
		 *
		 * Note that shrink_slab will free memory on all zones and may
		 * free more than we need.
		 */
		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
				slab_reclaimable - nr_pages)
			;

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
		 */
		nr_reclaimed += slab_reclaimable -
			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	return nr_reclaimed >= nr_pages;
}

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated. So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
	if (zone_page_state(zone, NR_FILE_PAGES) -
	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
	    && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
			<= zone->min_slab_pages)
		return 0;

	if (zone_is_all_unreclaimable(zone))
		return 0;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return 0;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return 0;

	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return 0;
	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

	return ret;
}
#endif