/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free lists and the per-CPU page caches; the system
 *  allocates free pages here (the zoned buddy allocator).
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
DEFINE_PER_CPU(int, _numa_mem_);
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;

unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
		return false;
	return true;
}
#endif

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	256,
#endif
#ifdef CONFIG_ZONE_DMA32
	256,
#endif
#ifdef CONFIG_HIGHMEM
	32,
#endif
	32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}

static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	if (PageHWPoison(page)) {
		reset_page_mapcount(page);
		return;
	}

	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page);

	print_modules();
	dump_stack();
out:
	reset_page_mapcount(page);
	add_taint(TAINT_BAD_PAGE);
}

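/*
 * Higher-order pages are called "compound pages".  prep_compound_page()
 * marks the first page as the head and the remaining pages as tails (each
 * tail's first_page pointing back at the head) and records the compound
 * order and destructor; destroy_compound_page() checks and clears that
 * state when the block is freed.
 */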
static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		__SetPageTail(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}

static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order)) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) || (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard_flag(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}

static inline void clear_page_guard_flag(struct page *page)
{
	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

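/*
 * In the buddy system, the buddy of the block at index page_idx of size
 * 1 << order is found by flipping bit 'order' of the index, i.e.
 * page_idx ^ (1 << order).  The two buddies can later be merged into a
 * block of order + 1 whose index is buddy_idx & page_idx.
 */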
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}

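/*
 * A page is treated as this page's buddy only if it lies within the same
 * zone, has the same order, and is either a free page on the buddy lists
 * (PageBuddy) or a debug guard page.  Holes within the range
 * (!pfn_valid_within) are never buddies.
 */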
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

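/*
 * Freeing function for a buddy system allocator: starting from the freed
 * block, keep merging with the equally sized free buddy (doubling the
 * order each time) until the buddy is not free or MAX_ORDER-1 is reached,
 * then place the resulting block on the free list of its migratetype.
 * Called with zone->lock held.
 */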
static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & ((1 << order) - 1));
	VM_BUG_ON(bad_range(zone, page));

	while (order < MAX_ORDER-1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;

		if (page_is_guard(buddy)) {
			clear_page_guard_flag(buddy);
			set_page_private(page, 0);
			__mod_zone_freepage_state(zone, 1 << order,
						  migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL) |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
		(mem_cgroup_bad_page_check(page)))) {
		bad_page(page);
		return 1;
	}
	reset_page_last_nid(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

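/*
 * Return a batch of pages from the per-cpu lists back to the buddy
 * allocator.  Lists are drained round-robin across the per-cpu
 * migratetypes so that no single list monopolises the batch, and the
 * zone free-page counters are updated for every page handed back.
 */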
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;

	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	while (to_free) {
		struct page *page;
		struct list_head *list;

		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;

			page = list_entry(list->prev, struct page, lru);

			list_del(&page->lru);
			mt = get_freepage_migratetype(page);

			__free_one_page(page, zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
			if (likely(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) {
				__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
				if (is_migrate_cma(mt))
					__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
			}
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	__free_one_page(page, zone, order, migratetype);
	if (unlikely(migratetype != MIGRATE_ISOLATE))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;

	if (!free_pages_prepare(page, order))
		return;

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	migratetype = get_pageblock_migratetype(page);
	set_freepage_migratetype(page, migratetype);
	free_one_page(page_zone(page), page, order, migratetype);
	local_irq_restore(flags);
}

void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	unsigned int loop;

	prefetchw(page);
	for (loop = 0; loop < nr_pages; loop++) {
		struct page *p = &page[loop];

		if (loop + 1 < nr_pages)
			prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}

	page_zone(page)->managed_pages += 1 << order;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#ifdef CONFIG_CMA
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_page_refcounted(page);
	set_pageblock_migratetype(page, MIGRATE_CMA);
	__free_pages(page, pageblock_order);
	totalram_pages += pageblock_nr_pages;
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages += pageblock_nr_pages;
#endif
}
#endif

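/*
 * Subdivide a block taken from the free lists: the order-'high' block is
 * split in half repeatedly until it reaches order 'low'; each unused half
 * is returned to the appropriate smaller free list (or, under
 * CONFIG_DEBUG_PAGEALLOC, turned into guard pages).
 */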
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));

#ifdef CONFIG_DEBUG_PAGEALLOC
		if (high < debug_guardpage_minorder()) {
			INIT_LIST_HEAD(&page[size].lru);
			set_page_guard_flag(&page[size]);
			set_page_private(&page[size], high);

			__mod_zone_freepage_state(zone, -(1 << high),
						  migratetype);
			continue;
		}
#endif
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

static inline int check_new_page(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL) |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
		(mem_cgroup_bad_page_check(page)))) {
		bad_page(page);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}

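/*
 * When the free lists of the requested migratetype are empty, pages are
 * stolen from other migratetypes in the order given by this table.
 * MIGRATE_RESERVE acts as the terminator of each fallback list.
 */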
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
#ifdef CONFIG_CMA
	[MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_CMA] = { MIGRATE_RESERVE },
#else
	[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
#endif
	[MIGRATE_RESERVE] = { MIGRATE_RESERVE },
	[MIGRATE_ISOLATE] = { MIGRATE_RESERVE },
};

int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		set_freepage_migratetype(page, migratetype);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

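/* Remove an element from the buddy allocator from the fallback list. */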
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0;; i++) {
			migratetype = fallbacks[start_migratetype][i];

			if (migratetype == MIGRATE_RESERVE)
				break;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			if (!is_migrate_cma(migratetype) &&
			    (unlikely(current_order >= pageblock_order / 2) ||
			     start_migratetype == MIGRATE_RECLAIMABLE ||
			     page_group_by_mobility_disabled)) {
				int pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				if (pages >= (1 << (pageblock_order-1)) ||
						page_group_by_mobility_disabled)
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			list_del(&page->lru);
			rmv_page_order(page);

			if (current_order >= pageblock_order &&
			    !is_migrate_cma(migratetype))
				change_pageblock_range(page, current_order,
							start_migratetype);

			expand(zone, page, order, current_order, area,
			       is_migrate_cma(migratetype)
			     ? migratetype : start_migratetype);

			trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype);

			return page;
		}
	}

	return NULL;
}

static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		page = __rmqueue_fallback(zone, order, migratetype);

		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

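/*
 * Obtain a specified number of elements from the buddy allocator, all
 * under a single hold of the zone lock for efficiency, and add them to
 * the supplied list.  Returns the number of new pages which were placed
 * at *list.
 */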
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, int cold)
{
	int mt = migratetype, i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		if (likely(cold == 0))
			list_add(&page->lru, list);
		else
			list_add_tail(&page->lru, list);
		if (IS_ENABLED(CONFIG_CMA)) {
			mt = get_pageblock_migratetype(page);
			if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
				mt = migratetype;
		}
		set_freepage_migratetype(page, mt);
		list = &page->lru;
		if (is_migrate_cma(mt))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
					      -(1 << order));
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	if (to_drain > 0) {
		free_pcppages_bulk(zone, to_drain, pcp);
		pcp->count -= to_drain;
	}
	local_irq_restore(flags);
}
#endif

static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		local_irq_save(flags);
		pset = per_cpu_ptr(zone->pageset, cpu);

		pcp = &pset->pcp;
		if (pcp->count) {
			free_pcppages_bulk(zone, pcp->count, pcp);
			pcp->count = 0;
		}
		local_irq_restore(flags);
	}
}

void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

void drain_all_pages(void)
{
	int cpu;
	struct per_cpu_pageset *pcp;
	struct zone *zone;

	static cpumask_t cpus_with_pcps;

	for_each_online_cpu(cpu) {
		bool has_pcps = false;
		for_each_populated_zone(zone) {
			pcp = per_cpu_ptr(zone->pageset, cpu);
			if (pcp->pcp.count) {
				has_pcps = true;
				break;
			}
		}
		if (has_pcps)
			cpumask_set_cpu(cpu, &cpus_with_pcps);
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}
	on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

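/*
 * Free a 0-order page: pages of the per-cpu migratetypes go onto this
 * CPU's per-cpu free list (hot at the head, cold at the tail); other
 * migratetypes are handed straight back to the buddy allocator.  When the
 * per-cpu list grows past pcp->high, a batch is returned to the zone.
 */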
void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	int migratetype;

	if (!free_pages_prepare(page, 0))
		return;

	migratetype = get_pageblock_migratetype(page);
	set_freepage_migratetype(page, migratetype);
	local_irq_save(flags);
	__count_vm_event(PGFREE);

	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	if (cold)
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	else
		list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pcppages_bulk(zone, pcp->batch, pcp);
		pcp->count -= pcp->batch;
	}

out:
	local_irq_restore(flags);
}

void free_hot_cold_page_list(struct list_head *list, int cold)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		trace_mm_page_free_batched(page, cold);
		free_hot_cold_page(page, cold);
	}
}

void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}

static int __isolate_free_page(struct page *page, unsigned int order)
{
	unsigned long watermark;
	struct zone *zone;
	int mt;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	mt = get_pageblock_migratetype(page);

	if (mt != MIGRATE_ISOLATE) {
		watermark = low_wmark_pages(zone) + (1 << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
			return 0;

		__mod_zone_freepage_state(zone, -(1UL << order), mt);
	}

	list_del(&page->lru);
	zone->free_area[order].nr_free--;
	rmv_page_order(page);

	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages) {
			int mt = get_pageblock_migratetype(page);
			if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
				set_pageblock_migratetype(page,
							  MIGRATE_MOVABLE);
		}
	}

	return 1UL << order;
}

int split_free_page(struct page *page)
{
	unsigned int order;
	int nr_pages;

	order = page_order(page);

	nr_pages = __isolate_free_page(page, order);
	if (!nr_pages)
		return 0;

	set_page_refcounted(page);
	split_page(page, order);
	return nr_pages;
}
1456
1457
1458
1459
1460
1461
1462static inline
1463struct page *buffered_rmqueue(struct zone *preferred_zone,
1464 struct zone *zone, int order, gfp_t gfp_flags,
1465 int migratetype)
1466{
1467 unsigned long flags;
1468 struct page *page;
1469 int cold = !!(gfp_flags & __GFP_COLD);
1470
1471again:
1472 if (likely(order == 0)) {
1473 struct per_cpu_pages *pcp;
1474 struct list_head *list;
1475
1476 local_irq_save(flags);
1477 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1478 list = &pcp->lists[migratetype];
1479 if (list_empty(list)) {
1480 pcp->count += rmqueue_bulk(zone, 0,
1481 pcp->batch, list,
1482 migratetype, cold);
1483 if (unlikely(list_empty(list)))
1484 goto failed;
1485 }
1486
1487 if (cold)
1488 page = list_entry(list->prev, struct page, lru);
1489 else
1490 page = list_entry(list->next, struct page, lru);
1491
1492 list_del(&page->lru);
1493 pcp->count--;
1494 } else {
1495 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506 WARN_ON_ONCE(order > 1);
1507 }
1508 spin_lock_irqsave(&zone->lock, flags);
1509 page = __rmqueue(zone, order, migratetype);
1510 spin_unlock(&zone->lock);
1511 if (!page)
1512 goto failed;
1513 __mod_zone_freepage_state(zone, -(1 << order),
1514 get_pageblock_migratetype(page));
1515 }
1516
1517 __count_zone_vm_events(PGALLOC, zone, 1 << order);
1518 zone_statistics(preferred_zone, zone, gfp_flags);
1519 local_irq_restore(flags);
1520
1521 VM_BUG_ON(bad_range(zone, page));
1522 if (prep_new_page(page, order, gfp_flags))
1523 goto again;
1524 return page;
1525
1526failed:
1527 local_irq_restore(flags);
1528 return NULL;
1529}
1530
1531#ifdef CONFIG_FAIL_PAGE_ALLOC
1532
1533static struct {
1534 struct fault_attr attr;
1535
1536 u32 ignore_gfp_highmem;
1537 u32 ignore_gfp_wait;
1538 u32 min_order;
1539} fail_page_alloc = {
1540 .attr = FAULT_ATTR_INITIALIZER,
1541 .ignore_gfp_wait = 1,
1542 .ignore_gfp_highmem = 1,
1543 .min_order = 1,
1544};
1545
1546static int __init setup_fail_page_alloc(char *str)
1547{
1548 return setup_fault_attr(&fail_page_alloc.attr, str);
1549}
1550__setup("fail_page_alloc=", setup_fail_page_alloc);
1551
1552static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1553{
1554 if (order < fail_page_alloc.min_order)
1555 return false;
1556 if (gfp_mask & __GFP_NOFAIL)
1557 return false;
1558 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1559 return false;
1560 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1561 return false;
1562
1563 return should_fail(&fail_page_alloc.attr, 1 << order);
1564}
1565
1566#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1567
1568static int __init fail_page_alloc_debugfs(void)
1569{
1570 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1571 struct dentry *dir;
1572
1573 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1574 &fail_page_alloc.attr);
1575 if (IS_ERR(dir))
1576 return PTR_ERR(dir);
1577
1578 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1579 &fail_page_alloc.ignore_gfp_wait))
1580 goto fail;
1581 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1582 &fail_page_alloc.ignore_gfp_highmem))
1583 goto fail;
1584 if (!debugfs_create_u32("min-order", mode, dir,
1585 &fail_page_alloc.min_order))
1586 goto fail;
1587
1588 return 0;
1589fail:
1590 debugfs_remove_recursive(dir);
1591
1592 return -ENOMEM;
1593}
1594
1595late_initcall(fail_page_alloc_debugfs);
1596
1597#endif
1598
1599#else
1600
1601static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1602{
1603 return false;
1604}
1605
1606#endif
1607
1608
1609
1610
1611
1612static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1613 int classzone_idx, int alloc_flags, long free_pages)
1614{
1615
1616 long min = mark;
1617 long lowmem_reserve = z->lowmem_reserve[classzone_idx];
1618 int o;
1619
1620 free_pages -= (1 << order) - 1;
1621 if (alloc_flags & ALLOC_HIGH)
1622 min -= min / 2;
1623 if (alloc_flags & ALLOC_HARDER)
1624 min -= min / 4;
1625#ifdef CONFIG_CMA
1626
1627 if (!(alloc_flags & ALLOC_CMA))
1628 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
1629#endif
1630 if (free_pages <= min + lowmem_reserve)
1631 return false;
1632 for (o = 0; o < order; o++) {
1633
1634 free_pages -= z->free_area[o].nr_free << o;
1635
1636
1637 min >>= 1;
1638
1639 if (free_pages <= min)
1640 return false;
1641 }
1642 return true;
1643}
1644
1645bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1646 int classzone_idx, int alloc_flags)
1647{
1648 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1649 zone_page_state(z, NR_FREE_PAGES));
1650}
1651
1652bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1653 int classzone_idx, int alloc_flags)
1654{
1655 long free_pages = zone_page_state(z, NR_FREE_PAGES);
1656
1657 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1658 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1659
1660 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1661 free_pages);
1662}
1663
1664#ifdef CONFIG_NUMA
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1688{
1689 struct zonelist_cache *zlc;
1690 nodemask_t *allowednodes;
1691
1692 zlc = zonelist->zlcache_ptr;
1693 if (!zlc)
1694 return NULL;
1695
1696 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1697 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1698 zlc->last_full_zap = jiffies;
1699 }
1700
1701 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1702 &cpuset_current_mems_allowed :
1703 &node_states[N_MEMORY];
1704 return allowednodes;
1705}
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1730 nodemask_t *allowednodes)
1731{
1732 struct zonelist_cache *zlc;
1733 int i;
1734 int n;
1735
1736 zlc = zonelist->zlcache_ptr;
1737 if (!zlc)
1738 return 1;
1739
1740 i = z - zonelist->_zonerefs;
1741 n = zlc->z_to_n[i];
1742
1743
1744 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1745}
1746
1747
1748
1749
1750
1751
1752static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1753{
1754 struct zonelist_cache *zlc;
1755 int i;
1756
1757 zlc = zonelist->zlcache_ptr;
1758 if (!zlc)
1759 return;
1760
1761 i = z - zonelist->_zonerefs;
1762
1763 set_bit(i, zlc->fullzones);
1764}
1765
1766
1767
1768
1769
1770static void zlc_clear_zones_full(struct zonelist *zonelist)
1771{
1772 struct zonelist_cache *zlc;
1773
1774 zlc = zonelist->zlcache_ptr;
1775 if (!zlc)
1776 return;
1777
1778 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1779}
1780
1781static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1782{
1783 return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
1784}
1785
1786static void __paginginit init_zone_allows_reclaim(int nid)
1787{
1788 int i;
1789
1790 for_each_online_node(i)
1791 if (node_distance(nid, i) <= RECLAIM_DISTANCE)
1792 node_set(i, NODE_DATA(nid)->reclaim_nodes);
1793 else
1794 zone_reclaim_mode = 1;
1795}
1796
1797#else
1798
1799static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1800{
1801 return NULL;
1802}
1803
1804static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1805 nodemask_t *allowednodes)
1806{
1807 return 1;
1808}
1809
1810static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1811{
1812}
1813
1814static void zlc_clear_zones_full(struct zonelist *zonelist)
1815{
1816}
1817
1818static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1819{
1820 return true;
1821}
1822
1823static inline void init_zone_allows_reclaim(int nid)
1824{
1825}
1826#endif
1827
1828
1829
1830
1831
1832static struct page *
1833get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1834 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1835 struct zone *preferred_zone, int migratetype)
1836{
1837 struct zoneref *z;
1838 struct page *page = NULL;
1839 int classzone_idx;
1840 struct zone *zone;
1841 nodemask_t *allowednodes = NULL;
1842 int zlc_active = 0;
1843 int did_zlc_setup = 0;
1844
1845 classzone_idx = zone_idx(preferred_zone);
1846zonelist_scan:
1847
1848
1849
1850
1851 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1852 high_zoneidx, nodemask) {
1853 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
1854 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1855 continue;
1856 if ((alloc_flags & ALLOC_CPUSET) &&
1857 !cpuset_zone_allowed_softwall(zone, gfp_mask))
1858 continue;
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885 if ((alloc_flags & ALLOC_WMARK_LOW) &&
1886 (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
1887 goto this_zone_full;
1888
1889 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1890 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1891 unsigned long mark;
1892 int ret;
1893
1894 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1895 if (zone_watermark_ok(zone, order, mark,
1896 classzone_idx, alloc_flags))
1897 goto try_this_zone;
1898
1899 if (IS_ENABLED(CONFIG_NUMA) &&
1900 !did_zlc_setup && nr_online_nodes > 1) {
1901
1902
1903
1904
1905
1906 allowednodes = zlc_setup(zonelist, alloc_flags);
1907 zlc_active = 1;
1908 did_zlc_setup = 1;
1909 }
1910
1911 if (zone_reclaim_mode == 0 ||
1912 !zone_allows_reclaim(preferred_zone, zone))
1913 goto this_zone_full;
1914
1915
1916
1917
1918
1919 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
1920 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1921 continue;
1922
1923 ret = zone_reclaim(zone, gfp_mask, order);
1924 switch (ret) {
1925 case ZONE_RECLAIM_NOSCAN:
1926
1927 continue;
1928 case ZONE_RECLAIM_FULL:
1929
1930 continue;
1931 default:
1932
1933 if (!zone_watermark_ok(zone, order, mark,
1934 classzone_idx, alloc_flags))
1935 goto this_zone_full;
1936 }
1937 }
1938
1939try_this_zone:
1940 page = buffered_rmqueue(preferred_zone, zone, order,
1941 gfp_mask, migratetype);
1942 if (page)
1943 break;
1944this_zone_full:
1945 if (IS_ENABLED(CONFIG_NUMA))
1946 zlc_mark_zone_full(zonelist, z);
1947 }
1948
1949 if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
1950
1951 zlc_active = 0;
1952 goto zonelist_scan;
1953 }
1954
1955 if (page)
1956
1957
1958
1959
1960
1961
1962
1963 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
1964
1965 return page;
1966}
1967
1968
1969
1970
1971
1972static inline bool should_suppress_show_mem(void)
1973{
1974 bool ret = false;
1975
1976#if NODES_SHIFT > 8
1977 ret = in_interrupt();
1978#endif
1979 return ret;
1980}
1981
1982static DEFINE_RATELIMIT_STATE(nopage_rs,
1983 DEFAULT_RATELIMIT_INTERVAL,
1984 DEFAULT_RATELIMIT_BURST);
1985
1986void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
1987{
1988 unsigned int filter = SHOW_MEM_FILTER_NODES;
1989
1990 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
1991 debug_guardpage_minorder() > 0)
1992 return;
1993
1994
1995
1996
1997
1998
1999 if (!(gfp_mask & __GFP_NOMEMALLOC))
2000 if (test_thread_flag(TIF_MEMDIE) ||
2001 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2002 filter &= ~SHOW_MEM_FILTER_NODES;
2003 if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
2004 filter &= ~SHOW_MEM_FILTER_NODES;
2005
2006 if (fmt) {
2007 struct va_format vaf;
2008 va_list args;
2009
2010 va_start(args, fmt);
2011
2012 vaf.fmt = fmt;
2013 vaf.va = &args;
2014
2015 pr_warn("%pV", &vaf);
2016
2017 va_end(args);
2018 }
2019
2020 pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
2021 current->comm, order, gfp_mask);
2022
2023 dump_stack();
2024 if (!should_suppress_show_mem())
2025 show_mem(filter);
2026}
2027
2028static inline int
2029should_alloc_retry(gfp_t gfp_mask, unsigned int order,
2030 unsigned long did_some_progress,
2031 unsigned long pages_reclaimed)
2032{
2033
2034 if (gfp_mask & __GFP_NORETRY)
2035 return 0;
2036
2037
2038 if (gfp_mask & __GFP_NOFAIL)
2039 return 1;
2040
2041
2042
2043
2044
2045
2046 if (!did_some_progress && pm_suspended_storage())
2047 return 0;
2048
2049
2050
2051
2052
2053
2054 if (order <= PAGE_ALLOC_COSTLY_ORDER)
2055 return 1;
2056
2057
2058
2059
2060
2061
2062
2063
2064 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
2065 return 1;
2066
2067 return 0;
2068}
2069
2070static inline struct page *
2071__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2072 struct zonelist *zonelist, enum zone_type high_zoneidx,
2073 nodemask_t *nodemask, struct zone *preferred_zone,
2074 int migratetype)
2075{
2076 struct page *page;
2077
2078
2079 if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
2080 schedule_timeout_uninterruptible(1);
2081 return NULL;
2082 }
2083
2084
2085
2086
2087
2088
2089 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
2090 order, zonelist, high_zoneidx,
2091 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
2092 preferred_zone, migratetype);
2093 if (page)
2094 goto out;
2095
2096 if (!(gfp_mask & __GFP_NOFAIL)) {
2097
2098 if (order > PAGE_ALLOC_COSTLY_ORDER)
2099 goto out;
2100
2101 if (high_zoneidx < ZONE_NORMAL)
2102 goto out;
2103
2104
2105
2106
2107
2108
2109
2110 if (gfp_mask & __GFP_THISNODE)
2111 goto out;
2112 }
2113
2114 out_of_memory(zonelist, gfp_mask, order, nodemask, false);
2115
2116out:
2117 clear_zonelist_oom(zonelist, gfp_mask);
2118 return page;
2119}
2120
2121#ifdef CONFIG_COMPACTION
2122
2123static struct page *
2124__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2125 struct zonelist *zonelist, enum zone_type high_zoneidx,
2126 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2127 int migratetype, bool sync_migration,
2128 bool *contended_compaction, bool *deferred_compaction,
2129 unsigned long *did_some_progress)
2130{
2131 if (!order)
2132 return NULL;
2133
2134 if (compaction_deferred(preferred_zone, order)) {
2135 *deferred_compaction = true;
2136 return NULL;
2137 }
2138
2139 current->flags |= PF_MEMALLOC;
2140 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
2141 nodemask, sync_migration,
2142 contended_compaction);
2143 current->flags &= ~PF_MEMALLOC;
2144
2145 if (*did_some_progress != COMPACT_SKIPPED) {
2146 struct page *page;
2147
2148
2149 drain_pages(get_cpu());
2150 put_cpu();
2151
2152 page = get_page_from_freelist(gfp_mask, nodemask,
2153 order, zonelist, high_zoneidx,
2154 alloc_flags & ~ALLOC_NO_WATERMARKS,
2155 preferred_zone, migratetype);
2156 if (page) {
2157 preferred_zone->compact_blockskip_flush = false;
2158 preferred_zone->compact_considered = 0;
2159 preferred_zone->compact_defer_shift = 0;
2160 if (order >= preferred_zone->compact_order_failed)
2161 preferred_zone->compact_order_failed = order + 1;
2162 count_vm_event(COMPACTSUCCESS);
2163 return page;
2164 }
2165
2166
2167
2168
2169
2170
2171 count_vm_event(COMPACTFAIL);
2172
2173
2174
2175
2176
2177 if (sync_migration)
2178 defer_compaction(preferred_zone, order);
2179
2180 cond_resched();
2181 }
2182
2183 return NULL;
2184}
2185#else
2186static inline struct page *
2187__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2188 struct zonelist *zonelist, enum zone_type high_zoneidx,
2189 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2190 int migratetype, bool sync_migration,
2191 bool *contended_compaction, bool *deferred_compaction,
2192 unsigned long *did_some_progress)
2193{
2194 return NULL;
2195}
2196#endif
2197
2198
2199static int
2200__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
2201 nodemask_t *nodemask)
2202{
2203 struct reclaim_state reclaim_state;
2204 int progress;
2205
2206 cond_resched();
2207
2208
2209 cpuset_memory_pressure_bump();
2210 current->flags |= PF_MEMALLOC;
2211 lockdep_set_current_reclaim_state(gfp_mask);
2212 reclaim_state.reclaimed_slab = 0;
2213 current->reclaim_state = &reclaim_state;
2214
2215 progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
2216
2217 current->reclaim_state = NULL;
2218 lockdep_clear_current_reclaim_state();
2219 current->flags &= ~PF_MEMALLOC;
2220
2221 cond_resched();
2222
2223 return progress;
2224}
2225
2226
2227static inline struct page *
2228__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2229 struct zonelist *zonelist, enum zone_type high_zoneidx,
2230 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2231 int migratetype, unsigned long *did_some_progress)
2232{
2233 struct page *page = NULL;
2234 bool drained = false;
2235
2236 *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
2237 nodemask);
2238 if (unlikely(!(*did_some_progress)))
2239 return NULL;
2240
2241
2242 if (IS_ENABLED(CONFIG_NUMA))
2243 zlc_clear_zones_full(zonelist);
2244
2245retry:
2246 page = get_page_from_freelist(gfp_mask, nodemask, order,
2247 zonelist, high_zoneidx,
2248 alloc_flags & ~ALLOC_NO_WATERMARKS,
2249 preferred_zone, migratetype);
2250
2251
2252
2253
2254
2255 if (!page && !drained) {
2256 drain_all_pages();
2257 drained = true;
2258 goto retry;
2259 }
2260
2261 return page;
2262}
2263
2264
2265
2266
2267
2268static inline struct page *
2269__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2270 struct zonelist *zonelist, enum zone_type high_zoneidx,
2271 nodemask_t *nodemask, struct zone *preferred_zone,
2272 int migratetype)
2273{
2274 struct page *page;
2275
2276 do {
2277 page = get_page_from_freelist(gfp_mask, nodemask, order,
2278 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2279 preferred_zone, migratetype);
2280
2281 if (!page && gfp_mask & __GFP_NOFAIL)
2282 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2283 } while (!page && (gfp_mask & __GFP_NOFAIL));
2284
2285 return page;
2286}
2287
2288static inline
2289void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
2290 enum zone_type high_zoneidx,
2291 enum zone_type classzone_idx)
2292{
2293 struct zoneref *z;
2294 struct zone *zone;
2295
2296 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
2297 wakeup_kswapd(zone, order, classzone_idx);
2298}
2299
2300static inline int
2301gfp_to_alloc_flags(gfp_t gfp_mask)
2302{
2303 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2304 const gfp_t wait = gfp_mask & __GFP_WAIT;
2305
2306
2307 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2308
2309
2310
2311
2312
2313
2314
2315 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2316
2317 if (!wait) {
2318
2319
2320
2321
2322 if (!(gfp_mask & __GFP_NOMEMALLOC))
2323 alloc_flags |= ALLOC_HARDER;
2324
2325
2326
2327
2328 alloc_flags &= ~ALLOC_CPUSET;
2329 } else if (unlikely(rt_task(current)) && !in_interrupt())
2330 alloc_flags |= ALLOC_HARDER;
2331
2332 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2333 if (gfp_mask & __GFP_MEMALLOC)
2334 alloc_flags |= ALLOC_NO_WATERMARKS;
2335 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2336 alloc_flags |= ALLOC_NO_WATERMARKS;
2337 else if (!in_interrupt() &&
2338 ((current->flags & PF_MEMALLOC) ||
2339 unlikely(test_thread_flag(TIF_MEMDIE))))
2340 alloc_flags |= ALLOC_NO_WATERMARKS;
2341 }
2342#ifdef CONFIG_CMA
2343 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2344 alloc_flags |= ALLOC_CMA;
2345#endif
2346 return alloc_flags;
2347}
2348
2349bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2350{
2351 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
2352}
2353
2354static inline struct page *
2355__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2356 struct zonelist *zonelist, enum zone_type high_zoneidx,
2357 nodemask_t *nodemask, struct zone *preferred_zone,
2358 int migratetype)
2359{
2360 const gfp_t wait = gfp_mask & __GFP_WAIT;
2361 struct page *page = NULL;
2362 int alloc_flags;
2363 unsigned long pages_reclaimed = 0;
2364 unsigned long did_some_progress;
2365 bool sync_migration = false;
2366 bool deferred_compaction = false;
2367 bool contended_compaction = false;
2368
2369
2370
2371
2372
2373
2374
2375 if (order >= MAX_ORDER) {
2376 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2377 return NULL;
2378 }
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388 if (IS_ENABLED(CONFIG_NUMA) &&
2389 (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2390 goto nopage;
2391
2392restart:
2393 if (!(gfp_mask & __GFP_NO_KSWAPD))
2394 wake_all_kswapd(order, zonelist, high_zoneidx,
2395 zone_idx(preferred_zone));
2396
2397
2398
2399
2400
2401
2402 alloc_flags = gfp_to_alloc_flags(gfp_mask);
2403
2404
2405
2406
2407
2408 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2409 first_zones_zonelist(zonelist, high_zoneidx, NULL,
2410 &preferred_zone);
2411
2412rebalance:
2413
2414 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2415 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2416 preferred_zone, migratetype);
2417 if (page)
2418 goto got_pg;
2419
2420
2421 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2422
2423
2424
2425
2426
2427 zonelist = node_zonelist(numa_node_id(), gfp_mask);
2428
2429 page = __alloc_pages_high_priority(gfp_mask, order,
2430 zonelist, high_zoneidx, nodemask,
2431 preferred_zone, migratetype);
2432 if (page) {
2433 goto got_pg;
2434 }
2435 }
2436
2437
2438 if (!wait)
2439 goto nopage;
2440
2441
2442 if (current->flags & PF_MEMALLOC)
2443 goto nopage;
2444
2445
2446 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2447 goto nopage;
2448
2449
2450
2451
2452
2453 page = __alloc_pages_direct_compact(gfp_mask, order,
2454 zonelist, high_zoneidx,
2455 nodemask,
2456 alloc_flags, preferred_zone,
2457 migratetype, sync_migration,
2458 &contended_compaction,
2459 &deferred_compaction,
2460 &did_some_progress);
2461 if (page)
2462 goto got_pg;
2463 sync_migration = true;
2464
2465
2466
2467
2468
2469
2470
2471 if ((deferred_compaction || contended_compaction) &&
2472 (gfp_mask & __GFP_NO_KSWAPD))
2473 goto nopage;
2474
2475
2476 page = __alloc_pages_direct_reclaim(gfp_mask, order,
2477 zonelist, high_zoneidx,
2478 nodemask,
2479 alloc_flags, preferred_zone,
2480 migratetype, &did_some_progress);
2481 if (page)
2482 goto got_pg;
2483
2484
2485
2486
2487
2488 if (!did_some_progress) {
2489 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2490 if (oom_killer_disabled)
2491 goto nopage;
2492
2493 if ((current->flags & PF_DUMPCORE) &&
2494 !(gfp_mask & __GFP_NOFAIL))
2495 goto nopage;
2496 page = __alloc_pages_may_oom(gfp_mask, order,
2497 zonelist, high_zoneidx,
2498 nodemask, preferred_zone,
2499 migratetype);
2500 if (page)
2501 goto got_pg;
2502
2503 if (!(gfp_mask & __GFP_NOFAIL)) {
2504
2505
2506
2507
2508
2509
2510 if (order > PAGE_ALLOC_COSTLY_ORDER)
2511 goto nopage;
2512
2513
2514
2515
2516
2517 if (high_zoneidx < ZONE_NORMAL)
2518 goto nopage;
2519 }
2520
2521 goto restart;
2522 }
2523 }
2524
2525
2526 pages_reclaimed += did_some_progress;
2527 if (should_alloc_retry(gfp_mask, order, did_some_progress,
2528 pages_reclaimed)) {
2529
2530 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2531 goto rebalance;
2532 } else {
2533
2534
2535
2536
2537
2538 page = __alloc_pages_direct_compact(gfp_mask, order,
2539 zonelist, high_zoneidx,
2540 nodemask,
2541 alloc_flags, preferred_zone,
2542 migratetype, sync_migration,
2543 &contended_compaction,
2544 &deferred_compaction,
2545 &did_some_progress);
2546 if (page)
2547 goto got_pg;
2548 }
2549
2550nopage:
2551 warn_alloc_failed(gfp_mask, order, NULL);
2552 return page;
2553got_pg:
2554 if (kmemcheck_enabled)
2555 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2556
2557 return page;
2558}
2559
2560
2561
2562
2563struct page *
2564__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2565 struct zonelist *zonelist, nodemask_t *nodemask)
2566{
2567 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2568 struct zone *preferred_zone;
2569 struct page *page = NULL;
2570 int migratetype = allocflags_to_migratetype(gfp_mask);
2571 unsigned int cpuset_mems_cookie;
2572 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
2573 struct mem_cgroup *memcg = NULL;
2574
2575 gfp_mask &= gfp_allowed_mask;
2576
2577 lockdep_trace_alloc(gfp_mask);
2578
2579 might_sleep_if(gfp_mask & __GFP_WAIT);
2580
2581 if (should_fail_alloc_page(gfp_mask, order))
2582 return NULL;
2583
2584
2585
2586
2587
2588
2589 if (unlikely(!zonelist->_zonerefs->zone))
2590 return NULL;
2591
2592
2593
2594
2595
2596 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
2597 return NULL;
2598
2599retry_cpuset:
2600 cpuset_mems_cookie = get_mems_allowed();
2601
2602
2603 first_zones_zonelist(zonelist, high_zoneidx,
2604 nodemask ? : &cpuset_current_mems_allowed,
2605 &preferred_zone);
2606 if (!preferred_zone)
2607 goto out;
2608
2609#ifdef CONFIG_CMA
2610 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2611 alloc_flags |= ALLOC_CMA;
2612#endif
2613
2614 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2615 zonelist, high_zoneidx, alloc_flags,
2616 preferred_zone, migratetype);
2617 if (unlikely(!page))
2618 page = __alloc_pages_slowpath(gfp_mask, order,
2619 zonelist, high_zoneidx, nodemask,
2620 preferred_zone, migratetype);
2621
2622 trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2623
2624out:
2625
2626
2627
2628
2629
2630
2631 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2632 goto retry_cpuset;
2633
2634 memcg_kmem_commit_charge(page, memcg, order);
2635
2636 return page;
2637}
2638EXPORT_SYMBOL(__alloc_pages_nodemask);
2639
2640
2641
2642
2643unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2644{
2645 struct page *page;
2646
2647
2648
2649
2650
2651 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2652
2653 page = alloc_pages(gfp_mask, order);
2654 if (!page)
2655 return 0;
2656 return (unsigned long) page_address(page);
2657}
2658EXPORT_SYMBOL(__get_free_pages);
2659
2660unsigned long get_zeroed_page(gfp_t gfp_mask)
2661{
2662 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2663}
2664EXPORT_SYMBOL(get_zeroed_page);
2665
2666void __free_pages(struct page *page, unsigned int order)
2667{
2668 if (put_page_testzero(page)) {
2669 if (order == 0)
2670 free_hot_cold_page(page, 0);
2671 else
2672 __free_pages_ok(page, order);
2673 }
2674}
2675
2676EXPORT_SYMBOL(__free_pages);
2677
2678void free_pages(unsigned long addr, unsigned int order)
2679{
2680 if (addr != 0) {
2681 VM_BUG_ON(!virt_addr_valid((void *)addr));
2682 __free_pages(virt_to_page((void *)addr), order);
2683 }
2684}
2685
2686EXPORT_SYMBOL(free_pages);
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
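/*
 * __free_memcg_kmem_pages() and free_memcg_kmem_pages() release pages
 * whose allocation was charged to a memcg via
 * memcg_kmem_newpage_charge(): the charge is dropped first, then the
 * pages are freed through the normal __free_pages() path.
 */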
2699void __free_memcg_kmem_pages(struct page *page, unsigned int order)
2700{
2701 memcg_kmem_uncharge_pages(page, order);
2702 __free_pages(page, order);
2703}
2704
2705void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
2706{
2707 if (addr != 0) {
2708 VM_BUG_ON(!virt_addr_valid((void *)addr));
2709 __free_memcg_kmem_pages(virt_to_page((void *)addr), order);
2710 }
2711}
2712
2713static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2714{
2715 if (addr) {
2716 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2717 unsigned long used = addr + PAGE_ALIGN(size);
2718
2719 split_page(virt_to_page((void *)addr), order);
2720 while (used < alloc_end) {
2721 free_page(used);
2722 used += PAGE_SIZE;
2723 }
2724 }
2725 return (void *)addr;
2726}
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
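/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous
 * pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * A power-of-two number of pages is allocated, split with split_page(),
 * and the tail pages beyond PAGE_ALIGN(size) are freed again.  Memory
 * allocated by this function must be released with free_pages_exact().
 *
 * Illustrative (hypothetical) use:
 *	buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 5 * PAGE_SIZE);
 */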
2741void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2742{
2743 unsigned int order = get_order(size);
2744 unsigned long addr;
2745
2746 addr = __get_free_pages(gfp_mask, order);
2747 return make_alloc_exact(addr, order, size);
2748}
2749EXPORT_SYMBOL(alloc_pages_exact);
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
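/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 * pages on a node.
 * @nid: the preferred node ID
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but the pages are allocated from the given
 * node via alloc_pages_node().
 */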
2763void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2764{
2765 unsigned order = get_order(size);
2766 struct page *p = alloc_pages_node(nid, gfp_mask, order);
2767 if (!p)
2768 return NULL;
2769 return make_alloc_exact((unsigned long)page_address(p), order, size);
2770}
2771EXPORT_SYMBOL(alloc_pages_exact_nid);
2772
2773
2774
2775
2776
2777
2778
2779
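/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact()
 * @size: size of allocation, same value as passed to alloc_pages_exact()
 */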
2780void free_pages_exact(void *virt, size_t size)
2781{
2782 unsigned long addr = (unsigned long)virt;
2783 unsigned long end = addr + PAGE_ALIGN(size);
2784
2785 while (addr < end) {
2786 free_page(addr);
2787 addr += PAGE_SIZE;
2788 }
2789}
2790EXPORT_SYMBOL(free_pages_exact);
2791
2792static unsigned int nr_free_zone_pages(int offset)
2793{
2794 struct zoneref *z;
2795 struct zone *zone;
2796
2797
2798 unsigned int sum = 0;
2799
2800 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2801
2802 for_each_zone_zonelist(zone, z, zonelist, offset) {
2803 unsigned long size = zone->present_pages;
2804 unsigned long high = high_wmark_pages(zone);
2805 if (size > high)
2806 sum += size - high;
2807 }
2808
2809 return sum;
2810}
2811
2812
2813
2814
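/*
 * Amount of free RAM allocatable for GFP_USER allocations: the sum,
 * over the zones in the local GFP_USER zonelist, of pages above each
 * zone's high watermark.
 */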
2815unsigned int nr_free_buffer_pages(void)
2816{
2817 return nr_free_zone_pages(gfp_zone(GFP_USER));
2818}
2819EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2820
2821
2822
2823
2824unsigned int nr_free_pagecache_pages(void)
2825{
2826 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2827}
2828
2829static inline void show_node(struct zone *zone)
2830{
2831 if (IS_ENABLED(CONFIG_NUMA))
2832 printk("Node %d ", zone_to_nid(zone));
2833}
2834
2835void si_meminfo(struct sysinfo *val)
2836{
2837 val->totalram = totalram_pages;
2838 val->sharedram = 0;
2839 val->freeram = global_page_state(NR_FREE_PAGES);
2840 val->bufferram = nr_blockdev_pages();
2841 val->totalhigh = totalhigh_pages;
2842 val->freehigh = nr_free_highpages();
2843 val->mem_unit = PAGE_SIZE;
2844}
2845
2846EXPORT_SYMBOL(si_meminfo);
2847
2848#ifdef CONFIG_NUMA
2849void si_meminfo_node(struct sysinfo *val, int nid)
2850{
2851 pg_data_t *pgdat = NODE_DATA(nid);
2852
2853 val->totalram = pgdat->node_present_pages;
2854 val->freeram = node_page_state(nid, NR_FREE_PAGES);
2855#ifdef CONFIG_HIGHMEM
2856 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2857 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2858 NR_FREE_PAGES);
2859#else
2860 val->totalhigh = 0;
2861 val->freehigh = 0;
2862#endif
2863 val->mem_unit = PAGE_SIZE;
2864}
2865#endif
2866
2867
2868
2869
2870
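/*
 * Determine whether the node should be skipped by show_free_areas(),
 * i.e. whether SHOW_MEM_FILTER_NODES was passed and the node is not in
 * the current task's cpuset.
 */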
2871bool skip_free_areas_node(unsigned int flags, int nid)
2872{
2873 bool ret = false;
2874 unsigned int cpuset_mems_cookie;
2875
2876 if (!(flags & SHOW_MEM_FILTER_NODES))
2877 goto out;
2878
2879 do {
2880 cpuset_mems_cookie = get_mems_allowed();
2881 ret = !node_isset(nid, cpuset_current_mems_allowed);
2882 } while (!put_mems_allowed(cpuset_mems_cookie));
2883out:
2884 return ret;
2885}
2886
2887#define K(x) ((x) << (PAGE_SHIFT-10))
2888
2889static void show_migration_types(unsigned char type)
2890{
2891 static const char types[MIGRATE_TYPES] = {
2892 [MIGRATE_UNMOVABLE] = 'U',
2893 [MIGRATE_RECLAIMABLE] = 'E',
2894 [MIGRATE_MOVABLE] = 'M',
2895 [MIGRATE_RESERVE] = 'R',
2896#ifdef CONFIG_CMA
2897 [MIGRATE_CMA] = 'C',
2898#endif
2899 [MIGRATE_ISOLATE] = 'I',
2900 };
2901 char tmp[MIGRATE_TYPES + 1];
2902 char *p = tmp;
2903 int i;
2904
2905 for (i = 0; i < MIGRATE_TYPES; i++) {
2906 if (type & (1 << i))
2907 *p++ = types[i];
2908 }
2909
2910 *p = '\0';
2911 printk("(%s) ", tmp);
2912}
2913
2914
2915
2916
2917
2918
2919
2920
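/*
 * Show the free area list and per-zone statistics.  Nodes that are not
 * allowed by the current task's cpuset are suppressed when
 * SHOW_MEM_FILTER_NODES is set in @filter.
 */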
2921void show_free_areas(unsigned int filter)
2922{
2923 int cpu;
2924 struct zone *zone;
2925
2926 for_each_populated_zone(zone) {
2927 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2928 continue;
2929 show_node(zone);
2930 printk("%s per-cpu:\n", zone->name);
2931
2932 for_each_online_cpu(cpu) {
2933 struct per_cpu_pageset *pageset;
2934
2935 pageset = per_cpu_ptr(zone->pageset, cpu);
2936
2937 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2938 cpu, pageset->pcp.high,
2939 pageset->pcp.batch, pageset->pcp.count);
2940 }
2941 }
2942
2943 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2944 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2945 " unevictable:%lu"
2946 " dirty:%lu writeback:%lu unstable:%lu\n"
2947 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2948 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
2949 " free_cma:%lu\n",
2950 global_page_state(NR_ACTIVE_ANON),
2951 global_page_state(NR_INACTIVE_ANON),
2952 global_page_state(NR_ISOLATED_ANON),
2953 global_page_state(NR_ACTIVE_FILE),
2954 global_page_state(NR_INACTIVE_FILE),
2955 global_page_state(NR_ISOLATED_FILE),
2956 global_page_state(NR_UNEVICTABLE),
2957 global_page_state(NR_FILE_DIRTY),
2958 global_page_state(NR_WRITEBACK),
2959 global_page_state(NR_UNSTABLE_NFS),
2960 global_page_state(NR_FREE_PAGES),
2961 global_page_state(NR_SLAB_RECLAIMABLE),
2962 global_page_state(NR_SLAB_UNRECLAIMABLE),
2963 global_page_state(NR_FILE_MAPPED),
2964 global_page_state(NR_SHMEM),
2965 global_page_state(NR_PAGETABLE),
2966 global_page_state(NR_BOUNCE),
2967 global_page_state(NR_FREE_CMA_PAGES));
2968
2969 for_each_populated_zone(zone) {
2970 int i;
2971
2972 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2973 continue;
2974 show_node(zone);
2975 printk("%s"
2976 " free:%lukB"
2977 " min:%lukB"
2978 " low:%lukB"
2979 " high:%lukB"
2980 " active_anon:%lukB"
2981 " inactive_anon:%lukB"
2982 " active_file:%lukB"
2983 " inactive_file:%lukB"
2984 " unevictable:%lukB"
2985 " isolated(anon):%lukB"
2986 " isolated(file):%lukB"
2987 " present:%lukB"
2988 " managed:%lukB"
2989 " mlocked:%lukB"
2990 " dirty:%lukB"
2991 " writeback:%lukB"
2992 " mapped:%lukB"
2993 " shmem:%lukB"
2994 " slab_reclaimable:%lukB"
2995 " slab_unreclaimable:%lukB"
2996 " kernel_stack:%lukB"
2997 " pagetables:%lukB"
2998 " unstable:%lukB"
2999 " bounce:%lukB"
3000 " free_cma:%lukB"
3001 " writeback_tmp:%lukB"
3002 " pages_scanned:%lu"
3003 " all_unreclaimable? %s"
3004 "\n",
3005 zone->name,
3006 K(zone_page_state(zone, NR_FREE_PAGES)),
3007 K(min_wmark_pages(zone)),
3008 K(low_wmark_pages(zone)),
3009 K(high_wmark_pages(zone)),
3010 K(zone_page_state(zone, NR_ACTIVE_ANON)),
3011 K(zone_page_state(zone, NR_INACTIVE_ANON)),
3012 K(zone_page_state(zone, NR_ACTIVE_FILE)),
3013 K(zone_page_state(zone, NR_INACTIVE_FILE)),
3014 K(zone_page_state(zone, NR_UNEVICTABLE)),
3015 K(zone_page_state(zone, NR_ISOLATED_ANON)),
3016 K(zone_page_state(zone, NR_ISOLATED_FILE)),
3017 K(zone->present_pages),
3018 K(zone->managed_pages),
3019 K(zone_page_state(zone, NR_MLOCK)),
3020 K(zone_page_state(zone, NR_FILE_DIRTY)),
3021 K(zone_page_state(zone, NR_WRITEBACK)),
3022 K(zone_page_state(zone, NR_FILE_MAPPED)),
3023 K(zone_page_state(zone, NR_SHMEM)),
3024 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3025 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
3026 zone_page_state(zone, NR_KERNEL_STACK) *
3027 THREAD_SIZE / 1024,
3028 K(zone_page_state(zone, NR_PAGETABLE)),
3029 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3030 K(zone_page_state(zone, NR_BOUNCE)),
3031 K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
3032 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
3033 zone->pages_scanned,
3034 (zone->all_unreclaimable ? "yes" : "no")
3035 );
3036 printk("lowmem_reserve[]:");
3037 for (i = 0; i < MAX_NR_ZONES; i++)
3038 printk(" %lu", zone->lowmem_reserve[i]);
3039 printk("\n");
3040 }
3041
3042 for_each_populated_zone(zone) {
3043 unsigned long nr[MAX_ORDER], flags, order, total = 0;
3044 unsigned char types[MAX_ORDER];
3045
3046 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3047 continue;
3048 show_node(zone);
3049 printk("%s: ", zone->name);
3050
3051 spin_lock_irqsave(&zone->lock, flags);
3052 for (order = 0; order < MAX_ORDER; order++) {
3053 struct free_area *area = &zone->free_area[order];
3054 int type;
3055
3056 nr[order] = area->nr_free;
3057 total += nr[order] << order;
3058
3059 types[order] = 0;
3060 for (type = 0; type < MIGRATE_TYPES; type++) {
3061 if (!list_empty(&area->free_list[type]))
3062 types[order] |= 1 << type;
3063 }
3064 }
3065 spin_unlock_irqrestore(&zone->lock, flags);
3066 for (order = 0; order < MAX_ORDER; order++) {
3067 printk("%lu*%lukB ", nr[order], K(1UL) << order);
3068 if (nr[order])
3069 show_migration_types(types[order]);
3070 }
3071 printk("= %lukB\n", K(total));
3072 }
3073
3074 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3075
3076 show_swap_cache_info();
3077}
3078
3079static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3080{
3081 zoneref->zone = zone;
3082 zoneref->zone_idx = zone_idx(zone);
3083}
3084
3085
3086
3087
3088
3089
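/*
 * Builds allocation fallback zone lists: add all populated zones of a
 * node to the zonelist, from the highest usable zone type downwards.
 */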
3090static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3091 int nr_zones, enum zone_type zone_type)
3092{
3093 struct zone *zone;
3094
3095 BUG_ON(zone_type >= MAX_NR_ZONES);
3096 zone_type++;
3097
3098 do {
3099 zone_type--;
3100 zone = pgdat->node_zones + zone_type;
3101 if (populated_zone(zone)) {
3102 zoneref_set_zone(zone,
3103 &zonelist->_zonerefs[nr_zones++]);
3104 check_highest_zone(zone_type);
3105 }
3106
3107 } while (zone_type);
3108 return nr_zones;
3109}
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
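/*
 *  zonelist_order:
 *  0 = automatic detection of a better ordering.
 *  1 = order by ([node] distance, -zonetype)
 *  2 = order by (-zonetype, [node] distance)
 *
 *  On non-NUMA configurations both orderings produce the same zonelist,
 *  so the choice only matters (and is only configurable) with NUMA.
 */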
3121#define ZONELIST_ORDER_DEFAULT 0
3122#define ZONELIST_ORDER_NODE 1
3123#define ZONELIST_ORDER_ZONE 2
3124
3125
3126
3127
3128static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3129static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3130
3131
3132#ifdef CONFIG_NUMA
3133
3134static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3135
3136#define NUMA_ZONELIST_ORDER_LEN 16
3137 char numa_zonelist_order[NUMA_ZONELIST_ORDER_LEN] = "default";
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147static int __parse_numa_zonelist_order(char *s)
3148{
3149 if (*s == 'd' || *s == 'D') {
3150 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3151 } else if (*s == 'n' || *s == 'N') {
3152 user_zonelist_order = ZONELIST_ORDER_NODE;
3153 } else if (*s == 'z' || *s == 'Z') {
3154 user_zonelist_order = ZONELIST_ORDER_ZONE;
3155 } else {
3156 printk(KERN_WARNING
3157 "Ignoring invalid numa_zonelist_order value: "
3158 "%s\n", s);
3159 return -EINVAL;
3160 }
3161 return 0;
3162}
3163
3164static __init int setup_numa_zonelist_order(char *s)
3165{
3166 int ret;
3167
3168 if (!s)
3169 return 0;
3170
3171 ret = __parse_numa_zonelist_order(s);
3172 if (ret == 0)
3173 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3174
3175 return ret;
3176}
3177early_param("numa_zonelist_order", setup_numa_zonelist_order);
3178
3179
3180
3181
3182int numa_zonelist_order_handler(ctl_table *table, int write,
3183 void __user *buffer, size_t *length,
3184 loff_t *ppos)
3185{
3186 char saved_string[NUMA_ZONELIST_ORDER_LEN];
3187 int ret;
3188 static DEFINE_MUTEX(zl_order_mutex);
3189
3190 mutex_lock(&zl_order_mutex);
3191 if (write)
3192 strcpy(saved_string, (char*)table->data);
3193 ret = proc_dostring(table, write, buffer, length, ppos);
3194 if (ret)
3195 goto out;
3196 if (write) {
3197 int oldval = user_zonelist_order;
3198 if (__parse_numa_zonelist_order((char*)table->data)) {
3199
3200
3201
3202 strncpy((char*)table->data, saved_string,
3203 NUMA_ZONELIST_ORDER_LEN);
3204 user_zonelist_order = oldval;
3205 } else if (oldval != user_zonelist_order) {
3206 mutex_lock(&zonelists_mutex);
3207 build_all_zonelists(NULL, NULL);
3208 mutex_unlock(&zonelists_mutex);
3209 }
3210 }
3211out:
3212 mutex_unlock(&zl_order_mutex);
3213 return ret;
3214}
3215
3216
3217#define MAX_NODE_LOAD (nr_online_nodes)
3218static int node_load[MAX_NUMNODES];
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
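/*
 * find_next_best_node - find the next node that should appear in a
 * given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * Nodes are ranked by distance from @node, with additional penalties
 * for lower-numbered nodes, for nodes that have CPUs, and for nodes
 * already carrying load (node_load[]), so that fallback lists are
 * spread out.  Returns -1 if no further nodes are found.
 */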
3234static int find_next_best_node(int node, nodemask_t *used_node_mask)
3235{
3236 int n, val;
3237 int min_val = INT_MAX;
3238 int best_node = -1;
3239 const struct cpumask *tmp = cpumask_of_node(0);
3240
3241
3242 if (!node_isset(node, *used_node_mask)) {
3243 node_set(node, *used_node_mask);
3244 return node;
3245 }
3246
3247 for_each_node_state(n, N_MEMORY) {
3248
3249
3250 if (node_isset(n, *used_node_mask))
3251 continue;
3252
3253
3254 val = node_distance(node, n);
3255
3256
3257 val += (n < node);
3258
3259
3260 tmp = cpumask_of_node(n);
3261 if (!cpumask_empty(tmp))
3262 val += PENALTY_FOR_NODE_WITH_CPUS;
3263
3264
3265 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3266 val += node_load[n];
3267
3268 if (val < min_val) {
3269 min_val = val;
3270 best_node = n;
3271 }
3272 }
3273
3274 if (best_node >= 0)
3275 node_set(best_node, *used_node_mask);
3276
3277 return best_node;
3278}
3279
3280
3281
3282
3283
3284
3285
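/*
 * Build zonelists ordered by node and zones within node.  This gives
 * maximum locality (a node's normal zone overflows into its own low
 * zones before spilling to other nodes) at the risk of exhausting the
 * local DMA zone.
 */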
3286static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
3287{
3288 int j;
3289 struct zonelist *zonelist;
3290
3291 zonelist = &pgdat->node_zonelists[0];
3292 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
3293 ;
3294 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3295 MAX_NR_ZONES - 1);
3296 zonelist->_zonerefs[j].zone = NULL;
3297 zonelist->_zonerefs[j].zone_idx = 0;
3298}
3299
3300
3301
3302
3303static void build_thisnode_zonelists(pg_data_t *pgdat)
3304{
3305 int j;
3306 struct zonelist *zonelist;
3307
3308 zonelist = &pgdat->node_zonelists[1];
3309 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3310 zonelist->_zonerefs[j].zone = NULL;
3311 zonelist->_zonerefs[j].zone_idx = 0;
3312}
3313
3314
3315
3316
3317
3318
3319
3320static int node_order[MAX_NUMNODES];
3321
3322static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3323{
3324 int pos, j, node;
3325 int zone_type;
3326 struct zone *z;
3327 struct zonelist *zonelist;
3328
3329 zonelist = &pgdat->node_zonelists[0];
3330 pos = 0;
3331 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3332 for (j = 0; j < nr_nodes; j++) {
3333 node = node_order[j];
3334 z = &NODE_DATA(node)->node_zones[zone_type];
3335 if (populated_zone(z)) {
3336 zoneref_set_zone(z,
3337 &zonelist->_zonerefs[pos++]);
3338 check_highest_zone(zone_type);
3339 }
3340 }
3341 }
3342 zonelist->_zonerefs[pos].zone = NULL;
3343 zonelist->_zonerefs[pos].zone_idx = 0;
3344}
3345
3346static int default_zonelist_order(void)
3347{
3348 int nid, zone_type;
3349	unsigned long low_kmem_size, total_size;
3350 struct zone *z;
3351 int average_size;
3352
3353
3354
3355
3356
3357
3358
3359 low_kmem_size = 0;
3360 total_size = 0;
3361 for_each_online_node(nid) {
3362 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3363 z = &NODE_DATA(nid)->node_zones[zone_type];
3364 if (populated_zone(z)) {
3365 if (zone_type < ZONE_NORMAL)
3366 low_kmem_size += z->present_pages;
3367 total_size += z->present_pages;
3368 } else if (zone_type == ZONE_NORMAL) {
3369
3370
3371
3372
3373
3374
3375
3376 return ZONELIST_ORDER_NODE;
3377 }
3378 }
3379 }
3380 if (!low_kmem_size ||
3381 low_kmem_size > total_size/2)
3382 return ZONELIST_ORDER_NODE;
3383
3384
3385
3386
3387
3388 average_size = total_size /
3389 (nodes_weight(node_states[N_MEMORY]) + 1);
3390 for_each_online_node(nid) {
3391 low_kmem_size = 0;
3392 total_size = 0;
3393 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3394 z = &NODE_DATA(nid)->node_zones[zone_type];
3395 if (populated_zone(z)) {
3396 if (zone_type < ZONE_NORMAL)
3397 low_kmem_size += z->present_pages;
3398 total_size += z->present_pages;
3399 }
3400 }
3401 if (low_kmem_size &&
3402 total_size > average_size &&
3403 low_kmem_size > total_size * 70/100)
3404 return ZONELIST_ORDER_NODE;
3405 }
3406 return ZONELIST_ORDER_ZONE;
3407}
3408
3409static void set_zonelist_order(void)
3410{
3411 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3412 current_zonelist_order = default_zonelist_order();
3413 else
3414 current_zonelist_order = user_zonelist_order;
3415}
3416
3417static void build_zonelists(pg_data_t *pgdat)
3418{
3419 int j, node, load;
3420 enum zone_type i;
3421 nodemask_t used_mask;
3422 int local_node, prev_node;
3423 struct zonelist *zonelist;
3424 int order = current_zonelist_order;
3425
3426
3427 for (i = 0; i < MAX_ZONELISTS; i++) {
3428 zonelist = pgdat->node_zonelists + i;
3429 zonelist->_zonerefs[0].zone = NULL;
3430 zonelist->_zonerefs[0].zone_idx = 0;
3431 }
3432
3433
3434 local_node = pgdat->node_id;
3435 load = nr_online_nodes;
3436 prev_node = local_node;
3437 nodes_clear(used_mask);
3438
3439 memset(node_order, 0, sizeof(node_order));
3440 j = 0;
3441
3442 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3443
3444
3445
3446
3447
3448 if (node_distance(local_node, node) !=
3449 node_distance(local_node, prev_node))
3450 node_load[node] = load;
3451
3452 prev_node = node;
3453 load--;
3454 if (order == ZONELIST_ORDER_NODE)
3455 build_zonelists_in_node_order(pgdat, node);
3456 else
3457 node_order[j++] = node;
3458 }
3459
3460 if (order == ZONELIST_ORDER_ZONE) {
3461
3462 build_zonelists_in_zone_order(pgdat, j);
3463 }
3464
3465 build_thisnode_zonelists(pgdat);
3466}
3467
3468
3469static void build_zonelist_cache(pg_data_t *pgdat)
3470{
3471 struct zonelist *zonelist;
3472 struct zonelist_cache *zlc;
3473 struct zoneref *z;
3474
3475 zonelist = &pgdat->node_zonelists[0];
3476 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3477 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3478 for (z = zonelist->_zonerefs; z->zone; z++)
3479 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3480}
3481
3482#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3483
3484
3485
3486
3487
3488
3489int local_memory_node(int node)
3490{
3491 struct zone *zone;
3492
3493 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3494 gfp_zone(GFP_KERNEL),
3495 NULL,
3496 &zone);
3497 return zone->node;
3498}
3499#endif
3500
3501#else
3502
3503static void set_zonelist_order(void)
3504{
3505 current_zonelist_order = ZONELIST_ORDER_ZONE;
3506}
3507
3508static void build_zonelists(pg_data_t *pgdat)
3509{
3510 int node, local_node;
3511 enum zone_type j;
3512 struct zonelist *zonelist;
3513
3514 local_node = pgdat->node_id;
3515
3516 zonelist = &pgdat->node_zonelists[0];
3517 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3528 if (!node_online(node))
3529 continue;
3530 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3531 MAX_NR_ZONES - 1);
3532 }
3533 for (node = 0; node < local_node; node++) {
3534 if (!node_online(node))
3535 continue;
3536 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3537 MAX_NR_ZONES - 1);
3538 }
3539
3540 zonelist->_zonerefs[j].zone = NULL;
3541 zonelist->_zonerefs[j].zone_idx = 0;
3542}
3543
3544
3545static void build_zonelist_cache(pg_data_t *pgdat)
3546{
3547 pgdat->node_zonelists[0].zlcache_ptr = NULL;
3548}
3549
3550#endif
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3568static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3569static void setup_zone_pageset(struct zone *zone);
3570
3571
3572
3573
3574
3575DEFINE_MUTEX(zonelists_mutex);
3576
3577
3578static int __build_all_zonelists(void *data)
3579{
3580 int nid;
3581 int cpu;
3582 pg_data_t *self = data;
3583
3584#ifdef CONFIG_NUMA
3585 memset(node_load, 0, sizeof(node_load));
3586#endif
3587
3588 if (self && !node_online(self->node_id)) {
3589 build_zonelists(self);
3590 build_zonelist_cache(self);
3591 }
3592
3593 for_each_online_node(nid) {
3594 pg_data_t *pgdat = NODE_DATA(nid);
3595
3596 build_zonelists(pgdat);
3597 build_zonelist_cache(pgdat);
3598 }
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613 for_each_possible_cpu(cpu) {
3614 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3615
3616#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3617
3618
3619
3620
3621
3622
3623
3624
3625 if (cpu_online(cpu))
3626 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3627#endif
3628 }
3629
3630 return 0;
3631}
3632
3633
3634
3635
3636
3637void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
3638{
3639 set_zonelist_order();
3640
3641 if (system_state == SYSTEM_BOOTING) {
3642 __build_all_zonelists(NULL);
3643 mminit_verify_zonelist();
3644 cpuset_init_current_mems_allowed();
3645 } else {
3646
3647
3648#ifdef CONFIG_MEMORY_HOTPLUG
3649 if (zone)
3650 setup_zone_pageset(zone);
3651#endif
3652 stop_machine(__build_all_zonelists, pgdat, NULL);
3653
3654 }
3655 vm_total_pages = nr_free_pagecache_pages();
3656
3657
3658
3659
3660
3661
3662
3663 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3664 page_group_by_mobility_disabled = 1;
3665 else
3666 page_group_by_mobility_disabled = 0;
3667
3668 printk("Built %i zonelists in %s order, mobility grouping %s. "
3669 "Total pages: %ld\n",
3670 nr_online_nodes,
3671 zonelist_order_name[current_zonelist_order],
3672 page_group_by_mobility_disabled ? "off" : "on",
3673 vm_total_pages);
3674#ifdef CONFIG_NUMA
3675 printk("Policy zone: %s\n", zone_names[policy_zone]);
3676#endif
3677}
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690#define PAGES_PER_WAITQUEUE 256
3691
3692#ifndef CONFIG_MEMORY_HOTPLUG
3693static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3694{
3695 unsigned long size = 1;
3696
3697 pages /= PAGES_PER_WAITQUEUE;
3698
3699 while (size < pages)
3700 size <<= 1;
3701
3702
3703
3704
3705
3706
3707 size = min(size, 4096UL);
3708
3709 return max(size, 4UL);
3710}
3711#else
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3730{
3731 return 4096UL;
3732}
3733#endif
3734
3735
3736
3737
3738
3739
3740static inline unsigned long wait_table_bits(unsigned long size)
3741{
3742 return ffz(~size);
3743}
3744
3745#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3746
3747
3748
3749
3750static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3751{
3752 unsigned long pfn;
3753
3754 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3755 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3756 return 1;
3757 }
3758 return 0;
3759}
3760
3761
3762
3763
3764
3765
3766
3767
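/*
 * Mark a number of pageblocks as MIGRATE_RESERVE.  The number of
 * reserved blocks is derived from min_wmark_pages(zone), capped at two
 * blocks, so the reserve tends to hold contiguous free pages for
 * high-order atomic allocations.
 */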
3768static void setup_zone_migrate_reserve(struct zone *zone)
3769{
3770 unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3771 struct page *page;
3772 unsigned long block_migratetype;
3773 int reserve;
3774
3775
3776
3777
3778
3779
3780
3781 start_pfn = zone->zone_start_pfn;
3782 end_pfn = start_pfn + zone->spanned_pages;
3783 start_pfn = roundup(start_pfn, pageblock_nr_pages);
3784 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3785 pageblock_order;
3786
3787
3788
3789
3790
3791
3792
3793
3794 reserve = min(2, reserve);
3795
3796 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3797 if (!pfn_valid(pfn))
3798 continue;
3799 page = pfn_to_page(pfn);
3800
3801
3802 if (page_to_nid(page) != zone_to_nid(zone))
3803 continue;
3804
3805 block_migratetype = get_pageblock_migratetype(page);
3806
3807
3808 if (reserve > 0) {
3809
3810
3811
3812
3813 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3814 if (pageblock_is_reserved(pfn, block_end_pfn))
3815 continue;
3816
3817
3818 if (block_migratetype == MIGRATE_RESERVE) {
3819 reserve--;
3820 continue;
3821 }
3822
3823
3824 if (block_migratetype == MIGRATE_MOVABLE) {
3825 set_pageblock_migratetype(page,
3826 MIGRATE_RESERVE);
3827 move_freepages_block(zone, page,
3828 MIGRATE_RESERVE);
3829 reserve--;
3830 continue;
3831 }
3832 }
3833
3834
3835
3836
3837
3838 if (block_migratetype == MIGRATE_RESERVE) {
3839 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3840 move_freepages_block(zone, page, MIGRATE_MOVABLE);
3841 }
3842 }
3843}
3844
3845
3846
3847
3848
3849
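/*
 * Initialise the struct page for every pfn in the range: set the zone
 * and node links, give each page an initial reference count and mark it
 * reserved (free pages are released later by the boot allocator).
 * Pageblocks that fall fully inside the zone start out MIGRATE_MOVABLE.
 */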
3850void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3851 unsigned long start_pfn, enum memmap_context context)
3852{
3853 struct page *page;
3854 unsigned long end_pfn = start_pfn + size;
3855 unsigned long pfn;
3856 struct zone *z;
3857
3858 if (highest_memmap_pfn < end_pfn - 1)
3859 highest_memmap_pfn = end_pfn - 1;
3860
3861 z = &NODE_DATA(nid)->node_zones[zone];
3862 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3863
3864
3865
3866
3867
3868 if (context == MEMMAP_EARLY) {
3869 if (!early_pfn_valid(pfn))
3870 continue;
3871 if (!early_pfn_in_nid(pfn, nid))
3872 continue;
3873 }
3874 page = pfn_to_page(pfn);
3875 set_page_links(page, zone, nid, pfn);
3876 mminit_verify_page_links(page, zone, nid, pfn);
3877 init_page_count(page);
3878 reset_page_mapcount(page);
3879 reset_page_last_nid(page);
3880 SetPageReserved(page);
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895 if ((z->zone_start_pfn <= pfn)
3896 && (pfn < z->zone_start_pfn + z->spanned_pages)
3897 && !(pfn & (pageblock_nr_pages - 1)))
3898 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3899
3900 INIT_LIST_HEAD(&page->lru);
3901#ifdef WANT_PAGE_VIRTUAL
3902
3903 if (!is_highmem_idx(zone))
3904 set_page_address(page, __va(pfn << PAGE_SHIFT));
3905#endif
3906 }
3907}
3908
3909static void __meminit zone_init_free_lists(struct zone *zone)
3910{
3911 int order, t;
3912 for_each_migratetype_order(order, t) {
3913 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3914 zone->free_area[order].nr_free = 0;
3915 }
3916}
3917
3918#ifndef __HAVE_ARCH_MEMMAP_INIT
3919#define memmap_init(size, nid, zone, start_pfn) \
3920 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3921#endif
3922
3923static int __meminit zone_batchsize(struct zone *zone)
3924{
3925#ifdef CONFIG_MMU
3926 int batch;
3927
3928
3929
3930
3931
3932
3933
3934 batch = zone->present_pages / 1024;
3935 if (batch * PAGE_SIZE > 512 * 1024)
3936 batch = (512 * 1024) / PAGE_SIZE;
3937 batch /= 4;
3938 if (batch < 1)
3939 batch = 1;
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951 batch = rounddown_pow_of_two(batch + batch/2) - 1;
3952
3953 return batch;
3954
3955#else
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969 return 0;
3970#endif
3971}
3972
3973static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3974{
3975 struct per_cpu_pages *pcp;
3976 int migratetype;
3977
3978 memset(p, 0, sizeof(*p));
3979
3980 pcp = &p->pcp;
3981 pcp->count = 0;
3982 pcp->high = 6 * batch;
3983 pcp->batch = max(1UL, 1 * batch);
3984 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3985 INIT_LIST_HEAD(&pcp->lists[migratetype]);
3986}
3987
3988
3989
3990
3991
3992
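/*
 * setup_pagelist_highmark() sets the high water mark for the hot
 * per-cpu pagelist to @high for pageset @p, with batch set to roughly
 * a quarter of that.
 */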
3993static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3994 unsigned long high)
3995{
3996 struct per_cpu_pages *pcp;
3997
3998 pcp = &p->pcp;
3999 pcp->high = high;
4000 pcp->batch = max(1UL, high/4);
4001 if ((high/4) > (PAGE_SHIFT * 8))
4002 pcp->batch = PAGE_SHIFT * 8;
4003}
4004
4005static void __meminit setup_zone_pageset(struct zone *zone)
4006{
4007 int cpu;
4008
4009 zone->pageset = alloc_percpu(struct per_cpu_pageset);
4010
4011 for_each_possible_cpu(cpu) {
4012 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4013
4014 setup_pageset(pcp, zone_batchsize(zone));
4015
4016 if (percpu_pagelist_fraction)
4017 setup_pagelist_highmark(pcp,
4018 (zone->present_pages /
4019 percpu_pagelist_fraction));
4020 }
4021}
4022
4023
4024
4025
4026
4027void __init setup_per_cpu_pageset(void)
4028{
4029 struct zone *zone;
4030
4031 for_each_populated_zone(zone)
4032 setup_zone_pageset(zone);
4033}
4034
4035static noinline __init_refok
4036int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
4037{
4038 int i;
4039 struct pglist_data *pgdat = zone->zone_pgdat;
4040 size_t alloc_size;
4041
4042
4043
4044
4045
4046 zone->wait_table_hash_nr_entries =
4047 wait_table_hash_nr_entries(zone_size_pages);
4048 zone->wait_table_bits =
4049 wait_table_bits(zone->wait_table_hash_nr_entries);
4050 alloc_size = zone->wait_table_hash_nr_entries
4051 * sizeof(wait_queue_head_t);
4052
4053 if (!slab_is_available()) {
4054 zone->wait_table = (wait_queue_head_t *)
4055 alloc_bootmem_node_nopanic(pgdat, alloc_size);
4056 } else {
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067 zone->wait_table = vmalloc(alloc_size);
4068 }
4069 if (!zone->wait_table)
4070 return -ENOMEM;
4071
4072 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
4073 init_waitqueue_head(zone->wait_table + i);
4074
4075 return 0;
4076}
4077
4078static __meminit void zone_pcp_init(struct zone *zone)
4079{
4080
4081
4082
4083
4084
4085 zone->pageset = &boot_pageset;
4086
4087 if (zone->present_pages)
4088 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
4089 zone->name, zone->present_pages,
4090 zone_batchsize(zone));
4091}
4092
4093int __meminit init_currently_empty_zone(struct zone *zone,
4094 unsigned long zone_start_pfn,
4095 unsigned long size,
4096 enum memmap_context context)
4097{
4098 struct pglist_data *pgdat = zone->zone_pgdat;
4099 int ret;
4100 ret = zone_wait_table_init(zone, size);
4101 if (ret)
4102 return ret;
4103 pgdat->nr_zones = zone_idx(zone) + 1;
4104
4105 zone->zone_start_pfn = zone_start_pfn;
4106
4107 mminit_dprintk(MMINIT_TRACE, "memmap_init",
4108 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4109 pgdat->node_id,
4110 (unsigned long)zone_idx(zone),
4111 zone_start_pfn, (zone_start_pfn + size));
4112
4113 zone_init_free_lists(zone);
4114
4115 return 0;
4116}
4117
4118#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4119#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
4120
4121
4122
4123
4124
4125
4126int __meminit __early_pfn_to_nid(unsigned long pfn)
4127{
4128 unsigned long start_pfn, end_pfn;
4129 int i, nid;
4130
4131 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
4132 if (start_pfn <= pfn && pfn < end_pfn)
4133 return nid;
4134
4135 return -1;
4136}
4137#endif
4138
4139int __meminit early_pfn_to_nid(unsigned long pfn)
4140{
4141 int nid;
4142
4143 nid = __early_pfn_to_nid(pfn);
4144 if (nid >= 0)
4145 return nid;
4146
4147 return 0;
4148}
4149
4150#ifdef CONFIG_NODES_SPAN_OTHER_NODES
4151bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
4152{
4153 int nid;
4154
4155 nid = __early_pfn_to_nid(pfn);
4156 if (nid >= 0 && nid != node)
4157 return false;
4158 return true;
4159}
4160#endif
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
4172{
4173 unsigned long start_pfn, end_pfn;
4174 int i, this_nid;
4175
4176 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4177 start_pfn = min(start_pfn, max_low_pfn);
4178 end_pfn = min(end_pfn, max_low_pfn);
4179
4180 if (start_pfn < end_pfn)
4181 free_bootmem_node(NODE_DATA(this_nid),
4182 PFN_PHYS(start_pfn),
4183 (end_pfn - start_pfn) << PAGE_SHIFT);
4184 }
4185}
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195void __init sparse_memory_present_with_active_regions(int nid)
4196{
4197 unsigned long start_pfn, end_pfn;
4198 int i, this_nid;
4199
4200 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4201 memory_present(this_nid, start_pfn, end_pfn);
4202}
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215void __meminit get_pfn_range_for_nid(unsigned int nid,
4216 unsigned long *start_pfn, unsigned long *end_pfn)
4217{
4218 unsigned long this_start_pfn, this_end_pfn;
4219 int i;
4220
4221 *start_pfn = -1UL;
4222 *end_pfn = 0;
4223
4224 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4225 *start_pfn = min(*start_pfn, this_start_pfn);
4226 *end_pfn = max(*end_pfn, this_end_pfn);
4227 }
4228
4229 if (*start_pfn == -1UL)
4230 *start_pfn = 0;
4231}
4232
4233
4234
4235
4236
4237
4238static void __init find_usable_zone_for_movable(void)
4239{
4240 int zone_index;
4241 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4242 if (zone_index == ZONE_MOVABLE)
4243 continue;
4244
4245 if (arch_zone_highest_possible_pfn[zone_index] >
4246 arch_zone_lowest_possible_pfn[zone_index])
4247 break;
4248 }
4249
4250 VM_BUG_ON(zone_index == -1);
4251 movable_zone = zone_index;
4252}
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264static void __meminit adjust_zone_range_for_zone_movable(int nid,
4265 unsigned long zone_type,
4266 unsigned long node_start_pfn,
4267 unsigned long node_end_pfn,
4268 unsigned long *zone_start_pfn,
4269 unsigned long *zone_end_pfn)
4270{
4271
4272 if (zone_movable_pfn[nid]) {
4273
4274 if (zone_type == ZONE_MOVABLE) {
4275 *zone_start_pfn = zone_movable_pfn[nid];
4276 *zone_end_pfn = min(node_end_pfn,
4277 arch_zone_highest_possible_pfn[movable_zone]);
4278
4279
4280 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4281 *zone_end_pfn > zone_movable_pfn[nid]) {
4282 *zone_end_pfn = zone_movable_pfn[nid];
4283
4284
4285 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4286 *zone_start_pfn = *zone_end_pfn;
4287 }
4288}
4289
4290
4291
4292
4293
4294static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4295 unsigned long zone_type,
4296 unsigned long *ignored)
4297{
4298 unsigned long node_start_pfn, node_end_pfn;
4299 unsigned long zone_start_pfn, zone_end_pfn;
4300
4301
4302 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4303 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4304 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4305 adjust_zone_range_for_zone_movable(nid, zone_type,
4306 node_start_pfn, node_end_pfn,
4307 &zone_start_pfn, &zone_end_pfn);
4308
4309
4310 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4311 return 0;
4312
4313
4314 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4315 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4316
4317
4318 return zone_end_pfn - zone_start_pfn;
4319}
4320
4321
4322
4323
4324
4325unsigned long __meminit __absent_pages_in_range(int nid,
4326 unsigned long range_start_pfn,
4327 unsigned long range_end_pfn)
4328{
4329 unsigned long nr_absent = range_end_pfn - range_start_pfn;
4330 unsigned long start_pfn, end_pfn;
4331 int i;
4332
4333 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4334 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4335 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4336 nr_absent -= end_pfn - start_pfn;
4337 }
4338 return nr_absent;
4339}
4340
4341
4342
4343
4344
4345
4346
4347
4348unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4349 unsigned long end_pfn)
4350{
4351 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4352}
4353
4354
4355static unsigned long __meminit zone_absent_pages_in_node(int nid,
4356 unsigned long zone_type,
4357 unsigned long *ignored)
4358{
4359 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4360 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
4361 unsigned long node_start_pfn, node_end_pfn;
4362 unsigned long zone_start_pfn, zone_end_pfn;
4363
4364 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4365 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4366 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
4367
4368 adjust_zone_range_for_zone_movable(nid, zone_type,
4369 node_start_pfn, node_end_pfn,
4370 &zone_start_pfn, &zone_end_pfn);
4371 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4372}
4373
4374#else
4375static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4376 unsigned long zone_type,
4377 unsigned long *zones_size)
4378{
4379 return zones_size[zone_type];
4380}
4381
4382static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4383 unsigned long zone_type,
4384 unsigned long *zholes_size)
4385{
4386 if (!zholes_size)
4387 return 0;
4388
4389 return zholes_size[zone_type];
4390}
4391
4392#endif
4393
4394static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4395 unsigned long *zones_size, unsigned long *zholes_size)
4396{
4397 unsigned long realtotalpages, totalpages = 0;
4398 enum zone_type i;
4399
4400 for (i = 0; i < MAX_NR_ZONES; i++)
4401 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4402 zones_size);
4403 pgdat->node_spanned_pages = totalpages;
4404
4405 realtotalpages = totalpages;
4406 for (i = 0; i < MAX_NR_ZONES; i++)
4407 realtotalpages -=
4408 zone_absent_pages_in_node(pgdat->node_id, i,
4409 zholes_size);
4410 pgdat->node_present_pages = realtotalpages;
4411 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4412 realtotalpages);
4413}
4414
4415#ifndef CONFIG_SPARSEMEM
4416
4417
4418
4419
4420
4421
4422
4423static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
4424{
4425 unsigned long usemapsize;
4426
4427 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
4428 usemapsize = roundup(zonesize, pageblock_nr_pages);
4429 usemapsize = usemapsize >> pageblock_order;
4430 usemapsize *= NR_PAGEBLOCK_BITS;
4431 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4432
4433 return usemapsize / 8;
4434}
4435
4436static void __init setup_usemap(struct pglist_data *pgdat,
4437 struct zone *zone,
4438 unsigned long zone_start_pfn,
4439 unsigned long zonesize)
4440{
4441 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
4442 zone->pageblock_flags = NULL;
4443 if (usemapsize)
4444 zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4445 usemapsize);
4446}
4447#else
4448static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
4449 unsigned long zone_start_pfn, unsigned long zonesize) {}
4450#endif
4451
4452#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4453
4454
4455void __init set_pageblock_order(void)
4456{
4457 unsigned int order;
4458
4459
4460 if (pageblock_order)
4461 return;
4462
4463 if (HPAGE_SHIFT > PAGE_SHIFT)
4464 order = HUGETLB_PAGE_ORDER;
4465 else
4466 order = MAX_ORDER - 1;
4467
4468
4469
4470
4471
4472
4473 pageblock_order = order;
4474}
4475#else
4476
4477
4478
4479
4480
4481
4482
4483void __init set_pageblock_order(void)
4484{
4485}
4486
4487#endif
4488
4489static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
4490 unsigned long present_pages)
4491{
4492 unsigned long pages = spanned_pages;
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502 if (spanned_pages > present_pages + (present_pages >> 4) &&
4503 IS_ENABLED(CONFIG_SPARSEMEM))
4504 pages = present_pages;
4505
4506 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
4507}
4508
4509
4510
4511
4512
4513
4514
4515
4516
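/*
 * Set up the zone data structures for a node:
 *   - compute spanned/present/managed page counts
 *   - initialise locks, per-cpu pagesets and free lists
 *   - initialise the memmap for each populated zone
 *
 * NOTE: pgdat should get zeroed by the caller.
 */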
4517static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4518 unsigned long *zones_size, unsigned long *zholes_size)
4519{
4520 enum zone_type j;
4521 int nid = pgdat->node_id;
4522 unsigned long zone_start_pfn = pgdat->node_start_pfn;
4523 int ret;
4524
4525 pgdat_resize_init(pgdat);
4526#ifdef CONFIG_NUMA_BALANCING
4527 spin_lock_init(&pgdat->numabalancing_migrate_lock);
4528 pgdat->numabalancing_migrate_nr_pages = 0;
4529 pgdat->numabalancing_migrate_next_window = jiffies;
4530#endif
4531 init_waitqueue_head(&pgdat->kswapd_wait);
4532 init_waitqueue_head(&pgdat->pfmemalloc_wait);
4533 pgdat_page_cgroup_init(pgdat);
4534
4535 for (j = 0; j < MAX_NR_ZONES; j++) {
4536 struct zone *zone = pgdat->node_zones + j;
4537 unsigned long size, realsize, freesize, memmap_pages;
4538
4539 size = zone_spanned_pages_in_node(nid, j, zones_size);
4540 realsize = freesize = size - zone_absent_pages_in_node(nid, j,
4541 zholes_size);
4542
4543
4544
4545
4546
4547
4548 memmap_pages = calc_memmap_size(size, realsize);
4549 if (freesize >= memmap_pages) {
4550 freesize -= memmap_pages;
4551 if (memmap_pages)
4552 printk(KERN_DEBUG
4553 " %s zone: %lu pages used for memmap\n",
4554 zone_names[j], memmap_pages);
4555 } else
4556 printk(KERN_WARNING
4557 " %s zone: %lu pages exceeds freesize %lu\n",
4558 zone_names[j], memmap_pages, freesize);
4559
4560
4561 if (j == 0 && freesize > dma_reserve) {
4562 freesize -= dma_reserve;
4563 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
4564 zone_names[0], dma_reserve);
4565 }
4566
4567 if (!is_highmem_idx(j))
4568 nr_kernel_pages += freesize;
4569
4570 else if (nr_kernel_pages > memmap_pages * 2)
4571 nr_kernel_pages -= memmap_pages;
4572 nr_all_pages += freesize;
4573
4574 zone->spanned_pages = size;
4575 zone->present_pages = freesize;
4576
4577
4578
4579
4580
4581 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
4582#ifdef CONFIG_NUMA
4583 zone->node = nid;
4584 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
4585 / 100;
4586 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
4587#endif
4588 zone->name = zone_names[j];
4589 spin_lock_init(&zone->lock);
4590 spin_lock_init(&zone->lru_lock);
4591 zone_seqlock_init(zone);
4592 zone->zone_pgdat = pgdat;
4593
4594 zone_pcp_init(zone);
4595 lruvec_init(&zone->lruvec);
4596 if (!size)
4597 continue;
4598
4599 set_pageblock_order();
4600 setup_usemap(pgdat, zone, zone_start_pfn, size);
4601 ret = init_currently_empty_zone(zone, zone_start_pfn,
4602 size, MEMMAP_EARLY);
4603 BUG_ON(ret);
4604 memmap_init(size, nid, j, zone_start_pfn);
4605 zone_start_pfn += size;
4606 }
4607}
4608
4609static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4610{
4611
4612 if (!pgdat->node_spanned_pages)
4613 return;
4614
4615#ifdef CONFIG_FLAT_NODE_MEM_MAP
4616
4617 if (!pgdat->node_mem_map) {
4618 unsigned long size, start, end;
4619 struct page *map;
4620
4621
4622
4623
4624
4625
4626 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4627 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4628 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4629 size = (end - start) * sizeof(struct page);
4630 map = alloc_remap(pgdat->node_id, size);
4631 if (!map)
4632 map = alloc_bootmem_node_nopanic(pgdat, size);
4633 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4634 }
4635#ifndef CONFIG_NEED_MULTIPLE_NODES
4636
4637
4638
4639 if (pgdat == NODE_DATA(0)) {
4640 mem_map = NODE_DATA(0)->node_mem_map;
4641#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4642 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4643 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4644#endif
4645 }
4646#endif
4647#endif
4648}
4649
4650void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4651 unsigned long node_start_pfn, unsigned long *zholes_size)
4652{
4653 pg_data_t *pgdat = NODE_DATA(nid);
4654
4655
4656 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
4657
4658 pgdat->node_id = nid;
4659 pgdat->node_start_pfn = node_start_pfn;
4660 init_zone_allows_reclaim(nid);
4661 calculate_node_totalpages(pgdat, zones_size, zholes_size);
4662
4663 alloc_node_mem_map(pgdat);
4664#ifdef CONFIG_FLAT_NODE_MEM_MAP
4665 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4666 nid, (unsigned long)pgdat,
4667 (unsigned long)pgdat->node_mem_map);
4668#endif
4669
4670 free_area_init_core(pgdat, zones_size, zholes_size);
4671}
4672
4673#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4674
4675#if MAX_NUMNODES > 1
4676
4677
4678
4679static void __init setup_nr_node_ids(void)
4680{
4681 unsigned int node;
4682 unsigned int highest = 0;
4683
4684 for_each_node_mask(node, node_possible_map)
4685 highest = node;
4686 nr_node_ids = highest + 1;
4687}
4688#else
4689static inline void setup_nr_node_ids(void)
4690{
4691}
4692#endif
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
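/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * Walks the early memory ranges and returns the largest power-of-two
 * pfn alignment at which any two adjacent ranges on different nodes can
 * still be told apart, i.e. the alignment that preserves node
 * boundaries.  Should be called after the node map is populated.
 */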
4713unsigned long __init node_map_pfn_alignment(void)
4714{
4715 unsigned long accl_mask = 0, last_end = 0;
4716 unsigned long start, end, mask;
4717 int last_nid = -1;
4718 int i, nid;
4719
4720 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
4721 if (!start || last_nid < 0 || last_nid == nid) {
4722 last_nid = nid;
4723 last_end = end;
4724 continue;
4725 }
4726
4727
4728
4729
4730
4731
4732 mask = ~((1 << __ffs(start)) - 1);
4733 while (mask && last_end <= (start & (mask << 1)))
4734 mask <<= 1;
4735
4736
4737 accl_mask |= mask;
4738 }
4739
4740
4741 return ~accl_mask + 1;
4742}
4743
4744
4745static unsigned long __init find_min_pfn_for_node(int nid)
4746{
4747 unsigned long min_pfn = ULONG_MAX;
4748 unsigned long start_pfn;
4749 int i;
4750
4751 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
4752 min_pfn = min(min_pfn, start_pfn);
4753
4754 if (min_pfn == ULONG_MAX) {
4755 printk(KERN_WARNING
4756 "Could not find start_pfn for node %d\n", nid);
4757 return 0;
4758 }
4759
4760 return min_pfn;
4761}
4762
4763
4764
4765
4766
4767
4768
4769unsigned long __init find_min_pfn_with_active_regions(void)
4770{
4771 return find_min_pfn_for_node(MAX_NUMNODES);
4772}
4773
4774
4775
4776
4777
4778
4779static unsigned long __init early_calculate_totalpages(void)
4780{
4781 unsigned long totalpages = 0;
4782 unsigned long start_pfn, end_pfn;
4783 int i, nid;
4784
4785 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4786 unsigned long pages = end_pfn - start_pfn;
4787
4788 totalpages += pages;
4789 if (pages)
4790 node_set_state(nid, N_MEMORY);
4791 }
4792 return totalpages;
4793}
4794
4795
4796
4797
4798
4799
4800
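/*
 * Find the PFN at which ZONE_MOVABLE begins on each node.  Kernel
 * (non-movable) memory is spread evenly between the nodes with memory
 * as long as they are large enough; nodes that are too small end up
 * holding proportionally more kernelcore.
 */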
4801static void __init find_zone_movable_pfns_for_nodes(void)
4802{
4803 int i, nid;
4804 unsigned long usable_startpfn;
4805 unsigned long kernelcore_node, kernelcore_remaining;
4806
4807 nodemask_t saved_node_state = node_states[N_MEMORY];
4808 unsigned long totalpages = early_calculate_totalpages();
4809 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819 if (required_movablecore) {
4820 unsigned long corepages;
4821
4822
4823
4824
4825
4826 required_movablecore =
4827 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4828 corepages = totalpages - required_movablecore;
4829
4830 required_kernelcore = max(required_kernelcore, corepages);
4831 }
4832
4833
4834 if (!required_kernelcore)
4835 goto out;
4836
4837
4838 find_usable_zone_for_movable();
4839 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4840
4841restart:
4842
4843 kernelcore_node = required_kernelcore / usable_nodes;
4844 for_each_node_state(nid, N_MEMORY) {
4845 unsigned long start_pfn, end_pfn;
4846