// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the zoned buddy allocator: the per-zone free lists live here and
 *  pages are allocated and freed through this file.  Note that kmalloc()
 *  lives in slab.c.
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/padata.h>
#include <linux/khugepaged.h>
#include <linux/buffer_head.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE ((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page later,
 * it only skips notifying the free page reporting infrastructure about
 * this particular free, e.g. when the page is freed from within the
 * page reporting code itself.
 */
#define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page at the tail of the relevant free list
 * instead of the head, e.g. when freeing pages that were never exposed to
 * the page allocator (boot, memory onlining) and should be handed out last.
 */
#define FPI_TO_TAIL ((__force fpi_t)BIT(1))

/*
 * Don't poison the memory with KASAN when freeing (only relevant for the
 * tag-based KASAN modes, see should_skip_kasan_poison()).  During boot,
 * all non-reserved memblock memory is exposed to the page allocator;
 * poisoning all of it would lengthen boot time, so this flag is used to
 * skip that poisoning for pages that were never handed out before.
 */
#define FPI_SKIP_KASAN_POISON ((__force fpi_t)BIT(2))

/* Prevent concurrent updaters of the per-cpu pageset ->high and ->batch fields. */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
124
125struct pagesets {
126 local_lock_t lock;
127};
128static DEFINE_PER_CPU(struct pagesets, pagesets) = {
129 .lock = INIT_LOCAL_LOCK(lock),
130};
131
132#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
133DEFINE_PER_CPU(int, numa_node);
134EXPORT_PER_CPU_SYMBOL(numa_node);
135#endif
136
137DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
138
139#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
148#endif

/* work_structs for global per-cpu page drains */
151struct pcpu_drain {
152 struct zone *zone;
153 struct work_struct work;
154};
155static DEFINE_MUTEX(pcpu_drain_mutex);
156static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
157
158#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
159volatile unsigned long latent_entropy __latent_entropy;
160EXPORT_SYMBOL(latent_entropy);
161#endif

/*
 * Array of node states.
 */
166nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
167 [N_POSSIBLE] = NODE_MASK_ALL,
168 [N_ONLINE] = { { [0] = 1UL } },
169#ifndef CONFIG_NUMA
170 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
171#ifdef CONFIG_HIGHMEM
172 [N_HIGH_MEMORY] = { { [0] = 1UL } },
173#endif
174 [N_MEMORY] = { { [0] = 1UL } },
175 [N_CPU] = { { [0] = 1UL } },
176#endif
177};
178EXPORT_SYMBOL(node_states);
179
180atomic_long_t _totalram_pages __read_mostly;
181EXPORT_SYMBOL(_totalram_pages);
182unsigned long totalreserve_pages __read_mostly;
183unsigned long totalcma_pages __read_mostly;
184
185int percpu_pagelist_high_fraction;
186gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
187DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
188EXPORT_SYMBOL(init_on_alloc);
189
190DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
191EXPORT_SYMBOL(init_on_free);
192
193static bool _init_on_alloc_enabled_early __read_mostly
194 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
195static int __init early_init_on_alloc(char *buf)
196{
197
198 return kstrtobool(buf, &_init_on_alloc_enabled_early);
199}
200early_param("init_on_alloc", early_init_on_alloc);
201
202static bool _init_on_free_enabled_early __read_mostly
203 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
204static int __init early_init_on_free(char *buf)
205{
206 return kstrtobool(buf, &_init_on_free_enabled_early);
207}
208early_param("init_on_free", early_init_on_free);

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist.  Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index the page was put on (e.g. when fallbacks occurred).
 */
218static inline int get_pcppage_migratetype(struct page *page)
219{
220 return page->index;
221}
222
223static inline void set_pcppage_migratetype(struct page *page, int migratetype)
224{
225 page->index = migratetype;
226}
227
228#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

241void pm_restore_gfp_mask(void)
242{
243 WARN_ON(!mutex_is_locked(&system_transition_mutex));
244 if (saved_gfp_mask) {
245 gfp_allowed_mask = saved_gfp_mask;
246 saved_gfp_mask = 0;
247 }
248}
249
250void pm_restrict_gfp_mask(void)
251{
252 WARN_ON(!mutex_is_locked(&system_transition_mutex));
253 WARN_ON(saved_gfp_mask);
254 saved_gfp_mask = gfp_allowed_mask;
255 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
256}
257
258bool pm_suspended_storage(void)
259{
260 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
261 return false;
262 return true;
263}
264#endif
265
266#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
267unsigned int pageblock_order __read_mostly;
268#endif
269
270static void __free_pages_ok(struct page *page, unsigned int order,
271 fpi_t fpi_flags);

/*
 * Ratios used to compute each zone's lowmem_reserve[] protection: when an
 * allocation that could have used a higher zone falls back to a lower zone,
 * the lower zone keeps (higher zones' managed pages) / ratio pages in
 * reserve.  A ratio of 0 disables the reserve for that zone.  See
 * setup_per_zone_lowmem_reserve().
 */
284int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
285#ifdef CONFIG_ZONE_DMA
286 [ZONE_DMA] = 256,
287#endif
288#ifdef CONFIG_ZONE_DMA32
289 [ZONE_DMA32] = 256,
290#endif
291 [ZONE_NORMAL] = 32,
292#ifdef CONFIG_HIGHMEM
293 [ZONE_HIGHMEM] = 0,
294#endif
295 [ZONE_MOVABLE] = 0,
296};
297
298static char * const zone_names[MAX_NR_ZONES] = {
299#ifdef CONFIG_ZONE_DMA
300 "DMA",
301#endif
302#ifdef CONFIG_ZONE_DMA32
303 "DMA32",
304#endif
305 "Normal",
306#ifdef CONFIG_HIGHMEM
307 "HighMem",
308#endif
309 "Movable",
310#ifdef CONFIG_ZONE_DEVICE
311 "Device",
312#endif
313};
314
315const char * const migratetype_names[MIGRATE_TYPES] = {
316 "Unmovable",
317 "Movable",
318 "Reclaimable",
319 "HighAtomic",
320#ifdef CONFIG_CMA
321 "CMA",
322#endif
323#ifdef CONFIG_MEMORY_ISOLATION
324 "Isolate",
325#endif
326};
327
328compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
329 [NULL_COMPOUND_DTOR] = NULL,
330 [COMPOUND_PAGE_DTOR] = free_compound_page,
331#ifdef CONFIG_HUGETLB_PAGE
332 [HUGETLB_PAGE_DTOR] = free_huge_page,
333#endif
334#ifdef CONFIG_TRANSPARENT_HUGEPAGE
335 [TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
336#endif
337};
338
339int min_free_kbytes = 1024;
340int user_min_free_kbytes = -1;
341int watermark_boost_factor __read_mostly = 15000;
342int watermark_scale_factor = 10;
343
344static unsigned long nr_kernel_pages __initdata;
345static unsigned long nr_all_pages __initdata;
346static unsigned long dma_reserve __initdata;
347
348static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
349static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
350static unsigned long required_kernelcore __initdata;
351static unsigned long required_kernelcore_percent __initdata;
352static unsigned long required_movablecore __initdata;
353static unsigned long required_movablecore_percent __initdata;
354static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
355static bool mirrored_kernelcore __meminitdata;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
360
361#if MAX_NUMNODES > 1
362unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
363unsigned int nr_online_nodes __read_mostly = 1;
364EXPORT_SYMBOL(nr_node_ids);
365EXPORT_SYMBOL(nr_online_nodes);
366#endif
367
368int page_group_by_mobility_disabled __read_mostly;
369
370#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has run, all deferred pages are initialized and
 * this path can be permanently disabled.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

/*
 * Calling kasan_poison_pages() only after deferred memory initialization
 * has completed.  Poisoning pages during deferred memory init will greatly
 * lengthen the process and cause problems in large memory systems as the
 * deferred pages initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time.  The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
391static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
392{
393 return static_branch_unlikely(&deferred_pages) ||
394 (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
395 (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
396 PageSkipKASanPoison(page);
397}

/* Returns true if the struct page for the pfn is uninitialised */
400static inline bool __meminit early_page_uninitialised(unsigned long pfn)
401{
402 int nid = early_pfn_to_nid(pfn);
403
404 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
405 return true;
406
407 return false;
408}
409

/*
 * Returns true when the remaining initialisation for this pfn should be
 * deferred until memory init is done later, from deferred_init_memmap().
 */
414static bool __meminit
415defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
416{
417 static unsigned long prev_end_pfn, nr_initialised;

	/*
	 * prev_end_pfn static that contains the end of previous zone.
	 * No need to protect because called very early in boot before smp_init.
	 */
423 if (prev_end_pfn != end_pfn) {
424 prev_end_pfn = end_pfn;
425 nr_initialised = 0;
426 }

	/* Always populate low zones for address-constrained allocations */
429 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
430 return false;
431
432 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
433 return true;
434
435
436
437
438 nr_initialised++;
439 if ((nr_initialised > PAGES_PER_SECTION) &&
440 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
441 NODE_DATA(nid)->first_deferred_pfn = pfn;
442 return true;
443 }
444 return false;
445}
446#else
447static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
448{
449 return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
450 (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
451 PageSkipKASanPoison(page);
452}
453
454static inline bool early_page_uninitialised(unsigned long pfn)
455{
456 return false;
457}
458
459static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
460{
461 return false;
462}
463#endif
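
/* Return a pointer to the bitmap storing bits affecting a block of pages */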
466static inline unsigned long *get_pageblock_bitmap(const struct page *page,
467 unsigned long pfn)
468{
469#ifdef CONFIG_SPARSEMEM
470 return section_to_usemap(__pfn_to_section(pfn));
471#else
472 return page_zone(page)->pageblock_flags;
473#endif
474}
475
476static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
477{
478#ifdef CONFIG_SPARSEMEM
479 pfn &= (PAGES_PER_SECTION-1);
480#else
481 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
482#endif
483 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
484}
485
486static __always_inline
487unsigned long __get_pfnblock_flags_mask(const struct page *page,
488 unsigned long pfn,
489 unsigned long mask)
490{
491 unsigned long *bitmap;
492 unsigned long bitidx, word_bitidx;
493 unsigned long word;
494
495 bitmap = get_pageblock_bitmap(page, pfn);
496 bitidx = pfn_to_bitidx(page, pfn);
497 word_bitidx = bitidx / BITS_PER_LONG;
498 bitidx &= (BITS_PER_LONG-1);
499
500 word = bitmap[word_bitidx];
501 return (word >> bitidx) & mask;
502}
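
/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */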
512unsigned long get_pfnblock_flags_mask(const struct page *page,
513 unsigned long pfn, unsigned long mask)
514{
515 return __get_pfnblock_flags_mask(page, pfn, mask);
516}
517
518static __always_inline int get_pfnblock_migratetype(const struct page *page,
519 unsigned long pfn)
520{
521 return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
522}
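
/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */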
531void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
532 unsigned long pfn,
533 unsigned long mask)
534{
535 unsigned long *bitmap;
536 unsigned long bitidx, word_bitidx;
537 unsigned long old_word, word;
538
539 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
540 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
541
542 bitmap = get_pageblock_bitmap(page, pfn);
543 bitidx = pfn_to_bitidx(page, pfn);
544 word_bitidx = bitidx / BITS_PER_LONG;
545 bitidx &= (BITS_PER_LONG-1);
546
547 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
548
549 mask <<= bitidx;
550 flags <<= bitidx;
551
552 word = READ_ONCE(bitmap[word_bitidx]);
553 for (;;) {
554 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
555 if (word == old_word)
556 break;
557 word = old_word;
558 }
559}
560
561void set_pageblock_migratetype(struct page *page, int migratetype)
562{
563 if (unlikely(page_group_by_mobility_disabled &&
564 migratetype < MIGRATE_PCPTYPES))
565 migratetype = MIGRATE_UNMOVABLE;
566
567 set_pfnblock_flags_mask(page, (unsigned long)migratetype,
568 page_to_pfn(page), MIGRATETYPE_MASK);
569}
570
571#ifdef CONFIG_DEBUG_VM
572static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
573{
574 int ret = 0;
575 unsigned seq;
576 unsigned long pfn = page_to_pfn(page);
577 unsigned long sp, start_pfn;
578
579 do {
580 seq = zone_span_seqbegin(zone);
581 start_pfn = zone->zone_start_pfn;
582 sp = zone->spanned_pages;
583 if (!zone_spans_pfn(zone, pfn))
584 ret = 1;
585 } while (zone_span_seqretry(zone, seq));
586
587 if (ret)
588 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
589 pfn, zone_to_nid(zone), zone->name,
590 start_pfn, start_pfn + sp);
591
592 return ret;
593}
594
595static int page_is_consistent(struct zone *zone, struct page *page)
596{
597 if (!pfn_valid_within(page_to_pfn(page)))
598 return 0;
599 if (zone != page_zone(page))
600 return 0;
601
602 return 1;
603}
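
/*
 * Temporary debugging check for pages not lying within a given zone.
 */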
607static int __maybe_unused bad_range(struct zone *zone, struct page *page)
608{
609 if (page_outside_zone_boundaries(zone, page))
610 return 1;
611 if (!page_is_consistent(zone, page))
612 return 1;
613
614 return 0;
615}
616#else
617static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
618{
619 return 0;
620}
621#endif
622
623static void bad_page(struct page *page, const char *reason)
624{
625 static unsigned long resume;
626 static unsigned long nr_shown;
627 static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
633 if (nr_shown == 60) {
634 if (time_before(jiffies, resume)) {
635 nr_unshown++;
636 goto out;
637 }
638 if (nr_unshown) {
639 pr_alert(
640 "BUG: Bad page state: %lu messages suppressed\n",
641 nr_unshown);
642 nr_unshown = 0;
643 }
644 nr_shown = 0;
645 }
646 if (nr_shown++ == 0)
647 resume = jiffies + 60 * HZ;
648
649 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
650 current->comm, page_to_pfn(page));
651 dump_page(page, reason);
652
653 print_modules();
654 dump_stack();
655out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
657 page_mapcount_reset(page);
658 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
659}
660
661static inline unsigned int order_to_pindex(int migratetype, int order)
662{
663 int base = order;
664
665#ifdef CONFIG_TRANSPARENT_HUGEPAGE
666 if (order > PAGE_ALLOC_COSTLY_ORDER) {
667 VM_BUG_ON(order != pageblock_order);
668 base = PAGE_ALLOC_COSTLY_ORDER + 1;
669 }
670#else
671 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
672#endif
673
674 return (MIGRATE_PCPTYPES * base) + migratetype;
675}
676
677static inline int pindex_to_order(unsigned int pindex)
678{
679 int order = pindex / MIGRATE_PCPTYPES;
680
681#ifdef CONFIG_TRANSPARENT_HUGEPAGE
682 if (order > PAGE_ALLOC_COSTLY_ORDER) {
683 order = pageblock_order;
684 VM_BUG_ON(order != pageblock_order);
685 }
686#else
687 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
688#endif
689
690 return order;
691}
692
693static inline bool pcp_allowed_order(unsigned int order)
694{
695 if (order <= PAGE_ALLOC_COSTLY_ORDER)
696 return true;
697#ifdef CONFIG_TRANSPARENT_HUGEPAGE
698 if (order == pageblock_order)
699 return true;
700#endif
701 return false;
702}
703
704static inline void free_the_page(struct page *page, unsigned int order)
705{
706 if (pcp_allowed_order(order))
707 free_unref_page(page, order);
708 else
709 __free_pages_ok(page, order, FPI_NONE);
710}
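
/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages".  PageTail() is
 * encoded in bit zero of page->compound_head; the rest of the bits point to
 * the head page.
 *
 * The first tail page's ->compound_dtor holds the offset into the array of
 * compound page destructors (see compound_page_dtors[]), and its
 * ->compound_order holds the allocation order.  This means that zero-order
 * pages may not be compound.
 */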
727void free_compound_page(struct page *page)
728{
729 mem_cgroup_uncharge(page);
730 free_the_page(page, compound_order(page));
731}
732
733void prep_compound_page(struct page *page, unsigned int order)
734{
735 int i;
736 int nr_pages = 1 << order;
737
738 __SetPageHead(page);
739 for (i = 1; i < nr_pages; i++) {
740 struct page *p = page + i;
741 p->mapping = TAIL_MAPPING;
742 set_compound_head(p, page);
743 }
744
745 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
746 set_compound_order(page, order);
747 atomic_set(compound_mapcount_ptr(page), -1);
748 if (hpage_pincount_available(page))
749 atomic_set(compound_pincount_ptr(page), 0);
750}
751
752#ifdef CONFIG_DEBUG_PAGEALLOC
753unsigned int _debug_guardpage_minorder;
754
755bool _debug_pagealloc_enabled_early __read_mostly
756 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
757EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
758DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
759EXPORT_SYMBOL(_debug_pagealloc_enabled);
760
761DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
762
763static int __init early_debug_pagealloc(char *buf)
764{
765 return kstrtobool(buf, &_debug_pagealloc_enabled_early);
766}
767early_param("debug_pagealloc", early_debug_pagealloc);
768
769static int __init debug_guardpage_minorder_setup(char *buf)
770{
771 unsigned long res;
772
773 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
774 pr_err("Bad debug_guardpage_minorder value\n");
775 return 0;
776 }
777 _debug_guardpage_minorder = res;
778 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
779 return 0;
780}
781early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
782
783static inline bool set_page_guard(struct zone *zone, struct page *page,
784 unsigned int order, int migratetype)
785{
786 if (!debug_guardpage_enabled())
787 return false;
788
789 if (order >= debug_guardpage_minorder())
790 return false;
791
792 __SetPageGuard(page);
793 INIT_LIST_HEAD(&page->lru);
794 set_page_private(page, order);
795
796 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
797
798 return true;
799}
800
801static inline void clear_page_guard(struct zone *zone, struct page *page,
802 unsigned int order, int migratetype)
803{
804 if (!debug_guardpage_enabled())
805 return;
806
807 __ClearPageGuard(page);
808
809 set_page_private(page, 0);
810 if (!is_migrate_isolate(migratetype))
811 __mod_zone_freepage_state(zone, (1 << order), migratetype);
812}
813#else
814static inline bool set_page_guard(struct zone *zone, struct page *page,
815 unsigned int order, int migratetype) { return false; }
816static inline void clear_page_guard(struct zone *zone, struct page *page,
817 unsigned int order, int migratetype) {}
818#endif
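
/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance.  So we need to first gather the full picture of what was
 * enabled, and then make decisions.
 */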
826void init_mem_debugging_and_hardening(void)
827{
828 bool page_poisoning_requested = false;
829
830#ifdef CONFIG_PAGE_POISONING
831
832
833
834
835 if (page_poisoning_enabled() ||
836 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
837 debug_pagealloc_enabled())) {
838 static_branch_enable(&_page_poisoning_enabled);
839 page_poisoning_requested = true;
840 }
841#endif
842
843 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
844 page_poisoning_requested) {
845 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
846 "will take precedence over init_on_alloc and init_on_free\n");
847 _init_on_alloc_enabled_early = false;
848 _init_on_free_enabled_early = false;
849 }
850
851 if (_init_on_alloc_enabled_early)
852 static_branch_enable(&init_on_alloc);
853 else
854 static_branch_disable(&init_on_alloc);
855
856 if (_init_on_free_enabled_early)
857 static_branch_enable(&init_on_free);
858 else
859 static_branch_disable(&init_on_free);
860
861#ifdef CONFIG_DEBUG_PAGEALLOC
862 if (!debug_pagealloc_enabled())
863 return;
864
865 static_branch_enable(&_debug_pagealloc_enabled);
866
867 if (!debug_guardpage_minorder())
868 return;
869
870 static_branch_enable(&_debug_guardpage_enabled);
871#endif
872}
873
874static inline void set_buddy_order(struct page *page, unsigned int order)
875{
876 set_page_private(page, order);
877 __SetPageBuddy(page);
878}
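
/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */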
893static inline bool page_is_buddy(struct page *page, struct page *buddy,
894 unsigned int order)
895{
896 if (!page_is_guard(buddy) && !PageBuddy(buddy))
897 return false;
898
899 if (buddy_order(buddy) != order)
900 return false;
901
902
903
904
905
906 if (page_zone_id(page) != page_zone_id(buddy))
907 return false;
908
909 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
910
911 return true;
912}
913
914#ifdef CONFIG_COMPACTION
915static inline struct capture_control *task_capc(struct zone *zone)
916{
917 struct capture_control *capc = current->capture_control;
918
919 return unlikely(capc) &&
920 !(current->flags & PF_KTHREAD) &&
921 !capc->page &&
922 capc->cc->zone == zone ? capc : NULL;
923}
924
925static inline bool
926compaction_capture(struct capture_control *capc, struct page *page,
927 int order, int migratetype)
928{
929 if (!capc || order != capc->cc->order)
930 return false;
931
932
933 if (is_migrate_cma(migratetype) ||
934 is_migrate_isolate(migratetype))
935 return false;
936
937
938
939
940
941
942
943 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
944 return false;
945
946 capc->page = page;
947 return true;
948}
949
950#else
951static inline struct capture_control *task_capc(struct zone *zone)
952{
953 return NULL;
954}
955
956static inline bool
957compaction_capture(struct capture_control *capc, struct page *page,
958 int order, int migratetype)
959{
960 return false;
961}
962#endif
963
964
965static inline void add_to_free_list(struct page *page, struct zone *zone,
966 unsigned int order, int migratetype)
967{
968 struct free_area *area = &zone->free_area[order];
969
970 list_add(&page->lru, &area->free_list[migratetype]);
971 area->nr_free++;
972}
973
974
975static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
976 unsigned int order, int migratetype)
977{
978 struct free_area *area = &zone->free_area[order];
979
980 list_add_tail(&page->lru, &area->free_list[migratetype]);
981 area->nr_free++;
982}
983
984
985
986
987
988
989static inline void move_to_free_list(struct page *page, struct zone *zone,
990 unsigned int order, int migratetype)
991{
992 struct free_area *area = &zone->free_area[order];
993
994 list_move_tail(&page->lru, &area->free_list[migratetype]);
995}
996
997static inline void del_page_from_free_list(struct page *page, struct zone *zone,
998 unsigned int order)
999{
1000
1001 if (page_reported(page))
1002 __ClearPageReported(page);
1003
1004 list_del(&page->lru);
1005 __ClearPageBuddy(page);
1006 set_page_private(page, 0);
1007 zone->free_area[order].nr_free--;
1008}
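
/*
 * If this is not the largest possible page, check if the buddy of the
 * next-highest order is free.  If it is, it's possible that pages are being
 * freed that will coalesce soon.  In that case, add the free page to the tail
 * of the list so it's less likely to be used soon and more likely to be merged
 * as a higher-order page.
 */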
1018static inline bool
1019buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
1020 struct page *page, unsigned int order)
1021{
1022 struct page *higher_page, *higher_buddy;
1023 unsigned long combined_pfn;
1024
1025 if (order >= MAX_ORDER - 2)
1026 return false;
1027
1028 if (!pfn_valid_within(buddy_pfn))
1029 return false;
1030
1031 combined_pfn = buddy_pfn & pfn;
1032 higher_page = page + (combined_pfn - pfn);
1033 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
1034 higher_buddy = higher_page + (buddy_pfn - combined_pfn);
1035
1036 return pfn_valid_within(buddy_pfn) &&
1037 page_is_buddy(higher_page, higher_buddy, order + 1);
1038}
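
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped tables
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PageBuddy.
 * A page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 */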
1064static inline void __free_one_page(struct page *page,
1065 unsigned long pfn,
1066 struct zone *zone, unsigned int order,
1067 int migratetype, fpi_t fpi_flags)
1068{
1069 struct capture_control *capc = task_capc(zone);
1070 unsigned long buddy_pfn;
1071 unsigned long combined_pfn;
1072 unsigned int max_order;
1073 struct page *buddy;
1074 bool to_tail;
1075
1076 max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
1077
1078 VM_BUG_ON(!zone_is_initialized(zone));
1079 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1080
1081 VM_BUG_ON(migratetype == -1);
1082 if (likely(!is_migrate_isolate(migratetype)))
1083 __mod_zone_freepage_state(zone, 1 << order, migratetype);
1084
1085 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1086 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1087
1088continue_merging:
1089 while (order < max_order) {
1090 if (compaction_capture(capc, page, order, migratetype)) {
1091 __mod_zone_freepage_state(zone, -(1 << order),
1092 migratetype);
1093 return;
1094 }
1095 buddy_pfn = __find_buddy_pfn(pfn, order);
1096 buddy = page + (buddy_pfn - pfn);
1097
1098 if (!pfn_valid_within(buddy_pfn))
1099 goto done_merging;
1100 if (!page_is_buddy(page, buddy, order))
1101 goto done_merging;
1102
1103
1104
1105
1106 if (page_is_guard(buddy))
1107 clear_page_guard(zone, buddy, order, migratetype);
1108 else
1109 del_page_from_free_list(buddy, zone, order);
1110 combined_pfn = buddy_pfn & pfn;
1111 page = page + (combined_pfn - pfn);
1112 pfn = combined_pfn;
1113 order++;
1114 }
1115 if (order < MAX_ORDER - 1) {
1116
1117
1118
1119
1120
1121
1122
1123
1124 if (unlikely(has_isolate_pageblock(zone))) {
1125 int buddy_mt;
1126
1127 buddy_pfn = __find_buddy_pfn(pfn, order);
1128 buddy = page + (buddy_pfn - pfn);
1129 buddy_mt = get_pageblock_migratetype(buddy);
1130
1131 if (migratetype != buddy_mt
1132 && (is_migrate_isolate(migratetype) ||
1133 is_migrate_isolate(buddy_mt)))
1134 goto done_merging;
1135 }
1136 max_order = order + 1;
1137 goto continue_merging;
1138 }
1139
1140done_merging:
1141 set_buddy_order(page, order);
1142
1143 if (fpi_flags & FPI_TO_TAIL)
1144 to_tail = true;
1145 else if (is_shuffle_order(order))
1146 to_tail = shuffle_pick_tail();
1147 else
1148 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1149
1150 if (to_tail)
1151 add_to_free_list_tail(page, zone, order, migratetype);
1152 else
1153 add_to_free_list(page, zone, order, migratetype);
1154
1155
1156 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1157 page_reporting_notify_free(order);
1158}
1159
1160
1161
1162
1163
1164
1165static inline bool page_expected_state(struct page *page,
1166 unsigned long check_flags)
1167{
1168 if (unlikely(atomic_read(&page->_mapcount) != -1))
1169 return false;
1170
1171 if (unlikely((unsigned long)page->mapping |
1172 page_ref_count(page) |
1173#ifdef CONFIG_MEMCG
1174 page->memcg_data |
1175#endif
1176 (page->flags & check_flags)))
1177 return false;
1178
1179 return true;
1180}
1181
1182static const char *page_bad_reason(struct page *page, unsigned long flags)
1183{
1184 const char *bad_reason = NULL;
1185
1186 if (unlikely(atomic_read(&page->_mapcount) != -1))
1187 bad_reason = "nonzero mapcount";
1188 if (unlikely(page->mapping != NULL))
1189 bad_reason = "non-NULL mapping";
1190 if (unlikely(page_ref_count(page) != 0))
1191 bad_reason = "nonzero _refcount";
1192 if (unlikely(page->flags & flags)) {
1193 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1194 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1195 else
1196 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1197 }
1198#ifdef CONFIG_MEMCG
1199 if (unlikely(page->memcg_data))
1200 bad_reason = "page still charged to cgroup";
1201#endif
1202 return bad_reason;
1203}
1204
1205static void check_free_page_bad(struct page *page)
1206{
1207 bad_page(page,
1208 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1209}
1210
1211static inline int check_free_page(struct page *page)
1212{
1213 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1214 return 0;
1215
1216
1217 check_free_page_bad(page);
1218 return 1;
1219}
1220
1221static int free_tail_pages_check(struct page *head_page, struct page *page)
1222{
1223 int ret = 1;
1224
1225
1226
1227
1228
1229 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1230
1231 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1232 ret = 0;
1233 goto out;
1234 }
1235 switch (page - head_page) {
1236 case 1:
1237
1238 if (unlikely(compound_mapcount(page))) {
1239 bad_page(page, "nonzero compound_mapcount");
1240 goto out;
1241 }
1242 break;
1243 case 2:
1244
1245
1246
1247
1248 break;
1249 default:
1250 if (page->mapping != TAIL_MAPPING) {
1251 bad_page(page, "corrupted mapping in tail page");
1252 goto out;
1253 }
1254 break;
1255 }
1256 if (unlikely(!PageTail(page))) {
1257 bad_page(page, "PageTail not set");
1258 goto out;
1259 }
1260 if (unlikely(compound_head(page) != head_page)) {
1261 bad_page(page, "compound_head not consistent");
1262 goto out;
1263 }
1264 ret = 0;
1265out:
1266 page->mapping = NULL;
1267 clear_compound_head(page);
1268 return ret;
1269}
1270
1271static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
1272{
1273 int i;
1274
1275 if (zero_tags) {
1276 for (i = 0; i < numpages; i++)
1277 tag_clear_highpage(page + i);
1278 return;
1279 }
1280
1281
1282 kasan_disable_current();
1283 for (i = 0; i < numpages; i++) {
1284 u8 tag = page_kasan_tag(page + i);
1285 page_kasan_tag_reset(page + i);
1286 clear_highpage(page + i);
1287 page_kasan_tag_set(page + i, tag);
1288 }
1289 kasan_enable_current();
1290}
1291
1292static __always_inline bool free_pages_prepare(struct page *page,
1293 unsigned int order, bool check_free, fpi_t fpi_flags)
1294{
1295 int bad = 0;
1296 bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
1297
1298 VM_BUG_ON_PAGE(PageTail(page), page);
1299
1300 trace_mm_page_free(page, order);
1301
1302 if (unlikely(PageHWPoison(page)) && !order) {
1303
1304
1305
1306
1307 if (memcg_kmem_enabled() && PageMemcgKmem(page))
1308 __memcg_kmem_uncharge_page(page, order);
1309 reset_page_owner(page, order);
1310 return false;
1311 }
1312
1313
1314
1315
1316
1317 if (unlikely(order)) {
1318 bool compound = PageCompound(page);
1319 int i;
1320
1321 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1322
1323 if (compound)
1324 ClearPageDoubleMap(page);
1325 for (i = 1; i < (1 << order); i++) {
1326 if (compound)
1327 bad += free_tail_pages_check(page, page + i);
1328 if (unlikely(check_free_page(page + i))) {
1329 bad++;
1330 continue;
1331 }
1332 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1333 }
1334 }
1335 if (PageMappingFlags(page))
1336 page->mapping = NULL;
1337 if (memcg_kmem_enabled() && PageMemcgKmem(page))
1338 __memcg_kmem_uncharge_page(page, order);
1339 if (check_free)
1340 bad += check_free_page(page);
1341 if (bad)
1342 return false;
1343
1344 page_cpupid_reset_last(page);
1345 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1346 reset_page_owner(page, order);
1347
1348 if (!PageHighMem(page)) {
1349 debug_check_no_locks_freed(page_address(page),
1350 PAGE_SIZE << order);
1351 debug_check_no_obj_freed(page_address(page),
1352 PAGE_SIZE << order);
1353 }
1354
1355 kernel_poison_pages(page, 1 << order);
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365 if (kasan_has_integrated_init()) {
1366 if (!skip_kasan_poison)
1367 kasan_free_pages(page, order);
1368 } else {
1369 bool init = want_init_on_free();
1370
1371 if (init)
1372 kernel_init_free_pages(page, 1 << order, false);
1373 if (!skip_kasan_poison)
1374 kasan_poison_pages(page, order, init);
1375 }
1376
1377
1378
1379
1380
1381
1382 arch_free_page(page, order);
1383
1384 debug_pagealloc_unmap_pages(page, 1 << order);
1385
1386 return true;
1387}
1388
1389#ifdef CONFIG_DEBUG_VM
1390
1391
1392
1393
1394
1395static bool free_pcp_prepare(struct page *page, unsigned int order)
1396{
1397 return free_pages_prepare(page, order, true, FPI_NONE);
1398}
1399
1400static bool bulkfree_pcp_prepare(struct page *page)
1401{
1402 if (debug_pagealloc_enabled_static())
1403 return check_free_page(page);
1404 else
1405 return false;
1406}
1407#else
1408
1409
1410
1411
1412
1413
1414static bool free_pcp_prepare(struct page *page, unsigned int order)
1415{
1416 if (debug_pagealloc_enabled_static())
1417 return free_pages_prepare(page, order, true, FPI_NONE);
1418 else
1419 return free_pages_prepare(page, order, false, FPI_NONE);
1420}
1421
1422static bool bulkfree_pcp_prepare(struct page *page)
1423{
1424 return check_free_page(page);
1425}
1426#endif
1427
1428static inline void prefetch_buddy(struct page *page)
1429{
1430 unsigned long pfn = page_to_pfn(page);
1431 unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1432 struct page *buddy = page + (buddy_pfn - pfn);
1433
1434 prefetch(buddy);
1435}
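
/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the lists are in the same zone.
 * count is the number of pages to free; the pages are pulled off the
 * pcplists first and then returned to the zone's buddy free lists under
 * zone->lock.
 */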
1448static void free_pcppages_bulk(struct zone *zone, int count,
1449 struct per_cpu_pages *pcp)
1450{
1451 int pindex = 0;
1452 int batch_free = 0;
1453 int nr_freed = 0;
1454 unsigned int order;
1455 int prefetch_nr = READ_ONCE(pcp->batch);
1456 bool isolated_pageblocks;
1457 struct page *page, *tmp;
1458 LIST_HEAD(head);
1459
1460
1461
1462
1463
1464 count = min(pcp->count, count);
1465 while (count > 0) {
1466 struct list_head *list;
1467
1468
1469
1470
1471
1472
1473
1474
1475 do {
1476 batch_free++;
1477 if (++pindex == NR_PCP_LISTS)
1478 pindex = 0;
1479 list = &pcp->lists[pindex];
1480 } while (list_empty(list));
1481
1482
1483 if (batch_free == NR_PCP_LISTS)
1484 batch_free = count;
1485
1486 order = pindex_to_order(pindex);
1487 BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
1488 do {
1489 page = list_last_entry(list, struct page, lru);
1490
1491 list_del(&page->lru);
1492 nr_freed += 1 << order;
1493 count -= 1 << order;
1494
1495 if (bulkfree_pcp_prepare(page))
1496 continue;
1497
1498
1499 page->index <<= NR_PCP_ORDER_WIDTH;
1500 page->index |= order;
1501
1502 list_add_tail(&page->lru, &head);
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513 if (prefetch_nr) {
1514 prefetch_buddy(page);
1515 prefetch_nr--;
1516 }
1517 } while (count > 0 && --batch_free && !list_empty(list));
1518 }
1519 pcp->count -= nr_freed;
1520
1521
1522
1523
1524
1525 spin_lock(&zone->lock);
1526 isolated_pageblocks = has_isolate_pageblock(zone);
1527
1528
1529
1530
1531
1532 list_for_each_entry_safe(page, tmp, &head, lru) {
1533 int mt = get_pcppage_migratetype(page);
1534
1535
1536 order = mt & NR_PCP_ORDER_MASK;
1537 mt >>= NR_PCP_ORDER_WIDTH;
1538
1539
1540 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1541
1542 if (unlikely(isolated_pageblocks))
1543 mt = get_pageblock_migratetype(page);
1544
1545 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1546 trace_mm_page_pcpu_drain(page, order, mt);
1547 }
1548 spin_unlock(&zone->lock);
1549}
1550
1551static void free_one_page(struct zone *zone,
1552 struct page *page, unsigned long pfn,
1553 unsigned int order,
1554 int migratetype, fpi_t fpi_flags)
1555{
1556 unsigned long flags;
1557
1558 spin_lock_irqsave(&zone->lock, flags);
1559 if (unlikely(has_isolate_pageblock(zone) ||
1560 is_migrate_isolate(migratetype))) {
1561 migratetype = get_pfnblock_migratetype(page, pfn);
1562 }
1563 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1564 spin_unlock_irqrestore(&zone->lock, flags);
1565}
1566
1567static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1568 unsigned long zone, int nid)
1569{
1570 mm_zero_struct_page(page);
1571 set_page_links(page, zone, nid, pfn);
1572 init_page_count(page);
1573 page_mapcount_reset(page);
1574 page_cpupid_reset_last(page);
1575 page_kasan_tag_reset(page);
1576
1577 INIT_LIST_HEAD(&page->lru);
1578#ifdef WANT_PAGE_VIRTUAL
1579
1580 if (!is_highmem_idx(zone))
1581 set_page_address(page, __va(pfn << PAGE_SHIFT));
1582#endif
1583}
1584
1585#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1586static void __meminit init_reserved_page(unsigned long pfn)
1587{
1588 pg_data_t *pgdat;
1589 int nid, zid;
1590
1591 if (!early_page_uninitialised(pfn))
1592 return;
1593
1594 nid = early_pfn_to_nid(pfn);
1595 pgdat = NODE_DATA(nid);
1596
1597 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1598 struct zone *zone = &pgdat->node_zones[zid];
1599
1600 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1601 break;
1602 }
1603 __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1604}
1605#else
1606static inline void init_reserved_page(unsigned long pfn)
1607{
1608}
1609#endif
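
/*
 * Initialised pages do not have PageReserved set.  This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved.  The remaining valid pages are later
 * sent to the buddy page allocator.
 */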
1617void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1618{
1619 unsigned long start_pfn = PFN_DOWN(start);
1620 unsigned long end_pfn = PFN_UP(end);
1621
1622 for (; start_pfn < end_pfn; start_pfn++) {
1623 if (pfn_valid(start_pfn)) {
1624 struct page *page = pfn_to_page(start_pfn);
1625
1626 init_reserved_page(start_pfn);
1627
1628
1629 INIT_LIST_HEAD(&page->lru);
1630
1631
1632
1633
1634
1635
1636 __SetPageReserved(page);
1637 }
1638 }
1639}
1640
1641static void __free_pages_ok(struct page *page, unsigned int order,
1642 fpi_t fpi_flags)
1643{
1644 unsigned long flags;
1645 int migratetype;
1646 unsigned long pfn = page_to_pfn(page);
1647 struct zone *zone = page_zone(page);
1648
1649 if (!free_pages_prepare(page, order, true, fpi_flags))
1650 return;
1651
1652 migratetype = get_pfnblock_migratetype(page, pfn);
1653
1654 spin_lock_irqsave(&zone->lock, flags);
1655 if (unlikely(has_isolate_pageblock(zone) ||
1656 is_migrate_isolate(migratetype))) {
1657 migratetype = get_pfnblock_migratetype(page, pfn);
1658 }
1659 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1660 spin_unlock_irqrestore(&zone->lock, flags);
1661
1662 __count_vm_events(PGFREE, 1 << order);
1663}
1664
1665void __free_pages_core(struct page *page, unsigned int order)
1666{
1667 unsigned int nr_pages = 1 << order;
1668 struct page *p = page;
1669 unsigned int loop;
1670
1671
1672
1673
1674
1675
1676 prefetchw(p);
1677 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1678 prefetchw(p + 1);
1679 __ClearPageReserved(p);
1680 set_page_count(p, 0);
1681 }
1682 __ClearPageReserved(p);
1683 set_page_count(p, 0);
1684
1685 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1686
1687
1688
1689
1690
1691 __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
1692}
1693
1694#ifdef CONFIG_NUMA
1695
1696
1697
1698
1699
1700
1701struct mminit_pfnnid_cache {
1702 unsigned long last_start;
1703 unsigned long last_end;
1704 int last_nid;
1705};
1706
1707static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1708
1709
1710
1711
1712static int __meminit __early_pfn_to_nid(unsigned long pfn,
1713 struct mminit_pfnnid_cache *state)
1714{
1715 unsigned long start_pfn, end_pfn;
1716 int nid;
1717
1718 if (state->last_start <= pfn && pfn < state->last_end)
1719 return state->last_nid;
1720
1721 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1722 if (nid != NUMA_NO_NODE) {
1723 state->last_start = start_pfn;
1724 state->last_end = end_pfn;
1725 state->last_nid = nid;
1726 }
1727
1728 return nid;
1729}
1730
1731int __meminit early_pfn_to_nid(unsigned long pfn)
1732{
1733 static DEFINE_SPINLOCK(early_pfn_lock);
1734 int nid;
1735
1736 spin_lock(&early_pfn_lock);
1737 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1738 if (nid < 0)
1739 nid = first_online_node;
1740 spin_unlock(&early_pfn_lock);
1741
1742 return nid;
1743}
1744#endif
1745
1746void __init memblock_free_pages(struct page *page, unsigned long pfn,
1747 unsigned int order)
1748{
1749 if (early_page_uninitialised(pfn))
1750 return;
1751 __free_pages_core(page, order);
1752}
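
/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free scanner of compaction.
 *
 * Return the struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0,
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone.  We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock.  It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */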
1771struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1772 unsigned long end_pfn, struct zone *zone)
1773{
1774 struct page *start_page;
1775 struct page *end_page;
1776
1777
1778 end_pfn--;
1779
1780 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1781 return NULL;
1782
1783 start_page = pfn_to_online_page(start_pfn);
1784 if (!start_page)
1785 return NULL;
1786
1787 if (page_zone(start_page) != zone)
1788 return NULL;
1789
1790 end_page = pfn_to_page(end_pfn);
1791
1792
1793 if (page_zone_id(start_page) != page_zone_id(end_page))
1794 return NULL;
1795
1796 return start_page;
1797}
1798
1799void set_zone_contiguous(struct zone *zone)
1800{
1801 unsigned long block_start_pfn = zone->zone_start_pfn;
1802 unsigned long block_end_pfn;
1803
1804 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1805 for (; block_start_pfn < zone_end_pfn(zone);
1806 block_start_pfn = block_end_pfn,
1807 block_end_pfn += pageblock_nr_pages) {
1808
1809 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1810
1811 if (!__pageblock_pfn_to_page(block_start_pfn,
1812 block_end_pfn, zone))
1813 return;
1814 cond_resched();
1815 }
1816
1817
1818 zone->contiguous = true;
1819}
1820
1821void clear_zone_contiguous(struct zone *zone)
1822{
1823 zone->contiguous = false;
1824}
1825
1826#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1827static void __init deferred_free_range(unsigned long pfn,
1828 unsigned long nr_pages)
1829{
1830 struct page *page;
1831 unsigned long i;
1832
1833 if (!nr_pages)
1834 return;
1835
1836 page = pfn_to_page(pfn);
1837
1838
1839 if (nr_pages == pageblock_nr_pages &&
1840 (pfn & (pageblock_nr_pages - 1)) == 0) {
1841 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1842 __free_pages_core(page, pageblock_order);
1843 return;
1844 }
1845
1846 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1847 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1848 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1849 __free_pages_core(page, 0);
1850 }
1851}
1852
1853
1854static atomic_t pgdat_init_n_undone __initdata;
1855static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1856
1857static inline void __init pgdat_init_report_one_done(void)
1858{
1859 if (atomic_dec_and_test(&pgdat_init_n_undone))
1860 complete(&pgdat_init_all_done_comp);
1861}
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873static inline bool __init deferred_pfn_valid(unsigned long pfn)
1874{
1875 if (!pfn_valid_within(pfn))
1876 return false;
1877 if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1878 return false;
1879 return true;
1880}
1881
1882
1883
1884
1885
1886static void __init deferred_free_pages(unsigned long pfn,
1887 unsigned long end_pfn)
1888{
1889 unsigned long nr_pgmask = pageblock_nr_pages - 1;
1890 unsigned long nr_free = 0;
1891
1892 for (; pfn < end_pfn; pfn++) {
1893 if (!deferred_pfn_valid(pfn)) {
1894 deferred_free_range(pfn - nr_free, nr_free);
1895 nr_free = 0;
1896 } else if (!(pfn & nr_pgmask)) {
1897 deferred_free_range(pfn - nr_free, nr_free);
1898 nr_free = 1;
1899 } else {
1900 nr_free++;
1901 }
1902 }
1903
1904 deferred_free_range(pfn - nr_free, nr_free);
1905}
1906
1907
1908
1909
1910
1911
1912static unsigned long __init deferred_init_pages(struct zone *zone,
1913 unsigned long pfn,
1914 unsigned long end_pfn)
1915{
1916 unsigned long nr_pgmask = pageblock_nr_pages - 1;
1917 int nid = zone_to_nid(zone);
1918 unsigned long nr_pages = 0;
1919 int zid = zone_idx(zone);
1920 struct page *page = NULL;
1921
1922 for (; pfn < end_pfn; pfn++) {
1923 if (!deferred_pfn_valid(pfn)) {
1924 page = NULL;
1925 continue;
1926 } else if (!page || !(pfn & nr_pgmask)) {
1927 page = pfn_to_page(pfn);
1928 } else {
1929 page++;
1930 }
1931 __init_single_page(page, pfn, zid, nid);
1932 nr_pages++;
1933 }
1934 return (nr_pages);
1935}
1936
1937
1938
1939
1940
1941
1942
1943static bool __init
1944deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1945 unsigned long *spfn, unsigned long *epfn,
1946 unsigned long first_init_pfn)
1947{
1948 u64 j;
1949
1950
1951
1952
1953
1954
1955 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1956 if (*epfn <= first_init_pfn)
1957 continue;
1958 if (*spfn < first_init_pfn)
1959 *spfn = first_init_pfn;
1960 *i = j;
1961 return true;
1962 }
1963
1964 return false;
1965}
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977static unsigned long __init
1978deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1979 unsigned long *end_pfn)
1980{
1981 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1982 unsigned long spfn = *start_pfn, epfn = *end_pfn;
1983 unsigned long nr_pages = 0;
1984 u64 j = *i;
1985
1986
1987 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1988 unsigned long t;
1989
1990 if (mo_pfn <= *start_pfn)
1991 break;
1992
1993 t = min(mo_pfn, *end_pfn);
1994 nr_pages += deferred_init_pages(zone, *start_pfn, t);
1995
1996 if (mo_pfn < *end_pfn) {
1997 *start_pfn = mo_pfn;
1998 break;
1999 }
2000 }
2001
2002
2003 swap(j, *i);
2004
2005 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2006 unsigned long t;
2007
2008 if (mo_pfn <= spfn)
2009 break;
2010
2011 t = min(mo_pfn, epfn);
2012 deferred_free_pages(spfn, t);
2013
2014 if (mo_pfn <= epfn)
2015 break;
2016 }
2017
2018 return nr_pages;
2019}
2020
2021static void __init
2022deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2023 void *arg)
2024{
2025 unsigned long spfn, epfn;
2026 struct zone *zone = arg;
2027 u64 i;
2028
2029 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2030
2031
2032
2033
2034
2035 while (spfn < end_pfn) {
2036 deferred_init_maxorder(&i, zone, &spfn, &epfn);
2037 cond_resched();
2038 }
2039}
2040
2041
2042__weak int __init
2043deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2044{
2045 return 1;
2046}
2047
2048
2049static int __init deferred_init_memmap(void *data)
2050{
2051 pg_data_t *pgdat = data;
2052 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2053 unsigned long spfn = 0, epfn = 0;
2054 unsigned long first_init_pfn, flags;
2055 unsigned long start = jiffies;
2056 struct zone *zone;
2057 int zid, max_threads;
2058 u64 i;
2059
2060
2061 if (!cpumask_empty(cpumask))
2062 set_cpus_allowed_ptr(current, cpumask);
2063
2064 pgdat_resize_lock(pgdat, &flags);
2065 first_init_pfn = pgdat->first_deferred_pfn;
2066 if (first_init_pfn == ULONG_MAX) {
2067 pgdat_resize_unlock(pgdat, &flags);
2068 pgdat_init_report_one_done();
2069 return 0;
2070 }
2071
2072
2073 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2074 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2075 pgdat->first_deferred_pfn = ULONG_MAX;
2076
2077
2078
2079
2080
2081
2082 pgdat_resize_unlock(pgdat, &flags);
2083
2084
2085 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2086 zone = pgdat->node_zones + zid;
2087 if (first_init_pfn < zone_end_pfn(zone))
2088 break;
2089 }
2090
2091
2092 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2093 first_init_pfn))
2094 goto zone_empty;
2095
2096 max_threads = deferred_page_init_max_threads(cpumask);
2097
2098 while (spfn < epfn) {
2099 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2100 struct padata_mt_job job = {
2101 .thread_fn = deferred_init_memmap_chunk,
2102 .fn_arg = zone,
2103 .start = spfn,
2104 .size = epfn_align - spfn,
2105 .align = PAGES_PER_SECTION,
2106 .min_chunk = PAGES_PER_SECTION,
2107 .max_threads = max_threads,
2108 };
2109
2110 padata_do_multithreaded(&job);
2111 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2112 epfn_align);
2113 }
2114zone_empty:
2115
2116 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2117
2118 pr_info("node %d deferred pages initialised in %ums\n",
2119 pgdat->node_id, jiffies_to_msecs(jiffies - start));
2120
2121 pgdat_init_report_one_done();
2122 return 0;
2123}
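
/*
 * If this zone has deferred pages, try to grow it by initializing enough
 * deferred pages to satisfy the allocation specified by order, rounded up to
 * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
 * of SECTION_SIZE bytes by initializing struct pages in increments of
 * PAGES_PER_SECTION * sizeof(struct page) bytes.
 *
 * Return true when the zone was grown, otherwise return false.  We return
 * true even when we grow beyond the end of the zone.
 */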
2140static noinline bool __init
2141deferred_grow_zone(struct zone *zone, unsigned int order)
2142{
2143 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2144 pg_data_t *pgdat = zone->zone_pgdat;
2145 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2146 unsigned long spfn, epfn, flags;
2147 unsigned long nr_pages = 0;
2148 u64 i;
2149
2150
2151 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2152 return false;
2153
2154 pgdat_resize_lock(pgdat, &flags);
2155
2156
2157
2158
2159
2160 if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2161 pgdat_resize_unlock(pgdat, &flags);
2162 return true;
2163 }
2164
2165
2166 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2167 first_deferred_pfn)) {
2168 pgdat->first_deferred_pfn = ULONG_MAX;
2169 pgdat_resize_unlock(pgdat, &flags);
2170
2171 return first_deferred_pfn != ULONG_MAX;
2172 }
2173
2174
2175
2176
2177
2178
2179 while (spfn < epfn) {
2180
2181 first_deferred_pfn = spfn;
2182
2183 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2184 touch_nmi_watchdog();
2185
2186
2187 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2188 continue;
2189
2190
2191 if (nr_pages >= nr_pages_needed)
2192 break;
2193 }
2194
2195 pgdat->first_deferred_pfn = spfn;
2196 pgdat_resize_unlock(pgdat, &flags);
2197
2198 return nr_pages > 0;
2199}
2200
2201
2202
2203
2204
2205
2206
2207static bool __ref
2208_deferred_grow_zone(struct zone *zone, unsigned int order)
2209{
2210 return deferred_grow_zone(zone, order);
2211}
2212
2213#endif
2214
2215void __init page_alloc_init_late(void)
2216{
2217 struct zone *zone;
2218 int nid;
2219
2220#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2221
2222
2223 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2224 for_each_node_state(nid, N_MEMORY) {
2225 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2226 }
2227
2228
2229 wait_for_completion(&pgdat_init_all_done_comp);
2230
2231
2232
2233
2234
2235 static_branch_disable(&deferred_pages);
2236
2237
2238 files_maxfiles_init();
2239#endif
2240
2241 buffer_init();
2242
2243
2244 memblock_discard();
2245
2246 for_each_node_state(nid, N_MEMORY)
2247 shuffle_free_memory(NODE_DATA(nid));
2248
2249 for_each_populated_zone(zone)
2250 set_zone_contiguous(zone);
2251}
2252
2253#ifdef CONFIG_CMA
2254
2255void __init init_cma_reserved_pageblock(struct page *page)
2256{
2257 unsigned i = pageblock_nr_pages;
2258 struct page *p = page;
2259
2260 do {
2261 __ClearPageReserved(p);
2262 set_page_count(p, 0);
2263 } while (++p, --i);
2264
2265 set_pageblock_migratetype(page, MIGRATE_CMA);
2266
2267 if (pageblock_order >= MAX_ORDER) {
2268 i = pageblock_nr_pages;
2269 p = page;
2270 do {
2271 set_page_refcounted(p);
2272 __free_pages(p, MAX_ORDER - 1);
2273 p += MAX_ORDER_NR_PAGES;
2274 } while (i -= MAX_ORDER_NR_PAGES);
2275 } else {
2276 set_page_refcounted(page);
2277 __free_pages(page, pageblock_order);
2278 }
2279
2280 adjust_managed_page_count(page, pageblock_nr_pages);
2281 page_zone(page)->cma_pages += pageblock_nr_pages;
2282}
2283#endif
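
/*
 * Subdivide a higher-order free page into lower orders: the unused halves are
 * returned to the free lists (or turned into debug guard pages), so that the
 * caller is left with a block of exactly the requested order at the start of
 * the original page.  The order of subdivision determines the order in which
 * smaller blocks are handed out later, which matters for I/O request merging.
 */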
2299static inline void expand(struct zone *zone, struct page *page,
2300 int low, int high, int migratetype)
2301{
2302 unsigned long size = 1 << high;
2303
2304 while (high > low) {
2305 high--;
2306 size >>= 1;
2307 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2308
2309
2310
2311
2312
2313
2314
2315 if (set_page_guard(zone, &page[size], high, migratetype))
2316 continue;
2317
2318 add_to_free_list(&page[size], zone, high, migratetype);
2319 set_buddy_order(&page[size], high);
2320 }
2321}
2322
2323static void check_new_page_bad(struct page *page)
2324{
2325 if (unlikely(page->flags & __PG_HWPOISON)) {
2326
2327 page_mapcount_reset(page);
2328 return;
2329 }
2330
2331 bad_page(page,
2332 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2333}
2334
2335
2336
2337
2338static inline int check_new_page(struct page *page)
2339{
2340 if (likely(page_expected_state(page,
2341 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2342 return 0;
2343
2344 check_new_page_bad(page);
2345 return 1;
2346}
2347
2348#ifdef CONFIG_DEBUG_VM
2349
2350
2351
2352
2353
2354static inline bool check_pcp_refill(struct page *page)
2355{
2356 if (debug_pagealloc_enabled_static())
2357 return check_new_page(page);
2358 else
2359 return false;
2360}
2361
2362static inline bool check_new_pcp(struct page *page)
2363{
2364 return check_new_page(page);
2365}
2366#else
2367
2368
2369
2370
2371
2372static inline bool check_pcp_refill(struct page *page)
2373{
2374 return check_new_page(page);
2375}
2376static inline bool check_new_pcp(struct page *page)
2377{
2378 if (debug_pagealloc_enabled_static())
2379 return check_new_page(page);
2380 else
2381 return false;
2382}
2383#endif
2384
2385static bool check_new_pages(struct page *page, unsigned int order)
2386{
2387 int i;
2388 for (i = 0; i < (1 << order); i++) {
2389 struct page *p = page + i;
2390
2391 if (unlikely(check_new_page(p)))
2392 return true;
2393 }
2394
2395 return false;
2396}
2397
2398inline void post_alloc_hook(struct page *page, unsigned int order,
2399 gfp_t gfp_flags)
2400{
2401 set_page_private(page, 0);
2402 set_page_refcounted(page);
2403
2404 arch_alloc_page(page, order);
2405 debug_pagealloc_map_pages(page, 1 << order);
2406
2407
2408
2409
2410
2411
2412 kernel_unpoison_pages(page, 1 << order);
2413
2414
2415
2416
2417
2418
2419 if (kasan_has_integrated_init()) {
2420 kasan_alloc_pages(page, order, gfp_flags);
2421 } else {
2422 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
2423
2424 kasan_unpoison_pages(page, order, init);
2425 if (init)
2426 kernel_init_free_pages(page, 1 << order,
2427 gfp_flags & __GFP_ZEROTAGS);
2428 }
2429
2430 set_page_owner(page, order, gfp_flags);
2431}
2432
2433static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2434 unsigned int alloc_flags)
2435{
2436 post_alloc_hook(page, order, gfp_flags);
2437
2438 if (order && (gfp_flags & __GFP_COMP))
2439 prep_compound_page(page, order);
2440
2441
2442
2443
2444
2445
2446
2447 if (alloc_flags & ALLOC_NO_WATERMARKS)
2448 set_page_pfmemalloc(page);
2449 else
2450 clear_page_pfmemalloc(page);
2451}
2452
2453
2454
2455
2456
2457static __always_inline
2458struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2459 int migratetype)
2460{
2461 unsigned int current_order;
2462 struct free_area *area;
2463 struct page *page;
2464
2465
2466 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2467 area = &(zone->free_area[current_order]);
2468 page = get_page_from_free_area(area, migratetype);
2469 if (!page)
2470 continue;
2471 del_page_from_free_list(page, zone, current_order);
2472 expand(zone, page, order, current_order, migratetype);
2473 set_pcppage_migratetype(page, migratetype);
2474 return page;
2475 }
2476
2477 return NULL;
2478}
2479
2480
2481
2482
2483
2484
2485static int fallbacks[MIGRATE_TYPES][3] = {
2486 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
2487 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2488 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
2489#ifdef CONFIG_CMA
2490 [MIGRATE_CMA] = { MIGRATE_TYPES },
2491#endif
2492#ifdef CONFIG_MEMORY_ISOLATION
2493 [MIGRATE_ISOLATE] = { MIGRATE_TYPES },
2494#endif
2495};
2496
2497#ifdef CONFIG_CMA
2498static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2499 unsigned int order)
2500{
2501 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2502}
2503#else
2504static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2505 unsigned int order) { return NULL; }
2506#endif
2507
2508
2509
2510
2511
2512
2513static int move_freepages(struct zone *zone,
2514 unsigned long start_pfn, unsigned long end_pfn,
2515 int migratetype, int *num_movable)
2516{
2517 struct page *page;
2518 unsigned long pfn;
2519 unsigned int order;
2520 int pages_moved = 0;
2521
2522 for (pfn = start_pfn; pfn <= end_pfn;) {
2523 if (!pfn_valid_within(pfn)) {
2524 pfn++;
2525 continue;
2526 }
2527
2528 page = pfn_to_page(pfn);
2529 if (!PageBuddy(page)) {
2530
2531
2532
2533
2534
2535 if (num_movable &&
2536 (PageLRU(page) || __PageMovable(page)))
2537 (*num_movable)++;
2538 pfn++;
2539 continue;
2540 }
2541
2542
2543 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2544 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2545
2546 order = buddy_order(page);
2547 move_to_free_list(page, zone, order, migratetype);
2548 pfn += 1 << order;
2549 pages_moved += 1 << order;
2550 }
2551
2552 return pages_moved;
2553}
2554
2555int move_freepages_block(struct zone *zone, struct page *page,
2556 int migratetype, int *num_movable)
2557{
2558 unsigned long start_pfn, end_pfn, pfn;
2559
2560 if (num_movable)
2561 *num_movable = 0;
2562
2563 pfn = page_to_pfn(page);
2564 start_pfn = pfn & ~(pageblock_nr_pages - 1);
2565 end_pfn = start_pfn + pageblock_nr_pages - 1;
2566
2567
2568 if (!zone_spans_pfn(zone, start_pfn))
2569 start_pfn = pfn;
2570 if (!zone_spans_pfn(zone, end_pfn))
2571 return 0;
2572
2573 return move_freepages(zone, start_pfn, end_pfn, migratetype,
2574 num_movable);
2575}
2576
2577static void change_pageblock_range(struct page *pageblock_page,
2578 int start_order, int migratetype)
2579{
2580 int nr_pageblocks = 1 << (start_order - pageblock_order);
2581
2582 while (nr_pageblocks--) {
2583 set_pageblock_migratetype(pageblock_page, migratetype);
2584 pageblock_page += pageblock_nr_pages;
2585 }
2586}
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
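/*
 * When falling back to another migratetype during allocation, decide
 * whether it is worth stealing the extra free pages in the same pageblock
 * instead of polluting multiple pageblocks. Stealing is allowed for
 * requests at or above half a pageblock, for whole pageblocks, for
 * reclaimable and unmovable requests, or when grouping by mobility is
 * disabled, since the fragmentation those allocations cause in movable
 * pageblocks is considered worse than the reverse.
 */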
2600static bool can_steal_fallback(unsigned int order, int start_mt)
2601{
2602
2603
2604
2605
2606
2607
2608
2609 if (order >= pageblock_order)
2610 return true;
2611
2612 if (order >= pageblock_order / 2 ||
2613 start_mt == MIGRATE_RECLAIMABLE ||
2614 start_mt == MIGRATE_UNMOVABLE ||
2615 page_group_by_mobility_disabled)
2616 return true;
2617
2618 return false;
2619}
2620
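/*
 * Temporarily boost the zone's watermark in response to an external
 * fragmentation event. The boost grows by one pageblock per event and is
 * capped at watermark_boost_factor/10000 of the high watermark, but never
 * below one pageblock. E.g., assuming a high watermark of 10000 pages and
 * watermark_boost_factor == 15000, max_boost = 10000 * 15000 / 10000 =
 * 15000 pages. Zones with fewer managed pages than four pageblocks are
 * never boosted. Returns true so the caller can flag the zone for a
 * kswapd wakeup.
 */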
2621static inline bool boost_watermark(struct zone *zone)
2622{
2623 unsigned long max_boost;
2624
2625 if (!watermark_boost_factor)
2626 return false;
2627
2628
2629
2630
2631
2632
2633 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2634 return false;
2635
2636 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2637 watermark_boost_factor, 10000);
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647 if (!max_boost)
2648 return false;
2649
2650 max_boost = max(pageblock_nr_pages, max_boost);
2651
2652 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2653 max_boost);
2654
2655 return true;
2656}
2657
2658
2659
2660
2661
2662
2663
2664
2665
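/*
 * This function implements the actual steal behaviour. If the stolen
 * buddy spans whole pageblocks, or the block is a highatomic reserve, or
 * whole_block is false, only the buddy itself is moved. Otherwise all
 * free pages in the pageblock are moved to start_type, and if at least
 * half of the pageblock is free or already allocated to a compatible
 * type, the pageblock's migratetype is changed as well so pages freed in
 * the future land on the correct free list.
 */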
2666static void steal_suitable_fallback(struct zone *zone, struct page *page,
2667 unsigned int alloc_flags, int start_type, bool whole_block)
2668{
2669 unsigned int current_order = buddy_order(page);
2670 int free_pages, movable_pages, alike_pages;
2671 int old_block_type;
2672
2673 old_block_type = get_pageblock_migratetype(page);
2674
2675
2676
2677
2678
2679 if (is_migrate_highatomic(old_block_type))
2680 goto single_page;
2681
2682
2683 if (current_order >= pageblock_order) {
2684 change_pageblock_range(page, current_order, start_type);
2685 goto single_page;
2686 }
2687
2688
2689
2690
2691
2692
2693 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2694 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2695
2696
2697 if (!whole_block)
2698 goto single_page;
2699
2700 free_pages = move_freepages_block(zone, page, start_type,
2701 &movable_pages);
2702
2703
2704
2705
2706
2707 if (start_type == MIGRATE_MOVABLE) {
2708 alike_pages = movable_pages;
2709 } else {
2710
2711
2712
2713
2714
2715
2716
2717 if (old_block_type == MIGRATE_MOVABLE)
2718 alike_pages = pageblock_nr_pages
2719 - (free_pages + movable_pages);
2720 else
2721 alike_pages = 0;
2722 }
2723
2724
2725 if (!free_pages)
2726 goto single_page;
2727
2728
2729
2730
2731
2732 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2733 page_group_by_mobility_disabled)
2734 set_pageblock_migratetype(page, start_type);
2735
2736 return;
2737
2738single_page:
2739 move_to_free_list(page, zone, current_order, start_type);
2740}
2741
2742
2743
2744
2745
2746
2747
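/*
 * Check whether there is a suitable fallback freepage with the requested
 * order. Sets *can_steal when stealing the whole pageblock is advisable.
 * If only_stealable is true, a fallback migratetype is returned only when
 * stealing is possible. Returns -1 if no fallback is found.
 */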
2748int find_suitable_fallback(struct free_area *area, unsigned int order,
2749 int migratetype, bool only_stealable, bool *can_steal)
2750{
2751 int i;
2752 int fallback_mt;
2753
2754 if (area->nr_free == 0)
2755 return -1;
2756
2757 *can_steal = false;
2758 for (i = 0;; i++) {
2759 fallback_mt = fallbacks[migratetype][i];
2760 if (fallback_mt == MIGRATE_TYPES)
2761 break;
2762
2763 if (free_area_empty(area, fallback_mt))
2764 continue;
2765
2766 if (can_steal_fallback(order, migratetype))
2767 *can_steal = true;
2768
2769 if (!only_stealable)
2770 return fallback_mt;
2771
2772 if (*can_steal)
2773 return fallback_mt;
2774 }
2775
2776 return -1;
2777}
2778
2779
2780
2781
2782
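/*
 * Reserve the pageblock containing this page for high-order atomic
 * allocations by converting it to MIGRATE_HIGHATOMIC, as long as the
 * block is not already highatomic, isolated or CMA. The total reserve is
 * limited to roughly 1% of the zone's managed pages.
 */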
2783static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2784 unsigned int alloc_order)
2785{
2786 int mt;
2787 unsigned long max_managed, flags;
2788
2789
2790
2791
2792
2793 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2794 if (zone->nr_reserved_highatomic >= max_managed)
2795 return;
2796
2797 spin_lock_irqsave(&zone->lock, flags);
2798
2799
2800 if (zone->nr_reserved_highatomic >= max_managed)
2801 goto out_unlock;
2802
2803
2804 mt = get_pageblock_migratetype(page);
2805 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2806 && !is_migrate_cma(mt)) {
2807 zone->nr_reserved_highatomic += pageblock_nr_pages;
2808 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2809 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2810 }
2811
2812out_unlock:
2813 spin_unlock_irqrestore(&zone->lock, flags);
2814}
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
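/*
 * Used when an allocation is about to fail under memory pressure. This
 * returns highatomic reserve pageblocks to the allocation's migratetype
 * so the reserved memory can be used for other requests. If force is
 * true, even a zone's last reserved pageblock is given up; otherwise
 * zones holding no more than one pageblock of reserves are skipped.
 * Returns true if any free pages were moved.
 */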
2825static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2826 bool force)
2827{
2828 struct zonelist *zonelist = ac->zonelist;
2829 unsigned long flags;
2830 struct zoneref *z;
2831 struct zone *zone;
2832 struct page *page;
2833 int order;
2834 bool ret;
2835
2836 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2837 ac->nodemask) {
2838
2839
2840
2841
2842 if (!force && zone->nr_reserved_highatomic <=
2843 pageblock_nr_pages)
2844 continue;
2845
2846 spin_lock_irqsave(&zone->lock, flags);
2847 for (order = 0; order < MAX_ORDER; order++) {
2848 struct free_area *area = &(zone->free_area[order]);
2849
2850 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2851 if (!page)
2852 continue;
2853
2854
2855
2856
2857
2858
2859
2860
2861 if (is_migrate_highatomic_page(page)) {
2862
2863
2864
2865
2866
2867
2868
2869 zone->nr_reserved_highatomic -= min(
2870 pageblock_nr_pages,
2871 zone->nr_reserved_highatomic);
2872 }
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883 set_pageblock_migratetype(page, ac->migratetype);
2884 ret = move_freepages_block(zone, page, ac->migratetype,
2885 NULL);
2886 if (ret) {
2887 spin_unlock_irqrestore(&zone->lock, flags);
2888 return ret;
2889 }
2890 }
2891 spin_unlock_irqrestore(&zone->lock, flags);
2892 }
2893
2894 return false;
2895}
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
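/*
 * Try finding a free buddy page on the fallback list and put it on the
 * free list of the requested migratetype, possibly along with other pages
 * from the same block, depending on fragmentation avoidance heuristics.
 * Returns true if a fallback was found so that __rmqueue_smallest() can
 * grab it when the caller retries.
 */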
2907static __always_inline bool
2908__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2909 unsigned int alloc_flags)
2910{
2911 struct free_area *area;
2912 int current_order;
2913 int min_order = order;
2914 struct page *page;
2915 int fallback_mt;
2916 bool can_steal;
2917
2918
2919
2920
2921
2922
2923 if (alloc_flags & ALLOC_NOFRAGMENT)
2924 min_order = pageblock_order;
2925
2926
2927
2928
2929
2930
2931 for (current_order = MAX_ORDER - 1; current_order >= min_order;
2932 --current_order) {
2933 area = &(zone->free_area[current_order]);
2934 fallback_mt = find_suitable_fallback(area, current_order,
2935 start_migratetype, false, &can_steal);
2936 if (fallback_mt == -1)
2937 continue;
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2948 && current_order > order)
2949 goto find_smallest;
2950
2951 goto do_steal;
2952 }
2953
2954 return false;
2955
2956find_smallest:
2957 for (current_order = order; current_order < MAX_ORDER;
2958 current_order++) {
2959 area = &(zone->free_area[current_order]);
2960 fallback_mt = find_suitable_fallback(area, current_order,
2961 start_migratetype, false, &can_steal);
2962 if (fallback_mt != -1)
2963 break;
2964 }
2965
2966
2967
2968
2969
2970 VM_BUG_ON(current_order == MAX_ORDER);
2971
2972do_steal:
2973 page = get_page_from_free_area(area, fallback_mt);
2974
2975 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2976 can_steal);
2977
2978 trace_mm_page_alloc_extfrag(page, order, current_order,
2979 start_migratetype, fallback_mt);
2980
2981 return true;
2982
2983}
2984
2985
2986
2987
2988
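/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held. Movable allocations are
 * balanced between regular and CMA areas by allocating from CMA first
 * when more than half of the zone's free pages are in the CMA area.
 */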
2989static __always_inline struct page *
2990__rmqueue(struct zone *zone, unsigned int order, int migratetype,
2991 unsigned int alloc_flags)
2992{
2993 struct page *page;
2994
2995 if (IS_ENABLED(CONFIG_CMA)) {
2996
2997
2998
2999
3000
3001 if (alloc_flags & ALLOC_CMA &&
3002 zone_page_state(zone, NR_FREE_CMA_PAGES) >
3003 zone_page_state(zone, NR_FREE_PAGES) / 2) {
3004 page = __rmqueue_cma_fallback(zone, order);
3005 if (page)
3006 goto out;
3007 }
3008 }
3009retry:
3010 page = __rmqueue_smallest(zone, order, migratetype);
3011 if (unlikely(!page)) {
3012 if (alloc_flags & ALLOC_CMA)
3013 page = __rmqueue_cma_fallback(zone, order);
3014
3015 if (!page && __rmqueue_fallback(zone, order, migratetype,
3016 alloc_flags))
3017 goto retry;
3018 }
3019out:
3020 if (page)
3021 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3022 return page;
3023}
3024
3025
3026
3027
3028
3029
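/*
 * Obtain a specified number of elements from the buddy allocator, all
 * under a single hold of the zone lock, for efficiency. Add them to the
 * supplied list. Returns the number of new pages which were placed at
 * *list.
 */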
3030static int rmqueue_bulk(struct zone *zone, unsigned int order,
3031 unsigned long count, struct list_head *list,
3032 int migratetype, unsigned int alloc_flags)
3033{
3034 int i, allocated = 0;
3035
3036
3037
3038
3039
3040 spin_lock(&zone->lock);
3041 for (i = 0; i < count; ++i) {
3042 struct page *page = __rmqueue(zone, order, migratetype,
3043 alloc_flags);
3044 if (unlikely(page == NULL))
3045 break;
3046
3047 if (unlikely(check_pcp_refill(page)))
3048 continue;
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060 list_add_tail(&page->lru, list);
3061 allocated++;
3062 if (is_migrate_cma(get_pcppage_migratetype(page)))
3063 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
3064 -(1 << order));
3065 }
3066
3067
3068
3069
3070
3071
3072
3073 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
3074 spin_unlock(&zone->lock);
3075 return allocated;
3076}
3077
3078#ifdef CONFIG_NUMA
3079
3080
3081
3082
3083
3084
3085
3086
3087void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
3088{
3089 unsigned long flags;
3090 int to_drain, batch;
3091
3092 local_lock_irqsave(&pagesets.lock, flags);
3093 batch = READ_ONCE(pcp->batch);
3094 to_drain = min(pcp->count, batch);
3095 if (to_drain > 0)
3096 free_pcppages_bulk(zone, to_drain, pcp);
3097 local_unlock_irqrestore(&pagesets.lock, flags);
3098}
3099#endif
3100
3101
3102
3103
3104
3105
3106
3107
3108static void drain_pages_zone(unsigned int cpu, struct zone *zone)
3109{
3110 unsigned long flags;
3111 struct per_cpu_pages *pcp;
3112
3113 local_lock_irqsave(&pagesets.lock, flags);
3114
3115 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3116 if (pcp->count)
3117 free_pcppages_bulk(zone, pcp->count, pcp);
3118
3119 local_unlock_irqrestore(&pagesets.lock, flags);
3120}
3121
3122
3123
3124
3125
3126
3127
3128
3129static void drain_pages(unsigned int cpu)
3130{
3131 struct zone *zone;
3132
3133 for_each_populated_zone(zone) {
3134 drain_pages_zone(cpu, zone);
3135 }
3136}
3137
3138
3139
3140
3141
3142
3143
3144void drain_local_pages(struct zone *zone)
3145{
3146 int cpu = smp_processor_id();
3147
3148 if (zone)
3149 drain_pages_zone(cpu, zone);
3150 else
3151 drain_pages(cpu);
3152}
3153
3154static void drain_local_pages_wq(struct work_struct *work)
3155{
3156 struct pcpu_drain *drain;
3157
3158 drain = container_of(work, struct pcpu_drain, work);
3159
3160
3161
3162
3163
3164
3165
3166
3167 preempt_disable();
3168 drain_local_pages(drain->zone);
3169 preempt_enable();
3170}
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
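/*
 * Spill all the per-cpu pages from all CPUs back into the buddy
 * allocator. When zone is non-NULL, only that zone's pcplists are
 * drained; when force_all_cpus is true, CPUs are drained even if their
 * pcplists appear empty. The drain work is queued on mm_percpu_wq and
 * flushed before returning, serialized by pcpu_drain_mutex.
 */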
3182static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3183{
3184 int cpu;
3185
3186
3187
3188
3189
3190 static cpumask_t cpus_with_pcps;
3191
3192
3193
3194
3195
3196 if (WARN_ON_ONCE(!mm_percpu_wq))
3197 return;
3198
3199
3200
3201
3202
3203
3204 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3205 if (!zone)
3206 return;
3207 mutex_lock(&pcpu_drain_mutex);
3208 }
3209
3210
3211
3212
3213
3214
3215
3216 for_each_online_cpu(cpu) {
3217 struct per_cpu_pages *pcp;
3218 struct zone *z;
3219 bool has_pcps = false;
3220
3221 if (force_all_cpus) {
3222
3223
3224
3225
3226 has_pcps = true;
3227 } else if (zone) {
3228 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3229 if (pcp->count)
3230 has_pcps = true;
3231 } else {
3232 for_each_populated_zone(z) {
3233 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
3234 if (pcp->count) {
3235 has_pcps = true;
3236 break;
3237 }
3238 }
3239 }
3240
3241 if (has_pcps)
3242 cpumask_set_cpu(cpu, &cpus_with_pcps);
3243 else
3244 cpumask_clear_cpu(cpu, &cpus_with_pcps);
3245 }
3246
3247 for_each_cpu(cpu, &cpus_with_pcps) {
3248 struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3249
3250 drain->zone = zone;
3251 INIT_WORK(&drain->work, drain_local_pages_wq);
3252 queue_work_on(cpu, mm_percpu_wq, &drain->work);
3253 }
3254 for_each_cpu(cpu, &cpus_with_pcps)
3255 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
3256
3257 mutex_unlock(&pcpu_drain_mutex);
3258}
3259
3260
3261
3262
3263
3264
3265
3266
3267void drain_all_pages(struct zone *zone)
3268{
3269 __drain_all_pages(zone, false);
3270}
3271
3272#ifdef CONFIG_HIBERNATION
3273
3274
3275
3276
3277#define WD_PAGE_COUNT (128*1024)
3278
3279void mark_free_pages(struct zone *zone)
3280{
3281 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3282 unsigned long flags;
3283 unsigned int order, t;
3284 struct page *page;
3285
3286 if (zone_is_empty(zone))
3287 return;
3288
3289 spin_lock_irqsave(&zone->lock, flags);
3290
3291 max_zone_pfn = zone_end_pfn(zone);
3292 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3293 if (pfn_valid(pfn)) {
3294 page = pfn_to_page(pfn);
3295
3296 if (!--page_count) {
3297 touch_nmi_watchdog();
3298 page_count = WD_PAGE_COUNT;
3299 }
3300
3301 if (page_zone(page) != zone)
3302 continue;
3303
3304 if (!swsusp_page_is_forbidden(page))
3305 swsusp_unset_page_free(page);
3306 }
3307
3308 for_each_migratetype_order(order, t) {
3309 list_for_each_entry(page,
3310 &zone->free_area[order].free_list[t], lru) {
3311 unsigned long i;
3312
3313 pfn = page_to_pfn(page);
3314 for (i = 0; i < (1UL << order); i++) {
3315 if (!--page_count) {
3316 touch_nmi_watchdog();
3317 page_count = WD_PAGE_COUNT;
3318 }
3319 swsusp_set_page_free(pfn_to_page(pfn + i));
3320 }
3321 }
3322 }
3323 spin_unlock_irqrestore(&zone->lock, flags);
3324}
3325#endif
3326
3327static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
3328 unsigned int order)
3329{
3330 int migratetype;
3331
3332 if (!free_pcp_prepare(page, order))
3333 return false;
3334
3335 migratetype = get_pfnblock_migratetype(page, pfn);
3336 set_pcppage_migratetype(page, migratetype);
3337 return true;
3338}
3339
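/*
 * Work out how many pages to free from the pcp list in one go. The count
 * starts at the pcp batch size and is scaled up by free_factor when frees
 * keep arriving, clamped between batch and (high - batch). E.g., assuming
 * batch == 63, high == 512 and free_factor == 2, this frees
 * clamp(63 << 2, 63, 449) == 252 pages.
 */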
3340static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch)
3341{
3342 int min_nr_free, max_nr_free;
3343
3344
3345 if (unlikely(high < batch))
3346 return 1;
3347
3348
3349 min_nr_free = batch;
3350 max_nr_free = high - batch;
3351
3352
3353
3354
3355
3356 batch <<= pcp->free_factor;
3357 if (batch < max_nr_free)
3358 pcp->free_factor++;
3359 batch = clamp(batch, min_nr_free, max_nr_free);
3360
3361 return batch;
3362}
3363
3364static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
3365{
3366 int high = READ_ONCE(pcp->high);
3367
3368 if (unlikely(!high))
3369 return 0;
3370
3371 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
3372 return high;
3373
3374
3375
3376
3377
3378 return min(READ_ONCE(pcp->batch) << 2, high);
3379}
3380
3381static void free_unref_page_commit(struct page *page, unsigned long pfn,
3382 int migratetype, unsigned int order)
3383{
3384 struct zone *zone = page_zone(page);
3385 struct per_cpu_pages *pcp;
3386 int high;
3387 int pindex;
3388
3389 __count_vm_event(PGFREE);
3390 pcp = this_cpu_ptr(zone->per_cpu_pageset);
3391 pindex = order_to_pindex(migratetype, order);
3392 list_add(&page->lru, &pcp->lists[pindex]);
3393 pcp->count += 1 << order;
3394 high = nr_pcp_high(pcp, zone);
3395 if (pcp->count >= high) {
3396 int batch = READ_ONCE(pcp->batch);
3397
3398 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp);
3399 }
3400}
3401
3402
3403
3404
3405void free_unref_page(struct page *page, unsigned int order)
3406{
3407 unsigned long flags;
3408 unsigned long pfn = page_to_pfn(page);
3409 int migratetype;
3410
3411 if (!free_unref_page_prepare(page, pfn, order))
3412 return;
3413
3414
3415
3416
3417
3418
3419
3420
3421 migratetype = get_pcppage_migratetype(page);
3422 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
3423 if (unlikely(is_migrate_isolate(migratetype))) {
3424 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
3425 return;
3426 }
3427 migratetype = MIGRATE_MOVABLE;
3428 }
3429
3430 local_lock_irqsave(&pagesets.lock, flags);
3431 free_unref_page_commit(page, pfn, migratetype, order);
3432 local_unlock_irqrestore(&pagesets.lock, flags);
3433}
3434
3435
3436
3437
3438void free_unref_page_list(struct list_head *list)
3439{
3440 struct page *page, *next;
3441 unsigned long flags, pfn;
3442 int batch_count = 0;
3443 int migratetype;
3444
3445
3446 list_for_each_entry_safe(page, next, list, lru) {
3447 pfn = page_to_pfn(page);
3448 if (!free_unref_page_prepare(page, pfn, 0))
3449 list_del(&page->lru);
3450
3451
3452
3453
3454
3455 migratetype = get_pcppage_migratetype(page);
3456 if (unlikely(is_migrate_isolate(migratetype))) {
3457 list_del(&page->lru);
3458 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
3459 continue;
3460 }
3461
3462 set_page_private(page, pfn);
3463 }
3464
3465 local_lock_irqsave(&pagesets.lock, flags);
3466 list_for_each_entry_safe(page, next, list, lru) {
3467 pfn = page_private(page);
3468 set_page_private(page, 0);
3469
3470
3471
3472
3473
3474 migratetype = get_pcppage_migratetype(page);
3475 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3476 migratetype = MIGRATE_MOVABLE;
3477
3478 trace_mm_page_free_batched(page);
3479 free_unref_page_commit(page, pfn, migratetype, 0);
3480
3481
3482
3483
3484
3485 if (++batch_count == SWAP_CLUSTER_MAX) {
3486 local_unlock_irqrestore(&pagesets.lock, flags);
3487 batch_count = 0;
3488 local_lock_irqsave(&pagesets.lock, flags);
3489 }
3490 }
3491 local_unlock_irqrestore(&pagesets.lock, flags);
3492}
3493
3494
3495
3496
3497
3498
3499
3500
3501
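/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1 << order) sub-pages: page[0..n-1]. Each sub-page must be freed
 * individually.
 */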
3502void split_page(struct page *page, unsigned int order)
3503{
3504 int i;
3505
3506 VM_BUG_ON_PAGE(PageCompound(page), page);
3507 VM_BUG_ON_PAGE(!page_count(page), page);
3508
3509 for (i = 1; i < (1 << order); i++)
3510 set_page_refcounted(page + i);
3511 split_page_owner(page, 1 << order);
3512 split_page_memcg(page, 1 << order);
3513}
3514EXPORT_SYMBOL_GPL(split_page);
3515
3516int __isolate_free_page(struct page *page, unsigned int order)
3517{
3518 unsigned long watermark;
3519 struct zone *zone;
3520 int mt;
3521
3522 BUG_ON(!PageBuddy(page));
3523
3524 zone = page_zone(page);
3525 mt = get_pageblock_migratetype(page);
3526
3527 if (!is_migrate_isolate(mt)) {
3528
3529
3530
3531
3532
3533
3534 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3535 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3536 return 0;
3537
3538 __mod_zone_freepage_state(zone, -(1UL << order), mt);
3539 }
3540
3541
3542
3543 del_page_from_free_list(page, zone, order);
3544
3545
3546
3547
3548
3549 if (order >= pageblock_order - 1) {
3550 struct page *endpage = page + (1 << order) - 1;
3551 for (; page < endpage; page += pageblock_nr_pages) {
3552 int mt = get_pageblock_migratetype(page);
3553 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
3554 && !is_migrate_highatomic(mt))
3555 set_pageblock_migratetype(page,
3556 MIGRATE_MOVABLE);
3557 }
3558 }
3559
3560
3561 return 1UL << order;
3562}
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3574{
3575 struct zone *zone = page_zone(page);
3576
3577
3578 lockdep_assert_held(&zone->lock);
3579
3580
3581 __free_one_page(page, page_to_pfn(page), zone, order, mt,
3582 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3583}
3584
3585
3586
3587
3588
3589
3590static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3591 long nr_account)
3592{
3593#ifdef CONFIG_NUMA
3594 enum numa_stat_item local_stat = NUMA_LOCAL;
3595
3596
3597 if (!static_branch_likely(&vm_numa_stat_key))
3598 return;
3599
3600 if (zone_to_nid(z) != numa_node_id())
3601 local_stat = NUMA_OTHER;
3602
3603 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3604 __count_numa_events(z, NUMA_HIT, nr_account);
3605 else {
3606 __count_numa_events(z, NUMA_MISS, nr_account);
3607 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3608 }
3609 __count_numa_events(z, local_stat, nr_account);
3610#endif
3611}
3612
3613
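/*
 * Remove a page from the per-cpu list; the caller must protect the list.
 * Refills the list from the buddy allocator in batches when it is empty.
 */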
3614static inline
3615struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3616 int migratetype,
3617 unsigned int alloc_flags,
3618 struct per_cpu_pages *pcp,
3619 struct list_head *list)
3620{
3621 struct page *page;
3622
3623 do {
3624 if (list_empty(list)) {
3625 int batch = READ_ONCE(pcp->batch);
3626 int alloced;
3627
3628
3629
3630
3631
3632
3633
3634
3635 if (batch > 1)
3636 batch = max(batch >> order, 2);
3637 alloced = rmqueue_bulk(zone, order,
3638 batch, list,
3639 migratetype, alloc_flags);
3640
3641 pcp->count += alloced << order;
3642 if (unlikely(list_empty(list)))
3643 return NULL;
3644 }
3645
3646 page = list_first_entry(list, struct page, lru);
3647 list_del(&page->lru);
3648 pcp->count -= 1 << order;
3649 } while (check_new_pcp(page));
3650
3651 return page;
3652}
3653
3654
3655static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3656 struct zone *zone, unsigned int order,
3657 gfp_t gfp_flags, int migratetype,
3658 unsigned int alloc_flags)
3659{
3660 struct per_cpu_pages *pcp;
3661 struct list_head *list;
3662 struct page *page;
3663 unsigned long flags;
3664
3665 local_lock_irqsave(&pagesets.lock, flags);
3666
3667
3668
3669
3670
3671
3672 pcp = this_cpu_ptr(zone->per_cpu_pageset);
3673 pcp->free_factor >>= 1;
3674 list = &pcp->lists[order_to_pindex(migratetype, order)];
3675 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3676 local_unlock_irqrestore(&pagesets.lock, flags);
3677 if (page) {
3678 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3679 zone_statistics(preferred_zone, zone, 1);
3680 }
3681 return page;
3682}
3683
3684
3685
3686
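/*
 * Allocate a page from the given zone. For orders cached on the per-cpu
 * lists, use the pcplists; otherwise take zone->lock and allocate from
 * the buddy allocator directly, dipping into the highatomic reserves for
 * high-order ALLOC_HARDER requests if necessary.
 */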
3687static inline
3688struct page *rmqueue(struct zone *preferred_zone,
3689 struct zone *zone, unsigned int order,
3690 gfp_t gfp_flags, unsigned int alloc_flags,
3691 int migratetype)
3692{
3693 unsigned long flags;
3694 struct page *page;
3695
3696 if (likely(pcp_allowed_order(order))) {
3697
3698
3699
3700
3701 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3702 migratetype != MIGRATE_MOVABLE) {
3703 page = rmqueue_pcplist(preferred_zone, zone, order,
3704 gfp_flags, migratetype, alloc_flags);
3705 goto out;
3706 }
3707 }
3708
3709
3710
3711
3712
3713 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3714 spin_lock_irqsave(&zone->lock, flags);
3715
3716 do {
3717 page = NULL;
3718
3719
3720
3721
3722
3723
3724 if (order > 0 && alloc_flags & ALLOC_HARDER) {
3725 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3726 if (page)
3727 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3728 }
3729 if (!page)
3730 page = __rmqueue(zone, order, migratetype, alloc_flags);
3731 } while (page && check_new_pages(page, order));
3732 if (!page)
3733 goto failed;
3734
3735 __mod_zone_freepage_state(zone, -(1 << order),
3736 get_pcppage_migratetype(page));
3737 spin_unlock_irqrestore(&zone->lock, flags);
3738
3739 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3740 zone_statistics(preferred_zone, zone, 1);
3741
3742out:
3743
3744 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3745 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3746 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3747 }
3748
3749 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3750 return page;
3751
3752failed:
3753 spin_unlock_irqrestore(&zone->lock, flags);
3754 return NULL;
3755}
3756
3757#ifdef CONFIG_FAIL_PAGE_ALLOC
3758
3759static struct {
3760 struct fault_attr attr;
3761
3762 bool ignore_gfp_highmem;
3763 bool ignore_gfp_reclaim;
3764 u32 min_order;
3765} fail_page_alloc = {
3766 .attr = FAULT_ATTR_INITIALIZER,
3767 .ignore_gfp_reclaim = true,
3768 .ignore_gfp_highmem = true,
3769 .min_order = 1,
3770};
3771
3772static int __init setup_fail_page_alloc(char *str)
3773{
3774 return setup_fault_attr(&fail_page_alloc.attr, str);
3775}
3776__setup("fail_page_alloc=", setup_fail_page_alloc);
3777
3778static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3779{
3780 if (order < fail_page_alloc.min_order)
3781 return false;
3782 if (gfp_mask & __GFP_NOFAIL)
3783 return false;
3784 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3785 return false;
3786 if (fail_page_alloc.ignore_gfp_reclaim &&
3787 (gfp_mask & __GFP_DIRECT_RECLAIM))
3788 return false;
3789
3790 return should_fail(&fail_page_alloc.attr, 1 << order);
3791}
3792
3793#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3794
3795static int __init fail_page_alloc_debugfs(void)
3796{
3797 umode_t mode = S_IFREG | 0600;
3798 struct dentry *dir;
3799
3800 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3801 &fail_page_alloc.attr);
3802
3803 debugfs_create_bool("ignore-gfp-wait", mode, dir,
3804 &fail_page_alloc.ignore_gfp_reclaim);
3805 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3806 &fail_page_alloc.ignore_gfp_highmem);
3807 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3808
3809 return 0;
3810}
3811
3812late_initcall(fail_page_alloc_debugfs);
3813
3814#endif
3815
3816#else
3817
3818static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3819{
3820 return false;
3821}
3822
3823#endif
3824
3825noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3826{
3827 return __should_fail_alloc_page(gfp_mask, order);
3828}
3829ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3830
3831static inline long __zone_watermark_unusable_free(struct zone *z,
3832 unsigned int order, unsigned int alloc_flags)
3833{
3834 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3835 long unusable_free = (1 << order) - 1;
3836
3837
3838
3839
3840
3841
3842 if (likely(!alloc_harder))
3843 unusable_free += z->nr_reserved_highatomic;
3844
3845#ifdef CONFIG_CMA
3846
3847 if (!(alloc_flags & ALLOC_CMA))
3848 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3849#endif
3850
3851 return unusable_free;
3852}
3853
3854
3855
3856
3857
3858
3859
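/*
 * Return true if free base pages are above 'mark'. For high-order checks
 * it returns true only if the order-0 watermark is met and there is at
 * least one free page of a suitable order in an allowed migratetype. The
 * check is made against the free_pages value passed by the caller, after
 * subtracting pages unusable for this request (highatomic reserves, CMA
 * pages when not allowed, and the tail of the requested order).
 */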
3860bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3861 int highest_zoneidx, unsigned int alloc_flags,
3862 long free_pages)
3863{
3864 long min = mark;
3865 int o;
3866 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3867
3868
3869 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3870
3871 if (alloc_flags & ALLOC_HIGH)
3872 min -= min / 2;
3873
3874 if (unlikely(alloc_harder)) {
3875
3876
3877
3878
3879
3880
3881 if (alloc_flags & ALLOC_OOM)
3882 min -= min / 2;
3883 else
3884 min -= min / 4;
3885 }
3886
3887
3888
3889
3890
3891
3892 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3893 return false;
3894
3895
3896 if (!order)
3897 return true;
3898
3899
3900 for (o = order; o < MAX_ORDER; o++) {
3901 struct free_area *area = &z->free_area[o];
3902 int mt;
3903
3904 if (!area->nr_free)
3905 continue;
3906
3907 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3908 if (!free_area_empty(area, mt))
3909 return true;
3910 }
3911
3912#ifdef CONFIG_CMA
3913 if ((alloc_flags & ALLOC_CMA) &&
3914 !free_area_empty(area, MIGRATE_CMA)) {
3915 return true;
3916 }
3917#endif
3918 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
3919 return true;
3920 }
3921 return false;
3922}
3923
3924bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3925 int highest_zoneidx, unsigned int alloc_flags)
3926{
3927 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3928 zone_page_state(z, NR_FREE_PAGES));
3929}
3930
3931static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3932 unsigned long mark, int highest_zoneidx,
3933 unsigned int alloc_flags, gfp_t gfp_mask)
3934{
3935 long free_pages;
3936
3937 free_pages = zone_page_state(z, NR_FREE_PAGES);
3938
3939
3940
3941
3942
3943 if (!order) {
3944 long fast_free;
3945
3946 fast_free = free_pages;
3947 fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
3948 if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
3949 return true;
3950 }
3951
3952 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3953 free_pages))
3954 return true;
3955
3956
3957
3958
3959
3960
3961 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3962 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3963 mark = z->_watermark[WMARK_MIN];
3964 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3965 alloc_flags, free_pages);
3966 }
3967
3968 return false;
3969}
3970
3971bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3972 unsigned long mark, int highest_zoneidx)
3973{
3974 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3975
3976 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3977 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3978
3979 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3980 free_pages);
3981}
3982
3983#ifdef CONFIG_NUMA
3984static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3985{
3986 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3987 node_reclaim_distance;
3988}
3989#else
3990static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3991{
3992 return true;
3993}
3994#endif
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004static inline unsigned int
4005alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
4006{
4007 unsigned int alloc_flags;
4008
4009
4010
4011
4012
4013 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
4014
4015#ifdef CONFIG_ZONE_DMA32
4016 if (!zone)
4017 return alloc_flags;
4018
4019 if (zone_idx(zone) != ZONE_NORMAL)
4020 return alloc_flags;
4021
4022
4023
4024
4025
4026
4027 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
4028 if (nr_online_nodes > 1 && !populated_zone(--zone))
4029 return alloc_flags;
4030
4031 alloc_flags |= ALLOC_NOFRAGMENT;
4032#endif
4033 return alloc_flags;
4034}
4035
4036
4037static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
4038 unsigned int alloc_flags)
4039{
4040#ifdef CONFIG_CMA
4041 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4042 alloc_flags |= ALLOC_CMA;
4043#endif
4044 return alloc_flags;
4045}
4046
4047
4048
4049
4050
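/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page. It honours cpuset constraints, dirty limits and the
 * ALLOC_NOFRAGMENT preference, falling back to node reclaim when a
 * zone's watermark is not met.
 */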
4051static struct page *
4052get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
4053 const struct alloc_context *ac)
4054{
4055 struct zoneref *z;
4056 struct zone *zone;
4057 struct pglist_data *last_pgdat_dirty_limit = NULL;
4058 bool no_fallback;
4059
4060retry:
4061
4062
4063
4064
4065 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
4066 z = ac->preferred_zoneref;
4067 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
4068 ac->nodemask) {
4069 struct page *page;
4070 unsigned long mark;
4071
4072 if (cpusets_enabled() &&
4073 (alloc_flags & ALLOC_CPUSET) &&
4074 !__cpuset_zone_allowed(zone, gfp_mask))
4075 continue;
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095 if (ac->spread_dirty_pages) {
4096 if (last_pgdat_dirty_limit == zone->zone_pgdat)
4097 continue;
4098
4099 if (!node_dirty_ok(zone->zone_pgdat)) {
4100 last_pgdat_dirty_limit = zone->zone_pgdat;
4101 continue;
4102 }
4103 }
4104
4105 if (no_fallback && nr_online_nodes > 1 &&
4106 zone != ac->preferred_zoneref->zone) {
4107 int local_nid;
4108
4109
4110
4111
4112
4113
4114 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
4115 if (zone_to_nid(zone) != local_nid) {
4116 alloc_flags &= ~ALLOC_NOFRAGMENT;
4117 goto retry;
4118 }
4119 }
4120
4121 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
4122 if (!zone_watermark_fast(zone, order, mark,
4123 ac->highest_zoneidx, alloc_flags,
4124 gfp_mask)) {
4125 int ret;
4126
4127#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4128
4129
4130
4131
4132 if (static_branch_unlikely(&deferred_pages)) {
4133 if (_deferred_grow_zone(zone, order))
4134 goto try_this_zone;
4135 }
4136#endif
4137
4138 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4139 if (alloc_flags & ALLOC_NO_WATERMARKS)
4140 goto try_this_zone;
4141
4142 if (!node_reclaim_enabled() ||
4143 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
4144 continue;
4145
4146 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
4147 switch (ret) {
4148 case NODE_RECLAIM_NOSCAN:
4149
4150 continue;
4151 case NODE_RECLAIM_FULL:
4152
4153 continue;
4154 default:
4155
4156 if (zone_watermark_ok(zone, order, mark,
4157 ac->highest_zoneidx, alloc_flags))
4158 goto try_this_zone;
4159
4160 continue;
4161 }
4162 }
4163
4164try_this_zone:
4165 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
4166 gfp_mask, alloc_flags, ac->migratetype);
4167 if (page) {
4168 prep_new_page(page, order, gfp_mask, alloc_flags);
4169
4170
4171
4172
4173
4174 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
4175 reserve_highatomic_pageblock(page, zone, order);
4176
4177 return page;
4178 } else {
4179#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4180
4181 if (static_branch_unlikely(&deferred_pages)) {
4182 if (_deferred_grow_zone(zone, order))
4183 goto try_this_zone;
4184 }
4185#endif
4186 }
4187 }
4188
4189
4190
4191
4192
4193 if (no_fallback) {
4194 alloc_flags &= ~ALLOC_NOFRAGMENT;
4195 goto retry;
4196 }
4197
4198 return NULL;
4199}
4200
4201static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
4202{
4203 unsigned int filter = SHOW_MEM_FILTER_NODES;
4204
4205
4206
4207
4208
4209
4210 if (!(gfp_mask & __GFP_NOMEMALLOC))
4211 if (tsk_is_oom_victim(current) ||
4212 (current->flags & (PF_MEMALLOC | PF_EXITING)))
4213 filter &= ~SHOW_MEM_FILTER_NODES;
4214 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4215 filter &= ~SHOW_MEM_FILTER_NODES;
4216
4217 show_mem(filter, nodemask);
4218}
4219
4220void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4221{
4222 struct va_format vaf;
4223 va_list args;
4224 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4225
4226 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
4227 return;
4228
4229 va_start(args, fmt);
4230 vaf.fmt = fmt;
4231 vaf.va = &args;
4232 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4233 current->comm, &vaf, gfp_mask, &gfp_mask,
4234 nodemask_pr_args(nodemask));
4235 va_end(args);
4236
4237 cpuset_print_current_mems_allowed();
4238 pr_cont("\n");
4239 dump_stack();
4240 warn_alloc_show_mem(gfp_mask, nodemask);
4241}
4242
4243static inline struct page *
4244__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4245 unsigned int alloc_flags,
4246 const struct alloc_context *ac)
4247{
4248 struct page *page;
4249
4250 page = get_page_from_freelist(gfp_mask, order,
4251 alloc_flags|ALLOC_CPUSET, ac);
4252
4253
4254
4255
4256 if (!page)
4257 page = get_page_from_freelist(gfp_mask, order,
4258 alloc_flags, ac);
4259
4260 return page;
4261}
4262
4263static inline struct page *
4264__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4265 const struct alloc_context *ac, unsigned long *did_some_progress)
4266{
4267 struct oom_control oc = {
4268 .zonelist = ac->zonelist,
4269 .nodemask = ac->nodemask,
4270 .memcg = NULL,
4271 .gfp_mask = gfp_mask,
4272 .order = order,
4273 };
4274 struct page *page;
4275
4276 *did_some_progress = 0;
4277
4278
4279
4280
4281
4282 if (!mutex_trylock(&oom_lock)) {
4283 *did_some_progress = 1;
4284 schedule_timeout_uninterruptible(1);
4285 return NULL;
4286 }
4287
4288
4289
4290
4291
4292
4293
4294
4295 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4296 ~__GFP_DIRECT_RECLAIM, order,
4297 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4298 if (page)
4299 goto out;
4300
4301
4302 if (current->flags & PF_DUMPCORE)
4303 goto out;
4304
4305 if (order > PAGE_ALLOC_COSTLY_ORDER)
4306 goto out;
4307
4308
4309
4310
4311
4312
4313
4314
4315 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4316 goto out;
4317
4318 if (ac->highest_zoneidx < ZONE_NORMAL)
4319 goto out;
4320 if (pm_suspended_storage())
4321 goto out;
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
4334 *did_some_progress = 1;
4335
4336
4337
4338
4339
4340 if (gfp_mask & __GFP_NOFAIL)
4341 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4342 ALLOC_NO_WATERMARKS, ac);
4343 }
4344out:
4345 mutex_unlock(&oom_lock);
4346 return page;
4347}
4348
4349
4350
4351
4352
4353#define MAX_COMPACT_RETRIES 16
4354
4355#ifdef CONFIG_COMPACTION
4356
4357static struct page *
4358__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4359 unsigned int alloc_flags, const struct alloc_context *ac,
4360 enum compact_priority prio, enum compact_result *compact_result)
4361{
4362 struct page *page = NULL;
4363 unsigned long pflags;
4364 unsigned int noreclaim_flag;
4365
4366 if (!order)
4367 return NULL;
4368
4369 psi_memstall_enter(&pflags);
4370 noreclaim_flag = memalloc_noreclaim_save();
4371
4372 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4373 prio, &page);
4374
4375 memalloc_noreclaim_restore(noreclaim_flag);
4376 psi_memstall_leave(&pflags);
4377
4378 if (*compact_result == COMPACT_SKIPPED)
4379 return NULL;
4380
4381
4382
4383
4384 count_vm_event(COMPACTSTALL);
4385
4386
4387 if (page)
4388 prep_new_page(page, order, gfp_mask, alloc_flags);
4389
4390
4391 if (!page)
4392 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4393
4394 if (page) {
4395 struct zone *zone = page_zone(page);
4396
4397 zone->compact_blockskip_flush = false;
4398 compaction_defer_reset(zone, order, true);
4399 count_vm_event(COMPACTSUCCESS);
4400 return page;
4401 }
4402
4403
4404
4405
4406
4407 count_vm_event(COMPACTFAIL);
4408
4409 cond_resched();
4410
4411 return NULL;
4412}
4413
4414static inline bool
4415should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4416 enum compact_result compact_result,
4417 enum compact_priority *compact_priority,
4418 int *compaction_retries)
4419{
4420 int max_retries = MAX_COMPACT_RETRIES;
4421 int min_priority;
4422 bool ret = false;
4423 int retries = *compaction_retries;
4424 enum compact_priority priority = *compact_priority;
4425
4426 if (!order)
4427 return false;
4428
4429 if (fatal_signal_pending(current))
4430 return false;
4431
4432 if (compaction_made_progress(compact_result))
4433 (*compaction_retries)++;
4434
4435
4436
4437
4438
4439
4440 if (compaction_failed(compact_result))
4441 goto check_priority;
4442
4443
4444
4445
4446
4447 if (compaction_needs_reclaim(compact_result)) {
4448 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4449 goto out;
4450 }
4451
4452
4453
4454
4455
4456
4457
4458 if (compaction_withdrawn(compact_result)) {
4459 goto check_priority;
4460 }
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470 if (order > PAGE_ALLOC_COSTLY_ORDER)
4471 max_retries /= 4;
4472 if (*compaction_retries <= max_retries) {
4473 ret = true;
4474 goto out;
4475 }
4476
4477
4478
4479
4480
4481check_priority:
4482 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4483 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4484
4485 if (*compact_priority > min_priority) {
4486 (*compact_priority)--;
4487 *compaction_retries = 0;
4488 ret = true;
4489 }
4490out:
4491 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4492 return ret;
4493}
4494#else
4495static inline struct page *
4496__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4497 unsigned int alloc_flags, const struct alloc_context *ac,
4498 enum compact_priority prio, enum compact_result *compact_result)
4499{
4500 *compact_result = COMPACT_SKIPPED;
4501 return NULL;
4502}
4503
4504static inline bool
4505should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4506 enum compact_result compact_result,
4507 enum compact_priority *compact_priority,
4508 int *compaction_retries)
4509{
4510 struct zone *zone;
4511 struct zoneref *z;
4512
4513 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4514 return false;
4515
4516
4517
4518
4519
4520
4521
4522 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4523 ac->highest_zoneidx, ac->nodemask) {
4524 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4525 ac->highest_zoneidx, alloc_flags))
4526 return true;
4527 }
4528 return false;
4529}
4530#endif
4531
4532#ifdef CONFIG_LOCKDEP
4533static struct lockdep_map __fs_reclaim_map =
4534 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4535
4536static bool __need_reclaim(gfp_t gfp_mask)
4537{
4538
4539 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4540 return false;
4541
4542
4543 if (current->flags & PF_MEMALLOC)
4544 return false;
4545
4546 if (gfp_mask & __GFP_NOLOCKDEP)
4547 return false;
4548
4549 return true;
4550}
4551
4552void __fs_reclaim_acquire(void)
4553{
4554 lock_map_acquire(&__fs_reclaim_map);
4555}
4556
4557void __fs_reclaim_release(void)
4558{
4559 lock_map_release(&__fs_reclaim_map);
4560}
4561
4562void fs_reclaim_acquire(gfp_t gfp_mask)
4563{
4564 gfp_mask = current_gfp_context(gfp_mask);
4565
4566 if (__need_reclaim(gfp_mask)) {
4567 if (gfp_mask & __GFP_FS)
4568 __fs_reclaim_acquire();
4569
4570#ifdef CONFIG_MMU_NOTIFIER
4571 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4572 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4573#endif
4574
4575 }
4576}
4577EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4578
4579void fs_reclaim_release(gfp_t gfp_mask)
4580{
4581 gfp_mask = current_gfp_context(gfp_mask);
4582
4583 if (__need_reclaim(gfp_mask)) {
4584 if (gfp_mask & __GFP_FS)
4585 __fs_reclaim_release();
4586 }
4587}
4588EXPORT_SYMBOL_GPL(fs_reclaim_release);
4589#endif
4590
4591
4592static unsigned long
4593__perform_reclaim(gfp_t gfp_mask, unsigned int order,
4594 const struct alloc_context *ac)
4595{
4596 unsigned int noreclaim_flag;
4597 unsigned long pflags, progress;
4598
4599 cond_resched();
4600
4601
4602 cpuset_memory_pressure_bump();
4603 psi_memstall_enter(&pflags);
4604 fs_reclaim_acquire(gfp_mask);
4605 noreclaim_flag = memalloc_noreclaim_save();
4606
4607 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4608 ac->nodemask);
4609
4610 memalloc_noreclaim_restore(noreclaim_flag);
4611 fs_reclaim_release(gfp_mask);
4612 psi_memstall_leave(&pflags);
4613
4614 cond_resched();
4615
4616 return progress;
4617}
4618
4619
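/* The really slow allocator path where we enter direct reclaim */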
4620static inline struct page *
4621__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4622 unsigned int alloc_flags, const struct alloc_context *ac,
4623 unsigned long *did_some_progress)
4624{
4625 struct page *page = NULL;
4626 bool drained = false;
4627
4628 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4629 if (unlikely(!(*did_some_progress)))
4630 return NULL;
4631
4632retry:
4633 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4634
4635
4636
4637
4638
4639
4640 if (!page && !drained) {
4641 unreserve_highatomic_pageblock(ac, false);
4642 drain_all_pages(NULL);
4643 drained = true;
4644 goto retry;
4645 }
4646
4647 return page;
4648}
4649
4650static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4651 const struct alloc_context *ac)
4652{
4653 struct zoneref *z;
4654 struct zone *zone;
4655 pg_data_t *last_pgdat = NULL;
4656 enum zone_type highest_zoneidx = ac->highest_zoneidx;
4657
4658 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4659 ac->nodemask) {
4660 if (last_pgdat != zone->zone_pgdat)
4661 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4662 last_pgdat = zone->zone_pgdat;
4663 }
4664}
4665
4666static inline unsigned int
4667gfp_to_alloc_flags(gfp_t gfp_mask)
4668{
4669 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4670
4671
4672
4673
4674
4675
4676 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4677 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4678
4679
4680
4681
4682
4683
4684
4685 alloc_flags |= (__force int)
4686 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4687
4688 if (gfp_mask & __GFP_ATOMIC) {
4689
4690
4691
4692
4693 if (!(gfp_mask & __GFP_NOMEMALLOC))
4694 alloc_flags |= ALLOC_HARDER;
4695
4696
4697
4698
4699 alloc_flags &= ~ALLOC_CPUSET;
4700 } else if (unlikely(rt_task(current)) && !in_interrupt())
4701 alloc_flags |= ALLOC_HARDER;
4702
4703 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4704
4705 return alloc_flags;
4706}
4707
4708static bool oom_reserves_allowed(struct task_struct *tsk)
4709{
4710 if (!tsk_is_oom_victim(tsk))
4711 return false;
4712
4713
4714
4715
4716
4717 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4718 return false;
4719
4720 return true;
4721}
4722
4723
4724
4725
4726
4727static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4728{
4729 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4730 return 0;
4731 if (gfp_mask & __GFP_MEMALLOC)
4732 return ALLOC_NO_WATERMARKS;
4733 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4734 return ALLOC_NO_WATERMARKS;
4735 if (!in_interrupt()) {
4736 if (current->flags & PF_MEMALLOC)
4737 return ALLOC_NO_WATERMARKS;
4738 else if (oom_reserves_allowed(current))
4739 return ALLOC_OOM;
4740 }
4741
4742 return 0;
4743}
4744
4745bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4746{
4747 return !!__gfp_pfmemalloc_flags(gfp_mask);
4748}
4749
4750
4751
4752
4753
4754
4755
4756
4757
4758
4759
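/*
 * Checks whether it makes sense to retry the reclaim to make forward
 * progress for the given allocation request. It gives up after
 * MAX_RECLAIM_RETRIES loops without progress, unreserving highatomic
 * pageblocks as a last resort, and otherwise retries only while at least
 * one usable zone would pass its min watermark if all reclaimable pages
 * were freed.
 */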
4760static inline bool
4761should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4762 struct alloc_context *ac, int alloc_flags,
4763 bool did_some_progress, int *no_progress_loops)
4764{
4765 struct zone *zone;
4766 struct zoneref *z;
4767 bool ret = false;
4768
4769
4770
4771
4772
4773
4774 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4775 *no_progress_loops = 0;
4776 else
4777 (*no_progress_loops)++;
4778
4779
4780
4781
4782
4783 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4784
4785 return unreserve_highatomic_pageblock(ac, true);
4786 }
4787
4788
4789
4790
4791
4792
4793
4794 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4795 ac->highest_zoneidx, ac->nodemask) {
4796 unsigned long available;
4797 unsigned long reclaimable;
4798 unsigned long min_wmark = min_wmark_pages(zone);
4799 bool wmark;
4800
4801 available = reclaimable = zone_reclaimable_pages(zone);
4802 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4803
4804
4805
4806
4807
4808 wmark = __zone_watermark_ok(zone, order, min_wmark,
4809 ac->highest_zoneidx, alloc_flags, available);
4810 trace_reclaim_retry_zone(z, order, reclaimable,
4811 available, min_wmark, *no_progress_loops, wmark);
4812 if (wmark) {
4813
4814
4815
4816
4817
4818
4819 if (!did_some_progress) {
4820 unsigned long write_pending;
4821
4822 write_pending = zone_page_state_snapshot(zone,
4823 NR_ZONE_WRITE_PENDING);
4824
4825 if (2 * write_pending > reclaimable) {
4826 congestion_wait(BLK_RW_ASYNC, HZ/10);
4827 return true;
4828 }
4829 }
4830
4831 ret = true;
4832 goto out;
4833 }
4834 }
4835
4836out:
4837
4838
4839
4840
4841
4842
4843
4844 if (current->flags & PF_WQ_WORKER)
4845 schedule_timeout_uninterruptible(1);
4846 else
4847 cond_resched();
4848 return ret;
4849}
4850
4851static inline bool
4852check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4853{
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865 if (cpusets_enabled() && ac->nodemask &&
4866 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4867 ac->nodemask = NULL;
4868 return true;
4869 }
4870
4871
4872
4873
4874
4875
4876
4877
4878 if (read_mems_allowed_retry(cpuset_mems_cookie))
4879 return true;
4880
4881 return false;
4882}
4883
4884static inline struct page *
4885__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4886 struct alloc_context *ac)
4887{
4888 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4889 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4890 struct page *page = NULL;
4891 unsigned int alloc_flags;
4892 unsigned long did_some_progress;
4893 enum compact_priority compact_priority;
4894 enum compact_result compact_result;
4895 int compaction_retries;
4896 int no_progress_loops;
4897 unsigned int cpuset_mems_cookie;
4898 int reserve_flags;
4899
4900
4901
4902
4903
4904 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4905 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4906 gfp_mask &= ~__GFP_ATOMIC;
4907
4908retry_cpuset:
4909 compaction_retries = 0;
4910 no_progress_loops = 0;
4911 compact_priority = DEF_COMPACT_PRIORITY;
4912 cpuset_mems_cookie = read_mems_allowed_begin();
4913
4914
4915
4916
4917
4918
4919 alloc_flags = gfp_to_alloc_flags(gfp_mask);
4920
4921
4922
4923
4924
4925
4926
4927 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4928 ac->highest_zoneidx, ac->nodemask);
4929 if (!ac->preferred_zoneref->zone)
4930 goto nopage;
4931
4932 if (alloc_flags & ALLOC_KSWAPD)
4933 wake_all_kswapds(order, gfp_mask, ac);
4934
4935
4936
4937
4938
4939 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4940 if (page)
4941 goto got_pg;
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952 if (can_direct_reclaim &&
4953 (costly_order ||
4954 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4955 && !gfp_pfmemalloc_allowed(gfp_mask)) {
4956 page = __alloc_pages_direct_compact(gfp_mask, order,
4957 alloc_flags, ac,
4958 INIT_COMPACT_PRIORITY,
4959 &compact_result);
4960 if (page)
4961 goto got_pg;
4962
4963
4964
4965
4966
4967 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4968
4969
4970
4971
4972
4973
4974
4975
4976
4977
4978
4979
4980