/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>
#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <linux/memremap.h>
#include <linux/slab.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct user_struct;
struct pt_regs;

extern int sysctl_page_lock_unfairness;

void mm_core_init(void);
void init_mm_internals(void);

45#ifndef CONFIG_NUMA
46extern unsigned long max_mapnr;
47
48static inline void set_max_mapnr(unsigned long limit)
49{
50 max_mapnr = limit;
51}
52#else
53static inline void set_max_mapnr(unsigned long limit) { }
54#endif
55
56extern atomic_long_t _totalram_pages;
57static inline unsigned long totalram_pages(void)
58{
59 return (unsigned long)atomic_long_read(&_totalram_pages);
60}
61
62static inline void totalram_pages_inc(void)
63{
64 atomic_long_inc(&_totalram_pages);
65}
66
67static inline void totalram_pages_dec(void)
68{
69 atomic_long_dec(&_totalram_pages);
70}
71
72static inline void totalram_pages_add(long count)
73{
74 atomic_long_add(count, &_totalram_pages);
75}
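
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * adjusting the managed-RAM counter when a block of memory is onlined or
 * offlined.  The helper name and its caller are hypothetical.
 */
static inline void mm_example_adjust_totalram(long nr_pages, bool online)
{
	if (online)
		totalram_pages_add(nr_pages);
	else
		totalram_pages_add(-nr_pages);
}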
76
77extern void * high_memory;
78extern int page_cluster;
79extern const int page_cluster_max;
80
81#ifdef CONFIG_SYSCTL
82extern int sysctl_legacy_va_layout;
83#else
84#define sysctl_legacy_va_layout 0
85#endif
86
87#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
88extern const int mmap_rnd_bits_min;
89extern const int mmap_rnd_bits_max;
90extern int mmap_rnd_bits __read_mostly;
91#endif
92#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
93extern const int mmap_rnd_compat_bits_min;
94extern const int mmap_rnd_compat_bits_max;
95extern int mmap_rnd_compat_bits __read_mostly;
96#endif
97
98#include <asm/page.h>
99#include <asm/processor.h>
100
101#ifndef __pa_symbol
102#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
103#endif
104
105#ifndef page_to_virt
106#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
107#endif
108
109#ifndef lm_alias
110#define lm_alias(x) __va(__pa_symbol(x))
111#endif
112
113
114
115
116
117
118
119
120#ifndef mm_forbids_zeropage
121#define mm_forbids_zeropage(X) (0)
122#endif
123
124
125
126
127
128
129
130#if BITS_PER_LONG == 64
131
132
133
134
135
136
137#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
138static inline void __mm_zero_struct_page(struct page *page)
139{
140 unsigned long *_pp = (void *)page;
141
142
143 BUILD_BUG_ON(sizeof(struct page) & 7);
144 BUILD_BUG_ON(sizeof(struct page) < 56);
145 BUILD_BUG_ON(sizeof(struct page) > 96);
146
147 switch (sizeof(struct page)) {
148 case 96:
149 _pp[11] = 0;
150 fallthrough;
151 case 88:
152 _pp[10] = 0;
153 fallthrough;
154 case 80:
155 _pp[9] = 0;
156 fallthrough;
157 case 72:
158 _pp[8] = 0;
159 fallthrough;
160 case 64:
161 _pp[7] = 0;
162 fallthrough;
163 case 56:
164 _pp[6] = 0;
165 _pp[5] = 0;
166 _pp[4] = 0;
167 _pp[3] = 0;
168 _pp[2] = 0;
169 _pp[1] = 0;
170 _pp[0] = 0;
171 }
172}
173#else
174#define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
175#endif
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193#define MAPCOUNT_ELF_CORE_MARGIN (5)
194#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
195
196extern int sysctl_max_map_count;
197
198extern unsigned long sysctl_user_reserve_kbytes;
199extern unsigned long sysctl_admin_reserve_kbytes;
200
201extern int sysctl_overcommit_memory;
202extern int sysctl_overcommit_ratio;
203extern unsigned long sysctl_overcommit_kbytes;
204
205int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
206 loff_t *);
207int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
208 loff_t *);
209int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
210 loff_t *);
211
212#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
213#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
214#define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio))
215#else
216#define nth_page(page,n) ((page) + (n))
217#define folio_page_idx(folio, p) ((p) - &(folio)->page)
218#endif
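
/*
 * Illustrative sketch (editor's addition): stepping through a physically
 * contiguous range page by page.  nth_page() must be used instead of plain
 * pointer arithmetic so the walk stays correct on SPARSEMEM without VMEMMAP,
 * where the struct pages of adjacent PFNs may not be virtually contiguous.
 * The helper name is hypothetical.
 */
static inline struct page *mm_example_last_page(struct page *first,
						unsigned long nr_pages)
{
	return nth_page(first, nr_pages - 1);
}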
219
220
221#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
222
223
224#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
225
226
227#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
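
/*
 * Illustrative sketch (editor's addition): rounding a byte length up to a
 * whole number of pages with PAGE_ALIGN().  The helper name is hypothetical.
 */
static inline unsigned long mm_example_bytes_to_pages(unsigned long bytes)
{
	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}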
228
229#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
230static inline struct folio *lru_to_folio(struct list_head *head)
231{
232 return list_entry((head)->prev, struct folio, lru);
233}
234
235void setup_initial_init_mm(void *start_code, void *end_code,
236 void *end_data, void *brk);
237
238
239
240
241
242
243
244
245
246
247struct vm_area_struct *vm_area_alloc(struct mm_struct *);
248struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
249void vm_area_free(struct vm_area_struct *);
250
251void __vm_area_free(struct vm_area_struct *vma);
252
253#ifndef CONFIG_MMU
254extern struct rb_root nommu_region_tree;
255extern struct rw_semaphore nommu_region_sem;
256
257extern unsigned int kobjsize(const void *objp);
258#endif
259
260
261
262
263
264#define VM_NONE 0x00000000
265
266#define VM_READ 0x00000001
267#define VM_WRITE 0x00000002
268#define VM_EXEC 0x00000004
269#define VM_SHARED 0x00000008
270
271
272#define VM_MAYREAD 0x00000010
273#define VM_MAYWRITE 0x00000020
274#define VM_MAYEXEC 0x00000040
275#define VM_MAYSHARE 0x00000080
276
277#define VM_GROWSDOWN 0x00000100
278#ifdef CONFIG_MMU
279#define VM_UFFD_MISSING 0x00000200
280#else
281#define VM_MAYOVERLAY 0x00000200
282#define VM_UFFD_MISSING 0
283#endif
284#define VM_PFNMAP 0x00000400
285#define VM_UFFD_WP 0x00001000
286
287#define VM_LOCKED 0x00002000
288#define VM_IO 0x00004000
289
290
291#define VM_SEQ_READ 0x00008000
292#define VM_RAND_READ 0x00010000
293
294#define VM_DONTCOPY 0x00020000
295#define VM_DONTEXPAND 0x00040000
296#define VM_LOCKONFAULT 0x00080000
297#define VM_ACCOUNT 0x00100000
298#define VM_NORESERVE 0x00200000
299#define VM_HUGETLB 0x00400000
300#define VM_SYNC 0x00800000
301#define VM_ARCH_1 0x01000000
302#define VM_WIPEONFORK 0x02000000
303#define VM_DONTDUMP 0x04000000
304
305#ifdef CONFIG_MEM_SOFT_DIRTY
306# define VM_SOFTDIRTY 0x08000000
307#else
308# define VM_SOFTDIRTY 0
309#endif
310
311#define VM_MIXEDMAP 0x10000000
312#define VM_HUGEPAGE 0x20000000
313#define VM_NOHUGEPAGE 0x40000000
314#define VM_MERGEABLE 0x80000000
315
316#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
317#define VM_HIGH_ARCH_BIT_0 32
318#define VM_HIGH_ARCH_BIT_1 33
319#define VM_HIGH_ARCH_BIT_2 34
320#define VM_HIGH_ARCH_BIT_3 35
321#define VM_HIGH_ARCH_BIT_4 36
322#define VM_HIGH_ARCH_BIT_5 37
323#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
324#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
325#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
326#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
327#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
328#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5)
329#endif
330
331#ifdef CONFIG_ARCH_HAS_PKEYS
332# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
333# define VM_PKEY_BIT0 VM_HIGH_ARCH_0
334# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
335# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
336# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
337#ifdef CONFIG_PPC
338# define VM_PKEY_BIT4 VM_HIGH_ARCH_4
339#else
340# define VM_PKEY_BIT4 0
341#endif
342#endif
343
344#ifdef CONFIG_X86_USER_SHADOW_STACK
345
346
347
348
349
350
351
352
353
354# define VM_SHADOW_STACK VM_HIGH_ARCH_5
355#else
356# define VM_SHADOW_STACK VM_NONE
357#endif
358
359#if defined(CONFIG_X86)
360# define VM_PAT VM_ARCH_1
361#elif defined(CONFIG_PPC)
362# define VM_SAO VM_ARCH_1
363#elif defined(CONFIG_PARISC)
364# define VM_GROWSUP VM_ARCH_1
365#elif defined(CONFIG_SPARC64)
366# define VM_SPARC_ADI VM_ARCH_1
367# define VM_ARCH_CLEAR VM_SPARC_ADI
368#elif defined(CONFIG_ARM64)
369# define VM_ARM64_BTI VM_ARCH_1
370# define VM_ARCH_CLEAR VM_ARM64_BTI
371#elif !defined(CONFIG_MMU)
372# define VM_MAPPED_COPY VM_ARCH_1
373#endif
374
375#if defined(CONFIG_ARM64_MTE)
376# define VM_MTE VM_HIGH_ARCH_0
377# define VM_MTE_ALLOWED VM_HIGH_ARCH_1
378#else
379# define VM_MTE VM_NONE
380# define VM_MTE_ALLOWED VM_NONE
381#endif
382
383#ifndef VM_GROWSUP
384# define VM_GROWSUP VM_NONE
385#endif
386
387#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
388# define VM_UFFD_MINOR_BIT 38
389# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT)
390#else
391# define VM_UFFD_MINOR VM_NONE
392#endif
393
394
395#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
396
397#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
398
399
400#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
401 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
402#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \
403 VM_MAYWRITE | VM_MAYEXEC)
404#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \
405 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
406
407#ifndef VM_DATA_DEFAULT_FLAGS
408#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
409#endif
410
411#ifndef VM_STACK_DEFAULT_FLAGS
412#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
413#endif
414
415#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
416
417#ifdef CONFIG_STACK_GROWSUP
418#define VM_STACK VM_GROWSUP
419#define VM_STACK_EARLY VM_GROWSDOWN
420#else
421#define VM_STACK VM_GROWSDOWN
422#define VM_STACK_EARLY 0
423#endif
424
425#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
426
427
428#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
429
430
431
432
433
434#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
435
436
437#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
438
439
440#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
441
442
443#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
444
445
446#ifndef VM_ARCH_CLEAR
447# define VM_ARCH_CLEAR VM_NONE
448#endif
449#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
450
451
452
453
454
455
456
457
458
459
460#define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \
461 FAULT_FLAG_KILLABLE | \
462 FAULT_FLAG_INTERRUPTIBLE)
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
478{
479 return (flags & FAULT_FLAG_ALLOW_RETRY) &&
480 (!(flags & FAULT_FLAG_TRIED));
481}
482
483#define FAULT_FLAG_TRACE \
484 { FAULT_FLAG_WRITE, "WRITE" }, \
485 { FAULT_FLAG_MKWRITE, "MKWRITE" }, \
486 { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \
487 { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \
488 { FAULT_FLAG_KILLABLE, "KILLABLE" }, \
489 { FAULT_FLAG_TRIED, "TRIED" }, \
490 { FAULT_FLAG_USER, "USER" }, \
491 { FAULT_FLAG_REMOTE, "REMOTE" }, \
492 { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \
493 { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }, \
494 { FAULT_FLAG_VMA_LOCK, "VMA_LOCK" }
495
496
497
498
499
500
501
502
503
504
505
506struct vm_fault {
507 const struct {
508 struct vm_area_struct *vma;
509 gfp_t gfp_mask;
510 pgoff_t pgoff;
511 unsigned long address;
512 unsigned long real_address;
513 };
514 enum fault_flag flags;
515
516 pmd_t *pmd;
517
518 pud_t *pud;
519
520
521 union {
522 pte_t orig_pte;
523 pmd_t orig_pmd;
524
525
526 };
527
528 struct page *cow_page;
529 struct page *page;
530
531
532
533
534
535 pte_t *pte;
536
537
538
539 spinlock_t *ptl;
540
541
542
543 pgtable_t prealloc_pte;
544
545
546
547
548
549
550};
551
552
553
554
555
556
557struct vm_operations_struct {
558 void (*open)(struct vm_area_struct * area);
559
560
561
562
563 void (*close)(struct vm_area_struct * area);
564
565 int (*may_split)(struct vm_area_struct *area, unsigned long addr);
566 int (*mremap)(struct vm_area_struct *area);
567
568
569
570
571
572 int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
573 unsigned long end, unsigned long newflags);
574 vm_fault_t (*fault)(struct vm_fault *vmf);
575 vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
576 vm_fault_t (*map_pages)(struct vm_fault *vmf,
577 pgoff_t start_pgoff, pgoff_t end_pgoff);
578 unsigned long (*pagesize)(struct vm_area_struct * area);
579
580
581
582 vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
583
584
585 vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
586
587
588
589
590
591 int (*access)(struct vm_area_struct *vma, unsigned long addr,
592 void *buf, int len, int write);
593
594
595
596
597 const char *(*name)(struct vm_area_struct *vma);
598
599#ifdef CONFIG_NUMA
600
601
602
603
604
605
606
607 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
608
609
610
611
612
613
614
615
616
617
618
619 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
620 unsigned long addr, pgoff_t *ilx);
621#endif
622
623
624
625
626
627 struct page *(*find_special_page)(struct vm_area_struct *vma,
628 unsigned long addr);
629};
630
631#ifdef CONFIG_NUMA_BALANCING
632static inline void vma_numab_state_init(struct vm_area_struct *vma)
633{
634 vma->numab_state = NULL;
635}
636static inline void vma_numab_state_free(struct vm_area_struct *vma)
637{
638 kfree(vma->numab_state);
639}
640#else
641static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
642static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
643#endif
644
#ifdef CONFIG_PER_VMA_LOCK
/*
 * Try to read-lock a vma.  The function is allowed to occasionally yield a
 * false "locked" result to avoid performance overhead, in which case the
 * caller falls back to using mmap_lock.  It must never yield a false
 * "unlocked" result.
 */
651static inline bool vma_start_read(struct vm_area_struct *vma)
652{
653
654
655
656
657
658
659
660 if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
661 return false;
662
663 if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
664 return false;
665
666
667
668
669
670
671
672
673
674
675
676
677 if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
678 up_read(&vma->vm_lock->lock);
679 return false;
680 }
681 return true;
682}
683
684static inline void vma_end_read(struct vm_area_struct *vma)
685{
686 rcu_read_lock();
687 up_read(&vma->vm_lock->lock);
688 rcu_read_unlock();
689}
690
691
static inline bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
693{
694 mmap_assert_write_locked(vma->vm_mm);
695
696
697
698
699
700 *mm_lock_seq = vma->vm_mm->mm_lock_seq;
701 return (vma->vm_lock_seq == *mm_lock_seq);
702}
703
704
705
706
707
708
709static inline void vma_start_write(struct vm_area_struct *vma)
710{
711 int mm_lock_seq;
712
713 if (__is_vma_write_locked(vma, &mm_lock_seq))
714 return;
715
716 down_write(&vma->vm_lock->lock);
717
718
719
720
721
722
723 WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
724 up_write(&vma->vm_lock->lock);
725}
726
727static inline void vma_assert_write_locked(struct vm_area_struct *vma)
728{
729 int mm_lock_seq;
730
731 VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
732}
733
734static inline void vma_assert_locked(struct vm_area_struct *vma)
735{
736 if (!rwsem_is_locked(&vma->vm_lock->lock))
737 vma_assert_write_locked(vma);
738}
739
740static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
741{
742
743 if (detached)
744 vma_assert_write_locked(vma);
745 vma->detached = detached;
746}
747
748static inline void release_fault_lock(struct vm_fault *vmf)
749{
750 if (vmf->flags & FAULT_FLAG_VMA_LOCK)
751 vma_end_read(vmf->vma);
752 else
753 mmap_read_unlock(vmf->vma->vm_mm);
754}
755
756static inline void assert_fault_locked(struct vm_fault *vmf)
757{
758 if (vmf->flags & FAULT_FLAG_VMA_LOCK)
759 vma_assert_locked(vmf->vma);
760 else
761 mmap_assert_locked(vmf->vma->vm_mm);
762}
763
764struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
765 unsigned long address);
766
767#else
768
769static inline bool vma_start_read(struct vm_area_struct *vma)
770 { return false; }
771static inline void vma_end_read(struct vm_area_struct *vma) {}
772static inline void vma_start_write(struct vm_area_struct *vma) {}
773static inline void vma_assert_write_locked(struct vm_area_struct *vma)
774 { mmap_assert_write_locked(vma->vm_mm); }
775static inline void vma_mark_detached(struct vm_area_struct *vma,
776 bool detached) {}
777
778static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
779 unsigned long address)
780{
781 return NULL;
782}
783
784static inline void release_fault_lock(struct vm_fault *vmf)
785{
786 mmap_read_unlock(vmf->vma->vm_mm);
787}
788
789static inline void assert_fault_locked(struct vm_fault *vmf)
790{
791 mmap_assert_locked(vmf->vma->vm_mm);
792}
793
794#endif
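
/*
 * Illustrative sketch (editor's addition): the usual page-fault pattern for
 * the per-VMA lock.  Try the lockless VMA lookup first and fall back to
 * mmap_lock when it fails.  The helper name and the elided fault handling
 * are hypothetical.
 */
static inline bool mm_example_fault_with_vma_lock(struct mm_struct *mm,
						  unsigned long address)
{
	struct vm_area_struct *vma;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		return false;	/* caller retries under mmap_read_lock() */

	/* ... handle the fault with FAULT_FLAG_VMA_LOCK set ... */

	vma_end_read(vma);
	return true;
}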
795
796extern const struct vm_operations_struct vma_dummy_vm_ops;
797
798
799
800
801
802static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
803{
804 memset(vma, 0, sizeof(*vma));
805 vma->vm_mm = mm;
806 vma->vm_ops = &vma_dummy_vm_ops;
807 INIT_LIST_HEAD(&vma->anon_vma_chain);
808 vma_mark_detached(vma, false);
809 vma_numab_state_init(vma);
810}
811
812
813static inline void vm_flags_init(struct vm_area_struct *vma,
814 vm_flags_t flags)
815{
816 ACCESS_PRIVATE(vma, __vm_flags) = flags;
817}
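
/*
 * Illustrative sketch (editor's addition): initialising a caller-owned,
 * stack-allocated "pseudo" VMA that is never inserted into an mm.
 * vm_flags_init() is used because the VMA is not yet visible to anyone
 * else, so no write lock is required.  The helper name is hypothetical.
 */
static inline void mm_example_init_pseudo_vma(struct vm_area_struct *vma,
					      struct mm_struct *mm,
					      vm_flags_t flags)
{
	vma_init(vma, mm);
	vm_flags_init(vma, flags);
}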
818
819
820
821
822
823
824static inline void vm_flags_reset(struct vm_area_struct *vma,
825 vm_flags_t flags)
826{
827 vma_assert_write_locked(vma);
828 vm_flags_init(vma, flags);
829}
830
831static inline void vm_flags_reset_once(struct vm_area_struct *vma,
832 vm_flags_t flags)
833{
834 vma_assert_write_locked(vma);
835 WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
836}
837
838static inline void vm_flags_set(struct vm_area_struct *vma,
839 vm_flags_t flags)
840{
841 vma_start_write(vma);
842 ACCESS_PRIVATE(vma, __vm_flags) |= flags;
843}
844
845static inline void vm_flags_clear(struct vm_area_struct *vma,
846 vm_flags_t flags)
847{
848 vma_start_write(vma);
849 ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
850}
851
852
853
854
855
856static inline void __vm_flags_mod(struct vm_area_struct *vma,
857 vm_flags_t set, vm_flags_t clear)
858{
859 vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
860}
861
862
863
864
865
866static inline void vm_flags_mod(struct vm_area_struct *vma,
867 vm_flags_t set, vm_flags_t clear)
868{
869 vma_start_write(vma);
870 __vm_flags_mod(vma, set, clear);
871}
872
873static inline void vma_set_anonymous(struct vm_area_struct *vma)
874{
875 vma->vm_ops = NULL;
876}
877
878static inline bool vma_is_anonymous(struct vm_area_struct *vma)
879{
880 return !vma->vm_ops;
881}
882
883
884
885
886
887static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
888{
889 return vma->vm_start < vma->vm_mm->brk &&
890 vma->vm_end > vma->vm_mm->start_brk;
891}
892
893
894
895
896
897static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
898{
899
900
901
902
903
904 return vma->vm_start <= vma->vm_mm->start_stack &&
905 vma->vm_end >= vma->vm_mm->start_stack;
906}
907
908static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
909{
910 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
911
912 if (!maybe_stack)
913 return false;
914
915 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
916 VM_STACK_INCOMPLETE_SETUP)
917 return true;
918
919 return false;
920}
921
922static inline bool vma_is_foreign(struct vm_area_struct *vma)
923{
924 if (!current->mm)
925 return true;
926
927 if (current->mm != vma->vm_mm)
928 return true;
929
930 return false;
931}
932
933static inline bool vma_is_accessible(struct vm_area_struct *vma)
934{
935 return vma->vm_flags & VM_ACCESS_FLAGS;
936}
937
938static inline bool is_shared_maywrite(vm_flags_t vm_flags)
939{
940 return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
941 (VM_SHARED | VM_MAYWRITE);
942}
943
944static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
945{
946 return is_shared_maywrite(vma->vm_flags);
947}
948
949static inline
950struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
951{
952 return mas_find(&vmi->mas, max - 1);
953}
954
955static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
956{
957
958
959
960
961 return mas_find(&vmi->mas, ULONG_MAX);
962}
963
964static inline
965struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
966{
967 return mas_next_range(&vmi->mas, ULONG_MAX);
968}
969
970
971static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
972{
973 return mas_prev(&vmi->mas, 0);
974}
975
976static inline
977struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
978{
979 return mas_prev_range(&vmi->mas, 0);
980}
981
982static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
983{
984 return vmi->mas.index;
985}
986
987static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
988{
989 return vmi->mas.last + 1;
990}
991static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
992 unsigned long count)
993{
994 return mas_expected_entries(&vmi->mas, count);
995}
996
997
998static inline void vma_iter_free(struct vma_iterator *vmi)
999{
1000 mas_destroy(&vmi->mas);
1001}
1002
1003static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
1004 struct vm_area_struct *vma)
1005{
1006 vmi->mas.index = vma->vm_start;
1007 vmi->mas.last = vma->vm_end - 1;
1008 mas_store(&vmi->mas, vma);
1009 if (unlikely(mas_is_err(&vmi->mas)))
1010 return -ENOMEM;
1011
1012 return 0;
1013}
1014
1015static inline void vma_iter_invalidate(struct vma_iterator *vmi)
1016{
1017 mas_pause(&vmi->mas);
1018}
1019
1020static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
1021{
1022 mas_set(&vmi->mas, addr);
1023}
1024
1025#define for_each_vma(__vmi, __vma) \
1026 while (((__vma) = vma_next(&(__vmi))) != NULL)
1027
1028
1029#define for_each_vma_range(__vmi, __vma, __end) \
1030 while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
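
/*
 * Illustrative sketch (editor's addition): walking every VMA in an mm with
 * the VMA iterator.  Assumes the VMA_ITERATOR() initialiser from
 * <linux/mm_types.h>; the helper name is hypothetical.
 */
static inline unsigned long mm_example_count_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		nr++;
	mmap_read_unlock(mm);

	return nr;
}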
1031
1032#ifdef CONFIG_SHMEM
1033
1034
1035
1036
1037bool vma_is_shmem(struct vm_area_struct *vma);
1038bool vma_is_anon_shmem(struct vm_area_struct *vma);
1039#else
1040static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1041static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
1042#endif
1043
1044int vma_is_stack_for_current(struct vm_area_struct *vma);
1045
1046
1047#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
1048
1049struct mmu_gather;
1050struct inode;
1051
1052
1053
1054
1055
1056
1057
1058
1059static inline unsigned int compound_order(struct page *page)
1060{
1061 struct folio *folio = (struct folio *)page;
1062
1063 if (!test_bit(PG_head, &folio->flags))
1064 return 0;
1065 return folio->_flags_1 & 0xff;
1066}
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077static inline unsigned int folio_order(struct folio *folio)
1078{
1079 if (!folio_test_large(folio))
1080 return 0;
1081 return folio->_flags_1 & 0xff;
1082}
1083
1084#include <linux/huge_mm.h>
1085
/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
1102static inline int put_page_testzero(struct page *page)
1103{
1104 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
1105 return page_ref_dec_and_test(page);
1106}
1107
1108static inline int folio_put_testzero(struct folio *folio)
1109{
1110 return put_page_testzero(&folio->page);
1111}
1112
/*
 * Try to grab a ref unless the page has a refcount of zero; return false if
 * that is the case.  Useful for speculative references, where the page may
 * be freed concurrently.
 */
1119static inline bool get_page_unless_zero(struct page *page)
1120{
1121 return page_ref_add_unless(page, 1, 0);
1122}
1123
1124static inline struct folio *folio_get_nontail_page(struct page *page)
1125{
1126 if (unlikely(!get_page_unless_zero(page)))
1127 return NULL;
1128 return (struct folio *)page;
1129}
1130
1131extern int page_is_ram(unsigned long pfn);
1132
1133enum {
1134 REGION_INTERSECTS,
1135 REGION_DISJOINT,
1136 REGION_MIXED,
1137};
1138
1139int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
1140 unsigned long desc);
1141
1142
1143struct page *vmalloc_to_page(const void *addr);
1144unsigned long vmalloc_to_pfn(const void *addr);
1145
1146
1147
1148
1149
1150
1151
1152#ifdef CONFIG_MMU
1153extern bool is_vmalloc_addr(const void *x);
1154extern int is_vmalloc_or_module_addr(const void *x);
1155#else
1156static inline bool is_vmalloc_addr(const void *x)
1157{
1158 return false;
1159}
1160static inline int is_vmalloc_or_module_addr(const void *x)
1161{
1162 return 0;
1163}
1164#endif
1165
/**
 * folio_entire_mapcount - Return the number of times a folio is mapped.
 * @folio: The folio; must be a large folio.
 *
 * This counts the number of times the folio is mapped in its entirety
 * (e.g. by a PMD); mappings of individual pages are not included.
 */
1172static inline int folio_entire_mapcount(struct folio *folio)
1173{
1174 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1175 return atomic_read(&folio->_entire_mapcount) + 1;
1176}
1177
1178
1179
1180
1181
1182
1183static inline void page_mapcount_reset(struct page *page)
1184{
1185 atomic_set(&(page)->_mapcount, -1);
1186}
1187
/**
 * page_mapcount() - Number of times this precise page is mapped.
 * @page: The page.
 *
 * The number of times this page is mapped.  If the page is part of a large
 * folio, it also includes the number of times the folio is mapped in its
 * entirety.
 *
 * The result is not meaningful for pages which cannot be mapped into
 * userspace (e.g. slab pages), whose _mapcount field is used for other
 * purposes.
 */
1200static inline int page_mapcount(struct page *page)
1201{
1202 int mapcount = atomic_read(&page->_mapcount) + 1;
1203
1204 if (unlikely(PageCompound(page)))
1205 mapcount += folio_entire_mapcount(page_folio(page));
1206
1207 return mapcount;
1208}
1209
1210int folio_total_mapcount(struct folio *folio);
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223static inline int folio_mapcount(struct folio *folio)
1224{
1225 if (likely(!folio_test_large(folio)))
1226 return atomic_read(&folio->_mapcount) + 1;
1227 return folio_total_mapcount(folio);
1228}
1229
1230static inline int total_mapcount(struct page *page)
1231{
1232 if (likely(!PageCompound(page)))
1233 return atomic_read(&page->_mapcount) + 1;
1234 return folio_total_mapcount(page_folio(page));
1235}
1236
1237static inline bool folio_large_is_mapped(struct folio *folio)
1238{
1239
1240
1241
1242
1243 return atomic_read(&folio->_nr_pages_mapped) > 0 ||
1244 atomic_read(&folio->_entire_mapcount) >= 0;
1245}
1246
1247
1248
1249
1250
1251
1252
1253static inline bool folio_mapped(struct folio *folio)
1254{
1255 if (likely(!folio_test_large(folio)))
1256 return atomic_read(&folio->_mapcount) >= 0;
1257 return folio_large_is_mapped(folio);
1258}
1259
1260
1261
1262
1263
1264
1265static inline bool page_mapped(struct page *page)
1266{
1267 if (likely(!PageCompound(page)))
1268 return atomic_read(&page->_mapcount) >= 0;
1269 return folio_large_is_mapped(page_folio(page));
1270}
1271
1272static inline struct page *virt_to_head_page(const void *x)
1273{
1274 struct page *page = virt_to_page(x);
1275
1276 return compound_head(page);
1277}
1278
1279static inline struct folio *virt_to_folio(const void *x)
1280{
1281 struct page *page = virt_to_page(x);
1282
1283 return page_folio(page);
1284}
1285
1286void __folio_put(struct folio *folio);
1287
1288void put_pages_list(struct list_head *pages);
1289
1290void split_page(struct page *page, unsigned int order);
1291void folio_copy(struct folio *dst, struct folio *src);
1292
1293unsigned long nr_free_buffer_pages(void);
1294
1295void destroy_large_folio(struct folio *folio);
1296
1297
1298static inline unsigned long page_size(struct page *page)
1299{
1300 return PAGE_SIZE << compound_order(page);
1301}
1302
1303
1304static inline unsigned int page_shift(struct page *page)
1305{
1306 return PAGE_SHIFT + compound_order(page);
1307}
1308
1309
1310
1311
1312
1313static inline unsigned int thp_order(struct page *page)
1314{
1315 VM_BUG_ON_PGFLAGS(PageTail(page), page);
1316 return compound_order(page);
1317}
1318
1319
1320
1321
1322
1323
1324
1325static inline unsigned long thp_size(struct page *page)
1326{
1327 return PAGE_SIZE << thp_order(page);
1328}
1329
1330#ifdef CONFIG_MMU
1331
1332
1333
1334
1335
1336
1337static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1338{
1339 if (likely(vma->vm_flags & VM_WRITE))
1340 pte = pte_mkwrite(pte, vma);
1341 return pte;
1342}
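
/*
 * Illustrative sketch (editor's addition): building a PTE for a freshly
 * faulted-in page, making it writable only when the VMA allows it.
 * mk_pte()/pte_mkdirty()/pte_mkyoung() come from the pgtable headers
 * included above; the helper name is hypothetical.
 */
static inline pte_t mm_example_mk_fault_pte(struct page *page,
					    struct vm_area_struct *vma)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	return pte_mkyoung(maybe_mkwrite(pte_mkdirty(entry), vma));
}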
1343
1344vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
1345void set_pte_range(struct vm_fault *vmf, struct folio *folio,
1346 struct page *page, unsigned int nr, unsigned long addr);
1347
1348vm_fault_t finish_fault(struct vm_fault *vmf);
1349#endif
1350
/*
 * Multiple processes may "see" the same page.  E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free.  page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Each user mapping of a page, and the pagecache itself, holds a reference
 * to the page; the final reference is dropped with put_page()/folio_put()
 * below, at which point the page is returned to the page allocator.
 */
1411#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
1412DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
1413
1414bool __put_devmap_managed_page_refs(struct page *page, int refs);
1415static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
1416{
1417 if (!static_branch_unlikely(&devmap_managed_key))
1418 return false;
1419 if (!is_zone_device_page(page))
1420 return false;
1421 return __put_devmap_managed_page_refs(page, refs);
1422}
1423#else
1424static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
1425{
1426 return false;
1427}
1428#endif
1429
1430static inline bool put_devmap_managed_page(struct page *page)
1431{
1432 return put_devmap_managed_page_refs(page, 1);
1433}
1434
1435
1436#define folio_ref_zero_or_close_to_overflow(folio) \
1437 ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
1438
/**
 * folio_get - Increment the reference count on a folio.
 * @folio: The folio.
 *
 * Context: May be called in any context, as long as you know that
 * you have a refcount on the folio.  If you do not already have one,
 * folio_try_get() may be the right interface for you to use.
 */
1447static inline void folio_get(struct folio *folio)
1448{
1449 VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
1450 folio_ref_inc(folio);
1451}
1452
1453static inline void get_page(struct page *page)
1454{
1455 folio_get(page_folio(page));
1456}
1457
1458static inline __must_check bool try_get_page(struct page *page)
1459{
1460 page = compound_head(page);
1461 if (WARN_ON_ONCE(page_ref_count(page) <= 0))
1462 return false;
1463 page_ref_inc(page);
1464 return true;
1465}
1466
/**
 * folio_put - Decrement the reference count on a folio.
 * @folio: The folio.
 *
 * If the folio's reference count reaches zero, the memory will be
 * released back to the page allocator and may be used by another
 * allocation immediately.  Do not access the memory or the struct folio
 * after calling folio_put() unless you can be sure that it wasn't the
 * last reference.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
1480static inline void folio_put(struct folio *folio)
1481{
1482 if (folio_put_testzero(folio))
1483 __folio_put(folio);
1484}
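
/*
 * Illustrative sketch (editor's addition): taking and dropping a temporary
 * folio reference around an operation during which the folio must not be
 * freed.  The helper name is hypothetical.
 */
static inline void mm_example_with_folio_ref(struct folio *folio)
{
	folio_get(folio);
	/* ... operate on the folio while it cannot be freed ... */
	folio_put(folio);
}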
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500static inline void folio_put_refs(struct folio *folio, int refs)
1501{
1502 if (folio_ref_sub_and_test(folio, refs))
1503 __folio_put(folio);
1504}
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517typedef union {
1518 struct page **pages;
1519 struct folio **folios;
1520 struct encoded_page **encoded_pages;
1521} release_pages_arg __attribute__ ((__transparent_union__));
1522
1523void release_pages(release_pages_arg, int nr);
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537static inline void folios_put(struct folio **folios, unsigned int nr)
1538{
1539 release_pages(folios, nr);
1540}
1541
1542static inline void put_page(struct page *page)
1543{
1544 struct folio *folio = page_folio(page);
1545
1546
1547
1548
1549
1550 if (put_devmap_managed_page(&folio->page))
1551 return;
1552 folio_put(folio);
1553}
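
/*
 * Illustrative sketch (editor's addition): taking a speculative reference on
 * a page that might be freed concurrently (e.g. during a PFN walk) and
 * dropping it again with put_page().  The helper name is hypothetical.
 */
static inline bool mm_example_try_inspect_page(struct page *page)
{
	if (!get_page_unless_zero(page))
		return false;	/* the page was already free */

	/* ... examine the page while holding the reference ... */

	put_page(page);
	return true;
}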
1554
/*
 * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
 * the page's refcount: each FOLL_PIN acquisition (pin_user_pages() and
 * friends) adds GUP_PIN_COUNTING_BIAS rather than 1.  This lets
 * folio_maybe_dma_pinned() make a (possibly false-positive) guess that a
 * folio is DMA-pinned without needing a separate counter for small folios;
 * large folios track their pins exactly in folio->_pincount.
 *
 * Pins obtained this way must be released with unpin_user_page() or one of
 * the unpin_user_pages*() variants below, never with put_page().
 */
1585#define GUP_PIN_COUNTING_BIAS (1U << 10)
1586
1587void unpin_user_page(struct page *page);
1588void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1589 bool make_dirty);
1590void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1591 bool make_dirty);
1592void unpin_user_pages(struct page **pages, unsigned long npages);
1593
1594static inline bool is_cow_mapping(vm_flags_t flags)
1595{
1596 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1597}
1598
1599#ifndef CONFIG_MMU
1600static inline bool is_nommu_shared_mapping(vm_flags_t flags)
1601{
1602
1603
1604
1605
1606
1607
1608
1609
1610 return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
1611}
1612#endif
1613
1614#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1615#define SECTION_IN_PAGE_FLAGS
1616#endif
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626static inline int page_zone_id(struct page *page)
1627{
1628 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
1629}
1630
1631#ifdef NODE_NOT_IN_PAGE_FLAGS
1632extern int page_to_nid(const struct page *page);
1633#else
1634static inline int page_to_nid(const struct page *page)
1635{
1636 struct page *p = (struct page *)page;
1637
1638 return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
1639}
1640#endif
1641
1642static inline int folio_nid(const struct folio *folio)
1643{
1644 return page_to_nid(&folio->page);
1645}
1646
1647#ifdef CONFIG_NUMA_BALANCING
1648
1649#define PAGE_ACCESS_TIME_MIN_BITS 12
1650#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
1651#define PAGE_ACCESS_TIME_BUCKETS \
1652 (PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
1653#else
1654#define PAGE_ACCESS_TIME_BUCKETS 0
1655#endif
1656
1657#define PAGE_ACCESS_TIME_MASK \
1658 (LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
1659
1660static inline int cpu_pid_to_cpupid(int cpu, int pid)
1661{
1662 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1663}
1664
1665static inline int cpupid_to_pid(int cpupid)
1666{
1667 return cpupid & LAST__PID_MASK;
1668}
1669
1670static inline int cpupid_to_cpu(int cpupid)
1671{
1672 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1673}
1674
1675static inline int cpupid_to_nid(int cpupid)
1676{
1677 return cpu_to_node(cpupid_to_cpu(cpupid));
1678}
1679
1680static inline bool cpupid_pid_unset(int cpupid)
1681{
1682 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1683}
1684
1685static inline bool cpupid_cpu_unset(int cpupid)
1686{
1687 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1688}
1689
1690static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1691{
1692 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1693}
1694
1695#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
1696#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
1697static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1698{
1699 return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1700}
1701
1702static inline int folio_last_cpupid(struct folio *folio)
1703{
1704 return folio->_last_cpupid;
1705}
1706static inline void page_cpupid_reset_last(struct page *page)
1707{
1708 page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1709}
1710#else
1711static inline int folio_last_cpupid(struct folio *folio)
1712{
1713 return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1714}
1715
1716int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
1717
1718static inline void page_cpupid_reset_last(struct page *page)
1719{
1720 page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1721}
1722#endif
1723
1724static inline int folio_xchg_access_time(struct folio *folio, int time)
1725{
1726 int last_time;
1727
1728 last_time = folio_xchg_last_cpupid(folio,
1729 time >> PAGE_ACCESS_TIME_BUCKETS);
1730 return last_time << PAGE_ACCESS_TIME_BUCKETS;
1731}
1732
1733static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1734{
1735 unsigned int pid_bit;
1736
1737 pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
1738 if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
1739 __set_bit(pid_bit, &vma->numab_state->pids_active[1]);
1740 }
1741}
1742#else
1743static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1744{
1745 return folio_nid(folio);
1746}
1747
1748static inline int folio_xchg_access_time(struct folio *folio, int time)
1749{
1750 return 0;
1751}
1752
1753static inline int folio_last_cpupid(struct folio *folio)
1754{
1755 return folio_nid(folio);
1756}
1757
1758static inline int cpupid_to_nid(int cpupid)
1759{
1760 return -1;
1761}
1762
1763static inline int cpupid_to_pid(int cpupid)
1764{
1765 return -1;
1766}
1767
1768static inline int cpupid_to_cpu(int cpupid)
1769{
1770 return -1;
1771}
1772
1773static inline int cpu_pid_to_cpupid(int nid, int pid)
1774{
1775 return -1;
1776}
1777
1778static inline bool cpupid_pid_unset(int cpupid)
1779{
1780 return true;
1781}
1782
1783static inline void page_cpupid_reset_last(struct page *page)
1784{
1785}
1786
1787static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1788{
1789 return false;
1790}
1791
1792static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1793{
1794}
1795#endif
1796
1797#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
1798
1799
1800
1801
1802
1803
1804
1805static inline u8 page_kasan_tag(const struct page *page)
1806{
1807 u8 tag = 0xff;
1808
1809 if (kasan_enabled()) {
1810 tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1811 tag ^= 0xff;
1812 }
1813
1814 return tag;
1815}
1816
1817static inline void page_kasan_tag_set(struct page *page, u8 tag)
1818{
1819 unsigned long old_flags, flags;
1820
1821 if (!kasan_enabled())
1822 return;
1823
1824 tag ^= 0xff;
1825 old_flags = READ_ONCE(page->flags);
1826 do {
1827 flags = old_flags;
1828 flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1829 flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1830 } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
1831}
1832
1833static inline void page_kasan_tag_reset(struct page *page)
1834{
1835 if (kasan_enabled())
1836 page_kasan_tag_set(page, 0xff);
1837}
1838
1839#else
1840
1841static inline u8 page_kasan_tag(const struct page *page)
1842{
1843 return 0xff;
1844}
1845
1846static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
1847static inline void page_kasan_tag_reset(struct page *page) { }
1848
1849#endif
1850
1851static inline struct zone *page_zone(const struct page *page)
1852{
1853 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1854}
1855
1856static inline pg_data_t *page_pgdat(const struct page *page)
1857{
1858 return NODE_DATA(page_to_nid(page));
1859}
1860
1861static inline struct zone *folio_zone(const struct folio *folio)
1862{
1863 return page_zone(&folio->page);
1864}
1865
1866static inline pg_data_t *folio_pgdat(const struct folio *folio)
1867{
1868 return page_pgdat(&folio->page);
1869}
1870
1871#ifdef SECTION_IN_PAGE_FLAGS
1872static inline void set_page_section(struct page *page, unsigned long section)
1873{
1874 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1875 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1876}
1877
1878static inline unsigned long page_to_section(const struct page *page)
1879{
1880 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1881}
1882#endif
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893static inline unsigned long folio_pfn(struct folio *folio)
1894{
1895 return page_to_pfn(&folio->page);
1896}
1897
1898static inline struct folio *pfn_folio(unsigned long pfn)
1899{
1900 return page_folio(pfn_to_page(pfn));
1901}
1902
/**
 * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
 * @folio: The folio.
 *
 * This function checks if a folio has been pinned via a call to
 * a function in the pin_user_pages() family.
 *
 * For small folios, the return value is partially fuzzy: false is not fuzzy,
 * because it means "definitely not pinned for DMA", but true means "probably
 * pinned for DMA, but possibly a false positive due to having at least
 * GUP_PIN_COUNTING_BIAS worth of normal folio references".
 *
 * False positives are OK, because: a) it's unlikely for a folio to get that
 * many refcounts, and b) all callers of this routine are expected to be able
 * to deal gracefully with a false positive.
 *
 * For large folios, the result is exact, based on folio->_pincount.
 *
 * Return: True, if it is likely that the folio has been "dma-pinned".
 * False, if the folio is definitely not dma-pinned.
 */
1928static inline bool folio_maybe_dma_pinned(struct folio *folio)
1929{
1930 if (folio_test_large(folio))
1931 return atomic_read(&folio->_pincount) > 0;
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941 return ((unsigned int)folio_ref_count(folio)) >=
1942 GUP_PIN_COUNTING_BIAS;
1943}
1944
1945static inline bool page_maybe_dma_pinned(struct page *page)
1946{
1947 return folio_maybe_dma_pinned(page_folio(page));
1948}
1949
1950
1951
1952
1953
1954
1955
1956static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
1957 struct page *page)
1958{
1959 VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
1960
1961 if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
1962 return false;
1963
1964 return page_maybe_dma_pinned(page);
1965}
1966
1967
1968
1969
1970
1971
1972
1973static inline bool is_zero_page(const struct page *page)
1974{
1975 return is_zero_pfn(page_to_pfn(page));
1976}
1977
1978
1979
1980
1981
1982
1983
1984static inline bool is_zero_folio(const struct folio *folio)
1985{
1986 return is_zero_page(&folio->page);
1987}
1988
1989
1990#ifdef CONFIG_MIGRATION
1991static inline bool folio_is_longterm_pinnable(struct folio *folio)
1992{
1993#ifdef CONFIG_CMA
1994 int mt = folio_migratetype(folio);
1995
1996 if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
1997 return false;
1998#endif
1999
2000 if (is_zero_folio(folio))
2001 return true;
2002
2003
2004 if (folio_is_device_coherent(folio))
2005 return false;
2006
2007
2008 return !folio_is_zone_movable(folio);
2009
2010}
2011#else
2012static inline bool folio_is_longterm_pinnable(struct folio *folio)
2013{
2014 return true;
2015}
2016#endif
2017
2018static inline void set_page_zone(struct page *page, enum zone_type zone)
2019{
2020 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
2021 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
2022}
2023
2024static inline void set_page_node(struct page *page, unsigned long node)
2025{
2026 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
2027 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
2028}
2029
2030static inline void set_page_links(struct page *page, enum zone_type zone,
2031 unsigned long node, unsigned long pfn)
2032{
2033 set_page_zone(page, zone);
2034 set_page_node(page, node);
2035#ifdef SECTION_IN_PAGE_FLAGS
2036 set_page_section(page, pfn_to_section_nr(pfn));
2037#endif
2038}
2039
/**
 * folio_nr_pages - The number of pages in the folio.
 * @folio: The folio.
 *
 * Return: A positive power of two.
 */
2046static inline long folio_nr_pages(struct folio *folio)
2047{
2048 if (!folio_test_large(folio))
2049 return 1;
2050#ifdef CONFIG_64BIT
2051 return folio->_folio_nr_pages;
2052#else
2053 return 1L << (folio->_flags_1 & 0xff);
2054#endif
2055}
2056
2057
2058
2059
2060
2061
2062static inline unsigned long compound_nr(struct page *page)
2063{
2064 struct folio *folio = (struct folio *)page;
2065
2066 if (!test_bit(PG_head, &folio->flags))
2067 return 1;
2068#ifdef CONFIG_64BIT
2069 return folio->_folio_nr_pages;
2070#else
2071 return 1L << (folio->_flags_1 & 0xff);
2072#endif
2073}
2074
2075
2076
2077
2078
2079static inline int thp_nr_pages(struct page *page)
2080{
2081 return folio_nr_pages((struct folio *)page);
2082}
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098static inline struct folio *folio_next(struct folio *folio)
2099{
2100 return (struct folio *)folio_page(folio, folio_nr_pages(folio));
2101}
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115static inline unsigned int folio_shift(struct folio *folio)
2116{
2117 return PAGE_SHIFT + folio_order(folio);
2118}
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128static inline size_t folio_size(struct folio *folio)
2129{
2130 return PAGE_SIZE << folio_order(folio);
2131}
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146static inline int folio_estimated_sharers(struct folio *folio)
2147{
2148 return page_mapcount(folio_page(folio, 0));
2149}
2150
2151#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
2152static inline int arch_make_page_accessible(struct page *page)
2153{
2154 return 0;
2155}
2156#endif
2157
2158#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
2159static inline int arch_make_folio_accessible(struct folio *folio)
2160{
2161 int ret;
2162 long i, nr = folio_nr_pages(folio);
2163
2164 for (i = 0; i < nr; i++) {
2165 ret = arch_make_page_accessible(folio_page(folio, i));
2166 if (ret)
2167 break;
2168 }
2169
2170 return ret;
2171}
2172#endif
2173
2174
2175
2176
2177#include <linux/vmstat.h>
2178
2179static __always_inline void *lowmem_page_address(const struct page *page)
2180{
2181 return page_to_virt(page);
2182}
2183
2184#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
2185#define HASHED_PAGE_VIRTUAL
2186#endif
2187
2188#if defined(WANT_PAGE_VIRTUAL)
2189static inline void *page_address(const struct page *page)
2190{
2191 return page->virtual;
2192}
2193static inline void set_page_address(struct page *page, void *address)
2194{
2195 page->virtual = address;
2196}
2197#define page_address_init() do { } while(0)
2198#endif
2199
2200#if defined(HASHED_PAGE_VIRTUAL)
2201void *page_address(const struct page *page);
2202void set_page_address(struct page *page, void *virtual);
2203void page_address_init(void);
2204#endif
2205
2206#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
2207#define page_address(page) lowmem_page_address(page)
2208#define set_page_address(page, address) do { } while(0)
2209#define page_address_init() do { } while(0)
2210#endif
2211
2212static inline void *folio_address(const struct folio *folio)
2213{
2214 return page_address(&folio->page);
2215}
2216
2217extern pgoff_t __page_file_index(struct page *page);
2218
2219
2220
2221
2222
2223static inline pgoff_t page_index(struct page *page)
2224{
2225 if (unlikely(PageSwapCache(page)))
2226 return __page_file_index(page);
2227 return page->index;
2228}
2229
2230
2231
2232
2233
2234
2235static inline bool page_is_pfmemalloc(const struct page *page)
2236{
2237
2238
2239
2240
2241
2242 return (uintptr_t)page->lru.next & BIT(1);
2243}
2244
2245
2246
2247
2248
2249
2250static inline bool folio_is_pfmemalloc(const struct folio *folio)
2251{
2252
2253
2254
2255
2256
2257 return (uintptr_t)folio->lru.next & BIT(1);
2258}
2259
2260
2261
2262
2263
2264static inline void set_page_pfmemalloc(struct page *page)
2265{
2266 page->lru.next = (void *)BIT(1);
2267}
2268
2269static inline void clear_page_pfmemalloc(struct page *page)
2270{
2271 page->lru.next = NULL;
2272}
2273
2274
2275
2276
2277extern void pagefault_out_of_memory(void);
2278
2279#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
2280#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
2281#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
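
/*
 * Illustrative sketch (editor's addition): clamping a copy length so it does
 * not run past the end of the folio containing @kaddr.  The helper name is
 * hypothetical.
 */
static inline size_t mm_example_copy_len(struct folio *folio,
					 const void *kaddr, size_t want)
{
	size_t space = folio_size(folio) - offset_in_folio(folio, kaddr);

	return want < space ? want : space;
}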
2282
2283
2284
2285
2286struct zap_details {
2287 struct folio *single_folio;
2288 bool even_cows;
2289 zap_flags_t zap_flags;
2290};
2291
2292
2293
2294
2295
2296
2297
2298#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
2299
2300#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
2301
2302#ifdef CONFIG_SCHED_MM_CID
2303void sched_mm_cid_before_execve(struct task_struct *t);
2304void sched_mm_cid_after_execve(struct task_struct *t);
2305void sched_mm_cid_fork(struct task_struct *t);
2306void sched_mm_cid_exit_signals(struct task_struct *t);
2307static inline int task_mm_cid(struct task_struct *t)
2308{
2309 return t->mm_cid;
2310}
2311#else
2312static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
2313static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
2314static inline void sched_mm_cid_fork(struct task_struct *t) { }
2315static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
2316static inline int task_mm_cid(struct task_struct *t)
2317{
2318
2319
2320
2321
2322
2323 return raw_smp_processor_id();
2324}
2325#endif
2326
2327#ifdef CONFIG_MMU
2328extern bool can_do_mlock(void);
2329#else
2330static inline bool can_do_mlock(void) { return false; }
2331#endif
2332extern int user_shm_lock(size_t, struct ucounts *);
2333extern void user_shm_unlock(size_t, struct ucounts *);
2334
2335struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
2336 pte_t pte);
2337struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
2338 pte_t pte);
2339struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
2340 unsigned long addr, pmd_t pmd);
2341struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
2342 pmd_t pmd);
2343
2344void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2345 unsigned long size);
2346void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2347 unsigned long size, struct zap_details *details);
2348static inline void zap_vma_pages(struct vm_area_struct *vma)
2349{
2350 zap_page_range_single(vma, vma->vm_start,
2351 vma->vm_end - vma->vm_start, NULL);
2352}
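
/*
 * Illustrative sketch (editor's addition): a driver tearing down all PTEs it
 * established in its special (e.g. VM_PFNMAP) mapping, for instance on
 * device removal.  The helper name is hypothetical.
 */
static inline void mm_example_driver_unmap(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
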
2353void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
2354 struct vm_area_struct *start_vma, unsigned long start,
2355 unsigned long end, unsigned long tree_end, bool mm_wr_locked);
2356
2357struct mmu_notifier_range;
2358
2359void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
2360 unsigned long end, unsigned long floor, unsigned long ceiling);
2361int
2362copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
2363int follow_pte(struct mm_struct *mm, unsigned long address,
2364 pte_t **ptepp, spinlock_t **ptlp);
2365int follow_pfn(struct vm_area_struct *vma, unsigned long address,
2366 unsigned long *pfn);
2367int follow_phys(struct vm_area_struct *vma, unsigned long address,
2368 unsigned int flags, unsigned long *prot, resource_size_t *phys);
2369int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2370 void *buf, int len, int write);
2371
2372extern void truncate_pagecache(struct inode *inode, loff_t new);
2373extern void truncate_setsize(struct inode *inode, loff_t newsize);
2374void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
2375void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
2376int generic_error_remove_page(struct address_space *mapping, struct page *page);
2377
2378struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
2379 unsigned long address, struct pt_regs *regs);
2380
2381#ifdef CONFIG_MMU
2382extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2383 unsigned long address, unsigned int flags,
2384 struct pt_regs *regs);
2385extern int fixup_user_fault(struct mm_struct *mm,
2386 unsigned long address, unsigned int fault_flags,
2387 bool *unlocked);
2388void unmap_mapping_pages(struct address_space *mapping,
2389 pgoff_t start, pgoff_t nr, bool even_cows);
2390void unmap_mapping_range(struct address_space *mapping,
2391 loff_t const holebegin, loff_t const holelen, int even_cows);
2392#else
2393static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2394 unsigned long address, unsigned int flags,
2395 struct pt_regs *regs)
2396{
2397
2398 BUG();
2399 return VM_FAULT_SIGBUS;
2400}
2401static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
2402 unsigned int fault_flags, bool *unlocked)
2403{
2404
2405 BUG();
2406 return -EFAULT;
2407}
2408static inline void unmap_mapping_pages(struct address_space *mapping,
2409 pgoff_t start, pgoff_t nr, bool even_cows) { }
2410static inline void unmap_mapping_range(struct address_space *mapping,
2411 loff_t const holebegin, loff_t const holelen, int even_cows) { }
2412#endif
2413
2414static inline void unmap_shared_mapping_range(struct address_space *mapping,
2415 loff_t const holebegin, loff_t const holelen)
2416{
2417 unmap_mapping_range(mapping, holebegin, holelen, 0);
2418}
2419
2420static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
2421 unsigned long addr);
2422
2423extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
2424 void *buf, int len, unsigned int gup_flags);
2425extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2426 void *buf, int len, unsigned int gup_flags);
2427
2428long get_user_pages_remote(struct mm_struct *mm,
2429 unsigned long start, unsigned long nr_pages,
2430 unsigned int gup_flags, struct page **pages,
2431 int *locked);
2432long pin_user_pages_remote(struct mm_struct *mm,
2433 unsigned long start, unsigned long nr_pages,
2434 unsigned int gup_flags, struct page **pages,
2435 int *locked);
2436
/*
 * Retrieves a single page alongside its VMA.  Does not support FOLL_NOWAIT.
 */
2440static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
2441 unsigned long addr,
2442 int gup_flags,
2443 struct vm_area_struct **vmap)
2444{
2445 struct page *page;
2446 struct vm_area_struct *vma;
2447 int got;
2448
2449 if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
2450 return ERR_PTR(-EINVAL);
2451
2452 got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
2453
2454 if (got < 0)
2455 return ERR_PTR(got);
2456
2457 vma = vma_lookup(mm, addr);
2458 if (WARN_ON_ONCE(!vma)) {
2459 put_page(page);
2460 return ERR_PTR(-EINVAL);
2461 }
2462
2463 *vmap = vma;
2464 return page;
2465}
2466
2467long get_user_pages(unsigned long start, unsigned long nr_pages,
2468 unsigned int gup_flags, struct page **pages);
2469long pin_user_pages(unsigned long start, unsigned long nr_pages,
2470 unsigned int gup_flags, struct page **pages);
2471long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2472 struct page **pages, unsigned int gup_flags);
2473long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2474 struct page **pages, unsigned int gup_flags);
2475
2476int get_user_pages_fast(unsigned long start, int nr_pages,
2477 unsigned int gup_flags, struct page **pages);
2478int pin_user_pages_fast(unsigned long start, int nr_pages,
2479 unsigned int gup_flags, struct page **pages);
2480void folio_add_pin(struct folio *folio);
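
/*
 * Illustrative sketch (editor's addition): pinning a user buffer for DMA with
 * pin_user_pages_fast() and releasing it with unpin_user_pages().  FOLL_WRITE
 * is assumed to be visible here (it is already used by
 * get_user_page_vma_remote() above); the helper name is hypothetical.
 */
static inline int mm_example_pin_user_buffer(unsigned long start, int nr_pages,
					     struct page **pages)
{
	int pinned;

	pinned = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
	if (pinned <= 0)
		return pinned;

	/* ... set up and run the DMA using @pages ... */

	unpin_user_pages(pages, pinned);
	return pinned;
}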
2481
2482int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
2483int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
2484 struct task_struct *task, bool bypass_rlim);
2485
2486struct kvec;
2487struct page *get_dump_page(unsigned long addr);
2488
2489bool folio_mark_dirty(struct folio *folio);
2490bool set_page_dirty(struct page *page);
2491int set_page_dirty_lock(struct page *page);
2492
2493int get_cmdline(struct task_struct *task, char *buffer, int buflen);
2494
2495extern unsigned long move_page_tables(struct vm_area_struct *vma,
2496 unsigned long old_addr, struct vm_area_struct *new_vma,
2497 unsigned long new_addr, unsigned long len,
2498 bool need_rmap_locks, bool for_stack);
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511#define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0)
2512
2513#define MM_CP_PROT_NUMA (1UL << 1)
2514
2515#define MM_CP_UFFD_WP (1UL << 2)
2516#define MM_CP_UFFD_WP_RESOLVE (1UL << 3)
2517#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
2518 MM_CP_UFFD_WP_RESOLVE)
2519
2520bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
2521int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
2522static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
2523{
2524
2525
2526
2527
2528
2529
2530 if (vma->vm_flags & VM_SHARED)
2531 return vma_wants_writenotify(vma, vma->vm_page_prot);
2532 return !!(vma->vm_flags & VM_WRITE);
2533
2534}
2535bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
2536 pte_t pte);
2537extern long change_protection(struct mmu_gather *tlb,
2538 struct vm_area_struct *vma, unsigned long start,
2539 unsigned long end, unsigned long cp_flags);
2540extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
2541 struct vm_area_struct *vma, struct vm_area_struct **pprev,
2542 unsigned long start, unsigned long end, unsigned long newflags);
2543
2544
2545
2546
2547int get_user_pages_fast_only(unsigned long start, int nr_pages,
2548 unsigned int gup_flags, struct page **pages);
2549
2550static inline bool get_user_page_fast_only(unsigned long addr,
2551 unsigned int gup_flags, struct page **pagep)
2552{
2553 return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
2554}
2555
2556
2557
2558static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
2559{
2560 return percpu_counter_read_positive(&mm->rss_stat[member]);
2561}
2562
2563void mm_trace_rss_stat(struct mm_struct *mm, int member);
2564
2565static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
2566{
2567 percpu_counter_add(&mm->rss_stat[member], value);
2568
2569 mm_trace_rss_stat(mm, member);
2570}
2571
2572static inline void inc_mm_counter(struct mm_struct *mm, int member)
2573{
2574 percpu_counter_inc(&mm->rss_stat[member]);
2575
2576 mm_trace_rss_stat(mm, member);
2577}
2578
2579static inline void dec_mm_counter(struct mm_struct *mm, int member)
2580{
2581 percpu_counter_dec(&mm->rss_stat[member]);
2582
2583 mm_trace_rss_stat(mm, member);
2584}
2585
2586
2587static inline int mm_counter_file(struct page *page)
2588{
2589 if (PageSwapBacked(page))
2590 return MM_SHMEMPAGES;
2591 return MM_FILEPAGES;
2592}
2593
2594static inline int mm_counter(struct page *page)
2595{
2596 if (PageAnon(page))
2597 return MM_ANONPAGES;
2598 return mm_counter_file(page);
2599}
2600
2601static inline unsigned long get_mm_rss(struct mm_struct *mm)
2602{
2603 return get_mm_counter(mm, MM_FILEPAGES) +
2604 get_mm_counter(mm, MM_ANONPAGES) +
2605 get_mm_counter(mm, MM_SHMEMPAGES);
2606}
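
/*
 * Illustrative sketch (editor's addition): reporting a process's resident set
 * size in kilobytes, the unit used by /proc and getrusage().  The helper
 * name is hypothetical.
 */
static inline unsigned long mm_example_rss_kb(struct mm_struct *mm)
{
	return get_mm_rss(mm) << (PAGE_SHIFT - 10);
}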
2607
2608static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2609{
2610 return max(mm->hiwater_rss, get_mm_rss(mm));
2611}
2612
2613static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
2614{
2615 return max(mm->hiwater_vm, mm->total_vm);
2616}
2617
2618static inline void update_hiwater_rss(struct mm_struct *mm)
2619{
2620 unsigned long _rss = get_mm_rss(mm);
2621
2622 if ((mm)->hiwater_rss < _rss)
2623 (mm)->hiwater_rss = _rss;
2624}
2625
2626static inline void update_hiwater_vm(struct mm_struct *mm)
2627{
2628 if (mm->hiwater_vm < mm->total_vm)
2629 mm->hiwater_vm = mm->total_vm;
2630}
2631
2632static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2633{
2634 mm->hiwater_rss = get_mm_rss(mm);
2635}
2636
2637static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
2638 struct mm_struct *mm)
2639{
2640 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2641
2642 if (*maxrss < hiwater_rss)
2643 *maxrss = hiwater_rss;
2644}
2645
2646#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
2647static inline int pte_special(pte_t pte)
2648{
2649 return 0;
2650}
2651
2652static inline pte_t pte_mkspecial(pte_t pte)
2653{
2654 return pte;
2655}
2656#endif
2657
2658#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
2659static inline int pte_devmap(pte_t pte)
2660{
2661 return 0;
2662}
2663#endif
2664
2665extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2666 spinlock_t **ptl);
2667static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2668 spinlock_t **ptl)
2669{
2670 pte_t *ptep;
2671 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
2672 return ptep;
2673}
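
/*
 * Illustrative sketch (example only): get_locked_pte() returns the PTE
 * mapped and locked; callers drop both with pte_unmap_unlock():
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = get_locked_pte(mm, addr, &ptl);
 *
 *	if (!pte)
 *		return -ENOMEM;
 *	... inspect or install the entry ...
 *	pte_unmap_unlock(pte, ptl);
 */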
2674
2675#ifdef __PAGETABLE_P4D_FOLDED
2676static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2677 unsigned long address)
2678{
2679 return 0;
2680}
2681#else
2682int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2683#endif
2684
2685#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
2686static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2687 unsigned long address)
2688{
2689 return 0;
2690}
2691static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
2692static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
2693
2694#else
2695int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2696
2697static inline void mm_inc_nr_puds(struct mm_struct *mm)
2698{
2699 if (mm_pud_folded(mm))
2700 return;
2701 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2702}
2703
2704static inline void mm_dec_nr_puds(struct mm_struct *mm)
2705{
2706 if (mm_pud_folded(mm))
2707 return;
2708 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2709}
2710#endif
2711
2712#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
2713static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
2714 unsigned long address)
2715{
2716 return 0;
2717}
2718
2719static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
2720static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
2721
2722#else
2723int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2724
2725static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2726{
2727 if (mm_pmd_folded(mm))
2728 return;
2729 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2730}
2731
2732static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2733{
2734 if (mm_pmd_folded(mm))
2735 return;
2736 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2737}
2738#endif
2739
2740#ifdef CONFIG_MMU
2741static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2742{
2743 atomic_long_set(&mm->pgtables_bytes, 0);
2744}
2745
2746static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2747{
2748 return atomic_long_read(&mm->pgtables_bytes);
2749}
2750
2751static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2752{
2753 atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2754}
2755
2756static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2757{
2758 atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2759}
2760#else
2761
2762static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
2763static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2764{
2765 return 0;
2766}
2767
2768static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
2769static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
2770#endif
2771
2772int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
2773int __pte_alloc_kernel(pmd_t *pmd);
2774
2775#if defined(CONFIG_MMU)
2776
2777static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2778 unsigned long address)
2779{
2780 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
2781 NULL : p4d_offset(pgd, address);
2782}
2783
2784static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2785 unsigned long address)
2786{
2787 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
2788 NULL : pud_offset(p4d, address);
2789}
2790
2791static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2792{
2793 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
2794 NULL: pmd_offset(pud, address);
2795}
2796#endif
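
/*
 * Illustrative sketch (example only) of the usual allocation walk down to a
 * PTE, assuming mmap_lock is held; this is essentially what helpers such as
 * __get_locked_pte() do internally (pte_alloc_map_lock() is defined below):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *	pud_t *pud;
 *	pmd_t *pmd;
 *
 *	if (!p4d)
 *		return NULL;
 *	pud = pud_alloc(mm, p4d, addr);
 *	if (!pud)
 *		return NULL;
 *	pmd = pmd_alloc(mm, pud, addr);
 *	if (!pmd)
 *		return NULL;
 *	return pte_alloc_map_lock(mm, pmd, addr, ptl);
 */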
2797
2798static inline struct ptdesc *virt_to_ptdesc(const void *x)
2799{
2800 return page_ptdesc(virt_to_page(x));
2801}
2802
2803static inline void *ptdesc_to_virt(const struct ptdesc *pt)
2804{
2805 return page_to_virt(ptdesc_page(pt));
2806}
2807
2808static inline void *ptdesc_address(const struct ptdesc *pt)
2809{
2810 return folio_address(ptdesc_folio(pt));
2811}
2812
2813static inline bool pagetable_is_reserved(struct ptdesc *pt)
2814{
2815 return folio_test_reserved(ptdesc_folio(pt));
2816}
2817
/**
 * pagetable_alloc - Allocate pagetables
 * @gfp:    GFP flags
 * @order:  desired pagetable order
 *
 * pagetable_alloc allocates memory for page tables as well as a page table
 * descriptor to describe that memory.
 *
 * Return: The ptdesc describing the allocated page tables.
 */
2828static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order)
2829{
2830 struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2831
2832 return page_ptdesc(page);
2833}
2834
/**
 * pagetable_free - Free pagetables
 * @pt:	The page table descriptor
 *
 * pagetable_free frees the memory of all page tables described by a page
 * table descriptor and the memory for the descriptor itself.
 */
2842static inline void pagetable_free(struct ptdesc *pt)
2843{
2844 struct page *page = ptdesc_page(pt);
2845
2846 __free_pages(page, compound_order(page));
2847}
2848
2849#if USE_SPLIT_PTE_PTLOCKS
2850#if ALLOC_SPLIT_PTLOCKS
2851void __init ptlock_cache_init(void);
2852bool ptlock_alloc(struct ptdesc *ptdesc);
2853void ptlock_free(struct ptdesc *ptdesc);
2854
2855static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
2856{
2857 return ptdesc->ptl;
2858}
2859#else
2860static inline void ptlock_cache_init(void)
2861{
2862}
2863
2864static inline bool ptlock_alloc(struct ptdesc *ptdesc)
2865{
2866 return true;
2867}
2868
2869static inline void ptlock_free(struct ptdesc *ptdesc)
2870{
2871}
2872
2873static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
2874{
2875 return &ptdesc->ptl;
2876}
2877#endif
2878
2879static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2880{
2881 return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
2882}
2883
2884static inline bool ptlock_init(struct ptdesc *ptdesc)
2885{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * with 0.  Make sure nobody took it in use in between.
	 *
	 * It can happen if an arch tries to use slab for page table allocation:
	 * slab code uses page->slab_cache, which shares storage with page->ptl.
	 */
2893 VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
2894 if (!ptlock_alloc(ptdesc))
2895 return false;
2896 spin_lock_init(ptlock_ptr(ptdesc));
2897 return true;
2898}
2899
2900#else
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
2904static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2905{
2906 return &mm->page_table_lock;
2907}
2908static inline void ptlock_cache_init(void) {}
2909static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
2910static inline void ptlock_free(struct ptdesc *ptdesc) {}
2911#endif
2912
2913static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
2914{
2915 struct folio *folio = ptdesc_folio(ptdesc);
2916
2917 if (!ptlock_init(ptdesc))
2918 return false;
2919 __folio_set_pgtable(folio);
2920 lruvec_stat_add_folio(folio, NR_PAGETABLE);
2921 return true;
2922}
2923
2924static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
2925{
2926 struct folio *folio = ptdesc_folio(ptdesc);
2927
2928 ptlock_free(ptdesc);
2929 __folio_clear_pgtable(folio);
2930 lruvec_stat_sub_folio(folio, NR_PAGETABLE);
2931}
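
/*
 * Illustrative sketch (example only): a pte_alloc_one()-style helper
 * typically pairs pagetable_alloc() with pagetable_pte_ctor(), and the free
 * side pairs pagetable_pte_dtor() with pagetable_free().  GFP_PGTABLE_USER
 * here stands for whatever GFP flags the caller uses for user page tables:
 *
 *	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_USER, 0);
 *
 *	if (!ptdesc)
 *		return NULL;
 *	if (!pagetable_pte_ctor(ptdesc)) {
 *		pagetable_free(ptdesc);
 *		return NULL;
 *	}
 *	return ptdesc_page(ptdesc);
 */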
2932
2933pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
2934static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
2935{
2936 return __pte_offset_map(pmd, addr, NULL);
2937}
2938
2939pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
2940 unsigned long addr, spinlock_t **ptlp);
2941static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
2942 unsigned long addr, spinlock_t **ptlp)
2943{
2944 pte_t *pte;
2945
2946 __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp));
2947 return pte;
2948}
2949
2950pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
2951 unsigned long addr, spinlock_t **ptlp);
2952
2953#define pte_unmap_unlock(pte, ptl) do { \
2954 spin_unlock(ptl); \
2955 pte_unmap(pte); \
2956} while (0)
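
/*
 * Illustrative sketch (example only): walking the PTEs covered by one pmd
 * with the split PTE lock held:
 *
 *	spinlock_t *ptl;
 *	pte_t *start, *pte;
 *
 *	start = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (!pte)
 *		return;		- the page table was freed or replaced
 *	for (; addr < end; pte++, addr += PAGE_SIZE) {
 *		pte_t entry = ptep_get(pte);
 *		... handle entry ...
 *	}
 *	pte_unmap_unlock(start, ptl);
 */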
2957
2958#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
2959
2960#define pte_alloc_map(mm, pmd, address) \
2961 (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
2962
2963#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
2964 (pte_alloc(mm, pmd) ? \
2965 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
2966
2967#define pte_alloc_kernel(pmd, address) \
2968 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
2969 NULL: pte_offset_kernel(pmd, address))
2970
2971#if USE_SPLIT_PMD_PTLOCKS
2972
2973static inline struct page *pmd_pgtable_page(pmd_t *pmd)
2974{
2975 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
2976 return virt_to_page((void *)((unsigned long) pmd & mask));
2977}
2978
2979static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
2980{
2981 return page_ptdesc(pmd_pgtable_page(pmd));
2982}
2983
2984static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2985{
2986 return ptlock_ptr(pmd_ptdesc(pmd));
2987}
2988
2989static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
2990{
2991#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2992 ptdesc->pmd_huge_pte = NULL;
2993#endif
2994 return ptlock_init(ptdesc);
2995}
2996
2997static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
2998{
2999#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3000 VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
3001#endif
3002 ptlock_free(ptdesc);
3003}
3004
3005#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
3006
3007#else
3008
3009static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3010{
3011 return &mm->page_table_lock;
3012}
3013
3014static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
3015static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
3016
3017#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
3018
3019#endif
3020
3021static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
3022{
3023 spinlock_t *ptl = pmd_lockptr(mm, pmd);
3024 spin_lock(ptl);
3025 return ptl;
3026}
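
/*
 * Illustrative sketch (example only): serialising against concurrent pmd
 * changes, e.g. when operating on a transparent huge page entry:
 *
 *	spinlock_t *ptl = pmd_lock(mm, pmd);
 *
 *	... examine or update *pmd ...
 *	spin_unlock(ptl);
 */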
3027
3028static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
3029{
3030 struct folio *folio = ptdesc_folio(ptdesc);
3031
3032 if (!pmd_ptlock_init(ptdesc))
3033 return false;
3034 __folio_set_pgtable(folio);
3035 lruvec_stat_add_folio(folio, NR_PAGETABLE);
3036 return true;
3037}
3038
3039static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
3040{
3041 struct folio *folio = ptdesc_folio(ptdesc);
3042
3043 pmd_ptlock_free(ptdesc);
3044 __folio_clear_pgtable(folio);
3045 lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3046}
3047
/*
 * No scalability reason to split PUD locks yet, but follow the same pattern
 * as the PMD locks, to keep things consistent.
 */
3054static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
3055{
3056 return &mm->page_table_lock;
3057}
3058
3059static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
3060{
3061 spinlock_t *ptl = pud_lockptr(mm, pud);
3062
3063 spin_lock(ptl);
3064 return ptl;
3065}
3066
3067static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
3068{
3069 struct folio *folio = ptdesc_folio(ptdesc);
3070
3071 __folio_set_pgtable(folio);
3072 lruvec_stat_add_folio(folio, NR_PAGETABLE);
3073}
3074
3075static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
3076{
3077 struct folio *folio = ptdesc_folio(ptdesc);
3078
3079 __folio_clear_pgtable(folio);
3080 lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3081}
3082
3083extern void __init pagecache_init(void);
3084extern void free_initmem(void);
3085
/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system.  The freed pages will be poisoned with the pattern
 * "poison" if it is within range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
3092extern unsigned long free_reserved_area(void *start, void *end,
3093 int poison, const char *s);
3094
3095extern void adjust_managed_page_count(struct page *page, long count);
3096
3097extern void reserve_bootmem_region(phys_addr_t start,
3098 phys_addr_t end, int nid);
3099
/* Free the reserved page into the buddy system, so it gets managed. */
3101static inline void free_reserved_page(struct page *page)
3102{
3103 ClearPageReserved(page);
3104 init_page_count(page);
3105 __free_page(page);
3106 adjust_managed_page_count(page, 1);
3107}
3108#define free_highmem_page(page) free_reserved_page(page)
3109
3110static inline void mark_page_reserved(struct page *page)
3111{
3112 SetPageReserved(page);
3113 adjust_managed_page_count(page, -1);
3114}
3115
3116static inline void free_reserved_ptdesc(struct ptdesc *pt)
3117{
3118 free_reserved_page(ptdesc_page(pt));
3119}
3120
/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with the pattern "poison" if it is
 * within range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
3127static inline unsigned long free_initmem_default(int poison)
3128{
3129 extern char __init_begin[], __init_end[];
3130
3131 return free_reserved_area(&__init_begin, &__init_end,
3132 poison, "unused kernel image (initmem)");
3133}
3134
3135static inline unsigned long get_num_physpages(void)
3136{
3137 int nid;
3138 unsigned long phys_pages = 0;
3139
3140 for_each_online_node(nid)
3141 phys_pages += node_present_pages(nid);
3142
3143 return phys_pages;
3144}
3145
/*
 * Using memblock node mappings, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in an
 * architecture independent manner.
 *
 * An architecture is expected to register ranges of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init(), passing in the PFN each zone ends at.  At a basic
 * usage, an architecture is expected to do something like:
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 *						     max_highmem_pfn};
 *	for_each_valid_physical_page_range()
 *		memblock_add_node(base, size, nid, MEMBLOCK_NONE);
 *	free_area_init(max_zone_pfns);
 */
3162void free_area_init(unsigned long *max_zone_pfn);
3163unsigned long node_map_pfn_alignment(void);
3164unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
3165 unsigned long end_pfn);
3166extern unsigned long absent_pages_in_range(unsigned long start_pfn,
3167 unsigned long end_pfn);
3168extern void get_pfn_range_for_nid(unsigned int nid,
3169 unsigned long *start_pfn, unsigned long *end_pfn);
3170
3171#ifndef CONFIG_NUMA
3172static inline int early_pfn_to_nid(unsigned long pfn)
3173{
3174 return 0;
3175}
3176#else
3177
3178extern int __meminit early_pfn_to_nid(unsigned long pfn);
3179#endif
3180
3181extern void set_dma_reserve(unsigned long new_dma_reserve);
3182extern void mem_init(void);
3183extern void __init mmap_init(void);
3184
3185extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
3186static inline void show_mem(void)
3187{
3188 __show_mem(0, NULL, MAX_NR_ZONES - 1);
3189}
3190extern long si_mem_available(void);
3191extern void si_meminfo(struct sysinfo * val);
3192extern void si_meminfo_node(struct sysinfo *val, int nid);
3193#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
3194extern unsigned long arch_reserved_kernel_pages(void);
3195#endif
3196
3197extern __printf(3, 4)
3198void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
3199
3200extern void setup_per_cpu_pageset(void);
3201
/* nommu.c */
3203extern atomic_long_t mmap_pages_allocated;
3204extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
3205
/* interval_tree.c */
3207void vma_interval_tree_insert(struct vm_area_struct *node,
3208 struct rb_root_cached *root);
3209void vma_interval_tree_insert_after(struct vm_area_struct *node,
3210 struct vm_area_struct *prev,
3211 struct rb_root_cached *root);
3212void vma_interval_tree_remove(struct vm_area_struct *node,
3213 struct rb_root_cached *root);
3214struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
3215 unsigned long start, unsigned long last);
3216struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
3217 unsigned long start, unsigned long last);
3218
3219#define vma_interval_tree_foreach(vma, root, start, last) \
3220 for (vma = vma_interval_tree_iter_first(root, start, last); \
3221 vma; vma = vma_interval_tree_iter_next(vma, start, last))
3222
3223void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
3224 struct rb_root_cached *root);
3225void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
3226 struct rb_root_cached *root);
3227struct anon_vma_chain *
3228anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
3229 unsigned long start, unsigned long last);
3230struct anon_vma_chain *anon_vma_interval_tree_iter_next(
3231 struct anon_vma_chain *node, unsigned long start, unsigned long last);
3232#ifdef CONFIG_DEBUG_VM_RB
3233void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
3234#endif
3235
3236#define anon_vma_interval_tree_foreach(avc, root, start, last) \
3237 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
3238 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
3239
/* mmap.c */
3241extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
3242extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
3243 unsigned long start, unsigned long end, pgoff_t pgoff,
3244 struct vm_area_struct *next);
3245extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
3246 unsigned long start, unsigned long end, pgoff_t pgoff);
3247extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
3248extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
3249extern void unlink_file_vma(struct vm_area_struct *);
3250extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
3251 unsigned long addr, unsigned long len, pgoff_t pgoff,
3252 bool *need_rmap_locks);
3253extern void exit_mmap(struct mm_struct *);
3254struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
3255 struct vm_area_struct *prev,
3256 struct vm_area_struct *vma,
3257 unsigned long start, unsigned long end,
3258 unsigned long vm_flags,
3259 struct mempolicy *policy,
3260 struct vm_userfaultfd_ctx uffd_ctx,
3261 struct anon_vma_name *anon_name);
3262
/* We are about to modify the VMA's flags. */
3264static inline struct vm_area_struct
3265*vma_modify_flags(struct vma_iterator *vmi,
3266 struct vm_area_struct *prev,
3267 struct vm_area_struct *vma,
3268 unsigned long start, unsigned long end,
3269 unsigned long new_flags)
3270{
3271 return vma_modify(vmi, prev, vma, start, end, new_flags,
3272 vma_policy(vma), vma->vm_userfaultfd_ctx,
3273 anon_vma_name(vma));
3274}
3275
/* We are about to modify the VMA's flags and/or anon_name. */
3277static inline struct vm_area_struct
3278*vma_modify_flags_name(struct vma_iterator *vmi,
3279 struct vm_area_struct *prev,
3280 struct vm_area_struct *vma,
3281 unsigned long start,
3282 unsigned long end,
3283 unsigned long new_flags,
3284 struct anon_vma_name *new_name)
3285{
3286 return vma_modify(vmi, prev, vma, start, end, new_flags,
3287 vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
3288}
3289
/* We are about to modify the VMA's memory policy. */
3291static inline struct vm_area_struct
3292*vma_modify_policy(struct vma_iterator *vmi,
3293 struct vm_area_struct *prev,
3294 struct vm_area_struct *vma,
3295 unsigned long start, unsigned long end,
3296 struct mempolicy *new_pol)
3297{
3298 return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
3299 new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
3300}
3301
/* We are about to modify the VMA's flags and/or uffd context. */
3303static inline struct vm_area_struct
3304*vma_modify_flags_uffd(struct vma_iterator *vmi,
3305 struct vm_area_struct *prev,
3306 struct vm_area_struct *vma,
3307 unsigned long start, unsigned long end,
3308 unsigned long new_flags,
3309 struct vm_userfaultfd_ctx new_ctx)
3310{
3311 return vma_modify(vmi, prev, vma, start, end, new_flags,
3312 vma_policy(vma), new_ctx, anon_vma_name(vma));
3313}
3314
3315static inline int check_data_rlimit(unsigned long rlim,
3316 unsigned long new,
3317 unsigned long start,
3318 unsigned long end_data,
3319 unsigned long start_data)
3320{
3321 if (rlim < RLIM_INFINITY) {
3322 if (((new - start) + (end_data - start_data)) > rlim)
3323 return -ENOSPC;
3324 }
3325
3326 return 0;
3327}
3328
3329extern int mm_take_all_locks(struct mm_struct *mm);
3330extern void mm_drop_all_locks(struct mm_struct *mm);
3331
3332extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3333extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3334extern struct file *get_mm_exe_file(struct mm_struct *mm);
3335extern struct file *get_task_exe_file(struct task_struct *task);
3336
3337extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
3338extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
3339
3340extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
3341 const struct vm_special_mapping *sm);
3342extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
3343 unsigned long addr, unsigned long len,
3344 unsigned long flags,
3345 const struct vm_special_mapping *spec);
3346
3347extern int install_special_mapping(struct mm_struct *mm,
3348 unsigned long addr, unsigned long len,
3349 unsigned long flags, struct page **pages);
3350
3351unsigned long randomize_stack_top(unsigned long stack_top);
3352unsigned long randomize_page(unsigned long start, unsigned long range);
3353
3354extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
3355
3356extern unsigned long mmap_region(struct file *file, unsigned long addr,
3357 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
3358 struct list_head *uf);
3359extern unsigned long do_mmap(struct file *file, unsigned long addr,
3360 unsigned long len, unsigned long prot, unsigned long flags,
3361 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
3362 struct list_head *uf);
3363extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
3364 unsigned long start, size_t len, struct list_head *uf,
3365 bool unlock);
3366extern int do_munmap(struct mm_struct *, unsigned long, size_t,
3367 struct list_head *uf);
3368extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
3369
3370#ifdef CONFIG_MMU
3371extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3372 unsigned long start, unsigned long end,
3373 struct list_head *uf, bool unlock);
3374extern int __mm_populate(unsigned long addr, unsigned long len,
3375 int ignore_errors);
3376static inline void mm_populate(unsigned long addr, unsigned long len)
3377{
	/* Ignore errors */
3379 (void) __mm_populate(addr, len, 1);
3380}
3381#else
3382static inline void mm_populate(unsigned long addr, unsigned long len) {}
3383#endif
3384
/* These take the mm semaphore themselves */
3386extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
3387extern int vm_munmap(unsigned long, size_t);
3388extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
3389 unsigned long, unsigned long,
3390 unsigned long, unsigned long);
3391
3392struct vm_unmapped_area_info {
3393#define VM_UNMAPPED_AREA_TOPDOWN 1
3394 unsigned long flags;
3395 unsigned long length;
3396 unsigned long low_limit;
3397 unsigned long high_limit;
3398 unsigned long align_mask;
3399 unsigned long align_offset;
3400};
3401
3402extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
3403
/* truncate.c */
3405extern void truncate_inode_pages(struct address_space *, loff_t);
3406extern void truncate_inode_pages_range(struct address_space *,
3407 loff_t lstart, loff_t lend);
3408extern void truncate_inode_pages_final(struct address_space *);
3409
/* generic vm_area_ops exported for stackable file systems */
3411extern vm_fault_t filemap_fault(struct vm_fault *vmf);
3412extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3413 pgoff_t start_pgoff, pgoff_t end_pgoff);
3414extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
3415
3416extern unsigned long stack_guard_gap;
3417
3418int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
3419struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
3420
/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
3422int expand_downwards(struct vm_area_struct *vma, unsigned long address);
3423
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
3425extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
3426extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
3427 struct vm_area_struct **pprev);
3428
/**
 * find_vma_intersection() - Look up the first VMA which intersects the interval
 * @mm: The process address space.
 * @start_addr: The inclusive start user address.
 * @end_addr: The exclusive end user address.
 *
 * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
 * start_addr < end_addr.
 */
3433struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
3434 unsigned long start_addr, unsigned long end_addr);
3435
/**
 * vma_lookup() - Find a VMA at a specific address
 * @mm: The process address space.
 * @addr: The user address.
 *
 * Return: The vm_area_struct at the given address, %NULL otherwise.
 */
3443static inline
3444struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
3445{
3446 return mtree_load(&mm->mm_mt, addr);
3447}
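
/*
 * Illustrative sketch (example only): the result of vma_lookup() is only
 * stable while mmap_lock (or a per-VMA lock) is held:
 *
 *	mmap_read_lock(mm);
 *	vma = vma_lookup(mm, addr);
 *	if (vma)
 *		... addr lies within [vma->vm_start, vma->vm_end) ...
 *	mmap_read_unlock(mm);
 */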
3448
3449static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
3450{
3451 if (vma->vm_flags & VM_GROWSDOWN)
3452 return stack_guard_gap;
3453
	/* Shadow-stack mappings also keep a guard page below them. */
3455 if (vma->vm_flags & VM_SHADOW_STACK)
3456 return PAGE_SIZE;
3457
3458 return 0;
3459}
3460
3461static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
3462{
3463 unsigned long gap = stack_guard_start_gap(vma);
3464 unsigned long vm_start = vma->vm_start;
3465
3466 vm_start -= gap;
3467 if (vm_start > vma->vm_start)
3468 vm_start = 0;
3469 return vm_start;
3470}
3471
3472static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
3473{
3474 unsigned long vm_end = vma->vm_end;
3475
3476 if (vma->vm_flags & VM_GROWSUP) {
3477 vm_end += stack_guard_gap;
3478 if (vm_end < vma->vm_end)
3479 vm_end = -PAGE_SIZE;
3480 }
3481 return vm_end;
3482}
3483
3484static inline unsigned long vma_pages(struct vm_area_struct *vma)
3485{
3486 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
3487}
3488
/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
3490static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
3491 unsigned long vm_start, unsigned long vm_end)
3492{
3493 struct vm_area_struct *vma = vma_lookup(mm, vm_start);
3494
3495 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
3496 vma = NULL;
3497
3498 return vma;
3499}
3500
3501static inline bool range_in_vma(struct vm_area_struct *vma,
3502 unsigned long start, unsigned long end)
3503{
3504 return (vma && vma->vm_start <= start && end <= vma->vm_end);
3505}
3506
3507#ifdef CONFIG_MMU
3508pgprot_t vm_get_page_prot(unsigned long vm_flags);
3509void vma_set_page_prot(struct vm_area_struct *vma);
3510#else
3511static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
3512{
3513 return __pgprot(0);
3514}
3515static inline void vma_set_page_prot(struct vm_area_struct *vma)
3516{
3517 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3518}
3519#endif
3520
3521void vma_set_file(struct vm_area_struct *vma, struct file *file);
3522
3523#ifdef CONFIG_NUMA_BALANCING
3524unsigned long change_prot_numa(struct vm_area_struct *vma,
3525 unsigned long start, unsigned long end);
3526#endif
3527
3528struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
3529 unsigned long addr);
3530int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
3531 unsigned long pfn, unsigned long size, pgprot_t);
3532int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
3533 unsigned long pfn, unsigned long size, pgprot_t prot);
3534int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
3535int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
3536 struct page **pages, unsigned long *num);
3537int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
3538 unsigned long num);
3539int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
3540 unsigned long num);
3541vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
3542 unsigned long pfn);
3543vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
3544 unsigned long pfn, pgprot_t pgprot);
3545vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
3546 pfn_t pfn);
3547vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
3548 unsigned long addr, pfn_t pfn);
3549int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
3550
3551static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
3552 unsigned long addr, struct page *page)
3553{
3554 int err = vm_insert_page(vma, addr, page);
3555
3556 if (err == -ENOMEM)
3557 return VM_FAULT_OOM;
3558 if (err < 0 && err != -EBUSY)
3559 return VM_FAULT_SIGBUS;
3560
3561 return VM_FAULT_NOPAGE;
3562}
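
/*
 * Illustrative sketch (example only, names hypothetical): a driver fault
 * handler backed by a physical PFN range:
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		unsigned long pfn = my_base_pfn + vmf->pgoff;
 *
 *		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
 *	}
 */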
3563
3564#ifndef io_remap_pfn_range
3565static inline int io_remap_pfn_range(struct vm_area_struct *vma,
3566 unsigned long addr, unsigned long pfn,
3567 unsigned long size, pgprot_t prot)
3568{
3569 return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
3570}
3571#endif
3572
3573static inline vm_fault_t vmf_error(int err)
3574{
3575 if (err == -ENOMEM)
3576 return VM_FAULT_OOM;
3577 else if (err == -EHWPOISON)
3578 return VM_FAULT_HWPOISON;
3579 return VM_FAULT_SIGBUS;
3580}
3581
/*
 * Convert filesystem errors to a vm_fault_t suitable for returning from
 * ->page_mkwrite() and similar handlers: 0 means the folio was left locked
 * (VM_FAULT_LOCKED).
 */
3588static inline vm_fault_t vmf_fs_error(int err)
3589{
3590 if (err == 0)
3591 return VM_FAULT_LOCKED;
3592 if (err == -EFAULT || err == -EAGAIN)
3593 return VM_FAULT_NOPAGE;
3594 if (err == -ENOMEM)
3595 return VM_FAULT_OOM;
3596
3597 return VM_FAULT_SIGBUS;
3598}
3599
3600struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
3601 unsigned int foll_flags);
3602
3603static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
3604{
3605 if (vm_fault & VM_FAULT_OOM)
3606 return -ENOMEM;
3607 if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
3608 return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
3609 if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
3610 return -EFAULT;
3611 return 0;
3612}
3613
/*
 * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
 * a (NUMA hinting) fault is required.
 */
3618static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
3619 unsigned int flags)
3620{
	/*
	 * If callers don't want to honor NUMA hinting faults, no need to
	 * determine if we would actually have to trigger a NUMA hinting fault.
	 */
3625 if (!(flags & FOLL_HONOR_NUMA_FAULT))
3626 return true;
3627
	/*
	 * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
	 *
	 * Requiring a fault here even for inaccessible VMAs would mean that
	 * FOLL_FORCE cannot make any progress, because handle_mm_fault()
	 * refuses to process NUMA hinting faults in inaccessible VMAs.
	 */
3635 return !vma_is_accessible(vma);
3636}
3637
3638typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
3639extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
3640 unsigned long size, pte_fn_t fn, void *data);
3641extern int apply_to_existing_page_range(struct mm_struct *mm,
3642 unsigned long address, unsigned long size,
3643 pte_fn_t fn, void *data);
3644
3645#ifdef CONFIG_PAGE_POISONING
3646extern void __kernel_poison_pages(struct page *page, int numpages);
3647extern void __kernel_unpoison_pages(struct page *page, int numpages);
3648extern bool _page_poisoning_enabled_early;
3649DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
3650static inline bool page_poisoning_enabled(void)
3651{
3652 return _page_poisoning_enabled_early;
3653}
3654
/*
 * For use in fast paths after page poisoning setup has run, or when a
 * false negative result is not harmful when called too early.
 */
3658static inline bool page_poisoning_enabled_static(void)
3659{
3660 return static_branch_unlikely(&_page_poisoning_enabled);
3661}
3662static inline void kernel_poison_pages(struct page *page, int numpages)
3663{
3664 if (page_poisoning_enabled_static())
3665 __kernel_poison_pages(page, numpages);
3666}
3667static inline void kernel_unpoison_pages(struct page *page, int numpages)
3668{
3669 if (page_poisoning_enabled_static())
3670 __kernel_unpoison_pages(page, numpages);
3671}
3672#else
3673static inline bool page_poisoning_enabled(void) { return false; }
3674static inline bool page_poisoning_enabled_static(void) { return false; }
3675static inline void __kernel_poison_pages(struct page *page, int nunmpages) { }
3676static inline void kernel_poison_pages(struct page *page, int numpages) { }
3677static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
3678#endif
3679
3680DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
3681static inline bool want_init_on_alloc(gfp_t flags)
3682{
3683 if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
3684 &init_on_alloc))
3685 return true;
3686 return flags & __GFP_ZERO;
3687}
3688
3689DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
3690static inline bool want_init_on_free(void)
3691{
3692 return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
3693 &init_on_free);
3694}
3695
3696extern bool _debug_pagealloc_enabled_early;
3697DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
3698
3699static inline bool debug_pagealloc_enabled(void)
3700{
3701 return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
3702 _debug_pagealloc_enabled_early;
3703}
3704
/*
 * For use in fast paths after debug_pagealloc setup has run, or when a
 * false negative result is not harmful when called too early.
 */
3709static inline bool debug_pagealloc_enabled_static(void)
3710{
3711 if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
3712 return false;
3713
3714 return static_branch_unlikely(&_debug_pagealloc_enabled);
3715}
3716
/*
 * With CONFIG_DEBUG_PAGEALLOC, the architecture's __kernel_map_pages() maps
 * or unmaps pages in the kernel direct map so that stray accesses to freed
 * pages fault immediately.
 */
3721extern void __kernel_map_pages(struct page *page, int numpages, int enable);
3722#ifdef CONFIG_DEBUG_PAGEALLOC
3723static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
3724{
3725 if (debug_pagealloc_enabled_static())
3726 __kernel_map_pages(page, numpages, 1);
3727}
3728
3729static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
3730{
3731 if (debug_pagealloc_enabled_static())
3732 __kernel_map_pages(page, numpages, 0);
3733}
3734
3735extern unsigned int _debug_guardpage_minorder;
3736DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
3737
3738static inline unsigned int debug_guardpage_minorder(void)
3739{
3740 return _debug_guardpage_minorder;
3741}
3742
3743static inline bool debug_guardpage_enabled(void)
3744{
3745 return static_branch_unlikely(&_debug_guardpage_enabled);
3746}
3747
3748static inline bool page_is_guard(struct page *page)
3749{
3750 if (!debug_guardpage_enabled())
3751 return false;
3752
3753 return PageGuard(page);
3754}
3755
3756bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
3757 int migratetype);
3758static inline bool set_page_guard(struct zone *zone, struct page *page,
3759 unsigned int order, int migratetype)
3760{
3761 if (!debug_guardpage_enabled())
3762 return false;
3763 return __set_page_guard(zone, page, order, migratetype);
3764}
3765
3766void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
3767 int migratetype);
3768static inline void clear_page_guard(struct zone *zone, struct page *page,
3769 unsigned int order, int migratetype)
3770{
3771 if (!debug_guardpage_enabled())
3772 return;
3773 __clear_page_guard(zone, page, order, migratetype);
3774}
3775
3776#else
3777static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
3778static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
3779static inline unsigned int debug_guardpage_minorder(void) { return 0; }
3780static inline bool debug_guardpage_enabled(void) { return false; }
3781static inline bool page_is_guard(struct page *page) { return false; }
3782static inline bool set_page_guard(struct zone *zone, struct page *page,
3783 unsigned int order, int migratetype) { return false; }
3784static inline void clear_page_guard(struct zone *zone, struct page *page,
3785 unsigned int order, int migratetype) {}
3786#endif
3787
3788#ifdef __HAVE_ARCH_GATE_AREA
3789extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
3790extern int in_gate_area_no_mm(unsigned long addr);
3791extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
3792#else
3793static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3794{
3795 return NULL;
3796}
3797static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
3798static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
3799{
3800 return 0;
3801}
3802#endif
3803
3804extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
3805
3806#ifdef CONFIG_SYSCTL
3807extern int sysctl_drop_caches;
3808int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
3809 loff_t *);
3810#endif
3811
3812void drop_slab(void);
3813
3814#ifndef CONFIG_MMU
3815#define randomize_va_space 0
3816#else
3817extern int randomize_va_space;
3818#endif
3819
3820const char * arch_vma_name(struct vm_area_struct *vma);
3821#ifdef CONFIG_MMU
3822void print_vma_addr(char *prefix, unsigned long rip);
3823#else
3824static inline void print_vma_addr(char *prefix, unsigned long rip)
3825{
3826}
3827#endif
3828
3829void *sparse_buffer_alloc(unsigned long size);
3830struct page * __populate_section_memmap(unsigned long pfn,
3831 unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
3832 struct dev_pagemap *pgmap);
3833void pmd_init(void *addr);
3834void pud_init(void *addr);
3835pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
3836p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
3837pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
3838pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
3839pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
3840 struct vmem_altmap *altmap, struct page *reuse);
3841void *vmemmap_alloc_block(unsigned long size, int node);
3842struct vmem_altmap;
3843void *vmemmap_alloc_block_buf(unsigned long size, int node,
3844 struct vmem_altmap *altmap);
3845void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
3846void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
3847 unsigned long addr, unsigned long next);
3848int vmemmap_check_pmd(pmd_t *pmd, int node,
3849 unsigned long addr, unsigned long next);
3850int vmemmap_populate_basepages(unsigned long start, unsigned long end,
3851 int node, struct vmem_altmap *altmap);
3852int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
3853 int node, struct vmem_altmap *altmap);
3854int vmemmap_populate(unsigned long start, unsigned long end, int node,
3855 struct vmem_altmap *altmap);
3856void vmemmap_populate_print_last(void);
3857#ifdef CONFIG_MEMORY_HOTPLUG
3858void vmemmap_free(unsigned long start, unsigned long end,
3859 struct vmem_altmap *altmap);
3860#endif
3861
3862#define VMEMMAP_RESERVE_NR 2
3863#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
3864static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
3865 struct dev_pagemap *pgmap)
3866{
3867 unsigned long nr_pages;
3868 unsigned long nr_vmemmap_pages;
3869
3870 if (!pgmap || !is_power_of_2(sizeof(struct page)))
3871 return false;
3872
3873 nr_pages = pgmap_vmemmap_nr(pgmap);
3874 nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
3875
	/*
	 * For vmemmap optimization with DAX we need a minimum of 2 vmemmap
	 * pages.  See the layout diagram in Documentation/mm/vmemmap_dedup.rst.
	 */
3879 return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
3880}
3881
/* Architectures may override the generic check above. */
3884#ifndef vmemmap_can_optimize
3885#define vmemmap_can_optimize __vmemmap_can_optimize
3886#endif
3887
3888#else
3889static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
3890 struct dev_pagemap *pgmap)
3891{
3892 return false;
3893}
3894#endif
3895
3896void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
3897 unsigned long nr_pages);
3898
3899enum mf_flags {
3900 MF_COUNT_INCREASED = 1 << 0,
3901 MF_ACTION_REQUIRED = 1 << 1,
3902 MF_MUST_KILL = 1 << 2,
3903 MF_SOFT_OFFLINE = 1 << 3,
3904 MF_UNPOISON = 1 << 4,
3905 MF_SW_SIMULATED = 1 << 5,
3906 MF_NO_RETRY = 1 << 6,
3907};
3908int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
3909 unsigned long count, int mf_flags);
3910extern int memory_failure(unsigned long pfn, int flags);
3911extern void memory_failure_queue_kick(int cpu);
3912extern int unpoison_memory(unsigned long pfn);
3913extern void shake_page(struct page *p);
3914extern atomic_long_t num_poisoned_pages __read_mostly;
3915extern int soft_offline_page(unsigned long pfn, int flags);
3916#ifdef CONFIG_MEMORY_FAILURE
/*
 * Sysfs entries for memory failure handling statistics.
 */
3920extern const struct attribute_group memory_failure_attr_group;
3921extern void memory_failure_queue(unsigned long pfn, int flags);
3922extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
3923 bool *migratable_cleared);
3924void num_poisoned_pages_inc(unsigned long pfn);
3925void num_poisoned_pages_sub(unsigned long pfn, long i);
3926struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
3927#else
3928static inline void memory_failure_queue(unsigned long pfn, int flags)
3929{
3930}
3931
3932static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
3933 bool *migratable_cleared)
3934{
3935 return 0;
3936}
3937
3938static inline void num_poisoned_pages_inc(unsigned long pfn)
3939{
3940}
3941
3942static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
3943{
3944}
3945#endif
3946
3947#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_KSM)
3948void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
3949 struct vm_area_struct *vma, struct list_head *to_kill,
3950 unsigned long ksm_addr);
3951#endif
3952
3953#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
3954extern void memblk_nr_poison_inc(unsigned long pfn);
3955extern void memblk_nr_poison_sub(unsigned long pfn, long i);
3956#else
3957static inline void memblk_nr_poison_inc(unsigned long pfn)
3958{
3959}
3960
3961static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
3962{
3963}
3964#endif
3965
3966#ifndef arch_memory_failure
3967static inline int arch_memory_failure(unsigned long pfn, int flags)
3968{
3969 return -ENXIO;
3970}
3971#endif
3972
3973#ifndef arch_is_platform_page
3974static inline bool arch_is_platform_page(u64 paddr)
3975{
3976 return false;
3977}
3978#endif
3979
/*
 * Error handlers for various types of pages.
 */
3983enum mf_result {
3984 MF_IGNORED,
3985 MF_FAILED,
3986 MF_DELAYED,
3987 MF_RECOVERED,
3988};
3989
3990enum mf_action_page_type {
3991 MF_MSG_KERNEL,
3992 MF_MSG_KERNEL_HIGH_ORDER,
3993 MF_MSG_SLAB,
3994 MF_MSG_DIFFERENT_COMPOUND,
3995 MF_MSG_HUGE,
3996 MF_MSG_FREE_HUGE,
3997 MF_MSG_UNMAP_FAILED,
3998 MF_MSG_DIRTY_SWAPCACHE,
3999 MF_MSG_CLEAN_SWAPCACHE,
4000 MF_MSG_DIRTY_MLOCKED_LRU,
4001 MF_MSG_CLEAN_MLOCKED_LRU,
4002 MF_MSG_DIRTY_UNEVICTABLE_LRU,
4003 MF_MSG_CLEAN_UNEVICTABLE_LRU,
4004 MF_MSG_DIRTY_LRU,
4005 MF_MSG_CLEAN_LRU,
4006 MF_MSG_TRUNCATED_LRU,
4007 MF_MSG_BUDDY,
4008 MF_MSG_DAX,
4009 MF_MSG_UNSPLIT_THP,
4010 MF_MSG_UNKNOWN,
4011};
4012
4013#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
4014extern void clear_huge_page(struct page *page,
4015 unsigned long addr_hint,
4016 unsigned int pages_per_huge_page);
4017int copy_user_large_folio(struct folio *dst, struct folio *src,
4018 unsigned long addr_hint,
4019 struct vm_area_struct *vma);
4020long copy_folio_from_user(struct folio *dst_folio,
4021 const void __user *usr_src,
4022 bool allow_pagefault);
4023
/**
 * vma_is_special_huge - Are transhuge page-table entries considered special?
 * @vma: Pointer to the struct vm_area_struct to consider
 *
 * Whether transhuge page-table entries are considered "special" following
 * the definition in vm_normal_page().
 *
 * Return: true if transhuge page-table entries should be considered special,
 * false otherwise.
 */
4034static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
4035{
4036 return vma_is_dax(vma) || (vma->vm_file &&
4037 (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
4038}
4039
4040#endif
4041
4042#if MAX_NUMNODES > 1
4043void __init setup_nr_node_ids(void);
4044#else
4045static inline void setup_nr_node_ids(void) {}
4046#endif
4047
4048extern int memcmp_pages(struct page *page1, struct page *page2);
4049
4050static inline int pages_identical(struct page *page1, struct page *page2)
4051{
4052 return !memcmp_pages(page1, page2);
4053}
4054
4055#ifdef CONFIG_MAPPING_DIRTY_HELPERS
4056unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
4057 pgoff_t first_index, pgoff_t nr,
4058 pgoff_t bitmap_pgoff,
4059 unsigned long *bitmap,
4060 pgoff_t *start,
4061 pgoff_t *end);
4062
4063unsigned long wp_shared_mapping_range(struct address_space *mapping,
4064 pgoff_t first_index, pgoff_t nr);
4065#endif
4066
4067extern int sysctl_nr_trim_pages;
4068
4069#ifdef CONFIG_PRINTK
4070void mem_dump_obj(void *object);
4071#else
4072static inline void mem_dump_obj(void *object) {}
4073#endif
4074
/**
 * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
 *                    handle them.
 * @seals: the seals to check
 * @vma: the vma to operate on
 *
 * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do the
 * proper check/handling on the vma flags.  Return 0 if the check passes, or
 * <0 for errors.
 */
4084static inline int seal_check_write(int seals, struct vm_area_struct *vma)
4085{
4086 if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		/*
		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
		 * write seals are active.
		 */
4091 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
4092 return -EPERM;
4093
		/*
		 * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
		 * MAP_SHARED and read-only, take care to not allow mprotect to
		 * revert protections on such mappings.  Do this only for shared
		 * mappings; for private mappings there is no need to mask
		 * VM_MAYWRITE as we still want them to be COW-writable.
		 */
4101 if (vma->vm_flags & VM_SHARED)
4102 vm_flags_clear(vma, VM_MAYWRITE);
4103 }
4104
4105 return 0;
4106}
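
/*
 * Illustrative sketch (example only, my_get_seals() is hypothetical): an
 * ->mmap() implementation for a sealable file rejects or downgrades writable
 * mappings via seal_check_write():
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		int ret = seal_check_write(my_get_seals(file), vma);
 *
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */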
4107
4108#ifdef CONFIG_ANON_VMA_NAME
4109int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4110 unsigned long len_in,
4111 struct anon_vma_name *anon_name);
4112#else
4113static inline int
4114madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4115 unsigned long len_in, struct anon_vma_name *anon_name) {
4116 return 0;
4117}
4118#endif
4119
4120#ifdef CONFIG_UNACCEPTED_MEMORY
4121
4122bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end);
4123void accept_memory(phys_addr_t start, phys_addr_t end);
4124
4125#else
4126
4127static inline bool range_contains_unaccepted_memory(phys_addr_t start,
4128 phys_addr_t end)
4129{
4130 return false;
4131}
4132
4133static inline void accept_memory(phys_addr_t start, phys_addr_t end)
4134{
4135}
4136
4137#endif
4138
4139static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
4140{
4141 phys_addr_t paddr = pfn << PAGE_SHIFT;
4142
4143 return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
4144}
4145
#endif /* _LINUX_MM_H */
4147