1
2#ifndef _LINUX_KHUGEPAGED_H
3#define _LINUX_KHUGEPAGED_H
4
5#include <linux/sched/coredump.h>
6#include <linux/shmem_fs.h>
7
8
9#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* sysfs attribute group for the khugepaged tunables (defined elsewhere). */
extern struct attribute_group khugepaged_attr_group;

/* Lifecycle and control entry points for the khugepaged daemon. */
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
/* Register/unregister an mm with khugepaged; used by the inline wrappers below. */
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
/* Re-check a VMA for khugepaged eligibility after its flags changed on merge. */
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
				      unsigned long vm_flags);
/* Notify khugepaged that min_free_kbytes changed — presumably so it can
 * recompute its reclaim-related thresholds; see the out-of-line definition. */
extern void khugepaged_min_free_kbytes_update(void);
#ifdef CONFIG_SHMEM
/* Collapse a PTE-mapped compound page at @addr back into a huge mapping. */
extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
#else
/* Without shmem there are no PTE-mapped THPs of this kind: no-op stub. */
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
#endif
28
/*
 * Predicates over the global transparent_hugepage_flags bitmask.
 *
 * khugepaged_enabled():  THP collapse is enabled at all, either system-wide
 *                        (TRANSPARENT_HUGEPAGE_FLAG) or restricted to
 *                        madvise(MADV_HUGEPAGE) regions (REQ_MADV_FLAG).
 */
#define khugepaged_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
/* THP is enabled system-wide ("always" mode). */
#define khugepaged_always()					       \
	(transparent_hugepage_flags &				       \
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
/* THP is enabled only for VMAs that requested it via madvise(). */
#define khugepaged_req_madv()					       \
	(transparent_hugepage_flags &				       \
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
/* khugepaged may enter direct-reclaim/compaction when allocating huge pages. */
#define khugepaged_defrag()					       \
	(transparent_hugepage_flags &				       \
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
42
43static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
44{
45 if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
46 return __khugepaged_enter(mm);
47 return 0;
48}
49
50static inline void khugepaged_exit(struct mm_struct *mm)
51{
52 if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
53 __khugepaged_exit(mm);
54}
55
56static inline int khugepaged_enter(struct vm_area_struct *vma,
57 unsigned long vm_flags)
58{
59 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
60 if ((khugepaged_always() ||
61 (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
62 (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
63 !(vm_flags & VM_NOHUGEPAGE) &&
64 !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
65 if (__khugepaged_enter(vma->vm_mm))
66 return -ENOMEM;
67 return 0;
68}
69#else
/*
 * !CONFIG_TRANSPARENT_HUGEPAGE: khugepaged does not exist, so every hook
 * collapses to a no-op.  The int-returning variants report success (0).
 */
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	return 0;
}
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					     unsigned long vm_flags)
{
	return 0;
}
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}

static inline void khugepaged_min_free_kbytes_update(void)
{
}
95#endif
96
97#endif
98