linux/include/linux/huge_mm.h
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

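/*
 * Huge-pmd counterparts of the pte-level fault, fork-copy, follow_page,
 * zap, mincore, mremap-move and mprotect paths.  Generic mm code calls
 * these once pmd_trans_huge() reports a transparent huge pmd.
 */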
extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                      struct vm_area_struct *vma,
                                      unsigned long address, pmd_t *pmd,
                                      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
                                          pmd_t *pmd,
                                          unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned char *vec);
extern int move_huge_pmd(struct vm_area_struct *vma,
                         struct vm_area_struct *new_vma,
                         unsigned long old_addr,
                         unsigned long new_addr, unsigned long old_end,
                         pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot);

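/*
 * Bit numbers for transparent_hugepage_flags.  The first two mirror the
 * sysfs "enabled" setting (always vs. madvise-only), the DEFRAG flags
 * mirror "defrag", and the KHUGEPAGED flag is khugepaged's own defrag
 * toggle.  DEBUG_COW exists only with CONFIG_DEBUG_VM.
 */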
enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

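/*
 * page_check_address_pmd() returns the pmd that maps @page in @mm at
 * @address, or NULL if no such mapping exists.  The flag selects whether
 * the pmd must be, must not be, or may be in the middle of a split.
 */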
enum page_check_address_pmd_flag {
        PAGE_CHECK_ADDRESS_PMD_FLAG,
        PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
        PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
                                     struct mm_struct *mm,
                                     unsigned long address,
                                     enum page_check_address_pmd_flag flag);

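/*
 * HPAGE_PMD_NR is the number of base pages covered by one huge pmd
 * mapping, e.g. 2MB / 4KB = 512 on x86-64.
 */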
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT HPAGE_SHIFT
#define HPAGE_PMD_MASK HPAGE_MASK
#define HPAGE_PMD_SIZE HPAGE_SIZE

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

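/*
 * True when this vma may be backed by transparent huge pages: THP is
 * either enabled system-wide, or enabled for madvise(MADV_HUGEPAGE)
 * regions and the vma has VM_HUGEPAGE set.  Vmas marked VM_NOHUGEPAGE
 * and the temporary stack used during exec are always excluded.
 */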
#define transparent_hugepage_enabled(__vma)                             \
        ((transparent_hugepage_flags &                                  \
          (1<<TRANSPARENT_HUGEPAGE_FLAG) ||                             \
          (transparent_hugepage_flags &                                 \
           (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&                   \
           ((__vma)->vm_flags & VM_HUGEPAGE))) &&                       \
         !((__vma)->vm_flags & VM_NOHUGEPAGE) &&                        \
         !is_vma_temporary_stack(__vma))
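/*
 * True when a huge page allocation for this vma is worth the latency of
 * direct reclaim/compaction, according to the "defrag" sysfs setting.
 */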
#define transparent_hugepage_defrag(__vma)                              \
        ((transparent_hugepage_flags &                                  \
          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||                     \
         (transparent_hugepage_flags &                                  \
          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&             \
          (__vma)->vm_flags & VM_HUGEPAGE))
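/*
 * With CONFIG_DEBUG_VM, the debug_cow knob makes huge-page COW behave as
 * if the huge allocation failed, exercising the fallback path that copies
 * the data into normal pages.
 */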
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()                                \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                          pmd_t *dst_pmd, pmd_t *src_pmd,
                          struct vm_area_struct *vma,
                          unsigned long addr, unsigned long end);
extern int handle_pte_fault(struct mm_struct *mm,
                            struct vm_area_struct *vma, unsigned long address,
                            pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
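/*
 * Split the huge pmd back to normal ptes before code that only knows how
 * to deal with pte mappings touches it.  The pmd_trans_huge() check keeps
 * the common (non-huge) case to a single branch.
 */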
#define split_huge_page_pmd(__mm, __pmd)                                \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (unlikely(pmd_trans_huge(*____pmd)))                 \
                        __split_huge_page_pmd(__mm, ____pmd);           \
        } while (0)
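/*
 * __split_huge_page() holds the anon_vma lock for the whole duration of
 * the split, so taking and releasing that lock here is sufficient to wait
 * for a split that pmd_trans_splitting() observed to be in progress.
 */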
#define wait_split_huge_page(__anon_vma, __pmd)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                anon_vma_lock(__anon_vma);                              \
                anon_vma_unlock(__anon_vma);                            \
                BUG_ON(pmd_trans_splitting(*____pmd) ||                 \
                       pmd_trans_huge(*____pmd));                       \
        } while (0)
#if HPAGE_PMD_ORDER > MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
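/*
 * Backs madvise(MADV_HUGEPAGE) and madvise(MADV_NOHUGEPAGE): updates
 * VM_HUGEPAGE/VM_NOHUGEPAGE in *vm_flags for the vma.
 */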
extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
                                    unsigned long start,
                                    unsigned long end,
                                    long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd,
                                 struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
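/*
 * Returns 1 with mm->page_table_lock held when *pmd maps a stable
 * transparent huge page; any other return value means the lock is not
 * held and the caller must fall back to the pte-level code.
 *
 * Illustrative caller pattern (not taken verbatim from any one caller):
 *
 *      if (pmd_trans_huge_lock(pmd, vma) == 1) {
 *              ... operate on the huge pmd ...
 *              spin_unlock(&vma->vm_mm->page_table_lock);
 *      }
 */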
static inline int pmd_trans_huge_lock(pmd_t *pmd,
                                      struct vm_area_struct *vma)
{
        VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
        if (pmd_trans_huge(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
        if (!vma->anon_vma || vma->vm_ops)
                return;
        __vma_adjust_trans_huge(vma, start, end, adjust_next);
}
static inline int hpage_nr_pages(struct page *page)
{
        if (unlikely(PageTransHuge(page)))
                return HPAGE_PMD_NR;
        return 1;
}
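/*
 * Like compound_head(), but safe to use on a page that might be split or
 * freed underneath us: PageTail is re-checked after first_page has been
 * read, as explained in the comment below.
 */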
static inline struct page *compound_trans_head(struct page *page)
{
        if (PageTail(page)) {
                struct page *head;
                head = page->first_page;
                smp_rmb();
                /*
                 * head may be a dangling pointer.
                 * __split_huge_page_refcount clears PageTail before
                 * overwriting first_page, so if PageTail is still
                 * there it means the head pointer isn't dangling.
                 */
                if (PageTail(page))
                        return head;
        }
        return page;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
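/*
 * Stubs so generic mm code can call the THP hooks unconditionally.  Paths
 * that cannot be reached without CONFIG_TRANSPARENT_HUGEPAGE (such as
 * hugepage_madvise()) simply BUG().
 */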
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int split_huge_page(struct page *page)
{
        return 0;
}
#define split_huge_page_pmd(__mm, __pmd)        \
        do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd) \
        do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd,
                                      struct vm_area_struct *vma)
{
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */