linux/arch/s390/include/asm/hugetlb.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2008
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <linux/pgtable.h>
#include <asm/page.h>

#define hugetlb_free_pgd_range                  free_pgd_range
/* hugetlb requires the enhanced-DAT facility 1 (EDAT1) for 1 MB pages */
#define hugepages_supported()                   (MACHINE_HAS_EDAT1)

/*
 * Implemented in arch/s390/mm/hugetlbpage.c, which converts between the
 * generic pte layout and s390 segment or region-third table entries.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
        clear_bit(PG_arch_1, &page->flags);
}
#define arch_clear_hugepage_flags arch_clear_hugepage_flags

static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, unsigned long sz)
{
        /* A region-third table entry maps a 2 GB page, a segment entry 1 MB */
        if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
        else
                pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
{
        int changed = !pte_same(huge_ptep_get(ptep), pte);
        if (changed) {
                huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
                set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
        }
        return changed;
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
        set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}

static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
        return mk_pte(page, pgprot);
}

static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}

static inline int huge_pte_write(pte_t pte)
{
        return pte_write(pte);
}

static inline int huge_pte_dirty(pte_t pte)
{
        return pte_dirty(pte);
}

static inline pte_t huge_pte_mkwrite(pte_t pte)
{
        return pte_mkwrite(pte);
}

static inline pte_t huge_pte_mkdirty(pte_t pte)
{
        return pte_mkdirty(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}

static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
        return pte_modify(pte, newprot);
}

static inline bool gigantic_page_runtime_supported(void)
{
        return true;
}

#endif /* _ASM_S390_HUGETLB_H */
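
For context, here is a minimal sketch of the kind of generic caller these hooks exist for: a handler that re-reads a huge PTE, marks it writable and dirty, and lets the architecture re-install the entry only if it actually changed. This is an illustration, not the actual mm/hugetlb.c fault path; the function name is made up for the example, and the caller is assumed to hold the page table lock.

#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * Illustrative sketch only: mimics how a write fault on an existing
 * hugetlb mapping would update the entry through the arch hooks above.
 */
static void example_make_huge_pte_writable(struct vm_area_struct *vma,
                                           unsigned long addr, pte_t *ptep)
{
        pte_t entry = huge_ptep_get(ptep);

        /* Set the software write and dirty bits on the copied pte value. */
        entry = huge_pte_mkdirty(huge_pte_mkwrite(entry));

        /*
         * On s390, huge_ptep_set_access_flags() clears the old segment or
         * region entry and installs the new one only when the pte changed.
         */
        if (huge_ptep_set_access_flags(vma, addr, ptep, entry, 1))
                update_mmu_cache(vma, addr, ptep);
}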