linux/arch/s390/include/asm/page.h
<<
>>
Prefs
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 *  S390 version
   4 *    Copyright IBM Corp. 1999, 2000
   5 *    Author(s): Hartmut Penner (hp@de.ibm.com)
   6 */
   7
   8#ifndef _S390_PAGE_H
   9#define _S390_PAGE_H
  10
  11#include <linux/const.h>
  12#include <asm/types.h>
  13
/* Base page geometry: 4K pages (_AC attaches the UL suffix only in C). */
#define _PAGE_SHIFT     12
#define _PAGE_SIZE      (_AC(1, UL) << _PAGE_SHIFT)
#define _PAGE_MASK      (~(_PAGE_SIZE - 1))

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT      _PAGE_SHIFT
#define PAGE_SIZE       _PAGE_SIZE
#define PAGE_MASK       _PAGE_MASK
/*
 * Default storage-key access-control bits; the full key is the ACC bits
 * shifted into bit positions 4-7, so ACC == 0 yields key 0.
 */
#define PAGE_DEFAULT_ACC        0
#define PAGE_DEFAULT_KEY        (PAGE_DEFAULT_ACC << 4)

/* Huge pages are 1M (2^20), i.e. order 8: 256 base pages per huge page. */
#define HPAGE_SHIFT     20
#define HPAGE_SIZE      (1UL << HPAGE_SHIFT)
#define HPAGE_MASK      (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE         2
  30
/*
 * Hugetlb hooks the architecture provides itself; the corresponding
 * definitions live elsewhere in the s390 mm code.
 */
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

/* s390 supplies its own hugetlb_get_unmapped_area() implementation. */
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
  37
  38#include <asm/setup.h>
  39#ifndef __ASSEMBLY__
  40
  41void __storage_key_init_range(unsigned long start, unsigned long end);
  42
  43static inline void storage_key_init_range(unsigned long start, unsigned long end)
  44{
  45        if (PAGE_DEFAULT_KEY != 0)
  46                __storage_key_init_range(start, end);
  47}
  48
  49#define clear_page(page)        memset((page), 0, PAGE_SIZE)
  50
/*
 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages
 * this keeps L1 and L2 data caches alive.
 */
static inline void copy_page(void *to, void *from)
{
        union register_pair dst, src;

        /* MVCL operand pairs: even register = address, odd = length. */
        dst.even = (unsigned long) to;
        dst.odd  = 0x1000;              /* destination length: one 4K page */
        src.even = (unsigned long) from;
        /* source length 0x1000 with the 0xb0 padding byte in the position
         * MVCL expects it (above the 24-bit length field) */
        src.odd  = 0xb0001000;

        /* Both pairs are read-modify-write operands ("+&d"): MVCL updates
         * the addresses/lengths as it copies. */
        asm volatile(
                "       mvcl    %[dst],%[src]"
                : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
                : : "memory", "cc");
}
  70
/*
 * The user-page variants ignore the user virtual address and the page
 * struct and are plain page clears/copies.
 */
#define clear_user_page(page, vaddr, pg)        clear_page(page)
#define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)

/* Allocate a zeroed, movable page for a userspace mapping at @vaddr. */
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
        alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
  77
/*
 * These are used to make use of C type-checking..
 *
 * Each page-table level gets its own single-member wrapper struct so the
 * different entry types cannot be mixed up by accident.
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;

/* Unwrap the raw unsigned long value from a typed entry. */
#define pgprot_val(x)   ((x).pgprot)
#define pgste_val(x)    ((x).pgste)
#define pte_val(x)      ((x).pte)
#define pmd_val(x)      ((x).pmd)
#define pud_val(x)      ((x).pud)
#define p4d_val(x)      ((x).p4d)
#define pgd_val(x)      ((x).pgd)

/* Wrap a raw value into the corresponding typed entry. */
#define __pgste(x)      ((pgste_t) { (x) } )
#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pud(x)        ((pud_t) { (x) } )
#define __p4d(x)        ((p4d_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )
 106
/*
 * page_set_storage_key - set the storage key of the page frame at @addr
 * @addr:   address identifying the page frame
 * @skey:   storage key byte to install
 * @mapped: non-zero if the page may currently be mapped
 *
 * For unmapped pages an SSKE variant is emitted via .insn (opcode
 * 0xb22b, m3 field = 8) -- NOTE(review): presumably the nonquiescing
 * form; confirm against the z/Architecture Principles of Operation.
 * Mapped pages use the plain "sske" instruction.
 */
static inline void page_set_storage_key(unsigned long addr,
                                        unsigned char skey, int mapped)
{
        if (!mapped)
                asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
                             : : "d" (skey), "a" (addr));
        else
                asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
 116
/*
 * page_get_storage_key - read the storage key of the page frame at @addr
 *
 * Uses ISKE (insert storage key extended) to fetch the key byte of the
 * frame containing @addr.
 */
static inline unsigned char page_get_storage_key(unsigned long addr)
{
        unsigned char skey;

        asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
        return skey;
}
 124
/*
 * page_reset_referenced - reset the hardware referenced bit of a frame
 *
 * RRBE clears the referenced bit in the storage key and sets the
 * condition code according to the previous referenced/changed state;
 * IPM + SRL shift that condition code (0..3) into the return value.
 */
static inline int page_reset_referenced(unsigned long addr)
{
        int cc;

        asm volatile(
                "       rrbe    0,%1\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc) : "a" (addr) : "cc");
        return cc;
}
 136
/* Bits in the storage key */
#define _PAGE_CHANGED           0x02    /* HW changed bit               */
#define _PAGE_REFERENCED        0x04    /* HW referenced bit            */
#define _PAGE_FP_BIT            0x08    /* HW fetch protection bit      */
#define _PAGE_ACC_BITS          0xf0    /* HW access control bits       */
 142
struct page;
/* Per-page state hooks, implemented elsewhere in the s390 mm code. */
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_dat(struct page *page, int order);
void arch_set_page_nodat(struct page *page, int order);
int arch_test_page_nodat(struct page *page);
void arch_set_page_states(int make_stable);
 150
 151static inline int devmem_is_allowed(unsigned long pfn)
 152{
 153        return 0;
 154}
 155
/* Tell the page allocator the arch_free_page()/arch_alloc_page() hooks
 * declared above exist. */
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

/* NOTE(review): only built with page-table extensions (CONFIG_PGSTE) --
 * presumably needed for guest-backed pages; confirm against the callers. */
#if IS_ENABLED(CONFIG_PGSTE)
int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif
 163
 164#endif /* !__ASSEMBLY__ */
 165
/* The kernel runs with an identity mapping: virtual == physical. */
#define __PAGE_OFFSET           0x0UL
#define PAGE_OFFSET             0x0UL

/* With PAGE_OFFSET == 0, __pa()/__va() are plain casts. */
#define __pa(x)                 ((unsigned long)(x))
#define __va(x)                 ((void *)(unsigned long)(x))

/* Conversions between physical addresses and page frame numbers. */
#define phys_to_pfn(phys)       ((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn)        ((pfn) << PAGE_SHIFT)

#define phys_to_page(phys)      pfn_to_page(phys_to_pfn(phys))
#define page_to_phys(page)      pfn_to_phys(page_to_pfn(page))

/* pfn <-> kernel virtual address, built on the identity mapping above. */
#define pfn_to_virt(pfn)        __va(pfn_to_phys(pfn))
#define virt_to_pfn(kaddr)      (phys_to_pfn(__pa(kaddr)))
#define pfn_to_kaddr(pfn)       pfn_to_virt(pfn)

#define virt_to_page(kaddr)     pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page)      pfn_to_virt(page_to_pfn(page))

#define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))

/* Data mappings are non-executable by default. */
#define VM_DATA_DEFAULT_FLAGS   VM_DATA_FLAGS_NON_EXEC
 188
 189#include <asm-generic/memory_model.h>
 190#include <asm-generic/getorder.h>
 191
 192#endif /* _S390_PAGE_H */
 193
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.