linux/arch/powerpc/include/asm/pgtable-ppc64.h
#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
#define _ASM_POWERPC_PGTABLE_PPC64_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <asm/tlbflush.h>
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-ppc64-64k.h>
#else
#include <asm/pgtable-ppc64-4k.h>
#endif

#define FIRST_USER_ADDRESS      0

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
                            PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
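/*
 * Worked example (illustrative only; the real index sizes come from the
 * pgtable-ppc64-4k.h / pgtable-ppc64-64k.h header selected above): with
 * a hypothetical split of 9 + 7 + 7 + 9 index bits over 4K pages
 * (PAGE_SHIFT = 12), PGTABLE_EADDR_SIZE is 44, so PGTABLE_RANGE is
 * 2^44 bytes, i.e. 16TB of mappable effective address space.
 */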


/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif

/*
 * Define the address range of the vmalloc VM area.
 */
#define VMALLOC_START ASM_CONST(0xD000000000000000)
#define VMALLOC_SIZE  (PGTABLE_RANGE >> 1)
#define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
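/*
 * Continuing the hypothetical 44-bit example above, VMALLOC_SIZE would
 * be 2^43 bytes (8TB) and VMALLOC_END would be 0xD000080000000000: the
 * vmalloc area takes the lower half of the 0xD region's pagetable
 * range, leaving the upper half for the IO areas defined below.
 */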

/*
 * Define the address ranges for MMIO and IO space:
 *
 *  ISA_IO_BASE = VMALLOC_END, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define FULL_IO_SIZE    0x80000000ul
#define  ISA_IO_BASE    (VMALLOC_END)
#define  ISA_IO_END     (VMALLOC_END + 0x10000ul)
#define  PHB_IO_BASE    (ISA_IO_END)
#define  PHB_IO_END     (VMALLOC_END + FULL_IO_SIZE)
#define IOREMAP_BASE    (PHB_IO_END)
#define IOREMAP_END     (VMALLOC_START + PGTABLE_RANGE)
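/*
 * Derived from the defines above: the ISA IO window is 64K
 * (ISA_IO_END - ISA_IO_BASE = 0x10000), the PHB IO windows fill the
 * remainder of the first 2G (FULL_IO_SIZE) above VMALLOC_END, and
 * ioremap gets everything from there up to the end of the pagetable
 * range.
 */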

/*
 * Region IDs
 */
#define REGION_SHIFT            60UL
#define REGION_MASK             (0xfUL << REGION_SHIFT)
#define REGION_ID(ea)           (((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID       (REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID        (REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID       (0xfUL)
#define USER_REGION_ID          (0UL)
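/*
 * Example: REGION_ID() simply extracts the top four bits of an
 * effective address, so REGION_ID(0xD000000000000000) == 0xD, which is
 * VMALLOC_REGION_ID, while any user address below 2^60 gives
 * USER_REGION_ID == 0.
 */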

/*
 * Defines the address of the vmemmap area, in its own region
 */
#define VMEMMAP_BASE            (VMEMMAP_REGION_ID << REGION_SHIFT)
#define vmemmap                 ((struct page *)VMEMMAP_BASE)


/*
 * Include the PTE bits definitions
 */
#include <asm/pte-hash64.h>
#include <asm/pte-common.h>


#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of the various PTE accessors; it is
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#ifdef STRICT_MM_TYPECHECKS
#define __real_pte(e,p)         ((real_pte_t){(e)})
#define __rpte_to_pte(r)        ((r).pte)
#else
#define __real_pte(e,p)         (e)
#define __rpte_to_pte(r)        (__pte(r))
#endif
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
        do {                                                             \
                index = 0;                                               \
                shift = mmu_psize_defs[psize].shift;                     \

#define pte_iterate_hashed_end() } while(0)

#ifdef CONFIG_PPC_HAS_HASH_64K
#define pte_pagesize_index(mm, addr, pte)       get_slice_psize(mm, addr)
#else
#define pte_pagesize_index(mm, addr, pte)       MMU_PAGE_4K
#endif

#endif /* __real_pte */
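/*
 * Usage sketch (assumed from the hash flush code, not shown in this
 * file): the iterator pair brackets a loop body that is run for each
 * hashed sub-page of a PTE; the default variant above runs the body
 * exactly once with index == 0 and shift set to the base shift of
 * 'psize':
 *
 *      pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *              ... flush the hash slot for (va, index, shift) ...
 *      } pte_iterate_hashed_end();
 */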


/* pte_clear moved to later in this file */

#define PMD_BAD_BITS            (PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS            (PMD_TABLE_SIZE-1)

#define pmd_set(pmdp, pmdval)   (pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            (!is_kernel_addr(pmd_val(pmd)) \
                                 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)        (pmd_val(pmd) != 0)
#define pmd_clear(pmdp)         (pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd)     (pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pmd_page(pmd)           virt_to_page(pmd_page_vaddr(pmd))

#define pud_set(pudp, pudval)   (pud_val(*(pudp)) = (pudval))
#define pud_none(pud)           (!pud_val(pud))
#define pud_bad(pud)            (!is_kernel_addr(pud_val(pud)) \
                                 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)        (pud_val(pud) != 0)
#define pud_clear(pudp)         (pud_val(*(pudp)) = 0)
#define pud_page_vaddr(pud)     (pud_val(pud) & ~PUD_MASKED_BITS)
#define pud_page(pud)           virt_to_page(pud_page_vaddr(pud))

#define pgd_set(pgdp, pudp)     ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)

#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
  (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
  (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)        pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)                  do { } while(0)
#define pte_unmap_nested(pte)           do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)


/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
                                       unsigned long addr,
                                       pte_t *ptep, unsigned long clr,
                                       int huge)
{
#ifdef PTE_ATOMIC_UPDATES
        unsigned long old, tmp;

        __asm__ __volatile__(
        "1:     ldarx   %0,0,%3         # pte_update\n\
        andi.   %1,%0,%6\n\
        bne-    1b \n\
        andc    %1,%0,%4 \n\
        stdcx.  %1,0,%3 \n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
        : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
        : "cc" );
#else
        unsigned long old = pte_val(*ptep);
        *ptep = __pte(old & ~clr);
#endif
        /* huge pages use the old page table lock */
        if (!huge)
                assert_pte_locked(mm, addr);

        if (old & _PAGE_HASHPTE)
                hpte_need_flush(mm, addr, ptep, old, huge);
        return old;
}
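/*
 * Note: pte_update() spins while _PAGE_BUSY is set, atomically clears
 * the bits in 'clr', arranges for any stale hash entry to be flushed,
 * and returns the old PTE value.  The callers below show the two
 * typical uses: pte_clear() and ptep_get_and_clear() pass clr = ~0UL
 * to wipe the whole PTE, while ptep_set_wrprotect() passes only
 * _PAGE_RW to drop write permission.
 */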

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
                                              unsigned long addr, pte_t *ptep)
{
        unsigned long old;

        if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
                return 0;
        old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
        return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)                   \
({                                                                         \
        int __r;                                                           \
        __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
        __r;                                                               \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        unsigned long old;

        if ((pte_val(*ptep) & _PAGE_RW) == 0)
                return;
        old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        unsigned long old;

        if ((pte_val(*ptep) & _PAGE_RW) == 0)
                return;
        old = pte_update(mm, addr, ptep, _PAGE_RW, 1);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.  The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)                \
({                                                                      \
        int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
                                                  __ptep);              \
        __young;                                                        \
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
{
        unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
        return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t * ptep)
{
        pte_update(mm, addr, ptep, ~0UL, 0);
}


/* Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
        unsigned long bits = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
                 _PAGE_EXEC | _PAGE_HWEXEC);

#ifdef PTE_ATOMIC_UPDATES
        unsigned long old, tmp;

        __asm__ __volatile__(
        "1:     ldarx   %0,0,%4\n\
                andi.   %1,%0,%6\n\
                bne-    1b \n\
                or      %0,%3,%0\n\
                stdcx.  %0,0,%4\n\
                bne-    1b"
        :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
        :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
        :"cc");
#else
        unsigned long old = pte_val(*ptep);
        *ptep = __pte(old | bits);
#endif
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define __swp_type(entry)       (((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)     ((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val << PTE_RPN_SHIFT })
#define pte_to_pgoff(pte)       (pte_val(pte) >> PTE_RPN_SHIFT)
#define pgoff_to_pte(off)       ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS       (BITS_PER_LONG - PTE_RPN_SHIFT)
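/*
 * Worked example (illustrative): __swp_entry(3, 0x1000) packs the swap
 * type into bits 1-6 and the offset into bits 8 and up, giving
 * (3 << 1) | (0x1000 << 8) = 0x100006; __swp_type() and __swp_offset()
 * recover 3 and 0x1000 from that value.  __swp_entry_to_pte() then
 * shifts the whole thing up by PTE_RPN_SHIFT so it stays clear of the
 * low PTE flag bits.
 */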

void pgtable_cache_init(void);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns NULL.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        pte_t *pt = NULL;

        pg = pgdir + pgd_index(ea);
        if (!pgd_none(*pg)) {
                pu = pud_offset(pg, ea);
                if (!pud_none(*pu)) {
                        pm = pmd_offset(pu, ea);
                        if (pmd_present(*pm))
                                pt = pte_offset_kernel(pm, ea);
                }
        }
        return pt;
}
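/*
 * Usage sketch (illustrative, assuming the caller holds the appropriate
 * page table locks): looking up the linux PTE for an effective address
 * might look like
 *
 *      pte_t *ptep = find_linux_pte(mm->pgd, ea);
 *      if (ptep && (pte_val(*ptep) & _PAGE_PRESENT))
 *              ... use *ptep ...
 *
 * The walk returns NULL as soon as any intermediate level is empty.
 */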

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long address);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */