linux/arch/mips/include/asm/pgtable-64.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */
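
/*
 * Worked example (editorial illustration, not from the original
 * source): in the 4kB-page layout above, one pte maps 4kB, one pte
 * page maps 512 * 4kB = 2MB, one pmd page maps 512 * 2MB = 1GB, and
 * the 1024-entry page directory covers 1024 * 1GB = 1TB, i.e. 2^40
 * bytes of virtual address space.
 */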


/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT     (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

# ifdef __PAGETABLE_PUD_FOLDED
# define PGDIR_SHIFT    (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
# endif
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define PUD_SHIFT       (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#define PUD_SIZE        (1UL << PUD_SHIFT)
#define PUD_MASK        (~(PUD_SIZE-1))
#define PGDIR_SHIFT     (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
#endif

#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
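
/*
 * Illustration (editorial): with 4kB pages (PAGE_SHIFT == 12) and
 * PTE_ORDER == PMD_ORDER == 0, PMD_SHIFT == 12 + (12 + 0 - 3) == 21,
 * so PMD_SIZE == 2MB, and PGDIR_SHIFT == 21 + (12 + 0 - 3) == 30, so
 * each pgd entry spans PGDIR_SIZE == 1GB.
 */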

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level but it seems
 * like at the moment there's no need for this.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
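
/*
 * The figures above follow from the arithmetic (editorial summary):
 * 4kB pages:  12 + 9 (pte) + 9 (pmd) + 10 (8kB pgd) = 40 bits;
 * 8kB pages:  13 + 10 + 10 + 10 = 43 bits (8TB);
 * 16kB pages: 14 + 11 + 11 = 36 bits;
 * 64kB pages: 16 + 13 + 13 = 42 bits.
 */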
#ifdef CONFIG_PAGE_SIZE_4KB
# ifdef CONFIG_MIPS_VA_BITS_48
#  define PGD_ORDER             0
#  define PUD_ORDER             0
# else
#  define PGD_ORDER             1
#  define PUD_ORDER             aieeee_attempt_to_allocate_pud
# endif
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
#define PGD_ORDER               1
#else
#define PGD_ORDER               0
#endif
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
#define PMD_ORDER               0
#else
#define PMD_ORDER               aieeee_attempt_to_allocate_pmd
#endif
#define PTE_ORDER               0
#endif

#define PTRS_PER_PGD    ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PUD    ((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD    ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE    ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD       ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
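
/*
 * Example (editorial): with 4kB pages and no CONFIG_MIPS_VA_BITS_48,
 * PGD_ORDER == 1, so PTRS_PER_PGD == (4096 << 1) / 8 == 1024, while
 * PTRS_PER_PMD == PTRS_PER_PTE == 4096 / 8 == 512, matching the
 * layout comment at the top of this file.
 */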

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START           (MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END     \
        (MAP_BASE + \
         min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
             (1UL << cpu_vmbits)) - (1UL << 32))
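
/*
 * For instance (editorial note), with 4kB pages VMALLOC_START ==
 * MAP_BASE + 0x2000; the two unmapped pages at MAP_BASE ensure that a
 * NULL (or nearly NULL) pointer dereference which lands in the mapped
 * segment still faults instead of hitting a vmalloc mapping.
 */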

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
        VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START    CKSSEG
#define MODULE_END      (FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * For 4-level pagetables we define these ourselves, for 3-level the
 * definitions are below, and for 2-level the definitions are supplied
 * by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)      ((x).pud)
#define __pud(x)        ((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
        return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
        if (unlikely(p4d_val(p4d) & ~PAGE_MASK))
                return 1;

        return 0;
}

static inline int p4d_present(p4d_t p4d)
{
        return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
        p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
        return (pud_t *)p4d_val(p4d);
}

#define p4d_phys(p4d)           virt_to_phys((void *)p4d_val(p4d))
#define p4d_page(p4d)           (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#define p4d_index(address)      (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
        *p4d = p4dval;
}

#endif
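
/*
 * Usage sketch (editorial; assumes the generic pud_index() helper and
 * is not part of this header): a software page-table walk checks a
 * p4d entry before descending to the pud level:
 *
 *      if (p4d_none(*p4dp) || p4d_bad(*p4dp))
 *              return NULL;
 *      pudp = p4d_pgtable(*p4dp) + pud_index(addr);
 */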

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves, for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)      ((x).pmd)
#define __pmd(x)        ((pmd_t) { (x) })


extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* pmd_huge(pmd) but inline */
        if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
                return 0;
#endif

        if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
                return 1;

        return 0;
}

static inline int pmd_present(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
                return pmd_val(pmd) & _PAGE_PRESENT;
#endif

        return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
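
/*
 * Note the sentinel convention (editorial sketch; pmd_offset() is the
 * generic helper, assumed here): an empty entry holds the address of
 * the shared invalid table rather than 0, so "none" is a pointer
 * comparison, not a zero test:
 *
 *      pmd_t *pmdp = pmd_offset(pudp, addr);
 *      if (pmd_none(*pmdp))
 *              return NULL;
 */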
#ifndef __PAGETABLE_PMD_FOLDED

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
        return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
        return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
        return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
        pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)             pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)              ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)      __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)              ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)      __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)      __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
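
/*
 * Round-trip property (editorial): provided pgprot_val(prot) only
 * sets bits below _PFN_SHIFT, pte_pfn(pfn_pte(pfn, prot)) == (pfn):
 * the left shift places the pfn above the protection bits and the
 * right shift discards them again.
 */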

#ifndef __PAGETABLE_PMD_FOLDED
static inline pmd_t *pud_pgtable(pud_t pud)
{
        return (pmd_t *)pud_val(pud);
}
#define pud_phys(pud)           virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)           (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);
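
/*
 * Typical call (editorial sketch; "new_pmd" is a hypothetical freshly
 * allocated pmd page, and the real call sites live in arch/mips/mm):
 * every slot of the new table is pointed at the invalid table one
 * level down, e.g.
 *
 *      pmd_init((unsigned long)new_pmd, (unsigned long)invalid_pte_table);
 */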

/*
 * Non-present pages:  high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;

        pte_val(pte) = (type << 16) | (offset << 24);
        return pte;
}

#define __swp_type(x)           (((x).val >> 16) & 0xff)
#define __swp_offset(x)         ((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
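
/*
 * Worked example (editorial): __swp_entry(2, 0x12345) stores
 * (2 << 16) | (0x12345 << 24) == 0x12345020000; __swp_type() then
 * recovers 2 and __swp_offset() recovers 0x12345.
 */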

#endif /* _ASM_PGTABLE_64_H */