linux/arch/powerpc/include/asm/page.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x and 4K/16K on 8xx). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#define PAGE_SHIFT		CONFIG_PPC_PAGE_SHIFT
#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
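/*
 * For illustration: with the common CONFIG_PPC_PAGE_SHIFT of 12 this gives
 * PAGE_SIZE = 1 << 12 = 0x1000 (4K); with 64K pages (shift 16) it gives
 * PAGE_SIZE = 0x10000. ASM_CONST() keeps the constant usable from both C
 * and assembly.
 */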

#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT		19	/* 512k pages */
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define HPAGE_SHIFT		22	/* 4M pages */
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif
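/*
 * For illustration: on FSL Book3E with 4K base pages, HPAGE_SHIFT = 22 and
 * PAGE_SHIFT = 12, so HPAGE_SIZE = 4M, HPAGE_MASK = ~0x3fffff and
 * HUGETLB_PAGE_ORDER = 10, i.e. one huge page covers 1 << 10 = 1024 base
 * pages.
 */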

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
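/*
 * For illustration, with PAGE_SHIFT = 12: as an int, PAGE_MASK is
 * 0xfffff000; assigned to a u64 it sign-extends to 0xfffffffffffff000.
 * Had the constant been 1UL on a 32-bit build, the same assignment would
 * zero-extend to 0x00000000fffff000, which would not mask a 64-bit
 * phys_addr_t correctly.
 */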

/*
 * KERNELBASE is the virtual address of the start of the kernel; it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32 and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to determine a virtual address from a physical one:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */
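/*
 * For illustration: on a typical static ppc32 kernel, KERNELBASE ==
 * PAGE_OFFSET == 0xc0000000 and PHYSICAL_START == MEMORY_START == 0, so
 * both equations reduce to va = pa + 0xc0000000, e.g. physical 0x100000
 * maps to virtual 0xc0100000.
 */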

#define KERNELBASE	ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See Description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;
static inline bool pfn_valid(unsigned long pfn)
{
	unsigned long min_pfn = ARCH_PFN_OFFSET;

	return pfn >= min_pfn && pfn < max_mapnr;
}
#endif
#endif
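/*
 * For illustration: if lowmem starts at physical 256M (MEMORY_START =
 * 0x10000000) with 4K pages, ARCH_PFN_OFFSET is 0x10000, so pfn_valid()
 * rejects any pfn below 0x10000 as well as anything at or above max_mapnr.
 */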

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
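/*
 * For illustration (static ppc32, PAGE_OFFSET = 0xc0000000, MEMORY_START =
 * 0): virt_to_pfn(0xc0100000) = __pa(0xc0100000) >> 12 = 0x100000 >> 12 =
 * 0x100, and pfn_to_kaddr(0x100) maps back to 0xc0100000.
 */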

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 *   With RELOCATABLE && PPC32, we support loading the kernel at any physical
 *   address without any restriction on the page alignment.
 *
 *   We find the runtime address of _stext and relocate ourselves based on
 *   the following calculation:
 *
 *        virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
 *                              MODULO(_stext.run,256M)
 *   and create the following mapping:
 *
 *        ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
 *
 *   When we process relocations, we cannot depend on the
 *   existing equation for the __va()/__pa() translations:
 *
 *         __va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 *   Where:
 *       PHYSICAL_START = kernstart_addr = Physical address of _stext
 *       KERNELBASE = Compiled virtual address of _stext.
 *
 *   This formula holds true iff the kernel load address is TLB page aligned.
 *
 *   In our case, we need to also account for the shift in the kernel virtual
 *   address.
 *
 *   E.g.,
 *
 *   Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
 *   In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
 *
 *   Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *                 = 0xbc100000, which is wrong.
 *
 *   Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000
 *              according to our mapping.
 *
 *   Hence we use the following formula to get the translations right:
 *
 *        __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 *        Where:
 *              PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *              Effective KERNELBASE = virtual_base
 *                                   = ALIGN_DOWN(KERNELBASE,256M) +
 *                                              MODULO(PHYSICAL_START,256M)
 *
 *      To make __va() / __pa() more lightweight, we introduce
 *      a new variable virt_phys_offset, which will hold:
 *
 *      virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *                       = ALIGN_DOWN(KERNELBASE,256M) -
 *                              ALIGN_DOWN(PHYSICAL_START,256M)
 *
 *      Hence:
 *
 *      __va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *              = x + virt_phys_offset
 *
 *              and
 *      __pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *              = x - virt_phys_offset
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
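/*
 * For illustration, continuing the 64MB example above: Effective KERNELBASE
 * = ALIGN_DOWN(0xc0000000,256M) + (64M % 256M) = 0xc0000000 + 0x4000000 =
 * 0xc4000000, so virt_phys_offset = 0xc4000000 - 0x4000000 = 0xc0000000 and
 * __va(0x100000) = 0x100000 + 0xc0000000 = 0xc0100000, matching the mapping
 * described above.
 */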
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 * This also results in better code generation.
 */
#define __va(x)								\
({									\
	VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET);		\
	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET);	\
})

#define __pa(x)								\
({									\
	VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET);		\
	(unsigned long)(x) & 0x0fffffffffffffffUL;			\
})

#else /* 32-bit, non-Book-E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
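/*
 * For illustration: the OR/AND trick works because PAGE_OFFSET on ppc64 is
 * 0xc000000000000000, so a linear-map virtual address is just the physical
 * address with the top four bits set. __va() ORs those bits in and __pa()
 * masks them off with 0x0fffffffffffffffUL, with no carry/borrow for gcc to
 * miscompile (assuming linear-map physical addresses never have the top
 * nibble set).
 */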

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	VM_DATA_FLAGS_TSK_EXEC
#define VM_DATA_DEFAULT_FLAGS64	VM_DATA_FLAGS_NON_EXEC

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#elif defined(CONFIG_PPC_BOOK3S_64)
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#else
#define is_kernel_addr(x)	((x) >= TASK_SIZE)
#endif
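/*
 * For illustration: on Book3E 64 the kernel owns everything from
 * 0x8000000000000000 up, which lies below PAGE_OFFSET, so a naive
 * "addr >= PAGE_OFFSET" test would wrongly reject valid kernel addresses
 * there; is_kernel_addr() picks the right bound per platform.
 */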

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif

#else	/* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 * On 8xx, HW assistance requires 4k alignment for the hugepte.
 */
#ifdef CONFIG_PPC_8xx
#define HUGEPD_SHIFT_MASK	0xfff
#else
#define HUGEPD_SHIFT_MASK	0x3f
#endif
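/*
 * For illustration: a mask of 0x3f reserves the low 6 bits of the entry for
 * the size encoding, so the hugepte fragment only needs 64-byte alignment;
 * the 8xx mask of 0xfff reserves 12 bits, matching the 4k alignment its HW
 * assistance requires.
 */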

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep)		(0)
#define pgd_huge(pgd)		(0)
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

extern unsigned long kernstart_virt_addr;

static inline unsigned long kaslr_offset(void)
{
	return kernstart_virt_addr - KERNELBASE;
}
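/*
 * For illustration: with KASLR, kernstart_virt_addr holds the randomized
 * virtual base the kernel actually runs at, e.g. if the kernel was moved up
 * by 10M on ppc32 (kernstart_virt_addr = 0xc0a00000, KERNELBASE =
 * 0xc0000000), kaslr_offset() returns 0xa00000. Without KASLR the two are
 * equal and the offset is 0.
 */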

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>

#endif /* _ASM_POWERPC_PAGE_H */