linux/arch/powerpc/include/asm/book3s/32/pgtable.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
   3#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
   4
   5#include <asm-generic/pgtable-nopmd.h>
   6
   7#include <asm/book3s/32/hash.h>
   8
   9/* And here we include common definitions */
  10
  11#define _PAGE_KERNEL_RO         0
  12#define _PAGE_KERNEL_ROX        (_PAGE_EXEC)
  13#define _PAGE_KERNEL_RW         (_PAGE_DIRTY | _PAGE_RW)
  14#define _PAGE_KERNEL_RWX        (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
  15
  16#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
  17
  18#ifndef __ASSEMBLY__
  19
  20static inline bool pte_user(pte_t pte)
  21{
  22        return pte_val(pte) & _PAGE_USER;
  23}
  24#endif /* __ASSEMBLY__ */
  25
  26/*
  27 * Location of the PFN in the PTE. Most 32-bit platforms use the same
  28 * as _PAGE_SHIFT here (ie, naturally aligned).
   29 * Platforms that differ pre-define the value themselves, so we don't override it here.
  30 */
  31#define PTE_RPN_SHIFT   (PAGE_SHIFT)
  32
  33/*
  34 * The mask covered by the RPN must be a ULL on 32-bit platforms with
  35 * 64-bit PTEs.
  36 */
  37#ifdef CONFIG_PTE_64BIT
  38#define PTE_RPN_MASK    (~((1ULL << PTE_RPN_SHIFT) - 1))
  39#define MAX_POSSIBLE_PHYSMEM_BITS 36
  40#else
  41#define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))
  42#define MAX_POSSIBLE_PHYSMEM_BITS 32
  43#endif
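
/*
 * Illustrative sketch (not part of the kernel API): with 4K pages
 * (PAGE_SHIFT == 12) and CONFIG_PTE_64BIT, the definitions above work
 * out to PTE_RPN_SHIFT == 12 and PTE_RPN_MASK == ~0xfffULL, and a
 * 36-bit physical address keeps its page frame number in PTE bits
 * 12..35.  A hypothetical helper (example_pte_phys is a made-up name)
 * extracting the physical address from a raw PTE value could look like:
 *
 *	static inline phys_addr_t example_pte_phys(pte_basic_t val)
 *	{
 *		return val & PTE_RPN_MASK;	/* keep only the RPN bits */
 *	}
 *
 * The real accessors, pfn_pte() and pte_pfn(), are defined further down.
 */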
  44
  45/*
   46 * _PAGE_CHG_MASK masks the bits that are to be preserved across
  47 * pgprot changes.
  48 */
  49#define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
  50                         _PAGE_ACCESSED | _PAGE_SPECIAL)
  51
  52/*
  53 * We define 2 sets of base prot bits, one for basic pages (ie,
  54 * cacheable kernel and user pages) and one for non cacheable
  55 * pages. We always set _PAGE_COHERENT when SMP is enabled or
  56 * the processor might need it for DMA coherency.
  57 */
  58#define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED)
  59#define _PAGE_BASE      (_PAGE_BASE_NC | _PAGE_COHERENT)
  60
  61/*
  62 * Permission masks used to generate the __P and __S table.
  63 *
   64 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
  65 *
  66 * Write permissions imply read permissions for now.
  67 */
  68#define PAGE_NONE       __pgprot(_PAGE_BASE)
  69#define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
  70#define PAGE_SHARED_X   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
  71#define PAGE_COPY       __pgprot(_PAGE_BASE | _PAGE_USER)
  72#define PAGE_COPY_X     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
  73#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
  74#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
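
/*
 * Worked example (illustrative only): PAGE_SHARED_X above expands to
 *
 *	_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT |
 *	_PAGE_USER | _PAGE_RW | _PAGE_EXEC
 *
 * i.e. a cacheable, user-accessible, read/write/execute mapping.
 */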
  75
  76/* Permission masks used for kernel mappings */
  77#define PAGE_KERNEL     __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
  78#define PAGE_KERNEL_NC  __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
  79#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
  80                                 _PAGE_NO_CACHE | _PAGE_GUARDED)
  81#define PAGE_KERNEL_X   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
  82#define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
  83#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
  84
  85/*
  86 * Protection used for kernel text. We want the debuggers to be able to
  87 * set breakpoints anywhere, so don't write protect the kernel text
  88 * on platforms where such control is possible.
  89 */
  90#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
  91        defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
  92#define PAGE_KERNEL_TEXT        PAGE_KERNEL_X
  93#else
  94#define PAGE_KERNEL_TEXT        PAGE_KERNEL_ROX
  95#endif
  96
  97/* Make modules code happy. We don't set RO yet */
  98#define PAGE_KERNEL_EXEC        PAGE_KERNEL_X
  99
 100/* Advertise special mapping type for AGP */
 101#define PAGE_AGP                (PAGE_KERNEL_NC)
 102#define HAVE_PAGE_AGP
 103
 104#define PTE_INDEX_SIZE  PTE_SHIFT
 105#define PMD_INDEX_SIZE  0
 106#define PUD_INDEX_SIZE  0
 107#define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)
 108
 109#define PMD_CACHE_INDEX PMD_INDEX_SIZE
 110#define PUD_CACHE_INDEX PUD_INDEX_SIZE
 111
 112#ifndef __ASSEMBLY__
 113#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
 114#define PMD_TABLE_SIZE  0
 115#define PUD_TABLE_SIZE  0
 116#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
 117
 118/* Bits to mask out from a PMD to get to the PTE page */
 119#define PMD_MASKED_BITS         (PTE_TABLE_SIZE - 1)
 120#endif  /* __ASSEMBLY__ */
 121
 122#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
 123#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)
 124
 125/*
 126 * The normal case is that PTEs are 32-bits and we have a 1-page
 127 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 128 *
 129 * For any >32-bit physical address platform, we can use the following
  130 * two level page table layout where the pgdir is 8KB and the MS 11 bits
 131 * are an index to the second level table.  The combined pgdir/pmd first
 132 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 133 * -Matt
 134 */
 135/* PGDIR_SHIFT determines what a top-level page table entry can map */
 136#define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
 137#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
 138#define PGDIR_MASK      (~(PGDIR_SIZE-1))
 139
 140#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
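
/*
 * Worked example (illustrative, assuming 4K pages and the usual PTE_SHIFT
 * of 10 for 32-bit PTEs or 9 for 64-bit PTEs): PGDIR_SHIFT is 22 (32-bit
 * PTEs) or 21 (64-bit PTEs), so PGDIR_SIZE is 4MB or 2MB and PTRS_PER_PGD
 * is 1024 or 2048 respectively.  A virtual address is then split roughly
 * like this by the generic page table walkers:
 *
 *	pgd index = va >> PGDIR_SHIFT;
 *	pte index = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 */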
 141
 142#ifndef __ASSEMBLY__
 143
 144int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 145
 146#endif /* !__ASSEMBLY__ */
 147
 148/*
 149 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
  150 * value (for now) on others, from where we can start laying out the
  151 * kernel virtual space that goes below PKMAP and FIXMAP.
 152 */
 153#include <asm/fixmap.h>
 154
 155/*
 156 * ioremap_bot starts at that address. Early ioremaps move down from there,
  157 * until mem_init(), at which point this becomes the top of the vmalloc
  158 * and ioremap space.
 159 */
 160#ifdef CONFIG_HIGHMEM
 161#define IOREMAP_TOP     PKMAP_BASE
 162#else
 163#define IOREMAP_TOP     FIXADDR_START
 164#endif
 165
 166/* PPC32 shares vmalloc area with ioremap */
 167#define IOREMAP_START   VMALLOC_START
 168#define IOREMAP_END     VMALLOC_END
 169
 170/*
 171 * Just any arbitrary offset to the start of the vmalloc VM area: the
  172 * current 16MB value just means that there will be a hole of up to 16MB after the
 173 * physical memory until the kernel virtual memory starts.  That means that
 174 * any out-of-bounds memory accesses will hopefully be caught.
  175 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 176 * area for the same reason. ;)
 177 *
 178 * We no longer map larger than phys RAM with the BATs so we don't have
 179 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 180 * about clashes between our early calls to ioremap() that start growing down
 181 * from ioremap_base being run into the VM area allocations (growing upwards
 182 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 183 * we actually run into our mappings setup in the early boot with the VM
 184 * system.  This really does become a problem for machines with good amounts
 185 * of RAM.  -- Cort
 186 */
 187#define VMALLOC_OFFSET (0x1000000) /* 16M */
 188
 189#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
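
/*
 * Worked example (illustrative only): with a hypothetical high_memory of
 * 0xc8000000, VMALLOC_START becomes
 *
 *	(0xc8000000 + 0x1000000) & ~0xffffff == 0xc9000000
 *
 * i.e. a full 16MB guard gap; for an unaligned high_memory the gap is
 * correspondingly smaller, but never zero.
 */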
 190
 191#ifdef CONFIG_KASAN_VMALLOC
 192#define VMALLOC_END     ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
 193#else
 194#define VMALLOC_END     ioremap_bot
 195#endif
 196
 197#define MODULES_END     ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
 198#define MODULES_VADDR   (MODULES_END - SZ_256M)
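
/*
 * Worked example (illustrative only): with the common PAGE_OFFSET of
 * 0xc0000000, MODULES_END is ALIGN_DOWN(0xc0000000, SZ_256M) == 0xc0000000
 * and MODULES_VADDR is 0xb0000000, so modules get the 256MB window
 * immediately below the kernel linear mapping.
 */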
 199
 200#ifndef __ASSEMBLY__
 201#include <linux/sched.h>
 202#include <linux/threads.h>
 203
 204/* Bits to mask out from a PGD to get to the PUD page */
 205#define PGD_MASKED_BITS         0
 206
 207#define pte_ERROR(e) \
 208        pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
 209                (unsigned long long)pte_val(e))
 210#define pgd_ERROR(e) \
 211        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 212/*
 213 * Bits in a linux-style PTE.  These match the bits in the
 214 * (hardware-defined) PowerPC PTE as closely as possible.
 215 */
 216
 217#define pte_clear(mm, addr, ptep) \
 218        do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)
 219
 220#define pmd_none(pmd)           (!pmd_val(pmd))
 221#define pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
 222#define pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
 223static inline void pmd_clear(pmd_t *pmdp)
 224{
 225        *pmdp = __pmd(0);
 226}
 227
 228
 229/*
 230 * When flushing the tlb entry for a page, we also need to flush the hash
 231 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 232 */
 233extern int flush_hash_pages(unsigned context, unsigned long va,
 234                            unsigned long pmdval, int count);
 235
 236/* Add an HPTE to the hash table */
 237extern void add_hash_page(unsigned context, unsigned long va,
 238                          unsigned long pmdval);
 239
 240/* Flush an entry from the TLB/hash table */
 241static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 242{
 243        if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
 244                unsigned long ptephys = __pa(ptep) & PAGE_MASK;
 245
 246                flush_hash_pages(mm->context.id, addr, ptephys, 1);
 247        }
 248}
 249
 250/*
 251 * PTE updates. This function is called whenever an existing
 252 * valid PTE is updated. This does -not- include set_pte_at()
 253 * which nowadays only sets a new PTE.
 254 *
 255 * Depending on the type of MMU, we may need to use atomic updates
  256 * and the PTE may be either 32 or 64 bits wide. In the latter case,
 257 * when using atomic updates, only the low part of the PTE is
 258 * accessed atomically.
 259 */
 260static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
 261                                     unsigned long clr, unsigned long set, int huge)
 262{
 263        pte_basic_t old;
 264        unsigned long tmp;
 265
 266        __asm__ __volatile__(
 267#ifndef CONFIG_PTE_64BIT
 268"1:     lwarx   %0, 0, %3\n"
 269"       andc    %1, %0, %4\n"
 270#else
 271"1:     lwarx   %L0, 0, %3\n"
 272"       lwz     %0, -4(%3)\n"
 273"       andc    %1, %L0, %4\n"
 274#endif
 275"       or      %1, %1, %5\n"
 276"       stwcx.  %1, 0, %3\n"
 277"       bne-    1b"
 278        : "=&r" (old), "=&r" (tmp), "=m" (*p)
 279#ifndef CONFIG_PTE_64BIT
 280        : "r" (p),
 281#else
 282        : "b" ((unsigned long)(p) + 4),
 283#endif
 284          "r" (clr), "r" (set), "m" (*p)
 285        : "cc" );
 286
 287        return old;
 288}
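
/*
 * Illustrative sketch only (not an existing helper): a caller that wanted
 * to atomically clear _PAGE_RW and set _PAGE_DIRTY on a regular (non-huge)
 * PTE would do
 *
 *	old = pte_update(mm, addr, ptep, _PAGE_RW, _PAGE_DIRTY, 0);
 *
 * and could then inspect 'old', e.g. to see whether _PAGE_HASHPTE was set
 * and the hash entry needs flushing, as the accessors below do.
 */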
 289
 290/*
 291 * 2.6 calls this without flushing the TLB entry; this is wrong
  292 * for our hash-based implementation, so we fix that up here.
 293 */
 294#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 295static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 296                                              unsigned long addr, pte_t *ptep)
 297{
 298        unsigned long old;
 299        old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 300        if (old & _PAGE_HASHPTE)
 301                flush_hash_entry(mm, ptep, addr);
 302
 303        return (old & _PAGE_ACCESSED) != 0;
 304}
 305#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
 306        __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
 307
 308#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 309static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 310                                       pte_t *ptep)
 311{
 312        return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
 313}
 314
 315#define __HAVE_ARCH_PTEP_SET_WRPROTECT
 316static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 317                                      pte_t *ptep)
 318{
 319        pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
 320}
 321
 322static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 323                                           pte_t *ptep, pte_t entry,
 324                                           unsigned long address,
 325                                           int psize)
 326{
 327        unsigned long set = pte_val(entry) &
 328                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 329
 330        pte_update(vma->vm_mm, address, ptep, 0, set, 0);
 331
 332        flush_tlb_page(vma, address);
 333}
 334
 335#define __HAVE_ARCH_PTE_SAME
 336#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 337
 338#define pmd_page(pmd)           \
 339        pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 340
 341/*
 342 * Encode and decode a swap entry.
 343 * Note that the bits we use in a PTE for representing a swap entry
 344 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 345 *   -- paulus
 346 */
 347#define __swp_type(entry)               ((entry).val & 0x1f)
 348#define __swp_offset(entry)             ((entry).val >> 5)
 349#define __swp_entry(type, offset)       ((swp_entry_t) { (type) | ((offset) << 5) })
 350#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
 351#define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })
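
/*
 * Worked example (illustrative only): __swp_entry(3, 0x123) gives
 *
 *	val = 3 | (0x123 << 5) = 0x2463
 *
 * and __swp_entry_to_pte() shifts that left by 3, so the three low PTE
 * bits (which include _PAGE_PRESENT and _PAGE_HASHPTE as defined in
 * <asm/book3s/32/hash.h>) stay clear.  __pte_to_swp_entry() undoes the
 * shift, and __swp_type()/__swp_offset() recover 3 and 0x123 again.
 */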
 352
 353/* Generic accessors to PTE bits */
 354static inline int pte_write(pte_t pte)          { return !!(pte_val(pte) & _PAGE_RW);}
 355static inline int pte_read(pte_t pte)           { return 1; }
 356static inline int pte_dirty(pte_t pte)          { return !!(pte_val(pte) & _PAGE_DIRTY); }
 357static inline int pte_young(pte_t pte)          { return !!(pte_val(pte) & _PAGE_ACCESSED); }
 358static inline int pte_special(pte_t pte)        { return !!(pte_val(pte) & _PAGE_SPECIAL); }
 359static inline int pte_none(pte_t pte)           { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
 360static inline bool pte_exec(pte_t pte)          { return pte_val(pte) & _PAGE_EXEC; }
 361
 362static inline int pte_present(pte_t pte)
 363{
 364        return pte_val(pte) & _PAGE_PRESENT;
 365}
 366
 367static inline bool pte_hw_valid(pte_t pte)
 368{
 369        return pte_val(pte) & _PAGE_PRESENT;
 370}
 371
 372static inline bool pte_hashpte(pte_t pte)
 373{
 374        return !!(pte_val(pte) & _PAGE_HASHPTE);
 375}
 376
 377static inline bool pte_ci(pte_t pte)
 378{
 379        return !!(pte_val(pte) & _PAGE_NO_CACHE);
 380}
 381
 382/*
 383 * We only find page table entry in the last level
 384 * Hence no need for other accessors
 385 */
 386#define pte_access_permitted pte_access_permitted
 387static inline bool pte_access_permitted(pte_t pte, bool write)
 388{
 389        /*
  390         * Read access is controlled by the _PAGE_USER bit; there is no
  391         * separate read permission bit here, so pte_read() is always true.
 392         */
 393        if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
 394                return false;
 395
 396        if (write && !pte_write(pte))
 397                return false;
 398
 399        return true;
 400}
 401
 402/* Conversion functions: convert a page and protection to a page entry,
 403 * and a page entry and page directory to the page they refer to.
 404 *
 405 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 406 * long for now.
 407 */
 408static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 409{
 410        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
 411                     pgprot_val(pgprot));
 412}
 413
 414static inline unsigned long pte_pfn(pte_t pte)
 415{
 416        return pte_val(pte) >> PTE_RPN_SHIFT;
 417}
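
/*
 * Illustrative sketch only: mapping the page at physical address 0x1234000
 * (pfn 0x1234) with kernel read/write protection would be
 *
 *	pte_t pte = pfn_pte(0x1234, PAGE_KERNEL);
 *
 * and pte_pfn(pte) returns 0x1234 again, since all the protection bits
 * live below PTE_RPN_SHIFT.
 */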
 418
 419/* Generic modifiers for PTE bits */
 420static inline pte_t pte_wrprotect(pte_t pte)
 421{
 422        return __pte(pte_val(pte) & ~_PAGE_RW);
 423}
 424
 425static inline pte_t pte_exprotect(pte_t pte)
 426{
 427        return __pte(pte_val(pte) & ~_PAGE_EXEC);
 428}
 429
 430static inline pte_t pte_mkclean(pte_t pte)
 431{
 432        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
 433}
 434
 435static inline pte_t pte_mkold(pte_t pte)
 436{
 437        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
 438}
 439
 440static inline pte_t pte_mkexec(pte_t pte)
 441{
 442        return __pte(pte_val(pte) | _PAGE_EXEC);
 443}
 444
 445static inline pte_t pte_mkpte(pte_t pte)
 446{
 447        return pte;
 448}
 449
 450static inline pte_t pte_mkwrite(pte_t pte)
 451{
 452        return __pte(pte_val(pte) | _PAGE_RW);
 453}
 454
 455static inline pte_t pte_mkdirty(pte_t pte)
 456{
 457        return __pte(pte_val(pte) | _PAGE_DIRTY);
 458}
 459
 460static inline pte_t pte_mkyoung(pte_t pte)
 461{
 462        return __pte(pte_val(pte) | _PAGE_ACCESSED);
 463}
 464
 465static inline pte_t pte_mkspecial(pte_t pte)
 466{
 467        return __pte(pte_val(pte) | _PAGE_SPECIAL);
 468}
 469
 470static inline pte_t pte_mkhuge(pte_t pte)
 471{
 472        return pte;
 473}
 474
 475static inline pte_t pte_mkprivileged(pte_t pte)
 476{
 477        return __pte(pte_val(pte) & ~_PAGE_USER);
 478}
 479
 480static inline pte_t pte_mkuser(pte_t pte)
 481{
 482        return __pte(pte_val(pte) | _PAGE_USER);
 483}
 484
 485static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 486{
 487        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 488}
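
/*
 * Illustrative sketch only: pte_modify() is the helper generic code such as
 * mprotect()'s change_protection() path relies on.  For instance,
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * keeps the pfn plus the _PAGE_HASHPTE, _PAGE_DIRTY, _PAGE_ACCESSED and
 * _PAGE_SPECIAL bits (i.e. _PAGE_CHG_MASK) and takes every other bit from
 * the new protection.
 */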
 489
 490
 491
  492/* This low level function performs the actual PTE insertion.
  493 * Setting the PTE depends on the MMU type and other factors. It's
  494 * a horrible mess that I'm not going to try to clean up now, but
  495 * I'm keeping it in one place rather than spread around.
 496 */
 497static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 498                                pte_t *ptep, pte_t pte, int percpu)
 499{
 500#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 501        /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
 502         * helper pte_update() which does an atomic update. We need to do that
 503         * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
 504         * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
 505         * the hash bits instead (ie, same as the non-SMP case)
 506         */
 507        if (percpu)
 508                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
 509                              | (pte_val(pte) & ~_PAGE_HASHPTE));
 510        else
 511                pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
 512
 513#elif defined(CONFIG_PTE_64BIT)
 514        /* Second case is 32-bit with 64-bit PTE.  In this case, we
 515         * can just store as long as we do the two halves in the right order
 516         * with a barrier in between. This is possible because we take care,
 517         * in the hash code, to pre-invalidate if the PTE was already hashed,
 518         * which synchronizes us with any concurrent invalidation.
  519         * In the percpu case, we also fall back to the simple update preserving
  520         * the hash bits.
 521         */
 522        if (percpu) {
 523                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
 524                              | (pte_val(pte) & ~_PAGE_HASHPTE));
 525                return;
 526        }
 527        if (pte_val(*ptep) & _PAGE_HASHPTE)
 528                flush_hash_entry(mm, ptep, addr);
 529        __asm__ __volatile__("\
 530                stw%X0 %2,%0\n\
 531                eieio\n\
 532                stw%X1 %L2,%1"
 533        : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
 534        : "r" (pte) : "memory");
 535
 536#else
  537        /* Third case is 32-bit hash table in UP mode; we need to preserve
 538         * the _PAGE_HASHPTE bit since we may not have invalidated the previous
 539         * translation in the hash yet (done in a subsequent flush_tlb_xxx())
  540         * and so we need to keep track that this PTE needs invalidating.
 541         */
 542        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
 543                      | (pte_val(pte) & ~_PAGE_HASHPTE));
 544#endif
 545}
 546
 547/*
 548 * Macro to mark a page protection value as "uncacheable".
 549 */
 550
 551#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
 552                         _PAGE_WRITETHRU)
 553
 554#define pgprot_noncached pgprot_noncached
 555static inline pgprot_t pgprot_noncached(pgprot_t prot)
 556{
 557        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
 558                        _PAGE_NO_CACHE | _PAGE_GUARDED);
 559}
 560
 561#define pgprot_noncached_wc pgprot_noncached_wc
 562static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
 563{
 564        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
 565                        _PAGE_NO_CACHE);
 566}
 567
 568#define pgprot_cached pgprot_cached
 569static inline pgprot_t pgprot_cached(pgprot_t prot)
 570{
 571        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
 572                        _PAGE_COHERENT);
 573}
 574
 575#define pgprot_cached_wthru pgprot_cached_wthru
 576static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
 577{
 578        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
 579                        _PAGE_COHERENT | _PAGE_WRITETHRU);
 580}
 581
 582#define pgprot_cached_noncoherent pgprot_cached_noncoherent
 583static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
 584{
 585        return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
 586}
 587
 588#define pgprot_writecombine pgprot_writecombine
 589static inline pgprot_t pgprot_writecombine(pgprot_t prot)
 590{
 591        return pgprot_noncached_wc(prot);
 592}
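
/*
 * Illustrative sketch only: these helpers just rewrite the cache control
 * bits of a pgprot.  For example, the default powerpc ioremap() attributes
 * correspond to
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *
 * i.e. PAGE_KERNEL with the _PAGE_CACHE_CTL bits replaced by
 * _PAGE_NO_CACHE | _PAGE_GUARDED.
 */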
 593
 594#endif /* !__ASSEMBLY__ */
 595
 596#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */
 597