/* linux/include/asm-generic/pgtable.h */

#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pte_t *ptep)
{
        pte_t pte = *ptep;
        int r = 1;
        if (!pte_young(pte))
                r = 0;
        else
                set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
        return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;
        int r = 1;
        if (!pmd_young(pmd))
                r = 0;
        else
                set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
        return r;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pmd_t *pmdp)
{
        BUG();
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long address,
                                       pte_t *ptep)
{
        pte_t pte = *ptep;
        pte_clear(mm, address, ptep);
        return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
                                       unsigned long address,
                                       pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;
        pmd_clear(pmdp);
        return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long address, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        return pte;
}
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or during address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
                                              unsigned long address,
                                              pte_t *ptep,
                                              int full)
{
        pte_clear(mm, address, ptep);
}
#endif
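
/*
 * Illustrative sketch (not part of the original header): how an unmap
 * path might prefer the cheaper helper for entries that are already
 * not present.  The function name and the simplified logic are
 * assumptions for illustration only; the real work happens in the zap
 * code in mm/memory.c.
 */
static inline pte_t example_zap_one_pte(struct mm_struct *mm,
                                        unsigned long addr, pte_t *ptep,
                                        int fullmm)
{
        pte_t pte = *ptep;

        if (!pte_present(pte)) {
                /* No hardware walker can see it; skip costly sync. */
                pte_clear_not_present_full(mm, addr, ptep, fullmm);
                return pte;
        }
        /* Present entries still need the atomic get-and-clear. */
        return ptep_get_and_clear_full(mm, addr, ptep, fullmm);
}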

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
                              unsigned long address,
                              pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
                              unsigned long address,
                              pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
        pte_t old_pte = *ptep;
        set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pmd_t *pmdp)
{
        pmd_t old_pmd = *pmdp;
        set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pmd_t *pmdp)
{
        BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
        return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
        BUG();
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(pfn) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)       pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif

#ifndef pte_accessible
# define pte_accessible(pte)            ((void)(pte),1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)  (prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)                                         \
({      unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;  \
        (__boundary - 1 < (end) - 1)? __boundary: (end);                \
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)                                         \
({      unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;      \
        (__boundary - 1 < (end) - 1)? __boundary: (end);                \
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)                                         \
({      unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;      \
        (__boundary - 1 < (end) - 1)? __boundary: (end);                \
})
#endif
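
/*
 * Illustrative sketch (not part of the original header): the usual
 * range-splitting idiom built on the p?d_addr_end() macros above.  The
 * walker name and the callback are assumptions for illustration; the
 * same shape appears in mm/memory.c (e.g. the free_*_range() family)
 * and mm/pagewalk.c.
 */
static inline void example_walk_pmd_range(pmd_t *pmd, unsigned long addr,
                                          unsigned long end,
                                          void (*fn)(pmd_t *pmd,
                                                     unsigned long addr,
                                                     unsigned long next))
{
        unsigned long next;

        do {
                /* Clamp to the next PMD boundary or to 'end'. */
                next = pmd_addr_end(addr, end);
                fn(pmd, addr, next);
        } while (pmd++, addr = next, addr != end);
}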

/*
 * When walking page tables, we usually want to skip any p?d_none entries
 * and any p?d_bad entries, reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
        if (pgd_none(*pgd))
                return 1;
        if (unlikely(pgd_bad(*pgd))) {
                pgd_clear_bad(pgd);
                return 1;
        }
        return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
        if (pud_none(*pud))
                return 1;
        if (unlikely(pud_bad(*pud))) {
                pud_clear_bad(pud);
                return 1;
        }
        return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
        if (pmd_none(*pmd))
                return 1;
        if (unlikely(pmd_bad(*pmd))) {
                pmd_clear_bad(pmd);
                return 1;
        }
        return 0;
}
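
/*
 * Illustrative sketch (not part of the original header): a top-level
 * walk that combines pgd_addr_end() with pgd_none_or_clear_bad() so
 * empty entries are skipped and corrupted ones are reported once and
 * reset to none.  The function name and the pud-level hook are
 * assumptions for illustration only.
 */
static inline void example_walk_pgd_range(struct mm_struct *mm,
                                          unsigned long addr,
                                          unsigned long end,
                                          void (*pud_hook)(pud_t *pud,
                                                           unsigned long addr,
                                                           unsigned long next))
{
        pgd_t *pgd = pgd_offset(mm, addr);
        unsigned long next;

        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;       /* nothing mapped here */
                pud_hook(pud_offset(pgd, addr), addr, next);
        } while (pgd++, addr = next, addr != end);
}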

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
                                             unsigned long addr,
                                             pte_t *ptep)
{
        /*
         * Get the current pte state, but zero it out to make it
         * non-present, preventing the hardware from asynchronously
         * updating it.
         */
        return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
                                             unsigned long addr,
                                             pte_t *ptep, pte_t pte)
{
        /*
         * The pte is non-present, so there's no hardware state to
         * preserve.
         */
        set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
                                           unsigned long addr,
                                           pte_t *ptep)
{
        return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                           unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        __ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
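
/*
 * Illustrative sketch (not part of the original header): the intended
 * calling pattern for the protection-update transaction.  The helper
 * name is an assumption for illustration; in-tree users such as the
 * pte protection-change loop in mm/mprotect.c follow the same
 * start/modify/commit shape with the pte lock held.
 */
static inline void example_wrprotect_transaction(struct mm_struct *mm,
                                                 unsigned long addr,
                                                 pte_t *ptep)
{
        pte_t pte;

        /* Caller holds the pte lock for this page table. */
        pte = ptep_modify_prot_start(mm, addr, ptep);
        pte = pte_wrprotect(pte);       /* modify the snapshot only */
        ptep_modify_prot_commit(mm, addr, ptep, pte);
}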
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified.  In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()      do {} while (0)
#define arch_leave_lazy_mmu_mode()      do {} while (0)
#define arch_flush_lazy_mmu_mode()      do {} while (0)
#endif
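
/*
 * Illustrative sketch (not part of the original header): bracketing a
 * batch of PTE updates with the lazy MMU hooks, as done for example in
 * mm/mprotect.c and mm/memory.c.  The function name and the callback
 * are assumptions for illustration; the caller must hold the pte lock
 * for the whole batch.
 */
static inline void example_batch_pte_updates(struct mm_struct *mm,
                                             unsigned long addr,
                                             unsigned long end,
                                             pte_t *pte,
                                             pte_t (*update)(pte_t pte))
{
        arch_enter_lazy_mmu_mode();
        do {
                /* Writes may be queued by the hypervisor until leave. */
                set_pte_at(mm, addr, pte, update(*pte));
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
}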

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entries and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev) do {} while (0)
#endif
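
/*
 * Illustrative sketch (not part of the original header): where the hook
 * sits relative to the rest of a context switch.  This is a reduced
 * rendering of the pattern used by context_switch() in
 * kernel/sched/core.c; the surrounding variables and the
 * switch_mm()/switch_to() details are elided assumptions, so the
 * fragment is kept under #if 0.
 */
#if 0   /* illustration only */
        arch_start_context_switch(prev);        /* begin batched (lazy) state */
        switch_mm(oldmm, mm, next);             /* may be queued by the hypervisor */
        switch_to(prev, next, prev);            /* arch code ends the batch */
#endif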

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by remap_pfn_range() and
 * vm_insert_pfn().
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for the physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                                  unsigned long pfn, unsigned long addr,
                                  unsigned long size)
{
        return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vm_insert_pfn().
 */
static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
                                   unsigned long pfn)
{
        return 0;
}

/*
 * track_pfn_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
        return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
                               unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                           unsigned long pfn, unsigned long addr,
                           unsigned long size);
extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
                            unsigned long pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
                        unsigned long size);
#endif
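
/*
 * Illustrative sketch (not part of the original header): how the core
 * mm uses these hooks around a pfn remap.  This mirrors the shape of
 * remap_pfn_range() in mm/memory.c in very reduced form; the helper
 * name and the elided page-table population step are assumptions.
 */
static inline int example_remap_with_tracking(struct vm_area_struct *vma,
                                              unsigned long addr,
                                              unsigned long pfn,
                                              unsigned long size,
                                              pgprot_t prot)
{
        int err;

        /* Reserve/validate the memory type; may adjust 'prot'. */
        err = track_pfn_remap(vma, &prot, pfn, addr, size);
        if (err)
                return err;

        /* ... populate the page tables for [addr, addr + size) ... */

        /* On failure the range must be untracked again:
         *      untrack_pfn(vma, pfn, size);
         */
        return 0;
}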

#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
        extern unsigned long zero_pfn;
        unsigned long offset_from_zero_pfn = pfn - zero_pfn;
        return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)       page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
        extern unsigned long zero_pfn;
        return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
        extern unsigned long zero_pfn;
        return zero_pfn;
}
#endif
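
/*
 * Illustrative sketch (not part of the original header): how a fault
 * or GUP-style path can recognize the shared zero page from a pte.
 * The helper name is an assumption; vm_normal_page() in mm/memory.c is
 * one in-tree consumer of is_zero_pfn().
 */
static inline int example_pte_is_zero_page(pte_t pte)
{
        /* The zero page has no per-page state worth following. */
        return pte_present(pte) && is_zero_pfn(pte_pfn(pte));
}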

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
        return 0;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
        return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        BUG();
        return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
        /*
         * Depend on the compiler for an atomic pmd read. NOTE: this is
         * only going to work if pmdval_t isn't larger than
         * an unsigned long.
         */
        return *pmdp;
}
#endif

/*
 * This function is meant to be used by sites walking pagetables with
 * the mmap_sem held in read mode to protect against MADV_DONTNEED and
 * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
 * into a null pmd and the transhuge page fault can convert a null pmd
 * into a hugepmd or into a regular pmd (if the hugepage allocation
 * fails). While holding the mmap_sem in read mode the pmd becomes
 * stable and stops changing under us only if it's not null and not a
 * transhuge pmd. When those races occur and this function makes a
 * difference vs the standard pmd_none_or_clear_bad, the result is
 * undefined, so behaving as if the pmd was none is safe (because it
 * can return none anyway). The compiler level barrier() is critically
 * important to compute the two checks atomically on the same pmdval.
 *
 * For 32bit kernels with a 64bit large pmd_t this automatically takes
 * care of reading the pmd atomically to avoid SMP race conditions
 * against pmd_populate() when the mmap_sem is held for reading by the
 * caller (a special atomic read not done by "gcc" as in the generic
 * version above is also needed when THP is disabled because the page
 * fault can populate the pmd from under us).
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
        pmd_t pmdval = pmd_read_atomic(pmd);
        /*
         * The barrier will stabilize the pmdval in a register or on
         * the stack so that it will stop changing under the code.
         *
         * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
         * pmd_read_atomic is allowed to return a not atomic pmdval
         * (for example pointing to a hugepage that has never been
         * mapped in the pmd). The below checks will only care about
         * the low part of the pmd with 32bit PAE x86 anyway, with the
         * exception of pmd_none(). So the important thing is that if
         * the low part of the pmd is found null, the high part will
         * also be null or the pmd_none() check below would be
         * confused.
         */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        barrier();
#endif
        if (pmd_none(pmdval))
                return 1;
        if (unlikely(pmd_bad(pmdval))) {
                if (!pmd_trans_huge(pmdval))
                        pmd_clear_bad(pmd);
                return 1;
        }
        return 0;
}

/*
 * This is a noop if Transparent Hugepage Support is not built into
 * the kernel. Otherwise it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
 * places that already verified the pmd is not none and that want to
 * walk ptes while holding the mmap_sem in read mode (write mode doesn't
 * need this). If THP is not enabled, the pmd can't go away under the
 * code even if MADV_DONTNEED runs, but if THP is enabled we need to
 * run pmd_trans_unstable before walking the ptes after
 * split_huge_page_pmd returns (because it may have run when the pmd
 * became null, but then a page fault can map in a THP and not a
 * regular page).
 */
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
        return 0;
#endif
}
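
/*
 * Illustrative sketch (not part of the original header): the check a
 * pte walker performs before mapping the pte page, mirroring in-tree
 * walkers that hold mmap_sem for reading.  The helper name and the
 * per-pte callback are assumptions for illustration.
 */
static inline void example_walk_pte_range(struct vm_area_struct *vma,
                                          pmd_t *pmd, unsigned long addr,
                                          unsigned long end,
                                          void (*fn)(pte_t pte,
                                                     unsigned long addr))
{
        pte_t *pte;
        spinlock_t *ptl;

        /* A none, huge or just-split pmd has no pte page to walk. */
        if (pmd_trans_unstable(pmd))
                return;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                fn(*pte, addr);
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl);
}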

#ifdef CONFIG_NUMA_BALANCING
#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
 * _PAGE_NUMA works identically to _PAGE_PROTNONE (it's actually the
 * same bit too). It's set only when _PAGE_PRESENT is not set and it's
 * never set if _PAGE_PRESENT is set.
 *
 * pte/pmd_present() returns true if pte/pmd_numa returns true. A page
 * fault triggers on those regions if pte/pmd_numa returns true
 * (because _PAGE_PRESENT is not set).
 */
#ifndef pte_numa
static inline int pte_numa(pte_t pte)
{
        return (pte_flags(pte) &
                (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
}
#endif

#ifndef pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
        return (pmd_flags(pmd) &
                (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
}
#endif

/*
 * pte/pmd_mknonnuma sets the _PAGE_ACCESSED bitflag automatically
 * because they're called by the NUMA hinting minor page fault. If we
 * didn't set the _PAGE_ACCESSED bitflag here, the TLB miss handler
 * would be forced to set it later while filling the TLB after we
 * return to userland. That would trigger a second write to memory
 * that we optimize away by setting _PAGE_ACCESSED here.
 */
#ifndef pte_mknonnuma
static inline pte_t pte_mknonnuma(pte_t pte)
{
        pte = pte_clear_flags(pte, _PAGE_NUMA);
        return pte_set_flags(pte, _PAGE_PRESENT|_PAGE_ACCESSED);
}
#endif

#ifndef pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
        pmd = pmd_clear_flags(pmd, _PAGE_NUMA);
        return pmd_set_flags(pmd, _PAGE_PRESENT|_PAGE_ACCESSED);
}
#endif

#ifndef pte_mknuma
static inline pte_t pte_mknuma(pte_t pte)
{
        pte = pte_set_flags(pte, _PAGE_NUMA);
        return pte_clear_flags(pte, _PAGE_PRESENT);
}
#endif

#ifndef pmd_mknuma
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
        pmd = pmd_set_flags(pmd, _PAGE_NUMA);
        return pmd_clear_flags(pmd, _PAGE_PRESENT);
}
#endif
#else
extern int pte_numa(pte_t pte);
extern int pmd_numa(pmd_t pmd);
extern pte_t pte_mknonnuma(pte_t pte);
extern pmd_t pmd_mknonnuma(pmd_t pmd);
extern pte_t pte_mknuma(pte_t pte);
extern pmd_t pmd_mknuma(pmd_t pmd);
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
#else
static inline int pmd_numa(pmd_t pmd)
{
        return 0;
}

static inline int pte_numa(pte_t pte)
{
        return 0;
}

static inline pte_t pte_mknonnuma(pte_t pte)
{
        return pte;
}

static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
        return pmd;
}

static inline pte_t pte_mknuma(pte_t pte)
{
        return pte;
}

static inline pmd_t pmd_mknuma(pmd_t pmd)
{
        return pmd;
}
#endif /* CONFIG_NUMA_BALANCING */
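
/*
 * Illustrative sketch (not part of the original header): the two sides
 * of NUMA hinting.  Protection-change code arms a pte with
 * pte_mknuma(); the hinting fault later disarms it with
 * pte_mknonnuma().  The helper names below are assumptions for
 * illustration; the real logic lives in the NUMA hinting fault and
 * protection-change paths and must run under the pte lock.
 */
static inline void example_arm_numa_hinting(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        if (pte_present(pte) && !pte_numa(pte))
                set_pte_at(mm, addr, ptep, pte_mknuma(pte));
}

static inline void example_handle_numa_fault(struct mm_struct *mm,
                                             unsigned long addr, pte_t *ptep)
{
        /* Restore _PAGE_PRESENT (and _PAGE_ACCESSED) after noting the access. */
        set_pte_at(mm, addr, ptep, pte_mknonnuma(*ptep));
}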

#endif /* CONFIG_MMU */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */