/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error and reset the entry to p?d_none.  These are called
 * (rarely) from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
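
/*
 * Illustrative sketch (not part of this file): these are normally
 * reached via the p?d_none_or_clear_bad() checks in a page table
 * walker, roughly:
 *
 *	pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... walk the pte level ...
 *	} while (pmd++, addr = next, addr != end);
 */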

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed) and write permission.
 * Furthermore, we know it always gets set to a "more permissive"
 * setting, which allows most architectures to optimize this.  We
 * return whether the PTE actually changed, which in turn instructs
 * the caller to do things like update_mmu_cache().  This used to be
 * done in the caller, but sparc needs minor faults to force that call
 * on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
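
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * fault handler typically uses the return value to decide whether to
 * call update_mmu_cache(), e.g.:
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 */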

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
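/*
 * Huge-pmd counterpart of ptep_set_access_flags() above, used by the
 * transparent hugepage code.  On !CONFIG_TRANSPARENT_HUGEPAGE builds
 * there can be no callers, hence the BUG().
 */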
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
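/*
 * Test and clear the "young" (accessed) bit of a pte and flush the TLB
 * entry if it was set.  Callers (typically the rmap/page reclaim code)
 * use the result as a reference hint for the backing page.
 */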
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
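/*
 * Atomically clear a pte and flush the corresponding TLB entry.  The
 * pte_accessible() check lets architectures skip the flush for ptes
 * that can never be cached in the TLB, avoiding needless flushes on
 * the unmap paths.
 */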
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	pte_t pte;
	pte = ptep_get_and_clear(vma->vm_mm, address, ptep);
	if (pte_accessible(pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
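/*
 * Huge-pmd counterpart of ptep_clear_flush() above.  There is no
 * pmd_accessible() equivalent, so the TLB flush over the huge page
 * range is done unconditionally.
 */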
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
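/*
 * Mark a huge pmd as being split.  The TLB flush below is not needed
 * for the mapping itself; on architectures where flush_tlb_range()
 * sends IPIs, it guarantees that no CPU is still inside gup_fast()
 * (which runs with interrupts disabled) looking at the old,
 * non-splitting pmd value.
 */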
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
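/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * when a huge pmd is installed, the pte page table pre-allocated for
 * the range is stashed away here for a later split, roughly:
 *
 *	spin_lock(&mm->page_table_lock);
 *	set_pmd_at(mm, haddr, pmd, entry);
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 *	spin_unlock(&mm->page_table_lock);
 */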
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
	mm->pmd_huge_pte = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* no "address" argument, so this destroys the page coloring of some archs */
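/*
 * The deposited page table is withdrawn again when the huge pmd is
 * split or zapped; the caller either reuses it to fill the pmd with
 * regular ptes or frees it.
 */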
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	if (list_empty(&pgtable->lru))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
					      struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
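/*
 * Transiently mark the pmd not-present and flush the TLB, so that the
 * old huge mapping cannot be cached (or re-cached) while the caller
 * modifies the entry, e.g. when splitting a huge page.
 */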
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif