linux/arch/x86/mm/pgtable.c
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

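/*
 * Allocate a zeroed page of PTEs for a kernel mapping; __GFP_REPEAT asks
 * the allocator to retry harder before giving up.
 */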
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

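/*
 * Allocate a zeroed PTE page for user mappings.  With CONFIG_HIGHPTE the
 * page may live in highmem; it is passed through pgtable_page_ctor()
 * before being handed back.
 */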
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
}

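/*
 * Free a PTE page through the mmu_gather so it is not reused before the
 * relevant TLB entries have been flushed.
 */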
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

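/*
 * The same deferred freeing for pmd and pud pages, compiled in only when
 * the pagetable actually has that many levels.
 */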
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pud));
}
#endif  /* PAGETABLE_LEVELS > 3 */
#endif  /* PAGETABLE_LEVELS > 2 */

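/*
 * pgds that do not share the kernel pmd are kept on pgd_list, threaded
 * through the lru field of their struct page, so that updates to the
 * kernel mappings can be pushed into every pgd.
 */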
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

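/*
 * Number of pgd entries that are private to each pgd: only the user
 * portion when the kernel pmd is shared, otherwise the whole pgd.
 */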
#define UNSHARED_PTRS_PER_PGD                           \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

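/* Set up a freshly allocated pgd with the kernel part of the mappings. */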
static void pgd_ctor(pgd_t *pgd)
{
        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (PAGETABLE_LEVELS == 2 ||
            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            PAGETABLE_LEVELS == 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
                paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
                                         __pa(swapper_pg_dir) >> PAGE_SHIFT,
                                         KERNEL_PGD_BOUNDARY,
                                         KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD)
                pgd_list_add(pgd);
}

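/* Undo pgd_ctor(): unlink the pgd from pgd_list if it was added there. */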
static void pgd_dtor(pgd_t *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS       UNSHARED_PTRS_PER_PGD

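/* Install a pmd page into a PAE top-level (PDPT) entry. */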
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        if (mm == current->active_mm)
                write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS       0

#endif  /* CONFIG_X86_PAE */

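/* Free whatever pmd pages were preallocated for a pgd. */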
static void free_pmds(pmd_t *pmds[])
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++)
                if (pmds[i])
                        free_page((unsigned long)pmds[i]);
}

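/*
 * Allocate a zeroed pmd page for each preallocated slot.  If any
 * allocation fails, everything allocated so far is freed and -ENOMEM
 * is returned.
 */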
static int preallocate_pmds(pmd_t *pmds[])
{
        int i;
        bool failed = false;

        for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
                if (pmd == NULL)
                        failed = true;
                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(pmds);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }
}

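/*
 * Wire the preallocated pmds into the new pgd.  Entries at or above
 * KERNEL_PGD_BOUNDARY are first filled with the kernel pmd contents
 * taken from swapper_pg_dir.
 */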
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        pud_t *pud;
        unsigned long addr;
        int i;

        if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
                return;

        pud = pud_offset(pgd, 0);

        for (addr = i = 0; i < PREALLOCATED_PMDS;
             i++, pud++, addr += PUD_SIZE) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }
}

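/*
 * Allocate a page directory for a new mm: get the pgd page, preallocate
 * its pmds, then construct it under pgd_lock so that walkers of pgd_list
 * never see a half-populated pgd.
 */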
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *pmds[PREALLOCATED_PMDS];
        unsigned long flags;

        pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (preallocate_pmds(pmds) != 0)
                goto out_free_pgd;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_pmds;

        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
        spin_lock_irqsave(&pgd_lock, flags);

        pgd_ctor(pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);

        spin_unlock_irqrestore(&pgd_lock, flags);

        return pgd;

out_free_pmds:
        free_pmds(pmds);
out_free_pgd:
        free_page((unsigned long)pgd);
out:
        return NULL;
}

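/* Tear down a pgd: release leftover preallocated pmds, then the pgd page. */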
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        free_page((unsigned long)pgd);
}

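/*
 * Apply updated access flags to a pte.  The pte is only rewritten and the
 * TLB entry only flushed when the pte actually changes and 'dirty' is set;
 * returns whether the pte changed.
 */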
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty) {
                *ptep = entry;
                pte_update_defer(vma->vm_mm, address, ptep);
                flush_tlb_page(vma, address);
        }

        return changed;
}

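/*
 * Atomically test and clear the accessed bit of a pte, notifying the
 * paravirt layer when it was set.  Returns the old accessed state.
 */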
int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        if (ret)
                pte_update(vma->vm_mm, addr, ptep);

        return ret;
}

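/*
 * As above, but also flush the TLB entry when the accessed bit was set so
 * the hardware will mark the pte young again on the next access.
 */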
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;

        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);

        return young;
}

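/* Number of fixmap slots that have been populated so far. */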
int fixmaps_set;

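/* Install a pte for a fixmap slot; an out-of-range index is a bug. */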
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}

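/* Map a physical address into a fixmap slot with the given protection. */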
void native_set_fixmap(enum fixed_addresses idx, unsigned long phys,
                       pgprot_t flags)
{
        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}