linux/arch/x86/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

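/*
 * Check whether svma maps the same file offset over the same PUD-aligned
 * range with identical vm_flags, so that its pmd page can be shared with
 * vma.  Returns the corresponding address in svma, or 0 if sharing is not
 * possible.
 */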
static unsigned long page_table_shareable(struct vm_area_struct *svma,
                                struct vm_area_struct *vma,
                                unsigned long addr, pgoff_t idx)
{
        unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
                                svma->vm_start;
        unsigned long sbase = saddr & PUD_MASK;
        unsigned long s_end = sbase + PUD_SIZE;

        /*
         * Match the virtual addresses, permissions and the alignment of the
         * page table page.
         */
        if (pmd_index(addr) != pmd_index(saddr) ||
            vma->vm_flags != svma->vm_flags ||
            sbase < svma->vm_start || svma->vm_end < s_end)
                return 0;

        return saddr;
}

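/*
 * Return 1 if this vma is a shared mapping that fully covers the
 * PUD-aligned range containing addr, i.e. its pmd page is a candidate
 * for sharing; 0 otherwise.
 */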
static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
        unsigned long base = addr & PUD_MASK;
        unsigned long end = base + PUD_SIZE;

        /*
         * Check for proper vm_flags and page table alignment.
         */
        if (vma->vm_flags & VM_MAYSHARE &&
            vma->vm_start <= base && end <= vma->vm_end)
                return 1;
        return 0;
}

/*
 * Search for a shareable pmd page for hugetlb.  If another process maps
 * the same hugetlbfs file over the same PUD-aligned range, reuse its pmd
 * page by taking a reference on it and populating our pud with it, so
 * both mappings share one page of huge ptes.
 */
static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
        struct vm_area_struct *vma = find_vma(mm, addr);
        struct address_space *mapping = vma->vm_file->f_mapping;
        pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
                        vma->vm_pgoff;
        struct prio_tree_iter iter;
        struct vm_area_struct *svma;
        unsigned long saddr;
        pte_t *spte = NULL;

        if (!vma_shareable(vma, addr))
                return;

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
                if (svma == vma)
                        continue;

                saddr = page_table_shareable(svma, vma, addr, idx);
                if (saddr) {
                        spte = huge_pte_offset(svma->vm_mm, saddr);
                        if (spte) {
                                get_page(virt_to_page(spte));
                                break;
                        }
                }
        }

        if (!spte)
                goto out;

        spin_lock(&mm->page_table_lock);
        if (pud_none(*pud))
                pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
        else
                put_page(virt_to_page(spte));
        spin_unlock(&mm->page_table_lock);
out:
        spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Unmap a huge page backed by a shared pte.
 *
 * The hugetlb pte page is refcounted at the time of mapping.  If the pte
 * page is shared, indicated by page_count > 1, unmapping is achieved by
 * clearing the pud and decrementing the refcount.  If count == 1, the pte
 * page is not shared.
 *
 * Called with vma->vm_mm->page_table_lock held.
 *
 * Returns: 1 - successfully unmapped a shared pte page
 *          0 - the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        pgd_t *pgd = pgd_offset(mm, *addr);
        pud_t *pud = pud_offset(pgd, *addr);

        BUG_ON(page_count(virt_to_page(ptep)) == 0);
        if (page_count(virt_to_page(ptep)) == 1)
                return 0;

        pud_clear(pud);
        put_page(virt_to_page(ptep));
        *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
        return 1;
}

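/*
 * Allocate the page table entries needed to map a huge page of size sz at
 * addr.  A PUD_SIZE page uses the pud entry itself as the huge pte; a
 * PMD_SIZE page allocates a pmd, sharing an existing pmd page if possible.
 */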
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                if (sz == PUD_SIZE) {
                        pte = (pte_t *)pud;
                } else {
                        BUG_ON(sz != PMD_SIZE);
                        if (pud_none(*pud))
                                huge_pmd_share(mm, addr, pud);
                        pte = (pte_t *) pmd_alloc(mm, pud, addr);
                }
        }
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}

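/*
 * Walk the page tables and return the huge pte for addr, or NULL if no
 * pud/pmd level entry is present.  A large pud is returned directly as
 * the huge pte.
 */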
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud)) {
                        if (pud_large(*pud))
                                return (pte_t *)pud;
                        pmd = pmd_offset(pud, addr);
                }
        }
        return (pte_t *) pmd;
}

#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        unsigned long vpfn = address >> PAGE_SHIFT;
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

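/*
 * A pmd or pud maps a huge page when the hardware PSE (page size) bit is
 * set in the entry.
 */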
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_PSE);
}

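/*
 * Return the struct page for the 4K subpage of the huge page that maps
 * address: the pmd/pud entry points at the head page, and the offset
 * within the huge page selects the subpage.
 */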
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
        return page;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
                pud_t *pud, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pud);
        if (page)
                page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
        return page;
}

#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
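/*
 * Bottom-up search for a free, huge-page-aligned range of len bytes,
 * starting at the cached free-area hint (or TASK_UNMAPPED_BASE) and
 * walking up through the vma list.
 */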
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, huge_page_size(h));

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, huge_page_size(h));
        }
}

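/*
 * Top-down search: start just below mm->mmap_base and work downwards,
 * looking for a huge-page-aligned hole of len bytes between existing
 * vmas.  Falls back to the bottom-up allocator if no hole is found.
 */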
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & huge_page_mask(h);
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                            (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        return (mm->free_area_cache = addr);
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & huge_page_mask(h);
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

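/*
 * Arch hook for hugetlbfs mmap(): validate the length, honour MAP_FIXED
 * and any address hint when possible, otherwise dispatch to the bottom-up
 * or top-down allocator to match the process's normal mmap layout.
 */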
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_X86_64
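/*
 * Parse the "hugepagesz=" kernel command line option and register the
 * requested huge page size: PMD_SIZE is always available, PUD_SIZE only
 * when the CPU supports 1GB pages (cpu_has_gbpages).
 */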
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);
        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE && cpu_has_gbpages) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
                        ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif