linux/arch/powerpc/mm/hugetlbpage.c
/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#define PAGE_SHIFT_64K  16
#define PAGE_SHIFT_16M  24
#define PAGE_SHIFT_16G  34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this
 * is just used to track 16G pages, and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES       128
struct psize_gpages {
        u64 gpage_list[MAX_NUMBER_GPAGES];
        unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES       1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif

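/*
 * Map a page shift to the matching index in mmu_psize_defs[], or return
 * -1 if no supported page size uses that shift.
 */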
static inline int shift_to_mmu_psize(unsigned int shift)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
                if (mmu_psize_defs[psize].shift == shift)
                        return psize;
        return -1;
}

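/*
 * Inverse of shift_to_mmu_psize(): return the page shift for an MMU page
 * size index, and BUG() if that size is not populated.
 */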
static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
        if (mmu_psize_defs[mmu_psize].shift)
                return mmu_psize_defs[mmu_psize].shift;
        BUG();
}

#define hugepd_none(hpd)        ((hpd).pd == 0)

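/*
 * Walk the page tables for ea and return a pointer to the (normal or
 * huge) PTE, or NULL if nothing is mapped there.  If shift is non-NULL,
 * *shift is set to the huge page shift for hugepd-backed mappings and to
 * 0 for normal mappings.
 */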
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pdshift = PGDIR_SHIFT;

        if (shift)
                *shift = 0;

        pg = pgdir + pgd_index(ea);
        if (is_hugepd(pg)) {
                hpdp = (hugepd_t *)pg;
        } else if (!pgd_none(*pg)) {
                pdshift = PUD_SHIFT;
                pu = pud_offset(pg, ea);
                if (is_hugepd(pu))
                        hpdp = (hugepd_t *)pu;
                else if (!pud_none(*pu)) {
                        pdshift = PMD_SHIFT;
                        pm = pmd_offset(pu, ea);
                        if (is_hugepd(pm))
                                hpdp = (hugepd_t *)pm;
                        else if (!pmd_none(*pm)) {
                                return pte_offset_kernel(pm, ea);
                        }
                }
        }

        if (!hpdp)
                return NULL;

        if (shift)
                *shift = hugepd_shift(*hpdp);
        return hugepte_offset(hpdp, ea, pdshift);
}

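/* hugetlb hook: look up the huge PTE for addr in mm's page tables. */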
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

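/*
 * Allocate a hugepte table and link it in at hpdp.  On FSL Book3E a huge
 * page is covered by several consecutive directory entries, so all of
 * them are filled in here.  Returns 0 on success (or if another thread
 * already populated the entry), -ENOMEM if the allocation failed.
 */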
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned pdshift, unsigned pshift)
{
        struct kmem_cache *cachep;
        pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
        int i;
        int num_hugepd = 1 << (pshift - pdshift);
        cachep = hugepte_cache;
#else
        cachep = PGT_CACHE(pdshift - pshift);
#endif

        new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
                else
                        hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
        }
        /* If we bailed from the for loop early, an error occurred; clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        hpdp->pd = 0;
                kmem_cache_free(cachep, new);
        }
#else
        if (!hugepd_none(*hpdp))
                kmem_cache_free(cachep, new);
        else
                hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
        spin_unlock(&mm->page_table_lock);
        return 0;
}

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif

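/*
 * Find or allocate the huge PTE for addr in mm, for a huge page of sz
 * bytes.  Returns NULL if a required page table level could not be
 * allocated.
 */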
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;

        addr &= ~(sz-1);

        pg = pgd_offset(mm, addr);

        if (pshift >= HUGEPD_PGD_SHIFT) {
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift >= HUGEPD_PUD_SHIFT) {
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        hpdp = (hugepd_t *)pm;
                }
        }

        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
                return NULL;

        return hugepte_offset(hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
        int i;

        if (addr == 0)
                return;

        gpage_freearray[idx].nr_gpages = number_of_pages;

        for (i = 0; i < number_of_pages; i++) {
                gpage_freearray[idx].gpage_list[i] = addr;
                addr += page_size;
        }
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
        int nr_gpages = gpage_freearray[idx].nr_gpages;

        if (nr_gpages == 0)
                return 0;

#ifdef CONFIG_HIGHMEM
        /*
         * If gpages can be in highmem we can't use the trick of storing the
         * data structure in the page; allocate space for it separately.
         */
        m = alloc_bootmem(sizeof(struct huge_bootmem_page));
        m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
        m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

        list_add(&m->list, &huge_boot_pages);
        gpage_freearray[idx].nr_gpages = nr_gpages;
        gpage_freearray[idx].gpage_list[nr_gpages] = 0;
        m->hstate = hstate;

        return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val)
{
        static phys_addr_t size;
        unsigned long npages;

        /*
         * The hugepagesz and hugepages cmdline options are interleaved.  We
         * use the size variable to keep track of whether or not this was done
         * properly and skip over instances where it is incorrect.  Other
         * command-line parsing code will issue warnings, so we don't need to.
         */
        if ((strcmp(param, "default_hugepagesz") == 0) ||
            (strcmp(param, "hugepagesz") == 0)) {
                size = memparse(val, NULL);
        } else if (strcmp(param, "hugepages") == 0) {
                if (size != 0) {
                        if (sscanf(val, "%lu", &npages) <= 0)
                                npages = 0;
                        gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
                        size = 0;
                }
        }
        return 0;
}

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the
 * memblock allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
        static __initdata char cmdline[COMMAND_LINE_SIZE];
        phys_addr_t size, base;
        int i;

        strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
        parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup);

        /*
         * Walk gpage list in reverse, allocating larger page sizes first.
         * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
         * When we reach the point in the list where pages are no longer
         * considered gpages, we're done.
         */
        for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
                if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
                        continue;
                else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
                        break;

                size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
                base = memblock_alloc_base(size * gpage_npages[i], size,
                                           MEMBLOCK_ALLOC_ANYWHERE);
                add_gpage(base, size, gpage_npages[i]);
        }
}

#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
#endif

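/*
 * Huge page PMD sharing is not implemented on powerpc, so there is never
 * anything to unshare here.
 */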
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

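/* RCU callback: free every hugepte table batched on a hugepd_freelist page. */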
static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(hugepte_cache, batch->ptes[i]);

        free_page((unsigned long)batch);
}

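/*
 * Free a hugepte table.  If no other CPU can be walking this mm's page
 * tables, free it immediately; otherwise batch it on a per-cpu list and
 * defer the free to an RCU grace period so that concurrent lockless
 * walkers never see a freed table.
 */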
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &__get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpumask_equal(mm_cpumask(tlb->mm),
                          cpumask_of(smp_processor_id()))) {
                kmem_cache_free(hugepte_cache, hugepte);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
}
#endif

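/*
 * Clear the hugepd entry (on FSL, the whole group of entries) at hpdp and
 * free the hugepte table it points to, honouring the floor/ceiling limits
 * in the same way as free_pgd_range().
 */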
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
        /* Note: On fsl the hpdp may be the first of several */
        num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
        unsigned int shift = hugepd_shift(*hpdp);
#endif

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                hpdp->pd = 0;

        tlb->need_flush = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
        hugepd_free(tlb, hugepte);
#else
        pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd))
                        continue;
#ifdef CONFIG_PPC_FSL_BOOK3E
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(pud)) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
#ifdef CONFIG_PPC_FSL_BOOK3E
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(pgd)) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
#ifdef CONFIG_PPC_FSL_BOOK3E
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}

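/*
 * Return the struct page backing a huge page mapping at address, or
 * ERR_PTR(-EINVAL) if the address is not mapped by a huge page.
 */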
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *ptep;
        struct page *page;
        unsigned shift;
        unsigned long mask;

        ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

        /* Verify it is a huge page, else bail. */
        if (!ptep || !shift)
                return ERR_PTR(-EINVAL);

        mask = (1UL << shift) - 1;
        page = pte_page(*ptep);
        if (page)
                page += (address & mask) / PAGE_SIZE;

        return page;
}

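/*
 * powerpc represents huge mappings with hugepd entries rather than huge
 * pmd/pud entries, so the generic pmd_huge()/pud_huge() tests never match
 * and follow_huge_pmd() should never be reached.
 */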
int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        BUG();
        return NULL;
}

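/*
 * Lockless get_user_pages() path for a single huge PTE: take speculative
 * references on the pages it covers and back out if the PTE changes
 * underneath us.
 */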
static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                       unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask;
        unsigned long pte_end;
        struct page *head, *page, *tail;
        pte_t pte;
        int refs;

        pte_end = (addr + sz) & ~(sz-1);
        if (pte_end < end)
                end = pte_end;

        pte = *ptep;
        mask = _PAGE_PRESENT | _PAGE_USER;
        if (write)
                mask |= _PAGE_RW;

        if ((pte_val(pte) & mask) != mask)
                return 0;

        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);

        page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
        tail = page;
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        /*
         * Any tail pages need their mapcount reference taken before we
         * return.
         */
        while (refs--) {
                if (PageTail(tail))
                        get_huge_page_tail(tail);
                tail++;
        }

        return 1;
}

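/*
 * Like pmd_addr_end() for huge PTEs: return the lesser of the next
 * sz-aligned boundary and end, written so the comparison still works if
 * the boundary wraps to zero.
 */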
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long __boundary = (addr + sz) & ~(sz-1);
        return (__boundary - 1 < end - 1) ? __boundary : end;
}

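/*
 * Walk every huge PTE underneath a hugepd entry for the lockless GUP
 * path, handing each one to gup_hugepte().
 */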
int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
               unsigned long addr, unsigned long end,
               int write, struct page **pages, int *nr)
{
        pte_t *ptep;
        unsigned long sz = 1UL << hugepd_shift(*hugepd);
        unsigned long next;

        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);

        return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}
#endif

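/*
 * Return the MMU page size, in bytes, used to map a VMA: the slice page
 * size where slices are in use, otherwise PAGE_SIZE or the hstate's huge
 * page size.
 */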
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
        unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

        return 1UL << mmu_psize_to_shift(psize);
#else
        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        return huge_page_size(hstate_vma(vma));
#endif
}

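/* FSL Book3E MMUs support page sizes that are powers of 4, hence this helper. */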
static inline bool is_power_of_4(unsigned long x)
{
        if (is_power_of_2(x))
                return (__ilog2(x) % 2) ? false : true;
        return false;
}

static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
        if ((size < PAGE_SIZE) || !is_power_of_4(size))
                return -EINVAL;
#else
        if (!is_power_of_2(size)
            || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
                return -EINVAL;
#endif

        if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
                return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
        /* Disable support for 64K huge pages when 64K SPU local store
         * support is enabled as the current implementation conflicts.
         */
        if (shift == PAGE_SHIFT_64K)
                return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if the huge page size has already been set up */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}

static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0)
                printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
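/*
 * Boot-time setup for FSL Book3E: register every hardware-supported huge
 * page size, create the kmem cache used for hugepte tables, and pick the
 * default huge page size (4M).
 */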
static int __init hugetlbpage_init(void)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                /* Don't treat normal page sizes as huge... */
                if (shift != PAGE_SHIFT)
                        if (add_huge_page_size(1ULL << shift) < 0)
                                continue;
        }

        /*
         * Create a kmem cache for hugeptes.  The bottom bits in the pte have
         * size information encoded in them, so align them to allow this.
         */
        hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
                                          HUGEPD_SHIFT_MASK + 1, 0, NULL);
        if (hugepte_cache == NULL)
                panic("%s: Unable to create kmem cache for hugeptes\n",
                      __func__);

        /* Default hpage size = 4M */
        if (mmu_psize_defs[MMU_PAGE_4M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
        else
                panic("%s: Unable to set default huge page size\n", __func__);

        return 0;
}
#else
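/*
 * Boot-time setup for the remaining (hash MMU) processors: register each
 * supported huge page size, create the matching pgtable caches, and pick
 * a default huge page size (16M if available, otherwise 1M).
 */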
static int __init hugetlbpage_init(void)
{
        int psize;

        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;

                if (shift < PMD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PUD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;

                pgtable_cache_add(pdshift - shift, NULL);
                if (!PGT_CACHE(pdshift - shift))
                        panic("hugetlbpage_init(): could not create "
                              "pgtable cache for %d bit pagesize\n", shift);
        }

        /* Set the default large page size.  Currently, we pick 16M or 1M
         * depending on what is available.
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

        return 0;
}
#endif
module_init(hugetlbpage_init);

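/*
 * Flush the data and instruction caches for every sub-page of a compound
 * huge page, mapping highmem sub-pages with kmap_atomic() as needed.
 */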
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
                        start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
                }
        }
}