linux/mm/percpu.c
   1/*
   2 * mm/percpu.c - percpu memory allocator
   3 *
   4 * Copyright (C) 2009           SUSE Linux Products GmbH
   5 * Copyright (C) 2009           Tejun Heo <tj@kernel.org>
   6 *
   7 * This file is released under the GPLv2.
   8 *
   9 * This is the percpu allocator which can handle both static and
  10 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
  11 * consists of a boot-time determined number of units and the first
  12 * chunk is used for static percpu variables in the kernel image
  13 * (special boot time alloc/init handling is necessary as these areas
  14 * need to be brought up before allocation services are running).
  15 * Units grow as necessary and all units grow or shrink in unison.
  16 * When a chunk is filled up, another chunk is allocated.
  17 *
  18 *  c0                           c1                         c2
  19 *  -------------------          -------------------        ------------
  20 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
  21 *  -------------------  ......  -------------------  ....  ------------
  22 *
  23 * Allocation is done in offset-size areas of a single unit space,
  24 * i.e. an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
  25 * c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly
  26 * to cpus.  On NUMA, the mapping can be non-linear and even sparse.
  27 * Percpu access can be done by configuring percpu base registers
  28 * according to cpu to unit mapping and pcpu_unit_size.
  29 *
  30 * There are usually many small percpu allocations, many of them as
  31 * small as 4 bytes.  The allocator organizes chunks into lists
  32 * according to free size and tries to allocate from the fullest one.
  33 * Each chunk keeps a maximum contiguous area size hint which is
  34 * guaranteed to be equal to or larger than the maximum contiguous
  35 * area in the chunk.  This helps the allocator not to iterate the
  36 * chunk maps unnecessarily.
  37 *
  38 * Allocation state in each chunk is kept using an array of integers
  39 * on chunk->map.  A positive value in the map represents a free
  40 * region and negative allocated.  Allocation inside a chunk is done
  41 * by scanning this map sequentially and serving the first matching
  42 * entry.  This is mostly copied from the percpu_modalloc() allocator.
  43 * Chunks can be determined from the address using the index field
  44 * in the page struct. The index field contains a pointer to the chunk.
  45 *
  46 * To use this allocator, arch code should do the following.
  47 *
  48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  49 *   regular address to percpu pointer and back if they need to be
  50 *   different from the default
  51 *
  52 * - use pcpu_setup_first_chunk() during percpu area initialization to
  53 *   setup the first chunk containing the kernel static percpu area
  54 */
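/*
 * As a concrete illustration of the map encoding described above (a
 * hypothetical example, sizes made up): a freshly created chunk whose
 * pcpu_unit_size is 32768 starts out with
 *
 *	map[] = { 32768 };			map_used == 1
 *
 * and after a 512 byte and then a 64 byte allocation from offset 0 it
 * becomes
 *
 *	map[] = { -512, -64, 32192 };		map_used == 3
 *
 * Each entry records the length of one region, negated when the region
 * is allocated, and an area's offset is the sum of the absolute values
 * of all preceding entries.
 */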
  55
  56#include <linux/bitmap.h>
  57#include <linux/bootmem.h>
  58#include <linux/err.h>
  59#include <linux/list.h>
  60#include <linux/log2.h>
  61#include <linux/mm.h>
  62#include <linux/module.h>
  63#include <linux/mutex.h>
  64#include <linux/percpu.h>
  65#include <linux/pfn.h>
  66#include <linux/slab.h>
  67#include <linux/spinlock.h>
  68#include <linux/vmalloc.h>
  69#include <linux/workqueue.h>
  70
  71#include <asm/cacheflush.h>
  72#include <asm/sections.h>
  73#include <asm/tlbflush.h>
  74#include <asm/io.h>
  75
  76#define PCPU_SLOT_BASE_SHIFT            5       /* 1-31 share the same slot */
  77#define PCPU_DFL_MAP_ALLOC              16      /* start a map with 16 ents */
  78
  79/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
  80#ifndef __addr_to_pcpu_ptr
  81#define __addr_to_pcpu_ptr(addr)                                        \
  82        (void __percpu *)((unsigned long)(addr) -                       \
  83                          (unsigned long)pcpu_base_addr +               \
  84                          (unsigned long)__per_cpu_start)
  85#endif
  86#ifndef __pcpu_ptr_to_addr
  87#define __pcpu_ptr_to_addr(ptr)                                         \
  88        (void __force *)((unsigned long)(ptr) +                         \
  89                         (unsigned long)pcpu_base_addr -                \
  90                         (unsigned long)__per_cpu_start)
  91#endif
  92
  93struct pcpu_chunk {
  94        struct list_head        list;           /* linked to pcpu_slot lists */
  95        int                     free_size;      /* free bytes in the chunk */
  96        int                     contig_hint;    /* max contiguous size hint */
  97        void                    *base_addr;     /* base address of this chunk */
  98        int                     map_used;       /* # of map entries used */
  99        int                     map_alloc;      /* # of map entries allocated */
 100        int                     *map;           /* allocation map */
 101        void                    *data;          /* chunk data */
 102        bool                    immutable;      /* no [de]population allowed */
 103        unsigned long           populated[];    /* populated bitmap */
 104};
 105
 106static int pcpu_unit_pages __read_mostly;
 107static int pcpu_unit_size __read_mostly;
 108static int pcpu_nr_units __read_mostly;
 109static int pcpu_atom_size __read_mostly;
 110static int pcpu_nr_slots __read_mostly;
 111static size_t pcpu_chunk_struct_size __read_mostly;
 112
 113/* cpus with the lowest and highest unit numbers */
 114static unsigned int pcpu_first_unit_cpu __read_mostly;
 115static unsigned int pcpu_last_unit_cpu __read_mostly;
 116
 117/* the address of the first chunk which starts with the kernel static area */
 118void *pcpu_base_addr __read_mostly;
 119EXPORT_SYMBOL_GPL(pcpu_base_addr);
 120
 121static const int *pcpu_unit_map __read_mostly;          /* cpu -> unit */
 122const unsigned long *pcpu_unit_offsets __read_mostly;   /* cpu -> unit offset */
 123
 124/* group information, used for vm allocation */
 125static int pcpu_nr_groups __read_mostly;
 126static const unsigned long *pcpu_group_offsets __read_mostly;
 127static const size_t *pcpu_group_sizes __read_mostly;
 128
 129/*
 130 * The first chunk which always exists.  Note that unlike other
 131 * chunks, this one can be allocated and mapped in several different
 132 * ways and thus often doesn't live in the vmalloc area.
 133 */
 134static struct pcpu_chunk *pcpu_first_chunk;
 135
 136/*
 137 * Optional reserved chunk.  This chunk reserves part of the first
 138 * chunk and serves it for reserved allocations.  The offset at which
 139 * the reserved area ends is kept in pcpu_reserved_chunk_limit.  When
 140 * the reserved area doesn't exist, the following variables contain
 141 * NULL and 0 respectively.
 142 */
 143static struct pcpu_chunk *pcpu_reserved_chunk;
 144static int pcpu_reserved_chunk_limit;
 145
 146/*
 147 * Synchronization rules.
 148 *
 149 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 150 * protects allocation/reclaim paths, chunks, populated bitmap and
 151 * vmalloc mapping.  The latter is a spinlock and protects the index
 152 * data structures - chunk slots, chunks and area maps in chunks.
 153 *
 154 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 155 * pcpu_lock is grabbed and released as necessary.  All actual memory
 156 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 157 * general, percpu memory can't be allocated with irq off but
 158 * irqsave/restore are still used in alloc path so that it can be used
 159 * from early init path - sched_init() specifically.
 160 *
 161 * Free path accesses and alters only the index data structures, so it
 162 * can be safely called from atomic context.  When memory needs to be
 163 * returned to the system, the free path schedules reclaim_work which
 164 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 165 * reclaimed, releases both locks and frees the chunks.  Note that it's
 166 * necessary to grab both locks to remove a chunk from circulation as
 167 * allocation path might be referencing the chunk with only
 168 * pcpu_alloc_mutex locked.
 169 */
 170static DEFINE_MUTEX(pcpu_alloc_mutex);  /* protects whole alloc and reclaim */
 171static DEFINE_SPINLOCK(pcpu_lock);      /* protects index data structures */
 172
 173static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 174
 175/* reclaim work to release fully free chunks, scheduled from free path */
 176static void pcpu_reclaim(struct work_struct *work);
 177static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
 178
 179static bool pcpu_addr_in_first_chunk(void *addr)
 180{
 181        void *first_start = pcpu_first_chunk->base_addr;
 182
 183        return addr >= first_start && addr < first_start + pcpu_unit_size;
 184}
 185
 186static bool pcpu_addr_in_reserved_chunk(void *addr)
 187{
 188        void *first_start = pcpu_first_chunk->base_addr;
 189
 190        return addr >= first_start &&
 191                addr < first_start + pcpu_reserved_chunk_limit;
 192}
 193
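/*
 * Chunks are kept on pcpu_slot[] lists indexed by roughly the log2 of
 * their free size.  For example, with PCPU_SLOT_BASE_SHIFT == 5 as
 * defined above, a chunk with 40 bytes free maps to slot 3 and one with
 * 300 bytes free maps to slot 6, while a completely free chunk is
 * special-cased into the last slot, pcpu_nr_slots - 1.
 */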
 194static int __pcpu_size_to_slot(int size)
 195{
 196        int highbit = fls(size);        /* size is in bytes */
 197        return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 198}
 199
 200static int pcpu_size_to_slot(int size)
 201{
 202        if (size == pcpu_unit_size)
 203                return pcpu_nr_slots - 1;
 204        return __pcpu_size_to_slot(size);
 205}
 206
 207static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 208{
 209        if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
 210                return 0;
 211
 212        return pcpu_size_to_slot(chunk->free_size);
 213}
 214
 215/* set the pointer to a chunk in a page struct */
 216static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 217{
 218        page->index = (unsigned long)pcpu;
 219}
 220
 221/* obtain pointer to a chunk from a page struct */
 222static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 223{
 224        return (struct pcpu_chunk *)page->index;
 225}
 226
 227static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 228{
 229        return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 230}
 231
 232static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 233                                     unsigned int cpu, int page_idx)
 234{
 235        return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
 236                (page_idx << PAGE_SHIFT);
 237}
 238
 239static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
 240                                           int *rs, int *re, int end)
 241{
 242        *rs = find_next_zero_bit(chunk->populated, end, *rs);
 243        *re = find_next_bit(chunk->populated, end, *rs + 1);
 244}
 245
 246static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
 247                                         int *rs, int *re, int end)
 248{
 249        *rs = find_next_bit(chunk->populated, end, *rs);
 250        *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
 251}
 252
 253/*
 254 * (Un)populated page region iterators.  Iterate over (un)populated
  255 * page regions between @start and @end in @chunk.  @rs and @re should
 256 * be integer variables and will be set to start and end page index of
 257 * the current region.
 258 */
 259#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)               \
 260        for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
 261             (rs) < (re);                                                   \
 262             (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
 263
 264#define pcpu_for_each_pop_region(chunk, rs, re, start, end)                 \
 265        for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
 266             (rs) < (re);                                                   \
 267             (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
 268
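/*
 * For example, the chunk [de]population code pulled in further below
 * can walk the pages which still need populating for an allocated area
 * roughly like this (illustrative sketch only):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end)
 *		pr_debug("populate pages [%d, %d)\n", rs, re);
 */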
 269/**
 270 * pcpu_mem_alloc - allocate memory
 271 * @size: bytes to allocate
 272 *
 273 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 274 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 275 * memory is always zeroed.
 276 *
 277 * CONTEXT:
 278 * Does GFP_KERNEL allocation.
 279 *
 280 * RETURNS:
 281 * Pointer to the allocated area on success, NULL on failure.
 282 */
 283static void *pcpu_mem_alloc(size_t size)
 284{
 285        if (size <= PAGE_SIZE)
 286                return kzalloc(size, GFP_KERNEL);
 287        else {
 288                void *ptr = vmalloc(size);
 289                if (ptr)
 290                        memset(ptr, 0, size);
 291                return ptr;
 292        }
 293}
 294
 295/**
 296 * pcpu_mem_free - free memory
 297 * @ptr: memory to free
 298 * @size: size of the area
 299 *
 300 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 301 */
 302static void pcpu_mem_free(void *ptr, size_t size)
 303{
 304        if (size <= PAGE_SIZE)
 305                kfree(ptr);
 306        else
 307                vfree(ptr);
 308}
 309
 310/**
 311 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 312 * @chunk: chunk of interest
 313 * @oslot: the previous slot it was on
 314 *
 315 * This function is called after an allocation or free changed @chunk.
 316 * New slot according to the changed state is determined and @chunk is
 317 * moved to the slot.  Note that the reserved chunk is never put on
 318 * chunk slots.
 319 *
 320 * CONTEXT:
 321 * pcpu_lock.
 322 */
 323static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 324{
 325        int nslot = pcpu_chunk_slot(chunk);
 326
 327        if (chunk != pcpu_reserved_chunk && oslot != nslot) {
 328                if (oslot < nslot)
 329                        list_move(&chunk->list, &pcpu_slot[nslot]);
 330                else
 331                        list_move_tail(&chunk->list, &pcpu_slot[nslot]);
 332        }
 333}
 334
 335/**
 336 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 337 * @chunk: chunk of interest
 338 *
 339 * Determine whether area map of @chunk needs to be extended to
  340 * accommodate a new allocation.
 341 *
 342 * CONTEXT:
 343 * pcpu_lock.
 344 *
 345 * RETURNS:
 346 * New target map allocation length if extension is necessary, 0
 347 * otherwise.
 348 */
 349static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 350{
 351        int new_alloc;
 352
 353        if (chunk->map_alloc >= chunk->map_used + 2)
 354                return 0;
 355
 356        new_alloc = PCPU_DFL_MAP_ALLOC;
 357        while (new_alloc < chunk->map_used + 2)
 358                new_alloc *= 2;
 359
 360        return new_alloc;
 361}
 362
 363/**
 364 * pcpu_extend_area_map - extend area map of a chunk
 365 * @chunk: chunk of interest
 366 * @new_alloc: new target allocation length of the area map
 367 *
 368 * Extend area map of @chunk to have @new_alloc entries.
 369 *
 370 * CONTEXT:
 371 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 372 *
 373 * RETURNS:
 374 * 0 on success, -errno on failure.
 375 */
 376static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 377{
 378        int *old = NULL, *new = NULL;
 379        size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
 380        unsigned long flags;
 381
 382        new = pcpu_mem_alloc(new_size);
 383        if (!new)
 384                return -ENOMEM;
 385
 386        /* acquire pcpu_lock and switch to new area map */
 387        spin_lock_irqsave(&pcpu_lock, flags);
 388
 389        if (new_alloc <= chunk->map_alloc)
 390                goto out_unlock;
 391
 392        old_size = chunk->map_alloc * sizeof(chunk->map[0]);
 393        memcpy(new, chunk->map, old_size);
 394
 395        /*
 396         * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
 397         * one of the first chunks and still using static map.
 398         */
 399        if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
 400                old = chunk->map;
 401
 402        chunk->map_alloc = new_alloc;
 403        chunk->map = new;
 404        new = NULL;
 405
 406out_unlock:
 407        spin_unlock_irqrestore(&pcpu_lock, flags);
 408
 409        /*
 410         * pcpu_mem_free() might end up calling vfree() which uses
 411         * IRQ-unsafe lock and thus can't be called under pcpu_lock.
 412         */
 413        pcpu_mem_free(old, old_size);
 414        pcpu_mem_free(new, new_size);
 415
 416        return 0;
 417}
 418
 419/**
 420 * pcpu_split_block - split a map block
 421 * @chunk: chunk of interest
 422 * @i: index of map block to split
 423 * @head: head size in bytes (can be 0)
 424 * @tail: tail size in bytes (can be 0)
 425 *
 426 * Split the @i'th map block into two or three blocks.  If @head is
  427 * non-zero, a @head bytes block is inserted before block @i, moving it
 428 * to @i+1 and reducing its size by @head bytes.
 429 *
 430 * If @tail is non-zero, the target block, which can be @i or @i+1
 431 * depending on @head, is reduced by @tail bytes and @tail byte block
 432 * is inserted after the target block.
 433 *
  434 * @chunk->map must have enough free slots to accommodate the split.
 435 *
 436 * CONTEXT:
 437 * pcpu_lock.
 438 */
 439static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
 440                             int head, int tail)
 441{
 442        int nr_extra = !!head + !!tail;
 443
 444        BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
 445
 446        /* insert new subblocks */
 447        memmove(&chunk->map[i + nr_extra], &chunk->map[i],
 448                sizeof(chunk->map[0]) * (chunk->map_used - i));
 449        chunk->map_used += nr_extra;
 450
 451        if (head) {
 452                chunk->map[i + 1] = chunk->map[i] - head;
 453                chunk->map[i++] = head;
 454        }
 455        if (tail) {
 456                chunk->map[i++] -= tail;
 457                chunk->map[i] = tail;
 458        }
 459}
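/*
 * For example (hypothetical numbers), calling pcpu_split_block() on a
 * single 512 byte free entry with head == 16 and tail == 240 turns
 * map[] = { 512 } into map[] = { 16, 256, 240 }, leaving the middle
 * 256 byte entry as the block the caller will mark allocated.
 */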
 460
 461/**
 462 * pcpu_alloc_area - allocate area from a pcpu_chunk
 463 * @chunk: chunk of interest
 464 * @size: wanted size in bytes
 465 * @align: wanted align
 466 *
 467 * Try to allocate @size bytes area aligned at @align from @chunk.
 468 * Note that this function only allocates the offset.  It doesn't
 469 * populate or map the area.
 470 *
 471 * @chunk->map must have at least two free slots.
 472 *
 473 * CONTEXT:
 474 * pcpu_lock.
 475 *
 476 * RETURNS:
 477 * Allocated offset in @chunk on success, -1 if no matching area is
 478 * found.
 479 */
 480static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
 481{
 482        int oslot = pcpu_chunk_slot(chunk);
 483        int max_contig = 0;
 484        int i, off;
 485
 486        for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
 487                bool is_last = i + 1 == chunk->map_used;
 488                int head, tail;
 489
 490                /* extra for alignment requirement */
 491                head = ALIGN(off, align) - off;
 492                BUG_ON(i == 0 && head != 0);
 493
 494                if (chunk->map[i] < 0)
 495                        continue;
 496                if (chunk->map[i] < head + size) {
 497                        max_contig = max(chunk->map[i], max_contig);
 498                        continue;
 499                }
 500
 501                /*
 502                 * If head is small or the previous block is free,
 503                 * merge'em.  Note that 'small' is defined as smaller
 504                 * than sizeof(int), which is very small but isn't too
 505                 * uncommon for percpu allocations.
 506                 */
 507                if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
 508                        if (chunk->map[i - 1] > 0)
 509                                chunk->map[i - 1] += head;
 510                        else {
 511                                chunk->map[i - 1] -= head;
 512                                chunk->free_size -= head;
 513                        }
 514                        chunk->map[i] -= head;
 515                        off += head;
 516                        head = 0;
 517                }
 518
 519                /* if tail is small, just keep it around */
 520                tail = chunk->map[i] - head - size;
 521                if (tail < sizeof(int))
 522                        tail = 0;
 523
 524                /* split if warranted */
 525                if (head || tail) {
 526                        pcpu_split_block(chunk, i, head, tail);
 527                        if (head) {
 528                                i++;
 529                                off += head;
 530                                max_contig = max(chunk->map[i - 1], max_contig);
 531                        }
 532                        if (tail)
 533                                max_contig = max(chunk->map[i + 1], max_contig);
 534                }
 535
 536                /* update hint and mark allocated */
 537                if (is_last)
 538                        chunk->contig_hint = max_contig; /* fully scanned */
 539                else
 540                        chunk->contig_hint = max(chunk->contig_hint,
 541                                                 max_contig);
 542
 543                chunk->free_size -= chunk->map[i];
 544                chunk->map[i] = -chunk->map[i];
 545
 546                pcpu_chunk_relocate(chunk, oslot);
 547                return off;
 548        }
 549
 550        chunk->contig_hint = max_contig;        /* fully scanned */
 551        pcpu_chunk_relocate(chunk, oslot);
 552
 553        /* tell the upper layer that this chunk has no matching area */
 554        return -1;
 555}
 556
 557/**
 558 * pcpu_free_area - free area to a pcpu_chunk
 559 * @chunk: chunk of interest
 560 * @freeme: offset of area to free
 561 *
  562 * Free the area starting at offset @freeme back to @chunk.  This function
 563 * only modifies the allocation map.  It doesn't depopulate or unmap
 564 * the area.
 565 *
 566 * CONTEXT:
 567 * pcpu_lock.
 568 */
 569static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
 570{
 571        int oslot = pcpu_chunk_slot(chunk);
 572        int i, off;
 573
 574        for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
 575                if (off == freeme)
 576                        break;
 577        BUG_ON(off != freeme);
 578        BUG_ON(chunk->map[i] > 0);
 579
 580        chunk->map[i] = -chunk->map[i];
 581        chunk->free_size += chunk->map[i];
 582
 583        /* merge with previous? */
 584        if (i > 0 && chunk->map[i - 1] >= 0) {
 585                chunk->map[i - 1] += chunk->map[i];
 586                chunk->map_used--;
 587                memmove(&chunk->map[i], &chunk->map[i + 1],
 588                        (chunk->map_used - i) * sizeof(chunk->map[0]));
 589                i--;
 590        }
 591        /* merge with next? */
 592        if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
 593                chunk->map[i] += chunk->map[i + 1];
 594                chunk->map_used--;
 595                memmove(&chunk->map[i + 1], &chunk->map[i + 2],
 596                        (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
 597        }
 598
 599        chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
 600        pcpu_chunk_relocate(chunk, oslot);
 601}
 602
 603static struct pcpu_chunk *pcpu_alloc_chunk(void)
 604{
 605        struct pcpu_chunk *chunk;
 606
 607        chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
 608        if (!chunk)
 609                return NULL;
 610
 611        chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
 612        if (!chunk->map) {
 613                kfree(chunk);
 614                return NULL;
 615        }
 616
 617        chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
 618        chunk->map[chunk->map_used++] = pcpu_unit_size;
 619
 620        INIT_LIST_HEAD(&chunk->list);
 621        chunk->free_size = pcpu_unit_size;
 622        chunk->contig_hint = pcpu_unit_size;
 623
 624        return chunk;
 625}
 626
 627static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 628{
 629        if (!chunk)
 630                return;
 631        pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
 632        kfree(chunk);
 633}
 634
 635/*
 636 * Chunk management implementation.
 637 *
 638 * To allow different implementations, chunk alloc/free and
 639 * [de]population are implemented in a separate file which is pulled
 640 * into this file and compiled together.  The following functions
 641 * should be implemented.
 642 *
 643 * pcpu_populate_chunk          - populate the specified range of a chunk
 644 * pcpu_depopulate_chunk        - depopulate the specified range of a chunk
 645 * pcpu_create_chunk            - create a new chunk
 646 * pcpu_destroy_chunk           - destroy a chunk, always preceded by full depop
  647 * pcpu_addr_to_page            - translate address to the containing page
 648 * pcpu_verify_alloc_info       - check alloc_info is acceptable during init
 649 */
 650static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
 651static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
 652static struct pcpu_chunk *pcpu_create_chunk(void);
 653static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
 654static struct page *pcpu_addr_to_page(void *addr);
 655static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
 656
 657#ifdef CONFIG_NEED_PER_CPU_KM
 658#include "percpu-km.c"
 659#else
 660#include "percpu-vm.c"
 661#endif
 662
 663/**
 664 * pcpu_chunk_addr_search - determine chunk containing specified address
 665 * @addr: address for which the chunk needs to be determined.
 666 *
 667 * RETURNS:
  668 * Pointer to the chunk containing @addr.
 669 */
 670static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 671{
 672        /* is it in the first chunk? */
 673        if (pcpu_addr_in_first_chunk(addr)) {
 674                /* is it in the reserved area? */
 675                if (pcpu_addr_in_reserved_chunk(addr))
 676                        return pcpu_reserved_chunk;
 677                return pcpu_first_chunk;
 678        }
 679
 680        /*
 681         * The address is relative to unit0 which might be unused and
 682         * thus unmapped.  Offset the address to the unit space of the
 683         * current processor before looking it up in the vmalloc
 684         * space.  Note that any possible cpu id can be used here, so
 685         * there's no need to worry about preemption or cpu hotplug.
 686         */
 687        addr += pcpu_unit_offsets[raw_smp_processor_id()];
 688        return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
 689}
 690
 691/**
 692 * pcpu_alloc - the percpu allocator
 693 * @size: size of area to allocate in bytes
 694 * @align: alignment of area (max PAGE_SIZE)
 695 * @reserved: allocate from the reserved chunk if available
 696 *
 697 * Allocate percpu area of @size bytes aligned at @align.
 698 *
 699 * CONTEXT:
 700 * Does GFP_KERNEL allocation.
 701 *
 702 * RETURNS:
 703 * Percpu pointer to the allocated area on success, NULL on failure.
 704 */
 705static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 706{
 707        static int warn_limit = 10;
 708        struct pcpu_chunk *chunk;
 709        const char *err;
 710        int slot, off, new_alloc;
 711        unsigned long flags;
 712
 713        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 714                WARN(true, "illegal size (%zu) or align (%zu) for "
 715                     "percpu allocation\n", size, align);
 716                return NULL;
 717        }
 718
 719        mutex_lock(&pcpu_alloc_mutex);
 720        spin_lock_irqsave(&pcpu_lock, flags);
 721
 722        /* serve reserved allocations from the reserved chunk if available */
 723        if (reserved && pcpu_reserved_chunk) {
 724                chunk = pcpu_reserved_chunk;
 725
 726                if (size > chunk->contig_hint) {
 727                        err = "alloc from reserved chunk failed";
 728                        goto fail_unlock;
 729                }
 730
 731                while ((new_alloc = pcpu_need_to_extend(chunk))) {
 732                        spin_unlock_irqrestore(&pcpu_lock, flags);
 733                        if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
 734                                err = "failed to extend area map of reserved chunk";
 735                                goto fail_unlock_mutex;
 736                        }
 737                        spin_lock_irqsave(&pcpu_lock, flags);
 738                }
 739
 740                off = pcpu_alloc_area(chunk, size, align);
 741                if (off >= 0)
 742                        goto area_found;
 743
 744                err = "alloc from reserved chunk failed";
 745                goto fail_unlock;
 746        }
 747
 748restart:
 749        /* search through normal chunks */
 750        for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
 751                list_for_each_entry(chunk, &pcpu_slot[slot], list) {
 752                        if (size > chunk->contig_hint)
 753                                continue;
 754
 755                        new_alloc = pcpu_need_to_extend(chunk);
 756                        if (new_alloc) {
 757                                spin_unlock_irqrestore(&pcpu_lock, flags);
 758                                if (pcpu_extend_area_map(chunk,
 759                                                         new_alloc) < 0) {
 760                                        err = "failed to extend area map";
 761                                        goto fail_unlock_mutex;
 762                                }
 763                                spin_lock_irqsave(&pcpu_lock, flags);
 764                                /*
 765                                 * pcpu_lock has been dropped, need to
 766                                 * restart cpu_slot list walking.
 767                                 */
 768                                goto restart;
 769                        }
 770
 771                        off = pcpu_alloc_area(chunk, size, align);
 772                        if (off >= 0)
 773                                goto area_found;
 774                }
 775        }
 776
 777        /* hmmm... no space left, create a new chunk */
 778        spin_unlock_irqrestore(&pcpu_lock, flags);
 779
 780        chunk = pcpu_create_chunk();
 781        if (!chunk) {
 782                err = "failed to allocate new chunk";
 783                goto fail_unlock_mutex;
 784        }
 785
 786        spin_lock_irqsave(&pcpu_lock, flags);
 787        pcpu_chunk_relocate(chunk, -1);
 788        goto restart;
 789
 790area_found:
 791        spin_unlock_irqrestore(&pcpu_lock, flags);
 792
 793        /* populate, map and clear the area */
 794        if (pcpu_populate_chunk(chunk, off, size)) {
 795                spin_lock_irqsave(&pcpu_lock, flags);
 796                pcpu_free_area(chunk, off);
 797                err = "failed to populate";
 798                goto fail_unlock;
 799        }
 800
 801        mutex_unlock(&pcpu_alloc_mutex);
 802
 803        /* return address relative to base address */
 804        return __addr_to_pcpu_ptr(chunk->base_addr + off);
 805
 806fail_unlock:
 807        spin_unlock_irqrestore(&pcpu_lock, flags);
 808fail_unlock_mutex:
 809        mutex_unlock(&pcpu_alloc_mutex);
 810        if (warn_limit) {
 811                pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
 812                           "%s\n", size, align, err);
 813                dump_stack();
 814                if (!--warn_limit)
 815                        pr_info("PERCPU: limit reached, disable warning\n");
 816        }
 817        return NULL;
 818}
 819
 820/**
 821 * __alloc_percpu - allocate dynamic percpu area
 822 * @size: size of area to allocate in bytes
 823 * @align: alignment of area (max PAGE_SIZE)
 824 *
 825 * Allocate percpu area of @size bytes aligned at @align.  Might
 826 * sleep.  Might trigger writeouts.
 827 *
 828 * CONTEXT:
 829 * Does GFP_KERNEL allocation.
 830 *
 831 * RETURNS:
 832 * Percpu pointer to the allocated area on success, NULL on failure.
 833 */
 834void __percpu *__alloc_percpu(size_t size, size_t align)
 835{
 836        return pcpu_alloc(size, align, false);
 837}
 838EXPORT_SYMBOL_GPL(__alloc_percpu);
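/*
 * Typical usage from elsewhere in the kernel (an illustrative sketch,
 * not part of this allocator): allocate a percpu counter, bump the
 * local CPU's copy with preemption disabled via get_cpu()/put_cpu(),
 * then sum it over all possible CPUs before freeing.
 *
 *	unsigned long __percpu *cnt;
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	cnt = alloc_percpu(unsigned long);
 *	if (!cnt)
 *		return -ENOMEM;
 *
 *	(*per_cpu_ptr(cnt, get_cpu()))++;
 *	put_cpu();
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(cnt, cpu);
 *
 *	free_percpu(cnt);
 */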
 839
 840/**
 841 * __alloc_reserved_percpu - allocate reserved percpu area
 842 * @size: size of area to allocate in bytes
 843 * @align: alignment of area (max PAGE_SIZE)
 844 *
 845 * Allocate percpu area of @size bytes aligned at @align from reserved
 846 * percpu area if arch has set it up; otherwise, allocation is served
 847 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 848 *
 849 * CONTEXT:
 850 * Does GFP_KERNEL allocation.
 851 *
 852 * RETURNS:
 853 * Percpu pointer to the allocated area on success, NULL on failure.
 854 */
 855void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 856{
 857        return pcpu_alloc(size, align, true);
 858}
 859
 860/**
 861 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 862 * @work: unused
 863 *
 864 * Reclaim all fully free chunks except for the first one.
 865 *
 866 * CONTEXT:
 867 * workqueue context.
 868 */
 869static void pcpu_reclaim(struct work_struct *work)
 870{
 871        LIST_HEAD(todo);
 872        struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
 873        struct pcpu_chunk *chunk, *next;
 874
 875        mutex_lock(&pcpu_alloc_mutex);
 876        spin_lock_irq(&pcpu_lock);
 877
 878        list_for_each_entry_safe(chunk, next, head, list) {
 879                WARN_ON(chunk->immutable);
 880
 881                /* spare the first one */
 882                if (chunk == list_first_entry(head, struct pcpu_chunk, list))
 883                        continue;
 884
 885                list_move(&chunk->list, &todo);
 886        }
 887
 888        spin_unlock_irq(&pcpu_lock);
 889
 890        list_for_each_entry_safe(chunk, next, &todo, list) {
 891                pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
 892                pcpu_destroy_chunk(chunk);
 893        }
 894
 895        mutex_unlock(&pcpu_alloc_mutex);
 896}
 897
 898/**
 899 * free_percpu - free percpu area
 900 * @ptr: pointer to area to free
 901 *
 902 * Free percpu area @ptr.
 903 *
 904 * CONTEXT:
 905 * Can be called from atomic context.
 906 */
 907void free_percpu(void __percpu *ptr)
 908{
 909        void *addr;
 910        struct pcpu_chunk *chunk;
 911        unsigned long flags;
 912        int off;
 913
 914        if (!ptr)
 915                return;
 916
 917        addr = __pcpu_ptr_to_addr(ptr);
 918
 919        spin_lock_irqsave(&pcpu_lock, flags);
 920
 921        chunk = pcpu_chunk_addr_search(addr);
 922        off = addr - chunk->base_addr;
 923
 924        pcpu_free_area(chunk, off);
 925
  926        /* if there is more than one fully free chunk, wake up the grim reaper */
 927        if (chunk->free_size == pcpu_unit_size) {
 928                struct pcpu_chunk *pos;
 929
 930                list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 931                        if (pos != chunk) {
 932                                schedule_work(&pcpu_reclaim_work);
 933                                break;
 934                        }
 935        }
 936
 937        spin_unlock_irqrestore(&pcpu_lock, flags);
 938}
 939EXPORT_SYMBOL_GPL(free_percpu);
 940
 941/**
 942 * is_kernel_percpu_address - test whether address is from static percpu area
 943 * @addr: address to test
 944 *
 945 * Test whether @addr belongs to in-kernel static percpu area.  Module
 946 * static percpu areas are not considered.  For those, use
 947 * is_module_percpu_address().
 948 *
 949 * RETURNS:
 950 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 951 */
 952bool is_kernel_percpu_address(unsigned long addr)
 953{
 954        const size_t static_size = __per_cpu_end - __per_cpu_start;
 955        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
 956        unsigned int cpu;
 957
 958        for_each_possible_cpu(cpu) {
 959                void *start = per_cpu_ptr(base, cpu);
 960
 961                if ((void *)addr >= start && (void *)addr < start + static_size)
 962                        return true;
 963        }
 964        return false;
 965}
 966
 967/**
 968 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 969 * @addr: the address to be converted to physical address
 970 *
  971 * Given @addr, which is a dereferenceable address obtained via one of
  972 * the percpu access macros, this function translates it into its physical
 973 * address.  The caller is responsible for ensuring @addr stays valid
 974 * until this function finishes.
 975 *
 976 * RETURNS:
 977 * The physical address for @addr.
 978 */
 979phys_addr_t per_cpu_ptr_to_phys(void *addr)
 980{
 981        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
 982        bool in_first_chunk = false;
 983        unsigned long first_start, first_end;
 984        unsigned int cpu;
 985
 986        /*
 987         * The following test on first_start/end isn't strictly
 988         * necessary but will speed up lookups of addresses which
 989         * aren't in the first chunk.
 990         */
 991        first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
 992        first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
 993                                    pcpu_unit_pages);
 994        if ((unsigned long)addr >= first_start &&
 995            (unsigned long)addr < first_end) {
 996                for_each_possible_cpu(cpu) {
 997                        void *start = per_cpu_ptr(base, cpu);
 998
 999                        if (addr >= start && addr < start + pcpu_unit_size) {
1000                                in_first_chunk = true;
1001                                break;
1002                        }
1003                }
1004        }
1005
1006        if (in_first_chunk) {
1007                if ((unsigned long)addr < VMALLOC_START ||
1008                    (unsigned long)addr >= VMALLOC_END)
1009                        return __pa(addr);
1010                else
1011                        return page_to_phys(vmalloc_to_page(addr));
1012        } else
1013                return page_to_phys(pcpu_addr_to_page(addr));
1014}
1015
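/*
 * Compute the page-aligned size of the first chunk from the static,
 * reserved and dynamic sizes.  A negative *dyn_sizep means "auto";
 * unless the dynamic size was explicitly 0, *dyn_sizep is updated so
 * that the dynamic area absorbs the padding added by page alignment.
 */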
1016static inline size_t pcpu_calc_fc_sizes(size_t static_size,
1017                                        size_t reserved_size,
1018                                        ssize_t *dyn_sizep)
1019{
1020        size_t size_sum;
1021
1022        size_sum = PFN_ALIGN(static_size + reserved_size +
1023                             (*dyn_sizep >= 0 ? *dyn_sizep : 0));
1024        if (*dyn_sizep != 0)
1025                *dyn_sizep = size_sum - static_size - reserved_size;
1026
1027        return size_sum;
1028}
1029
1030/**
1031 * pcpu_alloc_alloc_info - allocate percpu allocation info
1032 * @nr_groups: the number of groups
1033 * @nr_units: the number of units
1034 *
1035 * Allocate ai which is large enough for @nr_groups groups containing
1036 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1037 * cpu_map array which is long enough for @nr_units and filled with
 1038 * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
 1039 * pointers of the other groups.
1040 *
1041 * RETURNS:
1042 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1043 * failure.
1044 */
1045struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1046                                                      int nr_units)
1047{
1048        struct pcpu_alloc_info *ai;
1049        size_t base_size, ai_size;
1050        void *ptr;
1051        int unit;
1052
1053        base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1054                          __alignof__(ai->groups[0].cpu_map[0]));
1055        ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1056
1057        ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1058        if (!ptr)
1059                return NULL;
1060        ai = ptr;
1061        ptr += base_size;
1062
1063        ai->groups[0].cpu_map = ptr;
1064
1065        for (unit = 0; unit < nr_units; unit++)
1066                ai->groups[0].cpu_map[unit] = NR_CPUS;
1067
1068        ai->nr_groups = nr_groups;
1069        ai->__ai_size = PFN_ALIGN(ai_size);
1070
1071        return ai;
1072}
1073
1074/**
1075 * pcpu_free_alloc_info - free percpu allocation info
1076 * @ai: pcpu_alloc_info to free
1077 *
1078 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1079 */
1080void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1081{
1082        free_bootmem(__pa(ai), ai->__ai_size);
1083}
1084
1085/**
1086 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1087 * @reserved_size: the size of reserved percpu area in bytes
1088 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1089 * @atom_size: allocation atom size
1090 * @cpu_distance_fn: callback to determine distance between cpus, optional
1091 *
1092 * This function determines grouping of units, their mappings to cpus
1093 * and other parameters considering needed percpu size, allocation
1094 * atom size and distances between CPUs.
1095 *
 1096 * Groups are always multiples of atom size and CPUs which are of
 1097 * LOCAL_DISTANCE both ways are grouped together and share space for
 1098 * units in the same group.  The returned configuration is guaranteed
 1099 * to have CPUs on different nodes in different groups and >=75% usage
1100 * of allocated virtual address space.
1101 *
1102 * RETURNS:
 1103 * On success, a pointer to the new pcpu_alloc_info is returned.  On
1104 * failure, ERR_PTR value is returned.
1105 */
1106struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1107                                size_t reserved_size, ssize_t dyn_size,
1108                                size_t atom_size,
1109                                pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1110{
1111        static int group_map[NR_CPUS] __initdata;
1112        static int group_cnt[NR_CPUS] __initdata;
1113        const size_t static_size = __per_cpu_end - __per_cpu_start;
1114        int nr_groups = 1, nr_units = 0;
1115        size_t size_sum, min_unit_size, alloc_size;
1116        int upa, max_upa, uninitialized_var(best_upa);  /* units_per_alloc */
1117        int last_allocs, group, unit;
1118        unsigned int cpu, tcpu;
1119        struct pcpu_alloc_info *ai;
1120        unsigned int *cpu_map;
1121
1122        /* this function may be called multiple times */
1123        memset(group_map, 0, sizeof(group_map));
1124        memset(group_cnt, 0, sizeof(group_cnt));
1125
1126        /*
1127         * Determine min_unit_size, alloc_size and max_upa such that
1128         * alloc_size is multiple of atom_size and is the smallest
 1129 * which can accommodate 4k aligned segments which are equal to
1130         * or larger than min_unit_size.
1131         */
1132        size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
1133        min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1134
1135        alloc_size = roundup(min_unit_size, atom_size);
1136        upa = alloc_size / min_unit_size;
1137        while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1138                upa--;
1139        max_upa = upa;
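        /*
         * For example (hypothetical numbers), a 44k size_sum and a 2M
         * atom_size give alloc_size == 2M and an initial upa of
         * 2M / 44k == 46; the loop above lowers it to 32, the first
         * value which divides 2M evenly and leaves a page aligned 64k
         * unit, so max_upa ends up 32.
         */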
1140
1141        /* group cpus according to their proximity */
1142        for_each_possible_cpu(cpu) {
1143                group = 0;
1144        next_group:
1145                for_each_possible_cpu(tcpu) {
1146                        if (cpu == tcpu)
1147                                break;
1148                        if (group_map[tcpu] == group && cpu_distance_fn &&
1149                            (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1150                             cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1151                                group++;
1152                                nr_groups = max(nr_groups, group + 1);
1153                                goto next_group;
1154                        }
1155                }
1156                group_map[cpu] = group;
1157                group_cnt[group]++;
1158        }
1159
1160        /*
1161         * Expand unit size until address space usage goes over 75%
1162         * and then as much as possible without using more address
1163         * space.
1164         */
1165        last_allocs = INT_MAX;
1166        for (upa = max_upa; upa; upa--) {
1167                int allocs = 0, wasted = 0;
1168
1169                if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1170                        continue;
1171
1172                for (group = 0; group < nr_groups; group++) {
1173                        int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1174                        allocs += this_allocs;
1175                        wasted += this_allocs * upa - group_cnt[group];
1176                }
1177
1178                /*
 1179                 * Don't accept if wastage is over 1/3.  The
1180                 * greater-than comparison ensures upa==1 always
1181                 * passes the following check.
1182                 */
1183                if (wasted > num_possible_cpus() / 3)
1184                        continue;
1185
1186                /* and then don't consume more memory */
1187                if (allocs > last_allocs)
1188                        break;
1189                last_allocs = allocs;
1190                best_upa = upa;
1191        }
1192        upa = best_upa;
1193
1194        /* allocate and fill alloc_info */
1195        for (group = 0; group < nr_groups; group++)
1196                nr_units += roundup(group_cnt[group], upa);
1197
1198        ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1199        if (!ai)
1200                return ERR_PTR(-ENOMEM);
1201        cpu_map = ai->groups[0].cpu_map;
1202
1203        for (group = 0; group < nr_groups; group++) {
1204                ai->groups[group].cpu_map = cpu_map;
1205                cpu_map += roundup(group_cnt[group], upa);
1206        }
1207
1208        ai->static_size = static_size;
1209        ai->reserved_size = reserved_size;
1210        ai->dyn_size = dyn_size;
1211        ai->unit_size = alloc_size / upa;
1212        ai->atom_size = atom_size;
1213        ai->alloc_size = alloc_size;
1214
1215        for (group = 0, unit = 0; group_cnt[group]; group++) {
1216                struct pcpu_group_info *gi = &ai->groups[group];
1217
1218                /*
1219                 * Initialize base_offset as if all groups are located
1220                 * back-to-back.  The caller should update this to
1221                 * reflect actual allocation.
1222                 */
1223                gi->base_offset = unit * ai->unit_size;
1224
1225                for_each_possible_cpu(cpu)
1226                        if (group_map[cpu] == group)
1227                                gi->cpu_map[gi->nr_units++] = cpu;
1228                gi->nr_units = roundup(gi->nr_units, upa);
1229                unit += gi->nr_units;
1230        }
1231        BUG_ON(unit != nr_units);
1232
1233        return ai;
1234}
1235
1236/**
1237 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1238 * @lvl: loglevel
1239 * @ai: allocation info to dump
1240 *
1241 * Print out information about @ai using loglevel @lvl.
1242 */
1243static void pcpu_dump_alloc_info(const char *lvl,
1244                                 const struct pcpu_alloc_info *ai)
1245{
1246        int group_width = 1, cpu_width = 1, width;
1247        char empty_str[] = "--------";
1248        int alloc = 0, alloc_end = 0;
1249        int group, v;
1250        int upa, apl;   /* units per alloc, allocs per line */
1251
1252        v = ai->nr_groups;
1253        while (v /= 10)
1254                group_width++;
1255
1256        v = num_possible_cpus();
1257        while (v /= 10)
1258                cpu_width++;
1259        empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1260
1261        upa = ai->alloc_size / ai->unit_size;
1262        width = upa * (cpu_width + 1) + group_width + 3;
1263        apl = rounddown_pow_of_two(max(60 / width, 1));
1264
1265        printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1266               lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1267               ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1268
1269        for (group = 0; group < ai->nr_groups; group++) {
1270                const struct pcpu_group_info *gi = &ai->groups[group];
1271                int unit = 0, unit_end = 0;
1272
1273                BUG_ON(gi->nr_units % upa);
1274                for (alloc_end += gi->nr_units / upa;
1275                     alloc < alloc_end; alloc++) {
1276                        if (!(alloc % apl)) {
1277                                printk("\n");
1278                                printk("%spcpu-alloc: ", lvl);
1279                        }
1280                        printk("[%0*d] ", group_width, group);
1281
1282                        for (unit_end += upa; unit < unit_end; unit++)
1283                                if (gi->cpu_map[unit] != NR_CPUS)
1284                                        printk("%0*d ", cpu_width,
1285                                               gi->cpu_map[unit]);
1286                                else
1287                                        printk("%s ", empty_str);
1288                }
1289        }
1290        printk("\n");
1291}
1292
1293/**
1294 * pcpu_setup_first_chunk - initialize the first percpu chunk
 1295 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1296 * @base_addr: mapped address
1297 *
1298 * Initialize the first percpu chunk which contains the kernel static
 1299 * percpu area.  This function is to be called from the arch percpu area
1300 * setup path.
1301 *
1302 * @ai contains all information necessary to initialize the first
1303 * chunk and prime the dynamic percpu allocator.
1304 *
1305 * @ai->static_size is the size of static percpu area.
1306 *
1307 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 1308 * reserve after the static area in the first chunk.  This reserves
 1309 * part of the first chunk so that it's available only through reserved
 1310 * percpu allocation.  This is primarily used to serve module percpu
1311 * static areas on architectures where the addressing model has
1312 * limited offset range for symbol relocations to guarantee module
1313 * percpu symbols fall inside the relocatable range.
1314 *
1315 * @ai->dyn_size determines the number of bytes available for dynamic
1316 * allocation in the first chunk.  The area between @ai->static_size +
1317 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1318 *
1319 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1320 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1321 * @ai->dyn_size.
1322 *
1323 * @ai->atom_size is the allocation atom size and used as alignment
1324 * for vm areas.
1325 *
1326 * @ai->alloc_size is the allocation size and always multiple of
1327 * @ai->atom_size.  This is larger than @ai->atom_size if
1328 * @ai->unit_size is larger than @ai->atom_size.
1329 *
1330 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1331 * percpu areas.  Units which should be colocated are put into the
1332 * same group.  Dynamic VM areas will be allocated according to these
1333 * groupings.  If @ai->nr_groups is zero, a single group containing
1334 * all units is assumed.
1335 *
1336 * The caller should have mapped the first chunk at @base_addr and
1337 * copied static data to each unit.
1338 *
1339 * If the first chunk ends up with both reserved and dynamic areas, it
1340 * is served by two chunks - one to serve the core static and reserved
1341 * areas and the other for the dynamic area.  They share the same vm
 1342 * and page map but use different area allocation maps to stay away
 1343 * from each other.  The latter chunk is circulated in the chunk slots
 1344 * and is available for dynamic allocation like any other chunk.
1345 *
1346 * RETURNS:
1347 * 0 on success, -errno on failure.
1348 */
1349int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1350                                  void *base_addr)
1351{
1352        static char cpus_buf[4096] __initdata;
1353        static int smap[2], dmap[2];
1354        size_t dyn_size = ai->dyn_size;
1355        size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1356        struct pcpu_chunk *schunk, *dchunk = NULL;
1357        unsigned long *group_offsets;
1358        size_t *group_sizes;
1359        unsigned long *unit_off;
1360        unsigned int cpu;
1361        int *unit_map;
1362        int group, unit, i;
1363
1364        cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1365
1366#define PCPU_SETUP_BUG_ON(cond) do {                                    \
1367        if (unlikely(cond)) {                                           \
1368                pr_emerg("PERCPU: failed to initialize, %s", #cond);    \
1369                pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);   \
1370                pcpu_dump_alloc_info(KERN_EMERG, ai);                   \
1371                BUG();                                                  \
1372        }                                                               \
1373} while (0)
1374
1375        /* sanity checks */
1376        BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1377                     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
1378        PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1379        PCPU_SETUP_BUG_ON(!ai->static_size);
1380        PCPU_SETUP_BUG_ON(!base_addr);
1381        PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1382        PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1383        PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1384        PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1385
1386        /* process group information and build config tables accordingly */
1387        group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1388        group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1389        unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1390        unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1391
1392        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1393                unit_map[cpu] = UINT_MAX;
1394        pcpu_first_unit_cpu = NR_CPUS;
1395
1396        for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1397                const struct pcpu_group_info *gi = &ai->groups[group];
1398
1399                group_offsets[group] = gi->base_offset;
1400                group_sizes[group] = gi->nr_units * ai->unit_size;
1401
1402                for (i = 0; i < gi->nr_units; i++) {
1403                        cpu = gi->cpu_map[i];
1404                        if (cpu == NR_CPUS)
1405                                continue;
1406
1407                        PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1408                        PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1409                        PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1410
1411                        unit_map[cpu] = unit + i;
1412                        unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1413
1414                        if (pcpu_first_unit_cpu == NR_CPUS)
1415                                pcpu_first_unit_cpu = cpu;
1416                }
1417        }
1418        pcpu_last_unit_cpu = cpu;
1419        pcpu_nr_units = unit;
1420
1421        for_each_possible_cpu(cpu)
1422                PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1423
1424        /* we're done parsing the input, undefine BUG macro and dump config */
1425#undef PCPU_SETUP_BUG_ON
1426        pcpu_dump_alloc_info(KERN_INFO, ai);
1427
1428        pcpu_nr_groups = ai->nr_groups;
1429        pcpu_group_offsets = group_offsets;
1430        pcpu_group_sizes = group_sizes;
1431        pcpu_unit_map = unit_map;
1432        pcpu_unit_offsets = unit_off;
1433
1434        /* determine basic parameters */
1435        pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1436        pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1437        pcpu_atom_size = ai->atom_size;
1438        pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1439                BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
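        /*
         * Size sketch (hypothetical config): a 64KB unit with 4KB pages
         * gives pcpu_unit_pages = 16, so BITS_TO_LONGS(16) = 1 on a
         * 64-bit build and each chunk struct carries one trailing long
         * for the populated[] page bitmap filled in below.
         */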
1440
1441        /*
1442         * Allocate chunk slots.  The additional last slot is for
1443         * empty chunks.
1444         */
1445        pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1446        pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1447        for (i = 0; i < pcpu_nr_slots; i++)
1448                INIT_LIST_HEAD(&pcpu_slot[i]);
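        /*
         * Rough slot-count sketch, assuming the fls()-based
         * __pcpu_size_to_slot() earlier in this file: with a 64KB unit,
         * fls(0x10000) = 17 and PCPU_SLOT_BASE_SHIFT = 5 give slot 14,
         * so pcpu_nr_slots would be 16 list heads here.
         */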
1449
1450        /*
1451         * Initialize static chunk.  If reserved_size is zero, the
1452         * static chunk covers static area + dynamic allocation area
1453         * in the first chunk.  If reserved_size is not zero, it
1454         * covers static area + reserved area (mostly used for module
1455         * static percpu allocation).
1456         */
1457        schunk = alloc_bootmem(pcpu_chunk_struct_size);
1458        INIT_LIST_HEAD(&schunk->list);
1459        schunk->base_addr = base_addr;
1460        schunk->map = smap;
1461        schunk->map_alloc = ARRAY_SIZE(smap);
1462        schunk->immutable = true;
1463        bitmap_fill(schunk->populated, pcpu_unit_pages);
1464
1465        if (ai->reserved_size) {
1466                schunk->free_size = ai->reserved_size;
1467                pcpu_reserved_chunk = schunk;
1468                pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1469        } else {
1470                schunk->free_size = dyn_size;
1471                dyn_size = 0;                   /* dynamic area covered */
1472        }
1473        schunk->contig_hint = schunk->free_size;
1474
1475        schunk->map[schunk->map_used++] = -ai->static_size;
1476        if (schunk->free_size)
1477                schunk->map[schunk->map_used++] = schunk->free_size;
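        /*
         * Sketch of the resulting static chunk map (hypothetical sizes):
         * with static_size = 32KB and reserved_size = 8KB, the map above
         * becomes { -32768, 8192 }: the static area is recorded as
         * allocated, the reserved area as free, and this chunk becomes
         * the reserved chunk used for module static percpu allocations.
         */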
1478
1479        /* init dynamic chunk if necessary */
1480        if (dyn_size) {
1481                dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1482                INIT_LIST_HEAD(&dchunk->list);
1483                dchunk->base_addr = base_addr;
1484                dchunk->map = dmap;
1485                dchunk->map_alloc = ARRAY_SIZE(dmap);
1486                dchunk->immutable = true;
1487                bitmap_fill(dchunk->populated, pcpu_unit_pages);
1488
1489                dchunk->contig_hint = dchunk->free_size = dyn_size;
1490                dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1491                dchunk->map[dchunk->map_used++] = dchunk->free_size;
1492        }
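        /*
         * Continuing the hypothetical sizes above: with 32KB static,
         * 8KB reserved and 20KB dynamic, pcpu_reserved_chunk_limit is
         * 40960 and dchunk->map becomes { -40960, 20480 }: everything
         * below the reserved limit is marked allocated and the dynamic
         * area is the free tail of the first chunk.
         */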
1493
1494        /* link the first chunk in */
1495        pcpu_first_chunk = dchunk ?: schunk;
1496        pcpu_chunk_relocate(pcpu_first_chunk, -1);
1497
1498        /* we're done */
1499        pcpu_base_addr = base_addr;
1500        return 0;
1501}
1502
1503const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1504        [PCPU_FC_AUTO]  = "auto",
1505        [PCPU_FC_EMBED] = "embed",
1506        [PCPU_FC_PAGE]  = "page",
1507};
1508
1509enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1510
1511static int __init percpu_alloc_setup(char *str)
1512{
1513        if (0)
1514                /* nada */;
1515#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1516        else if (!strcmp(str, "embed"))
1517                pcpu_chosen_fc = PCPU_FC_EMBED;
1518#endif
1519#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1520        else if (!strcmp(str, "page"))
1521                pcpu_chosen_fc = PCPU_FC_PAGE;
1522#endif
1523        else
1524                pr_warning("PERCPU: unknown allocator %s specified\n", str);
1525
1526        return 0;
1527}
1528early_param("percpu_alloc", percpu_alloc_setup);
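/*
 * Usage note (sketch): the first chunk allocator can be overridden on
 * the kernel command line, e.g. "percpu_alloc=page" or
 * "percpu_alloc=embed", provided the corresponding
 * CONFIG_NEED_PER_CPU_*_FIRST_CHUNK option is enabled; anything else
 * falls through to the warning above and the default stays PCPU_FC_AUTO.
 */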
1529
1530#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1531        !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1532/**
1533 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1534 * @reserved_size: the size of reserved percpu area in bytes
1535 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1536 * @atom_size: allocation atom size
1537 * @cpu_distance_fn: callback to determine distance between cpus, optional
1538 * @alloc_fn: function to allocate percpu page
1539 * @free_fn: function to free percpu page
1540 *
1541 * This is a helper to ease setting up an embedded first percpu chunk and
1542 * can be called where pcpu_setup_first_chunk() is expected.
1543 *
1544 * If this function is used to set up the first chunk, it is allocated
1545 * by calling @alloc_fn and used as-is without being mapped into
1546 * vmalloc area.  Allocations are always whole multiples of @atom_size
1547 * aligned to @atom_size.
1548 *
1549 * This enables the first chunk to piggyback on the linear physical
1550 * mapping which often uses a larger page size.  Please note that this
1551 * can result in a very sparse cpu->unit mapping on NUMA machines, thus
1552 * requiring a large vmalloc address space.  Don't use this allocator if
1553 * vmalloc space is not orders of magnitude larger than distances
1554 * between node memory addresses (i.e. 32-bit NUMA machines).
1555 *
1556 * When @dyn_size is positive, dynamic area might be larger than
1557 * specified to fill page alignment.  When @dyn_size is auto,
1558 * @dyn_size is just big enough to fill page alignment after static
1559 * and reserved areas.
1560 *
1561 * If the needed size is smaller than the minimum or specified unit
1562 * size, the leftover is returned using @free_fn.
1563 *
1564 * RETURNS:
1565 * 0 on success, -errno on failure.
1566 */
1567int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
1568                                  size_t atom_size,
1569                                  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1570                                  pcpu_fc_alloc_fn_t alloc_fn,
1571                                  pcpu_fc_free_fn_t free_fn)
1572{
1573        void *base = (void *)ULONG_MAX;
1574        void **areas = NULL;
1575        struct pcpu_alloc_info *ai;
1576        size_t size_sum, areas_size, max_distance;
1577        int group, i, rc;
1578
1579        ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1580                                   cpu_distance_fn);
1581        if (IS_ERR(ai))
1582                return PTR_ERR(ai);
1583
1584        size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1585        areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1586
1587        areas = alloc_bootmem_nopanic(areas_size);
1588        if (!areas) {
1589                rc = -ENOMEM;
1590                goto out_free;
1591        }
1592
1593        /* allocate, copy and determine base address */
1594        for (group = 0; group < ai->nr_groups; group++) {
1595                struct pcpu_group_info *gi = &ai->groups[group];
1596                unsigned int cpu = NR_CPUS;
1597                void *ptr;
1598
1599                for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1600                        cpu = gi->cpu_map[i];
1601                BUG_ON(cpu == NR_CPUS);
1602
1603                /* allocate space for the whole group */
1604                ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1605                if (!ptr) {
1606                        rc = -ENOMEM;
1607                        goto out_free_areas;
1608                }
1609                areas[group] = ptr;
1610
1611                base = min(ptr, base);
1612
1613                for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1614                        if (gi->cpu_map[i] == NR_CPUS) {
1615                                /* unused unit, free whole */
1616                                free_fn(ptr, ai->unit_size);
1617                                continue;
1618                        }
1619                        /* copy and return the unused part */
1620                        memcpy(ptr, __per_cpu_load, ai->static_size);
1621                        free_fn(ptr + size_sum, ai->unit_size - size_sum);
1622                }
1623        }
1624
1625        /* base address is now known, determine group base offsets */
1626        max_distance = 0;
1627        for (group = 0; group < ai->nr_groups; group++) {
1628                ai->groups[group].base_offset = areas[group] - base;
1629                max_distance = max_t(size_t, max_distance,
1630                                     ai->groups[group].base_offset);
1631        }
1632        max_distance += ai->unit_size;
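        /*
         * Worked example with made-up numbers: if group 0 was allocated
         * at base and group 1 at base + 0x4000000 (64MB), and unit_size
         * is 0x8000, then max_distance = 0x4008000.  The check below
         * compares that span against 75% of the vmalloc range.
         */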
1633
1634        /* warn if maximum distance is further than 75% of vmalloc space */
1635        if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1636                pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1637                           "space 0x%lx\n",
1638                           max_distance, VMALLOC_END - VMALLOC_START);
1639#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1640                /* and fail if we have fallback */
1641                rc = -EINVAL;
1642                goto out_free;
1643#endif
1644        }
1645
1646        pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1647                PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1648                ai->dyn_size, ai->unit_size);
1649
1650        rc = pcpu_setup_first_chunk(ai, base);
1651        goto out_free;
1652
1653out_free_areas:
1654        for (group = 0; group < ai->nr_groups; group++)
1655                free_fn(areas[group],
1656                        ai->groups[group].nr_units * ai->unit_size);
1657out_free:
1658        pcpu_free_alloc_info(ai);
1659        if (areas)
1660                free_bootmem(__pa(areas), areas_size);
1661        return rc;
1662}
1663#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1664          !CONFIG_HAVE_SETUP_PER_CPU_AREA */
1665
1666#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1667/**
1668 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1669 * @reserved_size: the size of reserved percpu area in bytes
1670 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1671 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1672 * @populate_pte_fn: function to populate pte
1673 *
1674 * This is a helper to ease setting up a page-remapped first percpu
1675 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1676 *
1677 * This is the basic allocator.  The static percpu area is allocated
1678 * page-by-page into the vmalloc area.
1679 *
1680 * RETURNS:
1681 * 0 on success, -errno on failure.
1682 */
1683int __init pcpu_page_first_chunk(size_t reserved_size,
1684                                 pcpu_fc_alloc_fn_t alloc_fn,
1685                                 pcpu_fc_free_fn_t free_fn,
1686                                 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1687{
1688        static struct vm_struct vm;
1689        struct pcpu_alloc_info *ai;
1690        char psize_str[16];
1691        int unit_pages;
1692        size_t pages_size;
1693        struct page **pages;
1694        int unit, i, j, rc;
1695
1696        snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1697
1698        ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
1699        if (IS_ERR(ai))
1700                return PTR_ERR(ai);
1701        BUG_ON(ai->nr_groups != 1);
1702        BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1703
1704        unit_pages = ai->unit_size >> PAGE_SHIFT;
1705
1706        /* unaligned allocations can't be freed, round up to page size */
1707        pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1708                               sizeof(pages[0]));
1709        pages = alloc_bootmem(pages_size);
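        /*
         * Size sketch (hypothetical config): 4 possible cpus with a 32KB
         * unit and 4KB pages means unit_pages = 8, so 32 pointers
         * (8 bytes each on 64-bit) = 256 bytes, which PFN_ALIGN rounds
         * up to one 4KB page for the pages[] array.
         */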
1710
1711        /* allocate pages */
1712        j = 0;
1713        for (unit = 0; unit < num_possible_cpus(); unit++)
1714                for (i = 0; i < unit_pages; i++) {
1715                        unsigned int cpu = ai->groups[0].cpu_map[unit];
1716                        void *ptr;
1717
1718                        ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1719                        if (!ptr) {
1720                                pr_warning("PERCPU: failed to allocate %s page "
1721                                           "for cpu%u\n", psize_str, cpu);
1722                                goto enomem;
1723                        }
1724                        pages[j++] = virt_to_page(ptr);
1725                }
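        /*
         * At this point pages[] is unit-major: pages[unit * unit_pages + i]
         * is the i-th page of the given unit, which is the layout that
         * __pcpu_map_pages() consumes below one unit at a time.
         */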
1726
1727        /* allocate vm area, map the pages and copy static data */
1728        vm.flags = VM_ALLOC;
1729        vm.size = num_possible_cpus() * ai->unit_size;
1730        vm_area_register_early(&vm, PAGE_SIZE);
1731
1732        for (unit = 0; unit < num_possible_cpus(); unit++) {
1733                unsigned long unit_addr =
1734                        (unsigned long)vm.addr + unit * ai->unit_size;
1735
1736                for (i = 0; i < unit_pages; i++)
1737                        populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1738
1739                /* pte already populated, the following shouldn't fail */
1740                rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1741                                      unit_pages);
1742                if (rc < 0)
1743                        panic("failed to map percpu area, err=%d\n", rc);
1744
1745                /*
1746                 * FIXME: Archs with virtual cache should flush local
1747                 * cache for the linear mapping here - something
1748                 * equivalent to flush_cache_vmap() on the local cpu.
1749                 * flush_cache_vmap() can't be used as most supporting
1750                 * data structures are not set up yet.
1751                 */
1752
1753                /* copy static data */
1754                memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1755        }
1756
1757        /* we're ready, commit */
1758        pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1759                unit_pages, psize_str, vm.addr, ai->static_size,
1760                ai->reserved_size, ai->dyn_size);
1761
1762        rc = pcpu_setup_first_chunk(ai, vm.addr);
1763        goto out_free_ar;
1764
1765enomem:
1766        while (--j >= 0)
1767                free_fn(page_address(pages[j]), PAGE_SIZE);
1768        rc = -ENOMEM;
1769out_free_ar:
1770        free_bootmem(__pa(pages), pages_size);
1771        pcpu_free_alloc_info(ai);
1772        return rc;
1773}
1774#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
1775
1776/*
1777 * Generic percpu area setup.
1778 *
1779 * The embedding helper is used because its behavior closely resembles
1780 * the original non-dynamic generic percpu area setup.  This is
1781 * important because many archs have addressing restrictions and might
1782 * fail if the percpu area is located far away from the previous
1783 * location.  As an added bonus, in non-NUMA cases, embedding is
1784 * generally a good idea TLB-wise because the percpu area can piggyback
1785 * on the physical linear memory mapping which uses large page
1786 * mappings on applicable archs.
1787 */
1788#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1789unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1790EXPORT_SYMBOL(__per_cpu_offset);
1791
1792static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1793                                       size_t align)
1794{
1795        return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1796}
1797
1798static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1799{
1800        free_bootmem(__pa(ptr), size);
1801}
1802
1803void __init setup_per_cpu_areas(void)
1804{
1805        unsigned long delta;
1806        unsigned int cpu;
1807        int rc;
1808
1809        /*
1810         * Always reserve area for module percpu variables.  That's
1811         * what the legacy allocator did.
1812         */
1813        rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1814                                    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1815                                    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1816        if (rc < 0)
1817                panic("Failed to initialize percpu areas.");
1818
1819        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1820        for_each_possible_cpu(cpu)
1821                __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1822}
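/*
 * Sketch of how the offsets computed above are consumed (assuming the
 * generic definitions in include/asm-generic/percpu.h): a per-cpu
 * accessor essentially does
 *
 *	ptr_for_cpu = (typeof(ptr))((unsigned long)ptr + __per_cpu_offset[cpu]);
 *
 * so static percpu variables laid out at __per_cpu_start resolve into
 * each cpu's unit inside the first chunk set up above.
 */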
1823#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1824