/* linux/mm/percpu.c */
   1/*
   2 * mm/percpu.c - percpu memory allocator
   3 *
   4 * Copyright (C) 2009           SUSE Linux Products GmbH
   5 * Copyright (C) 2009           Tejun Heo <tj@kernel.org>
   6 *
   7 * This file is released under the GPLv2.
   8 *
   9 * This is percpu allocator which can handle both static and dynamic
  10 * areas.  Percpu areas are allocated in chunks.  Each chunk is
  11 * consisted of boot-time determined number of units and the first
  12 * chunk is used for static percpu variables in the kernel image
  13 * (special boot time alloc/init handling necessary as these areas
  14 * need to be brought up before allocation services are running).
  15 * Unit grows as necessary and all units grow or shrink in unison.
  16 * When a chunk is filled up, another chunk is allocated.
  17 *
  18 *  c0                           c1                         c2
  19 *  -------------------          -------------------        ------------
  20 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
  21 *  -------------------  ......  -------------------  ....  ------------
  22 *
  23 * Allocation is done in offset-size areas of single unit space.  Ie,
  24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
  25 * c1:u1, c1:u2 and c1:u3.  On UMA, units corresponds directly to
  26 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
  27 * Percpu access can be done by configuring percpu base registers
  28 * according to cpu to unit mapping and pcpu_unit_size.
  29 *
  30 * There are usually many small percpu allocations many of them being
  31 * as small as 4 bytes.  The allocator organizes chunks into lists
  32 * according to free size and tries to allocate from the fullest one.
  33 * Each chunk keeps the maximum contiguous area size hint which is
  34 * guaranteed to be equal to or larger than the maximum contiguous
  35 * area in the chunk.  This helps the allocator not to iterate the
  36 * chunk maps unnecessarily.
  37 *
  38 * Allocation state in each chunk is kept using an array of integers
  39 * on chunk->map.  A positive value in the map represents a free
  40 * region and negative allocated.  Allocation inside a chunk is done
  41 * by scanning this map sequentially and serving the first matching
  42 * entry.  This is mostly copied from the percpu_modalloc() allocator.
  43 * Chunks can be determined from the address using the index field
  44 * in the page struct. The index field contains a pointer to the chunk.
  45 *
  46 * To use this allocator, arch code should do the followings.
  47 *
  48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  49 *   regular address to percpu pointer and back if they need to be
  50 *   different from the default
  51 *
  52 * - use pcpu_setup_first_chunk() during percpu area initialization to
  53 *   setup the first chunk containing the kernel static percpu area
  54 */
  55
  56#include <linux/bitmap.h>
  57#include <linux/bootmem.h>
  58#include <linux/err.h>
  59#include <linux/list.h>
  60#include <linux/log2.h>
  61#include <linux/mm.h>
  62#include <linux/module.h>
  63#include <linux/mutex.h>
  64#include <linux/percpu.h>
  65#include <linux/pfn.h>
  66#include <linux/slab.h>
  67#include <linux/spinlock.h>
  68#include <linux/vmalloc.h>
  69#include <linux/workqueue.h>
  70#include <linux/kmemleak.h>
  71
  72#include <asm/cacheflush.h>
  73#include <asm/sections.h>
  74#include <asm/tlbflush.h>
  75#include <asm/io.h>
  76
  77#define PCPU_SLOT_BASE_SHIFT            5       /* 1-31 shares the same slot */
  78#define PCPU_DFL_MAP_ALLOC              16      /* start a map with 16 ents */
  79
  80#ifdef CONFIG_SMP
  81/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
  82#ifndef __addr_to_pcpu_ptr
  83#define __addr_to_pcpu_ptr(addr)                                        \
  84        (void __percpu *)((unsigned long)(addr) -                       \
  85                          (unsigned long)pcpu_base_addr +               \
  86                          (unsigned long)__per_cpu_start)
  87#endif
  88#ifndef __pcpu_ptr_to_addr
  89#define __pcpu_ptr_to_addr(ptr)                                         \
  90        (void __force *)((unsigned long)(ptr) +                         \
  91                         (unsigned long)pcpu_base_addr -                \
  92                         (unsigned long)__per_cpu_start)
  93#endif
  94#else   /* CONFIG_SMP */
  95/* on UP, it's always identity mapped */
  96#define __addr_to_pcpu_ptr(addr)        (void __percpu *)(addr)
  97#define __pcpu_ptr_to_addr(ptr)         (void __force *)(ptr)
  98#endif  /* CONFIG_SMP */
  99
/*
 * Chunk descriptor.  Each chunk covers pcpu_unit_size bytes per unit
 * and carries its own allocation map; see the comment at the top of
 * the file for the map encoding (positive = free, negative = allocated).
 */
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	void			*data;		/* chunk data */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};
 112
 113static int pcpu_unit_pages __read_mostly;
 114static int pcpu_unit_size __read_mostly;
 115static int pcpu_nr_units __read_mostly;
 116static int pcpu_atom_size __read_mostly;
 117static int pcpu_nr_slots __read_mostly;
 118static size_t pcpu_chunk_struct_size __read_mostly;
 119
 120/* cpus with the lowest and highest unit addresses */
 121static unsigned int pcpu_low_unit_cpu __read_mostly;
 122static unsigned int pcpu_high_unit_cpu __read_mostly;
 123
 124/* the address of the first chunk which starts with the kernel static area */
 125void *pcpu_base_addr __read_mostly;
 126EXPORT_SYMBOL_GPL(pcpu_base_addr);
 127
 128static const int *pcpu_unit_map __read_mostly;          /* cpu -> unit */
 129const unsigned long *pcpu_unit_offsets __read_mostly;   /* cpu -> unit offset */
 130
 131/* group information, used for vm allocation */
 132static int pcpu_nr_groups __read_mostly;
 133static const unsigned long *pcpu_group_offsets __read_mostly;
 134static const size_t *pcpu_group_sizes __read_mostly;
 135
 136/*
 137 * The first chunk which always exists.  Note that unlike other
 138 * chunks, this one can be allocated and mapped in several different
 139 * ways and thus often doesn't live in the vmalloc area.
 140 */
 141static struct pcpu_chunk *pcpu_first_chunk;
 142
 143/*
 144 * Optional reserved chunk.  This chunk reserves part of the first
 145 * chunk and serves it for reserved allocations.  The amount of
 146 * reserved offset is in pcpu_reserved_chunk_limit.  When reserved
 147 * area doesn't exist, the following variables contain NULL and 0
 148 * respectively.
 149 */
 150static struct pcpu_chunk *pcpu_reserved_chunk;
 151static int pcpu_reserved_chunk_limit;
 152
 153/*
 154 * Synchronization rules.
 155 *
 156 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 157 * protects allocation/reclaim paths, chunks, populated bitmap and
 158 * vmalloc mapping.  The latter is a spinlock and protects the index
 159 * data structures - chunk slots, chunks and area maps in chunks.
 160 *
 161 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 162 * pcpu_lock is grabbed and released as necessary.  All actual memory
 163 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 164 * general, percpu memory can't be allocated with irq off but
 165 * irqsave/restore are still used in alloc path so that it can be used
 166 * from early init path - sched_init() specifically.
 167 *
 168 * Free path accesses and alters only the index data structures, so it
 169 * can be safely called from atomic context.  When memory needs to be
 170 * returned to the system, free path schedules reclaim_work which
 171 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 172 * reclaimed, release both locks and frees the chunks.  Note that it's
 173 * necessary to grab both locks to remove a chunk from circulation as
 174 * allocation path might be referencing the chunk with only
 175 * pcpu_alloc_mutex locked.
 176 */
 177static DEFINE_MUTEX(pcpu_alloc_mutex);  /* protects whole alloc and reclaim */
 178static DEFINE_SPINLOCK(pcpu_lock);      /* protects index data structures */
 179
 180static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 181
 182/* reclaim work to release fully free chunks, scheduled from free path */
 183static void pcpu_reclaim(struct work_struct *work);
 184static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
 185
 186static bool pcpu_addr_in_first_chunk(void *addr)
 187{
 188        void *first_start = pcpu_first_chunk->base_addr;
 189
 190        return addr >= first_start && addr < first_start + pcpu_unit_size;
 191}
 192
 193static bool pcpu_addr_in_reserved_chunk(void *addr)
 194{
 195        void *first_start = pcpu_first_chunk->base_addr;
 196
 197        return addr >= first_start &&
 198                addr < first_start + pcpu_reserved_chunk_limit;
 199}
 200
 201static int __pcpu_size_to_slot(int size)
 202{
 203        int highbit = fls(size);        /* size is in bytes */
 204        return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 205}
 206
 207static int pcpu_size_to_slot(int size)
 208{
 209        if (size == pcpu_unit_size)
 210                return pcpu_nr_slots - 1;
 211        return __pcpu_size_to_slot(size);
 212}
 213
 214static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 215{
 216        if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
 217                return 0;
 218
 219        return pcpu_size_to_slot(chunk->free_size);
 220}
 221
 222/* set the pointer to a chunk in a page struct */
 223static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 224{
 225        page->index = (unsigned long)pcpu;
 226}
 227
 228/* obtain pointer to a chunk from a page struct */
 229static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 230{
 231        return (struct pcpu_chunk *)page->index;
 232}
 233
 234static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 235{
 236        return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 237}
 238
 239static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 240                                     unsigned int cpu, int page_idx)
 241{
 242        return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
 243                (page_idx << PAGE_SHIFT);
 244}
 245
/*
 * Find the next unpopulated page run in @chunk.  *@rs is advanced to
 * the first unpopulated page at or after *@rs and *@re to the end
 * (exclusive) of that run, both bounded by @end.
 */
static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}
 252
/*
 * Find the next populated page run in @chunk.  *@rs is advanced to
 * the first populated page at or after *@rs and *@re to the end
 * (exclusive) of that run, both bounded by @end.
 */
static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}
 259
/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.  Each iteration covers the half-open page range
 * [rs, re).
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
 275
 276/**
 277 * pcpu_mem_zalloc - allocate memory
 278 * @size: bytes to allocate
 279 *
 280 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 281 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 282 * memory is always zeroed.
 283 *
 284 * CONTEXT:
 285 * Does GFP_KERNEL allocation.
 286 *
 287 * RETURNS:
 288 * Pointer to the allocated area on success, NULL on failure.
 289 */
 290static void *pcpu_mem_zalloc(size_t size)
 291{
 292        if (WARN_ON_ONCE(!slab_is_available()))
 293                return NULL;
 294
 295        if (size <= PAGE_SIZE)
 296                return kzalloc(size, GFP_KERNEL);
 297        else
 298                return vzalloc(size);
 299}
 300
 301/**
 302 * pcpu_mem_free - free memory
 303 * @ptr: memory to free
 304 * @size: size of the area
 305 *
 306 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 307 */
 308static void pcpu_mem_free(void *ptr, size_t size)
 309{
 310        if (size <= PAGE_SIZE)
 311                kfree(ptr);
 312        else
 313                vfree(ptr);
 314}
 315
 316/**
 317 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 318 * @chunk: chunk of interest
 319 * @oslot: the previous slot it was on
 320 *
 321 * This function is called after an allocation or free changed @chunk.
 322 * New slot according to the changed state is determined and @chunk is
 323 * moved to the slot.  Note that the reserved chunk is never put on
 324 * chunk slots.
 325 *
 326 * CONTEXT:
 327 * pcpu_lock.
 328 */
 329static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 330{
 331        int nslot = pcpu_chunk_slot(chunk);
 332
 333        if (chunk != pcpu_reserved_chunk && oslot != nslot) {
 334                if (oslot < nslot)
 335                        list_move(&chunk->list, &pcpu_slot[nslot]);
 336                else
 337                        list_move_tail(&chunk->list, &pcpu_slot[nslot]);
 338        }
 339}
 340
 341/**
 342 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 343 * @chunk: chunk of interest
 344 *
 345 * Determine whether area map of @chunk needs to be extended to
 346 * accommodate a new allocation.
 347 *
 348 * CONTEXT:
 349 * pcpu_lock.
 350 *
 351 * RETURNS:
 352 * New target map allocation length if extension is necessary, 0
 353 * otherwise.
 354 */
 355static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 356{
 357        int new_alloc;
 358
 359        if (chunk->map_alloc >= chunk->map_used + 2)
 360                return 0;
 361
 362        new_alloc = PCPU_DFL_MAP_ALLOC;
 363        while (new_alloc < chunk->map_used + 2)
 364                new_alloc *= 2;
 365
 366        return new_alloc;
 367}
 368
/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	/* allocate the replacement map outside of pcpu_lock */
	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	/*
	 * Someone else may have extended the map while the lock wasn't
	 * held; if so the fresh map is simply discarded below.
	 */
	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;	/* ownership transferred to @chunk */

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}
 419
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	/* caller must have extended the map via pcpu_extend_area_map() */
	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		/* the original block, minus @head, now lives at @i + 1 */
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		/* shrink the target block and place @tail right after it */
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
 461
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	/*
	 * Scan the area map.  Positive entries are free areas, negative
	 * ones allocated; @off tracks the byte offset of entry @i.
	 */
	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			/* too small - but remember it for contig_hint */
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				/* grow the preceding free block */
				chunk->map[i - 1] += head;
			else {
				/* grow the preceding allocated block; the
				 * @head bytes become unusable padding */
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				/* target area shifted to entry @i + 1 */
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		/* negate the entry to mark it allocated */
		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
 557
/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	/* linear search for the map entry starting at offset @freeme */
	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);	/* must be an allocated entry */

	/* flip the entry positive to mark it free again */
	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	/* the merged entry may now be the largest free area */
	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
 603
 604static struct pcpu_chunk *pcpu_alloc_chunk(void)
 605{
 606        struct pcpu_chunk *chunk;
 607
 608        chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
 609        if (!chunk)
 610                return NULL;
 611
 612        chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
 613                                                sizeof(chunk->map[0]));
 614        if (!chunk->map) {
 615                kfree(chunk);
 616                return NULL;
 617        }
 618
 619        chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
 620        chunk->map[chunk->map_used++] = pcpu_unit_size;
 621
 622        INIT_LIST_HEAD(&chunk->list);
 623        chunk->free_size = pcpu_unit_size;
 624        chunk->contig_hint = pcpu_unit_size;
 625
 626        return chunk;
 627}
 628
 629static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 630{
 631        if (!chunk)
 632                return;
 633        pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
 634        pcpu_mem_free(chunk, pcpu_chunk_struct_size);
 635}
 636
 637/*
 638 * Chunk management implementation.
 639 *
 640 * To allow different implementations, chunk alloc/free and
 641 * [de]population are implemented in a separate file which is pulled
 642 * into this file and compiled together.  The following functions
 643 * should be implemented.
 644 *
 645 * pcpu_populate_chunk          - populate the specified range of a chunk
 646 * pcpu_depopulate_chunk        - depopulate the specified range of a chunk
 647 * pcpu_create_chunk            - create a new chunk
 648 * pcpu_destroy_chunk           - destroy a chunk, always preceded by full depop
 649 * pcpu_addr_to_page            - translate address to physical address
 650 * pcpu_verify_alloc_info       - check alloc_info is acceptable during init
 651 */
 652static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
 653static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
 654static struct pcpu_chunk *pcpu_create_chunk(void);
 655static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
 656static struct page *pcpu_addr_to_page(void *addr);
 657static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
 658
 659#ifdef CONFIG_NEED_PER_CPU_KM
 660#include "percpu-km.c"
 661#else
 662#include "percpu-vm.c"
 663#endif
 664
/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}
 692
/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;
	void __percpu *ptr;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		/*
		 * Extending the area map needs GFP_KERNEL allocation
		 * which can't happen under pcpu_lock; drop and
		 * reacquire it around each extension attempt.
		 */
		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = pcpu_create_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size);
	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	/* warn a limited number of times, then go quiet */
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}
 824
 825/**
 826 * __alloc_percpu - allocate dynamic percpu area
 827 * @size: size of area to allocate in bytes
 828 * @align: alignment of area (max PAGE_SIZE)
 829 *
 830 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 831 * Might sleep.  Might trigger writeouts.
 832 *
 833 * CONTEXT:
 834 * Does GFP_KERNEL allocation.
 835 *
 836 * RETURNS:
 837 * Percpu pointer to the allocated area on success, NULL on failure.
 838 */
 839void __percpu *__alloc_percpu(size_t size, size_t align)
 840{
 841        return pcpu_alloc(size, align, false);
 842}
 843EXPORT_SYMBOL_GPL(__alloc_percpu);
 844
 845/**
 846 * __alloc_reserved_percpu - allocate reserved percpu area
 847 * @size: size of area to allocate in bytes
 848 * @align: alignment of area (max PAGE_SIZE)
 849 *
 850 * Allocate zero-filled percpu area of @size bytes aligned at @align
 851 * from reserved percpu area if arch has set it up; otherwise,
 852 * allocation is served from the same dynamic area.  Might sleep.
 853 * Might trigger writeouts.
 854 *
 855 * CONTEXT:
 856 * Does GFP_KERNEL allocation.
 857 *
 858 * RETURNS:
 859 * Percpu pointer to the allocated area on success, NULL on failure.
 860 */
 861void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 862{
 863        return pcpu_alloc(size, align, true);
 864}
 865
 866/**
 867 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 868 * @work: unused
 869 *
 870 * Reclaim all fully free chunks except for the first one.
 871 *
 872 * CONTEXT:
 873 * workqueue context.
 874 */
 875static void pcpu_reclaim(struct work_struct *work)
 876{
 877        LIST_HEAD(todo);
 878        struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
 879        struct pcpu_chunk *chunk, *next;
 880
 881        mutex_lock(&pcpu_alloc_mutex);
 882        spin_lock_irq(&pcpu_lock);
 883
 884        list_for_each_entry_safe(chunk, next, head, list) {
 885                WARN_ON(chunk->immutable);
 886
 887                /* spare the first one */
 888                if (chunk == list_first_entry(head, struct pcpu_chunk, list))
 889                        continue;
 890
 891                list_move(&chunk->list, &todo);
 892        }
 893
 894        spin_unlock_irq(&pcpu_lock);
 895
 896        list_for_each_entry_safe(chunk, next, &todo, list) {
 897                pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
 898                pcpu_destroy_chunk(chunk);
 899        }
 900
 901        mutex_unlock(&pcpu_alloc_mutex);
 902}
 903
 904/**
 905 * free_percpu - free percpu area
 906 * @ptr: pointer to area to free
 907 *
 908 * Free percpu area @ptr.
 909 *
 910 * CONTEXT:
 911 * Can be called from atomic context.
 912 */
 913void free_percpu(void __percpu *ptr)
 914{
 915        void *addr;
 916        struct pcpu_chunk *chunk;
 917        unsigned long flags;
 918        int off;
 919
 920        if (!ptr)
 921                return;
 922
 923        kmemleak_free_percpu(ptr);
 924
 925        addr = __pcpu_ptr_to_addr(ptr);
 926
 927        spin_lock_irqsave(&pcpu_lock, flags);
 928
 929        chunk = pcpu_chunk_addr_search(addr);
 930        off = addr - chunk->base_addr;
 931
 932        pcpu_free_area(chunk, off);
 933
 934        /* if there are more than one fully free chunks, wake up grim reaper */
 935        if (chunk->free_size == pcpu_unit_size) {
 936                struct pcpu_chunk *pos;
 937
 938                list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 939                        if (pos != chunk) {
 940                                schedule_work(&pcpu_reclaim_work);
 941                                break;
 942                        }
 943        }
 944
 945        spin_unlock_irqrestore(&pcpu_lock, flags);
 946}
 947EXPORT_SYMBOL_GPL(free_percpu);
 948
 949/**
 950 * is_kernel_percpu_address - test whether address is from static percpu area
 951 * @addr: address to test
 952 *
 953 * Test whether @addr belongs to in-kernel static percpu area.  Module
 954 * static percpu areas are not considered.  For those, use
 955 * is_module_percpu_address().
 956 *
 957 * RETURNS:
 958 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 959 */
 960bool is_kernel_percpu_address(unsigned long addr)
 961{
 962#ifdef CONFIG_SMP
 963        const size_t static_size = __per_cpu_end - __per_cpu_start;
 964        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
 965        unsigned int cpu;
 966
 967        for_each_possible_cpu(cpu) {
 968                void *start = per_cpu_ptr(base, cpu);
 969
 970                if ((void *)addr >= start && (void *)addr < start + static_size)
 971                        return true;
 972        }
 973#endif
 974        /* on UP, can't distinguish from other static vars, always false */
 975        return false;
 976}
 977
 978/**
 979 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 980 * @addr: the address to be converted to physical address
 981 *
 982 * Given @addr which is dereferenceable address obtained via one of
 983 * percpu access macros, this function translates it into its physical
 984 * address.  The caller is responsible for ensuring @addr stays valid
 985 * until this function finishes.
 986 *
 987 * percpu allocator has special setup for the first chunk, which currently
 988 * supports either embedding in linear address space or vmalloc mapping,
 989 * and, from the second one, the backing allocator (currently either vm or
 990 * km) provides translation.
 991 *
 992 * The addr can be tranlated simply without checking if it falls into the
 993 * first chunk. But the current code reflects better how percpu allocator
 994 * actually works, and the verification can discover both bugs in percpu
 995 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
 996 * code.
 997 *
 998 * RETURNS:
 999 * The physical address for @addr.
1000 */
1001phys_addr_t per_cpu_ptr_to_phys(void *addr)
1002{
1003        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1004        bool in_first_chunk = false;
1005        unsigned long first_low, first_high;
1006        unsigned int cpu;
1007
1008        /*
1009         * The following test on unit_low/high isn't strictly
1010         * necessary but will speed up lookups of addresses which
1011         * aren't in the first chunk.
1012         */
1013        first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1014        first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
1015                                     pcpu_unit_pages);
1016        if ((unsigned long)addr >= first_low &&
1017            (unsigned long)addr < first_high) {
1018                for_each_possible_cpu(cpu) {
1019                        void *start = per_cpu_ptr(base, cpu);
1020
1021                        if (addr >= start && addr < start + pcpu_unit_size) {
1022                                in_first_chunk = true;
1023                                break;
1024                        }
1025                }
1026        }
1027
1028        if (in_first_chunk) {
1029                if (!is_vmalloc_addr(addr))
1030                        return __pa(addr);
1031                else
1032                        return page_to_phys(vmalloc_to_page(addr)) +
1033                               offset_in_page(addr);
1034        } else
1035                return page_to_phys(pcpu_addr_to_page(addr)) +
1036                       offset_in_page(addr);
1037}
1038
1039/**
1040 * pcpu_alloc_alloc_info - allocate percpu allocation info
1041 * @nr_groups: the number of groups
1042 * @nr_units: the number of units
1043 *
1044 * Allocate ai which is large enough for @nr_groups groups containing
1045 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1046 * cpu_map array which is long enough for @nr_units and filled with
1047 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
1048 * pointer of other groups.
1049 *
1050 * RETURNS:
1051 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1052 * failure.
1053 */
1054struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1055                                                      int nr_units)
1056{
1057        struct pcpu_alloc_info *ai;
1058        size_t base_size, ai_size;
1059        void *ptr;
1060        int unit;
1061
1062        base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1063                          __alignof__(ai->groups[0].cpu_map[0]));
1064        ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1065
1066        ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1067        if (!ptr)
1068                return NULL;
1069        ai = ptr;
1070        ptr += base_size;
1071
1072        ai->groups[0].cpu_map = ptr;
1073
1074        for (unit = 0; unit < nr_units; unit++)
1075                ai->groups[0].cpu_map[unit] = NR_CPUS;
1076
1077        ai->nr_groups = nr_groups;
1078        ai->__ai_size = PFN_ALIGN(ai_size);
1079
1080        return ai;
1081}
1082
1083/**
1084 * pcpu_free_alloc_info - free percpu allocation info
1085 * @ai: pcpu_alloc_info to free
1086 *
1087 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1088 */
1089void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1090{
1091        free_bootmem(__pa(ai), ai->__ai_size);
1092}
1093
1094/**
1095 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1096 * @lvl: loglevel
1097 * @ai: allocation info to dump
1098 *
1099 * Print out information about @ai using loglevel @lvl.
1100 */
1101static void pcpu_dump_alloc_info(const char *lvl,
1102                                 const struct pcpu_alloc_info *ai)
1103{
1104        int group_width = 1, cpu_width = 1, width;
1105        char empty_str[] = "--------";
1106        int alloc = 0, alloc_end = 0;
1107        int group, v;
1108        int upa, apl;   /* units per alloc, allocs per line */
1109
1110        v = ai->nr_groups;
1111        while (v /= 10)
1112                group_width++;
1113
1114        v = num_possible_cpus();
1115        while (v /= 10)
1116                cpu_width++;
1117        empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1118
1119        upa = ai->alloc_size / ai->unit_size;
1120        width = upa * (cpu_width + 1) + group_width + 3;
1121        apl = rounddown_pow_of_two(max(60 / width, 1));
1122
1123        printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1124               lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1125               ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1126
1127        for (group = 0; group < ai->nr_groups; group++) {
1128                const struct pcpu_group_info *gi = &ai->groups[group];
1129                int unit = 0, unit_end = 0;
1130
1131                BUG_ON(gi->nr_units % upa);
1132                for (alloc_end += gi->nr_units / upa;
1133                     alloc < alloc_end; alloc++) {
1134                        if (!(alloc % apl)) {
1135                                printk(KERN_CONT "\n");
1136                                printk("%spcpu-alloc: ", lvl);
1137                        }
1138                        printk(KERN_CONT "[%0*d] ", group_width, group);
1139
1140                        for (unit_end += upa; unit < unit_end; unit++)
1141                                if (gi->cpu_map[unit] != NR_CPUS)
1142                                        printk(KERN_CONT "%0*d ", cpu_width,
1143                                               gi->cpu_map[unit]);
1144                                else
1145                                        printk(KERN_CONT "%s ", empty_str);
1146                }
1147        }
1148        printk(KERN_CONT "\n");
1149}
1150
1151/**
1152 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1154 * @base_addr: mapped address
1155 *
1156 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
1158 * setup path.
1159 *
1160 * @ai contains all information necessary to initialize the first
1161 * chunk and prime the dynamic percpu allocator.
1162 *
1163 * @ai->static_size is the size of static percpu area.
1164 *
1165 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1166 * reserve after the static area in the first chunk.  This reserves
1167 * the first chunk such that it's available only through reserved
1168 * percpu allocation.  This is primarily used to serve module percpu
1169 * static areas on architectures where the addressing model has
1170 * limited offset range for symbol relocations to guarantee module
1171 * percpu symbols fall inside the relocatable range.
1172 *
1173 * @ai->dyn_size determines the number of bytes available for dynamic
1174 * allocation in the first chunk.  The area between @ai->static_size +
1175 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1176 *
1177 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1178 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1179 * @ai->dyn_size.
1180 *
1181 * @ai->atom_size is the allocation atom size and used as alignment
1182 * for vm areas.
1183 *
1184 * @ai->alloc_size is the allocation size and always multiple of
1185 * @ai->atom_size.  This is larger than @ai->atom_size if
1186 * @ai->unit_size is larger than @ai->atom_size.
1187 *
1188 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1189 * percpu areas.  Units which should be colocated are put into the
1190 * same group.  Dynamic VM areas will be allocated according to these
1191 * groupings.  If @ai->nr_groups is zero, a single group containing
1192 * all units is assumed.
1193 *
1194 * The caller should have mapped the first chunk at @base_addr and
1195 * copied static data to each unit.
1196 *
1197 * If the first chunk ends up with both reserved and dynamic areas, it
1198 * is served by two chunks - one to serve the core static and reserved
1199 * areas and the other for the dynamic area.  They share the same vm
1200 * and page map but uses different area allocation map to stay away
1201 * from each other.  The latter chunk is circulated in the chunk slots
1202 * and available for dynamic allocation like any other chunks.
1203 *
1204 * RETURNS:
1205 * 0 on success, -errno on failure.
1206 */
1207int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1208                                  void *base_addr)
1209{
1210        static char cpus_buf[4096] __initdata;
1211        static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1212        static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1213        size_t dyn_size = ai->dyn_size;
1214        size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1215        struct pcpu_chunk *schunk, *dchunk = NULL;
1216        unsigned long *group_offsets;
1217        size_t *group_sizes;
1218        unsigned long *unit_off;
1219        unsigned int cpu;
1220        int *unit_map;
1221        int group, unit, i;
1222
1223        cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1224
1225#define PCPU_SETUP_BUG_ON(cond) do {                                    \
1226        if (unlikely(cond)) {                                           \
1227                pr_emerg("PERCPU: failed to initialize, %s", #cond);    \
1228                pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);   \
1229                pcpu_dump_alloc_info(KERN_EMERG, ai);                   \
1230                BUG();                                                  \
1231        }                                                               \
1232} while (0)
1233
1234        /* sanity checks */
1235        PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1236#ifdef CONFIG_SMP
1237        PCPU_SETUP_BUG_ON(!ai->static_size);
1238        PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
1239#endif
1240        PCPU_SETUP_BUG_ON(!base_addr);
1241        PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
1242        PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1243        PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1244        PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1245        PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1246        PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1247
1248        /* process group information and build config tables accordingly */
1249        group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1250        group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1251        unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1252        unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1253
1254" class="line" name="L12m/percpu.c#L125="L1177" class="line" name="L1177">1177alloc_bootmem( 220" id="L1220" class < PERCPUof(( 220" id="L1220" classss="line" name="L1141">11412/a> *
<2 href25a href="+code=printk"[0]));
(UINT244X 220" id="L1220UINT244Xlass="line" name="L1222">1222 * Initiali2e the25lass="line" name="L1127">11272/a> * perpcu a2ea.  25 class="sref">PCPU_SETUP_BUGclasslowupa( 220" id="L1220classlowupa(/a>;
1222        ass="comment"> * setup pa2h.printk(1222        ass="comment"> *
<2 href25lass="line" name="L1140">11402/a> * @ai cont2ins a260alloc_bootmemgroupi;
 < ai->nr_groups; group++) {
i;
11352/a> * chunk an2 prim26ef="+code=gi" class="lass="sref">pcpu_group_info *gi = &ai->groups[group];
11292/a> *
<2 href26lass="line" name="L1223">1223 * @ai->2tatic26      cpu_map[1222 *
<2 href26ref="mm/percpu.c#L114_sizes[0]));
cpu_map[upa;
unit_size;
11202/a> * @ai->2eserv2d_size"line" name="L1120">11202/a> * reserve 2fter 26147" id="L1147" classref">alloc_bootmem < cpu_map[upa;
< < 11342/a> * the firs2 chun26cpu.c#L1138" id="L1138" classalloc_bootmem( 220" id="L1220" classcpu_map[unit]);
11292/a>        ass="comment"> * percpu a2locat26a href="+code=base_addr" claskely()
11422/a> * static a2eas o26alloc_info" class="srcode=gi" class="lastinue="line" name="L1129">11292/a> * limited 2ffset27lass="line" name="L1151">11512/a> * percpu s2mbols27ef="+code=gi" class="sref">gi>PCPU_SETUP_BUG_ON(pcpu_veria>( 220" id="L1220" class< glass="sref">PERCPUof(1253 *
<2 href27a href="+code=printk" class=">PCPU_SETUP_BUG_ON(!base_addr);
unit+code=cpass="sref">pcpu_veria>( 220" id="L1220" classss="line" name="L1253">1253 * @ai->2yn_si27      PCPU_SETUP_BUG_ON(pcpu_veriref="mm/percpu.c#L1252" id="L1252" clt; )
1253 * allocati2n in 27lass="line" name="L1225">1225 * @ai->2eserv27a href="+code=printk" class="[0]));
( class="sref">i;
1222 *
<2 href27+code=printk" class="sref">pr[0]));
(_map" class="sref">cpu_map[unit_size;
11202/a> * @ai->2nit_s27lass="line" name="L1248">1248 * and equa2 to o27intk" class="sref">printk(ion and build config tabynamic(49" class="line" name="L1249">1249 * @ai->2yn_si27alloc_info" class="srcode=gi"kely(1249 *
<2 href28de=unit_end" class="sref">uni">pr[0]));
; 11422/a> * @ai->2tom_s28ef="+code=gi" class="sref">gi->>PCPU_SETUP_BUGclasslowupa( 220" id="L1220classlowupa(/a>;
11202/a> * for vm a2eas.<28a href="+code=printk" class="kely(1249 *
<2 href28      PERCPU+codemm/percpu.c#L1253" id="L1253" clt; 11422/a> * @ai->2lloc_28ref="mm/percpu.c#L1145" id="L1145" cl(( 220" id="L1220" class="line" name="L1120">11202/a> * @ai->2tom_s28a href="+code=printk"="line" name="L1150">11502/a> * @ai->2nit_s28+code=printk"="line" name="L1150">11502/a> *
<2 href28 class="sref">PCPU_SETUP_BUGclass"+code=upa" class="srefclass"+code=u/a>;
i;
11202/a> * @ai->2r_gro2ps and"line" name="L1120">11202/a> * percpu a2eas. 28ss="sref">group_offsets for_ the"+code=cpua>( 220" id="L1220for_ the"+code=cpua>(ass="sref">pcpu_veria>( 220" id="L1220" classs"line" name="L1120">11202/a> * same gro2p.  D29="sref">BUG();                  _ON(pcpu_veriref="mm/percpu.c#L1252" id="L1252" clt; )
1253 * grouping2.  If2@ai-&g"line" name="L1253">1253 * all unit2 is a29ref">unit_off
1249 *
<2 href2"mm/pe#pa="s                _ON(1249 * The call2r sho29 class="sref">PCPU_SETUP_BUGlloc_info(KERN_EMERG, 1249 * copied s2atic 29_size"line" name="L1120">11202/a> *
<2 href29 class="sref">PCPU_SETUP_BUGclass"+c=group" class="sref">class"+c=group/a>;
nr_groups; group+"line" name="L1120">11202/a> * If the f2rst c29 class="sref">PCPU_SETUP_BUGclass0]));
1217 * is serve2 by t29f">printk(1218 * areas an2 the 29ss="sref">group_offsets =lassref="mm/percpu.c#L1252"=lassref="mm//a>;
12213/a> * and page3map b30="sref">group_sizes = ;
12193/a> * from eac3 othe30ai-&g"line" name="L1253">12533/a> * and avai3able 30ref">unit_off12353/a> *
<3 href30 class="sref">PCPU_SETUP_BUGclassref="p tof="mm/percpu.c#Lclassref="p tof/a>;
nr_groups; PERCPUd="L11HIFing">"\n&qud="L11HIFilass="line" name="L1219">12193/4> * The call3/span30 class="sref">PCPU_SETUP_BUGlloc_href="+code=PCPU_MIN_UNIlloc_href="+co/a>;
;
t_en class="sref">PERCPU_="L11HIFing">"\n&qud="L11HIFilass="line" name="L1219">12193/5> * copied s3ess, 30 class="sref">PCPU_SETUP_BUG"sref"d="L1187="mm/percpu.c#Lclass"d="L1187/a>;
nr_groups; 12193/6> *
<3a hre30 class="sref">PCPU_SETUP_BUGclassNULL<_sref">L1187="mm/percpu.c#LclassNULL<_sref">L1187/a>;
pcpu_chunk *schunk12193/7> * If the f3sref"30class="sref">pr_emerg(;)">unit_off"sref">base_ad="line" name="L1219">12193/8> * is serve3void 30s and"line" name="L1120">11203/a>{
unit_off12353/a>       3static char 
al enttL1202ef="for class="line" name="L1235">12353/a>12033/a>       3static int 12353/a>       3size_t class"+c1202" c>;
__f="+cosref"oc1202ERN_EMERG, ;) + 2="line" name="L1219">12193/a>       3size_t f="+co202/a>;
nr_cpu_ids * sizeclass"+c1202"" class="sref">class"+c1202" c>;
>unit_off[0]));
f="+co202/a>;class="line" name="L1253">12533/a>       3struct alloc_bootmem < class"+c1202" c>; < 12533/6>f="+co202/a>;clt; 12533/7>12483/a>       3unsigned long *12353/a>{
cpus_span>
If 179" id="L1179ref="mm/p90" class="line" name="L1190">11903/a>       3int *cpus_spa covmi23

11583/a>       3int 
If 179" id="L1179refn02e="mm/p8" class="line" name="L1198">11983/a>

11713/a>       3cpuL1171"ref="mm/per)" class="line" name="L1203">12033/a>
12353/a>#define3PCPU_SETUP_BUGde=dchunk" class="sref">dchunk<
nr_cpu_ids * sizeclassNULL<_sref">L1187="mm/percpu.c#LclassNULL<_sref">L1187/a>;s="line" name="L1253">12533/a>       3if (PCPU_SETUP_BUGI"L12LIST_HEAD22" id="L1222" I"L12LIST_HEADr_cput;dchunk<_groups; lis2/a>;s="line" name="L1253">12533/7>size_t *dchunk<_groups; 12533/a>       3        printk(dchunk<_groups; 12533/a>{
group_offsets ee=dchunk" class="sref">dchunk<_groups; ;
);r_cpu_ids * sizeCPU_DYNAMIC_EARLY_SLOTS" cls="line" name="L1253">12533/a>       3        group_sizes = dchunk<_groups; ;
12533/a>       3}                        3     33ref">unit_map =  * sizeCe=dchunk" class="sref">dchunk<_groups; ;)="line" name="L1253">12533/a>
12233/a>
size_tkely(reserved_size + dyn_sss="line" name="L1134">11343/a>       3/* 3anity33ref="mm/percpu.c#L114_sizes[0]))Ce=dchunk" class="sref">dchunk<_groups; nr_groups; dyn_s="line" name="L1253">12533/a>#define3dchunk<="line" name="L1253">12533/a>       3a href="+code=CONFIG_SMP"3class33+code=printk" class="lt; isref"lass="srecode=_limla/a>;
nr_groups; ai->reserved_size + dyn_s="line" name="L1253">12533/7>11343/a>       3pr_emerg(dchunk<_groups; 12153/a>#endif
3a href="mm/percpu.c#L12403 id="33alloc_info" class="sref">pcpu_dump_a"+code=ai" class="sref">ai->12353/a>       311503/a>       3PCPU_SETUP_BUGCe=dchunk" class="sref">dchunk<_groups; ilastig_hina/a>;
dchunk<_groups; 12153/a>       312233/a>       3PCPU_SETUP_BUGee=dchunk" class="sref">dchunk<_groups; dchunk<_groups; nr_groups; ai->="line" name="L1215">12153/a>       3(dchunk<_groups; 12533/a>       3dchunk<_groups; dchunk<_groups; dchunk<_groups; 12153/a>       311273/a>
size_t ion and build config tab12493/a>       3/* 3roces3 group informakely(ai->ss="line" name="L1134">11343/a>       3pcpu_dump_a"e=NULL" class="sref">NULL;
nr_cpu_ids * sizeclassNULL<_sref">L1187="mm/percpu.c#LclassNULL<_sref">L1187/a>;s="line" name="L1253">12533/a>       3BUG();                  I"L12LIST_HEAD22" id="L1222" I"L12LIST_HEADr_cput;NULL;_groups; lis2/a>;s="line" name="L1253">12533/a>       3NULL;_groups; 12533/a>       3NULL;_groups; 12533/a>       3="mm/percpu.c#L12="L1154"31254"35      NULL;_groups; ;
);r_cpu_ids * sizeCPU_DYNAMIC_EARLY_SLOTS" cls="line" name="L1253">12533/a>       377" class="line" name="L1377">135ref="mm/percpu.c#L114_sizes[0]))"e=NULL" class="sref">NULL;_groups; ;
12533/a> *
<3 href35a href="+code=printk"[0]));
 * size"e=NULL" class="sref">NULL;_groups; ;)="line" name="L1253">12533/a> * Initiali3e the35lass="line" name="L1127">11273/a> * perpcu a3ea.  35class="sref">pr_emerg(NULL;_groups; ilastig_hina/a>;
NULL;_groups; 12153/a>       3ass="comment"> * setup pa3h.pr_emerg(NULL;_groups; NULL;_groups; isref"lass="srecode=_limla/a>;="line" name="L1215">12153/a>       3ass="comment"> *
<3 href35alloc_info" class="sref">pcpu_dump_a"e=NULL" class="sref">NULL;_groups; NULL;_groups; NULL;_groups; 12153/a> * @ai cont3ins a36 class="sref"="line" name="L1150">11503/a> * chunk an3 prim36ai-&g"line" name="L1253">12533/a> *
<3 href36ref">unit_offkatic_size +12493/a> * @ai->3tatic36f">size_t ;
NULL; ?:a href="mm/percpCe=dchunk" class="sref">dchunk<="line" name="L1253">12533/a> *
<3 href36f">size_t  * sizeclasssize ecode=schunk" class="sref"size ecode=/a>;, -1)="line" name="L1253">12533/a> * @ai->3eserv3d_size"line" name="L1120">11203/a> * reserve 3fter 36147" id="L114
<9" class="line" name="L1249">12493/a> * the firs3 chun36 class="sref">PCPU_SETUP_BUGclassa href="+code=PAGE_MASK"classa href="+ref">12533/a>       3ass="comment"> * percpu a3locat36a href="+codereturn 0="line" name="L1253">12533/a>       3ass="comment"> * static a3eas o36alloc="line" name="L1150">11503/a> * limited 3ffset37lass="line" name="L1151">11513/a> * percpu s3mbols37ef="+>CONFIG_SMP
12373/a> *
<3 href37lass="line" name="L1223">12233/a> * @ai->3yn_si37     lass="chart* coss="_ids * sizeclasssc_@ai-f="mm/percpu.c#Lclasssc_@ai-f/a>;clt; ;]a href="mm/percp__11343/a> * allocati3n in 37f">size_tclt; ;]a>cpus_bu"line" name="L1134">11343/a> * @ai->3eserv37a href="+codeclt; ;]acpus_bu"line" name="L1134">11343/a> *
<3 href37+code=printk"clt; cpus_bu"line" name="L1134">11343/a> * @ai->3nit_s37lass=}="line" name="L1253">12533/a> * and equa3 to o37s and"line" name="L1120">11203/a> * @ai->3yn_si37allocenum"_ids * sizeclasssc="mm/percpu.c#Lclasssccode= ;="line" name="L1253">12533/a> *
<3 href38lass="line" name="L1151">11513/a> * @ai->3tom_s38ef="+ef">cpuina= ;s"line" name="L1253">12533/a> * for vm a3eas.<38a hre="line" name="L1134">11343/a> *
<3 href38f">size_tkelyddr);
;s"line" name="L1253">12533/a> * @ai->3lloc_38ref="mm/percpu.c#L114return - href="mm/percpEINVALcode=PAGE_MASK"EINVAL/a>;="line" name="L1253">12533/a> * @ai->3tom_s38_size"line" name="L1120">11203/a> * @ai->3nit_s38+code=printk"kelys="line" name="L1233">12333/a> *
<3 href38class="sref">pr_emerg12533/a> * @ai->3r_gro3ps and>CONFIG_SMP
11203/a> * percpu a3eas. 38ss="sref">groelseskelyddr);
 * size h+code=PAGE_MASK" h+/a>;,a _mask=%s\n"cpus_b)="line" name="L1233">12333/a> * same gro3p.  D39="sref">BUG();                  f="+ccoosenssc="mm/percpu.c#Lclasscoosenssccode=;="line" name="L1253">12533/a> * grouping3.  If3@ai-&gclass="line" name="L1240">12403/a> * all unit3 is a39ref">>CONFIG_SMP
11203/a> *
<3 href393s="sref">groelseskelyddr);
 * size h+code=PAGE_MASK" h+/a>;,a _mask=%s\n"cpus_b)="line" name="L1233">12333/a> * The call3r sho39ref="mm/percpu.c#L114_sizes[0]))f="+ccoosenssc="mm/percpu.c#Lclasscoosenssccode=12533/a> * copied s3atic 39_sizeclass="line" name="L1240">12403/a> *
<3 href39 class="sref"else"line" name="L1240">12403/a> * If the f3rst c39class="sref">pr_emerg(cpus_bua href="mm/percpCh+code=PAGE_MASK" h+/a>;s="line" name="L1253">12533/a> * is serve3 by t39s and"line" name="L1120">11203/a> * areas an3 the 39ss="sref">groreturn 0="line" name="L1253">12534/a> * and page4map b40="sre="line" name="L1150">11504/a> * from eac4 othe40ai-&g href="mm/percpearly_"arndcode=PAGE_MASK"early_"arndr_cpu__mask=%s\n"cpus_bua href="mm/percpf1151"ss="sresetu/percpu.c#L1252"=1151"ss="sresetu/r_cps="line" name="L1253">12534/a> * and avai4able 40lass="line" name="L1223">12234/a> *
<4 href40mm/percpu.c#L1204" id="L120ta class="line" name="L1235">12354/4> * The call4/span4012034/5> * copied s4ess, 40 clasef="mm/percpu.c#L1205" * Bef="miteff needercbyatic_archmm/percpor tic_genercpusetu/9refgoquor_lass="line" name="L1203">12034/6> *
<4a hre40 clasef="mm/percpu.c#L1205" * to bef/per" class="line" name="L1203">12034/7> * If the f4sref"40classef="mm/percpu.c#L1205" *" class="line" name="L1249">12494/8> * is serve4void 40s and>COa href="mm/percp""srefru.c#L1221" id="""srefrr_cpu_ids * sizef="mm/pNEED_PE142" _EMBED_FIRST_CHUNrcpu.c#L1244" idf="mm/pNEED_PE142" _EMBED_FIRST_CHUNrsize) || \"line" name="L1249">12494/9> * areas an4L1210409ef">unit_offddr);
 * sizef="mm/pHAVElloc_inPE142" _AREAcpu.c#L1244" idf="mm/pHAVElloc_inPE142" _AREA/a>;s"line" name="L1253">12534/a>       4static char  * sizeBUILD_EMBED_FIRST_CHUNrcpu.c#L1244" idBUILD_EMBED_FIRST_CHUNrass="line" name="L1223">12234/a>12404/a>       4static int 12234/a>       412494/a>       4si>COa href="mm/percp""srefru.c#L1221" id="""srefrr_cpu_ids * sizef="mm/pNEED_PE142" _d="L1FIRST_CHUNrcpu.c#L1244" idf="mm/pNEED_PE142" _d="L1FIRST_CHUNrsizes"line" name="L1253">12534/5> * sizeBUILD_d="L1FIRST_CHUNrcpu.c#L1244" idBUILD_d="L1FIRST_CHUNrass="line" name="L1223">12234/6>12404/7>12484/a>       4unsigned long *K()9ref/percbyaboth embedpu.c#p to_size +12494/9> * sizeBUILD_EMBED_FIRST_CHUNrcpu.c#L1244" idBUILD_EMBED_FIRST_CHUNrass=) ||  href="mm/percp""srefru.c#L1221" id="""srefrr_cpu_ids * sizeBUILD_d="L1FIRST_CHUNrcpu.c#L1244" idBUILD_d="L1FIRST_CHUNrass=s"line" name="L1253">12534/a>       4int *12354/a>       4int K -href="ms="sref">K cosside&quo distances between 2" s class="line" name="L1235">12354/a>       4="mm/percpu.c#L1223" id="41223"42r dynamic allocation like an* @lass="sref">d:atic_L1179of 179" id=uL1171"rem/p incbytes class="line" name="L1235">12354/a>       4
12354/a>       4="mm/percpu.c#L1225" id="41225"4212354/5>al/alass="line" name="L1235">12354/6>12354/7>12354/a>       4        12354/9>12034/a>       4        12354/a>       4}                        4     43  The latter chunk is circulahGgrouprem/refways mutliples of 1d=" ="+cpu.c#2" s whichmem/rof/alass="line" name="L1235">12354/a>       4(0)
12354/a>
12354/a>       4/* 4anity4312354/5>12034/6>12354/7>12354/a>       4K iefreturnern>
Or/alass="line" name="L1235">12354/a>#endif
4a href="mm/percpu.c#L12404 id="43her for the dynamic area.  T*cfailurc,rERR_PTR value iefreturnern/alass="line" name="L1235">12354/a>       412494/a>       4cpu>pcpu_chunk *KERN_a* href="+code=uni__Kcode=PAGE_MASK"classaef="ss="sref">Kr_cpu"line" name="L1249">12494/a>       4dyn_sua href="mm/percpCsref"code=PAGE_MASK" sref"code= 11344/a>       4(11344/a>       4[0]))f="+cfc_lassdistance_fnf"code=PAGE_MASK"f="+cfc_lassdistance_fnf"code= 12534/a>       411344/a>       4cpuina= ;clt; 12534/a>
size_tef">cpuina= id="L12cna/a>;clt; 12534/a>       4/* 4roces4 group informacoss="_ids * sizeosref"code=PAGE_MASK" sref"code= ai-><__fe" clasef"r2ref"="line" name="L1253">12534/a>#endif
4group<;
12534/a>       4group_sizes = 12534/a>       4 * sizebest_upa="mm/percpu.c#Lbest_upar_cps=rg12494/a>       4id="L1yn_sua href="mm/percprefass="sref">i;
11204/a>       4="mm/percpu.c#L12="L1154"41254"45      baina= 11204/a>       477" class="line" name="L1477">145ref="mm/percp>pcpu_chunk *KERN_a* href="mm/percpsroups" class="sref">nr="line" name="L1120">11204/a>       4ass="comment"> *
<4 href45a href="+code"sref">baina=* href="mm/percpERG"rcpu.c#L1221" id="ERG"rcp">nr="line" name="L1120">11204/a>       4ass="comment"> * Initiali4e the45lass="line" name="L1127">11274/a> * perpcu a4ea.  45f">size_t ion and build config tabthf="func/percmay befcef=>bamultiple time235" class="line" name="L1235">12354/a>       4ass="comment"> * setup pa4h.printk(imemsear_cpu_ids * sized="L12rcpu.c#L1221" id="d="L12rcp/a>;, 0,unit_off[0]));
;ss="line" name="L1253">12534/a>       4ass="comment"> *
<4 href45ss="sref">group_offsets memseass="sref">imemsear_cpu_ids * sized="L12cnass="sref">id="L12cna/a>;, 0,unit_off[0]));
id="L12cna/a>;ss="line" name="L1253">12534/a> * @ai cont4ins a46lass="line" name="L1151">11514/a> * chunk an4 prim461">size_t ion and build config tabcefcercpu  srefsudre.c#ensure d1215" c iefenough9ref"early "="srt*" class="line" name="L1249">12494/a> *
<4 href46ref">unit_off< * size href="+code=ai" class="sref">ai->dyn_s +"line" name="L1219">12194/a> * @ai->4tatic46      imax_ar_cpu_ids * size sref"code=PAGE_MASK" sref"codeua href="mm/percpmm/percpu.c#L1215" id="L1215" classua href="mm/percpfcpu.c_DYNAMIC_EARL);
12534/a>       4ass="comment"> *
<4 href46f">size_t ai->ai-><-a href="mm/percp+ dyn_s="line" name="L1253">12534/a> * @ai->4eserv4d_size"line" name="L1120">11204/a> * reserve 4fter 46147" id="L11412494/a> * the firs4 chun46classef="mm/percpu.c#L1205"         * Dnamic11984/a>       4ass="comment"> * percpu a4locat46grouprcpu.c#L1204" id="L120         * ef="m_="+cpisamultiple of 1d="_="+cpu.c#isatic_smef=>s" class="line" name="L1198">11984/a>       4ass="comment"> * static a4eas o46her for the dynamic area.  Ttttttttt* whichmctheacc arodcpu 4k efef">basegrea.s whichmem/requalhto class="line" name="L1198">11984/a> * limited 4ffset47 uses different area allocattttttttt* ef"larger thffeminsref=""+con/alass="line" name="L1235">12354/a> * percpu s4mbols47  The latter chunk is circultttttttt*" class="line" name="L1249">12494/a> *
<4 href47ref">unit_off<imax_ar_cpu_ids * size sref"code=PAGE_MASK" sref"codeua href="mm/percp srefsudcode=PAGE_MASK" srefsudyn_sua href="mm/percpfy_alMIN_U"L12a>);
);r_cps="line" name="L1249">12494/a> * @ai->4yn_si47     "line" name="L1249">12494/a>       4ass="comment"> * allocati4n in 47f">size_t i="Lndour_cpu_ids * sizeminsref=""+code=PCPU_MIN_UNIminsref=""+coyn_sua href="mm/percpsd="L1187="mm/percpu.c#L"d="L1187/a>;s="line" name="L1249">12494/a> * @ai->4eserv47a href="+code 12494/a> *
<4 href47+code=printk"while y((12494/a> * @ai->4nit_s47class="sref">pr_emerg(12494/a>       4ass="comment"> * and equa4 to o47f">printk(12494/a>       4ass="comment"> * @ai->4yn_si47alloc"line" name="L1249">12494/a> *
<4 href48="sref">group_ion and build config tabd="L1 lasseacc rdquo to ticir proximla249" class="line" name="L1249">12494/a> * @ai->4tom_s48 class="sref">PCPU_SETUP_BUGref_each_possible_c>( 220" id="L1220ref_each_possible_c>(r_cpu_ids * sizec>( 220" id="L1220" classss="line" name="L1134">11344/a> * for vm a4eas.<48a href="+code=printk"href="+code=gro>grouss="sref">id="L1yn_s
12534/a> *
<4 href48f">size_t inext_>grou">si:"line" name="L1253">12534/a>       4ass="comment"> * @ai->4lloc_48ref="mm/percpu.c#L114>PCPU_SETUP_BUGref_each_possible_c>( 220" id="L1220ref_each_possible_c>(r_cpu_ids * sizetc>( 220" id="L1220tc classss="line" name="L1134">11344/a> * @ai->4tom_s48a href="+code=printk"""""""""kely(11344/a> * @ai->4nit_s48+code=printk" class="""""""""""""""""break="line" name="L1253">12534/a> *
<4 href48class="sref">pr_emerg""""""""kely(;clt; grouss="sref">id="L1yn_s
t;12534/a>       4ass="comment"> * @ai->4r_gro48class="sref">pr_emergggggggggggggy((12534/a>       4ass="comment"> * percpu a4eas. 48alloc_info" class="srpercpu.c#L114>PCPU_SETUP_BUGc>(sdistance_fncode=PAGE_MASK"lassdistance_fnass=y(( 220" id="L1220" classssgroua href="mm/percpLOCAL_DISTANCE 220" id="L1220LOCAL_DISTANCEyn_s)ss="line" name="L1134">11344/a> * same gro4p.  D49="sref">BUG();   "srpercpu.c#L114>PCPU_SETUP_BUG>grouss="sref">id="L1yn_s++ "line" name="L1253">12534/a> * grouping4.  If49ef="+code=gi" class=""srpercpu.c#L114>PCPU_SETUP_BUG="+code=group" class="sref">group<(groupua href="mm/percp>grouss="sref">id="L1yn_s + 1)="line" name="L1253">12534/a> * all unit4 is a49a href="+code=printk" class="pr_emerggoto  inext_>grou">si="line" name="L1253">12534/a> *
<4 href49      11504/a> * The call4r sho49ref="mm/percpu.c#L114="line" name="L1150">11504/a> * copied s4atic 49a href="+code=printk"[0]));
;clt; id="L1yn_s="line" name="L1253">12534/a> *
<4 href49+code=printk" class="lt; id="L12cna/a>;clt; id="L1yn_s]++ "line" name="L1253">12534/a> * If the f4rst c49class="sref">="line" name="L1150">11504/a>       4ass="comment"> * is serve4 by t49s and"line" name="L1120">11204/a> * areas an4 the 49ss="sref">gro12495/a> * and page5map b50 uses different area allocattttttttt* Expu.c#ref= ="+cpuntilhrdd179srmiccef/p toggozs ever 75% class="line" name="L1249">12495/1> * grouping5 othe50  The latter chunk is circultttttttt*pu.c#ticn asamuchmes possible withoutf/pquo mom/redd179s class="line" name="L1249">12495/2> * all unit5able 50r dynamic allocation like antttttttt*pmicce" class="line" name="L1203">12035/a> *
<5 href50mm/percpu.c#L1204" id="L120ntttttttt*" class="line" name="L1249">12495/4> * The call5/span50f">size_t 12535/5> * copied s5ess, 50a href="+coderef"y(11345/6> *
<5a hre50+code=printk" class="ina= 12535/7> * If the f5sref"50lass="line" name="L1248">12485/8> * is serve5void 50class="sref">pr_emergkely((12485/9> * areas an5L121050alloc_info" class="srpercpu.clastinue="line" name="L1253">12535/a>       5static char 11515/a>(id="L1yn_s
id="L1yn_s
tloua href="mm/percp="+code=group" class="sref">group=a href="mm/percp>grouss="sref">id="L1yn_s++ss="line" name="L1134">11345/a>       5static int (id="L12cna/a>;clt; id="L1yn_s]ua href="mm/percprpa="mm/percpu.c#Lupayn_s)="line" name="L1253">12535/a>12535/4>id="L12cna/a>;clt; id="L1yn_s]="line" name="L1253">12535/5>11505/6>11275/7>pr_emerg12495/a>       5unsigned long *12495/9>12495/a>       5int *12035/a>       5int 12495/a>       5="mm/percpu.c#L1223" id="51223"52a href="+code=printk"kely((group" class="sreum_possible_c>(gass=y)12485/a>       512535/a>       5="mm/percpu.c#L1225" id="51225"5212535/5>12495/6>12485/7>pr_emerg""""""""break="line" name="L1253">12535/a>       5        pr_emerg(12535/9>pcpu_dump_abest_upa="mm/percpu.c#Lbest_upar_cp
12495/a>       5        11505/a>       5}                        5     53 class="sref">PCPU_SETUP_BUGupa="mm/percpu.c#Lupayn_s
12495/a>       5(0)
12235/a>
size_t ion and build config tabu/percpubu.c#fillms="sref">K 9" class="line" name="L1249">12495/a>       5/* 5anity53ref="mm/percpref"y(id="L1yn_s
id="L1yn_s
tloua href="mm/percp="+code=group" class="sref">group=a href="mm/percp>grouss="sref">id="L1yn_s++s"line" name="L1249">12495/5>[0]));
;
+i="Lndour_cpu_ids * sized="L12cnass="sref">id="L12cna/a>;clt; id="L1yn_s]ua href="mm/percprpa="mm/percpu.c#Lupayn_s)="line" name="L1253">12535/6>11275/7>PCPU_SETUP_BUGsroups" class="sref">nr
Kr_cpu(groupua href="mm/percp"+code=upa" class="sref"+code=u/a>;)="line" name="L1253">12535/a>       5kelyddr);
nrs"line" name="L1249">12495/9>12535/a>       5group_sizes = nrall href="mm/percperoups" class="sref">nr_groups; nr="line" name="L1253">12535/a>       512535/a>       5(id="L1yn_s
id="L1yn_s
tloua href="mm/percp="+code=group" class="sref">group=a href="mm/percp>grouss="sref">id="L1yn_s++ss="line" name="L1134">11345/a>       5nr_groups; grouss="sref">id="L1yn_s]._sizes = nrall href="mm/percpERG"rcpu.c#L1221" id="ERG"rcp">nr="line" name="L1253">12535/a>       5 = nra+i="Lndour_cpu_ids * sized="L12cnass="sref">id="L12cna/a>;clt; id="L1yn_s]ua href="mm/percprpa="mm/percpu.c#Lupayn_s)="line" name="L1253">12535/a>       511505/a>       511275/a>
PCPU_SETUP_BUGsroups" class="sref">nr_groups; ai-><ai->="line" name="L1253">12535/a>       5/* 5roces54f">printk(nr_groups; dyn_s dyn_s="line" name="L1253">12535/a>#endif
5group_offsets sroups" class="sref">nr_groups; ai->ai->="line" name="L1253">12535/a>       5group_sizes = nr_groups; 12535/a>       5PCPU_SETUP_BUGsroups" class="sref">nr_groups; 12535/a>       5unit_off<nr_groups; 12535/a>       5="mm/percpu.c#L12="L1154"51254"55     "line" name="L1249">12495/a>       577" class="line" name="L1577">155ref="mm/percpref"y(id="L1yn_s
i;
id="L12cna/a>;clt; id="L1yn_s]=a href="mm/percp>grouss="sref">id="L1yn_s++ss="line" name="L1134">11345/a>       5ass="comment"> *
<5 href55a href="+code=printk">pcpu_chunk *roups" class="srgf">nr
nr_groups; grouss="sref">id="L1yn_s]="line" name="L1253">12535/a>       5ass="comment"> * Initiali5e the55lass="line" name="L1127">11275/a> * perpcu a5ea.  55class="sref">pr_emerg12495/a>       5ass="comment"> * setup pa5h.12495/a>#endif
5ass="comment"> *
<5 href55her for the dynamic area.  Tttttttttmm/percp* back-to-backn>
r should updcpu thf="to class="line" name="L1198">11985/a> * @ai cont5ins a56 uses different area allocaTttttttttmm/percp* " nlecteactualhrf="mm/per" class="line" name="L1203">12035/a> * chunk an5 prim56  The latter chunk is circulttttttttmm/percp*" class="line" name="L1249">12495/a> *
<5 href56a href="+code=printk"href="+code=gro>roups" class="srgf">nr_groups; i;
nr_groups; 12535/a> * @ai->5tatic56     "line" name="L1249">12495/a>       5ass="comment"> *
<5 href56ref="mm/percpu.c#L114>PCPU_SETUP_BUGref_each_possible_c>( 220" id="L1220ref_each_possible_c>(r_cpu_ids * sizec>( 220" id="L1220" classs"line" name="L1249">12495/a>       5ass="comment"> * @ai->5eserv56a href="+code=printk"""""""""kely(;clt; id="L1yn_ss"line" name="L1249">12495/a>       5ass="comment"> * reserve 5fter 56+code=printk" class="""""""""""""""""href="+code=gro>roups" class="srgf">nr_groups; nr[lt; nr_groups; ;++]all href="mm/percpc>( 220" id="L1220" class="line" name="L1253">12535/a> * the firs5 chun56class="sref">pr_emerg(roups" class="srgf">nr_groups; ;i="Lndour_cpu_ids * size>roups" class="srgf">nr_groups; ;ua href="mm/percprpa="mm/percpu.c#Lupayn_s)="line" name="L1253">12535/a>       5ass="comment"> * percpu a5locat56class="sref">pr_emerg(i;
nr_groups; ;="line" name="L1253">12535/a>#endif
5ass="comment"> * static a5eas o56ss="sref">gro="line" name="L1150">11505/a> * limited 5ffset57="sref">group_sizes =  * sizeuefass="sref">i;
;)="line" name="L1253">12535/a> * percpu s5mbols57ai-&g"line" name="L1253">12535/a> *
<5 href57ref">unit_offreturn dr);
nr="line" name="L1120">11205/a> * @ai->5yn_si57     ="line" name="L1150">11505/a>       5ass="comment"> * allocati5n in 57f">siclass=> ion and build config tabBUILD_EMBED_FIRST_CHUNr
|| BUILD_d="L1FIRST_CHUNrp*" class="line" name="L1249">12495/a> * @ai->5eserv57_size"line" name="L1120">11205/a> *
<5 href57+code>COa href="mm/percp""srefru.c#L1221" id="""srefrr_cpu_ids * sizeBUILD_EMBED_FIRST_CHUNrcpu.c#L1244" idBUILD_EMBED_FIRST_CHUNrass=)"line" name="L1120">11205/a> * @ai->5nit_s57classef="mm/percpu.c#L1205"/*a class="line" name="L1235">12355/a>       5ass="comment"> * and equa5 to o57grouprcpu.c#L1204" id="L120 * classembed"size ecode= - embedptic_fize +L1235"+12355/a>#endif
5ass="comment"> * @ai->5yn_si57her for the dynamic area.  T*c@lass="sref">d:atic_L1179of 179" id=uL1171"rem/p incbytes class="line" name="L1235">12355/a> *
<5 href58 uses different area allocat* @L1215" c: minimum free_L1179ref"n>
12355/a> * @ai->5tom_s58  The latter chunk is circulah@"d="L1187:ref="mm/perc1d=" ="+c/alass="line" name="L1235">12355/a> * for vm a5eas.<58r dynamic allocation like an* @cassdistance_fn: cef=back to ynamical/alass="line" name="L1235">12355/a> *
<5 href58mm/percpu.c#L1204" id="L1204* @"="srefn: func/percto u/percpubL1171"rp tor_lass="line" name="L1235">12355/a>       5ass="comment"> * @ai->5lloc_5812355/a> * @ai->5tom_s58 clasef="mm/percpu.c#L1205" *r_lass="line" name="L1235">12355/a> * @ai->5nit_s58 clasef="mm/percpu.c#L1205" * Thf="f="ineelper to ease_Lettquo up embeddercfize +L1235"+12495/a> *
<5 href58classef="mm/percpu.c#L1205" * cthebefcef=>bawhere classLetL12size ecode=()9rs expectern/alass="line" name="L1235">12355/a>       5ass="comment"> * @ai->5r_gro58grouprcpu.c#L1204" id="L120 */alass="line" name="L1235">12355/a>#endif
5ass="comment"> * percpu a5eas. 58her for the dynamic area.  T*cIfbthf="func/percref/percto LetL1ptic_fize +code=, it"f="ilpercpud class="line" name="L1249">12495/a> * same gro5p.  D59 uses different area allocat* byfcef=quo @"="srefnbu.c#/percas-f="withoutfbequo mapp>bainao class="line" name="L1198">11985/a> * grouping5.  If59  The latter chunk is circulahvm"="srtem/p.  Af="mm/perprem/refways wholeamultiples of @"d="L1187/alass="line" name="L1198">11985/a> * all unit5 is a59r dynamic allocation like an* efef">bato @"d="L1187n/alass="line" name="L1235">12355/a> *
<5 href59mm/percpu.c#L1204" id="L1204*/alass="line" name="L1235">12355/a>       5ass="comment"> * The call5r sho59 ar physical/alass="line" name="L1235">12355/a> * copied s5atic 59 clasef="mm/percpu.c#L1205" * map1quo whichmoften u esalarger p to_1187n  Please_nopu thatbthf=/alass="line" name="L1235">12355/a> *
<5 href59 clasef="mm/percpu.c#L1205" * cthe179ultcincverypmicrse_35"_grouref= map1quo ercNUMA machinesatiss/alass="line" name="L1235">12355/a> * If the f5rst c59classef="mm/percpu.c#L1205" * requi&quo largehvm"="srtedd179srmicce"  Don12355/a>       5ass="comment"> * is serve5 by t59grouprcpu.c#L1204" id="L120 * vm"="srtmicceff="nop ordmi23of magef=ude"larger thffeListances/alass="line" name="L1235">12355/a>#endif
5ass="comment"> * areas an5 the 59her for the dynamic area.  T*cbetween nsizrmemom24edd179sesa(ie" 32bf= NUMA machines)n/alass="line" name="L1235">12356/a> * and page6map b60 uses different area allocat*/alass="line" name="L1235">12356/1> * grouping6 othe60  The latter chunk is circul* @L1215" ctmiecifiesatic_minimum n>
12356/2> * all unit6able 60r dynamic allocation like an*/alass="line" name="L1235">12356/3> *
<6 href60mm/percpu.c#L1204" id="L120n*cIfbthe neederc="+cpisasmef=>r thffetic_minimum ef"miecifiec#ref=/alass="line" name="L1235">12356/4> * The call6/span6012356/5> * copied s6ess, 60 clasef="mm/percpu.c#L1205" *r_lass="line" name="L1235">12356/6> *
<6a hre60 clasef="mm/percpu.c#L1205" * RETURNS:/alass="line" name="L1235">12356/7> * If the f6sref"60classef="mm/percpu.c#L1205" * 0 ercsuc acc, -errno ercfailurcn/alass="line" name="L1235">12356/8> * is serve6void 60grouprcpu.c#L1204" id="L120 *" class="line" name="L1249">12496/9> * areas an6L121060allocina=  * sizeosref"code=PAGE_MASK" sref"code= dyn_sua href="mm/percpCsref"code=PAGE_MASK" sref"code= 11346/a>       6static char BUG();   "srpercpu.c#L114rg(11346/a>(11346/2>11346/3>(11206/4>11346/5>;nr="line" name="L1120">11206/6>;nr="line" name="L1120">11206/7>size_tefcpu_chunk *KERN_a* href="mm/percpsroups" class="sref">nr="line" name="L1120">11206/a>       6unsigned long *printk(nr="line" name="L1120">11206/9>id="L1yn_sua href="mm/percproups" class="sriyn_sua href="mm/percprcode=dyn_size" clc">nr="line" name="L1120">11206/a>       6int *11516/a>       6int PCPU_SETUP_BUGsroups" class="sref">nr
(( * size+ dyn_sua href="mm/percpmm/percpu.c#L1215" id="L1215" classua href="mm/percp"d="L1187="mm/percpu.c#L"d="L1187/a>;u"line" name="L1134">11346/a>       6="mm/percpu.c#L1223" id="61223"62a href="+code=printk"= class="u.c#L114rg(12536/a>       6nrs)"line" name="L1120">11206/a>       6="mm/percpu.c#L1225" id="61225"62ref="mm/percpu.c#L114return dr);
nrs="line" name="L1253">12536/5>11206/6>nr_groups; ai->nr_groups; dyn_s a href="+code=ressroups" class="sref">nr_groups; ai->="line" name="L1253">12536/7>PCPU_SETUP_BUGsm/puL1187="mm/percpu.c#L"m/puL1187yn_s= * sizesroups" class="sref">nr_groups; group<*unit_offvoida*)s="line" name="L1253">12536/a>       6        11206/9>group_offsets sm/pupa" class="sref"m/pu/a>;( * sizesm/puL1187="mm/percpu.c#L"m/puL1187yn_ss="line" name="L1253">12536/a>       6        );
;ss="line" name="L1134">11346/a>       6}                        6     63ef="+code=gi" class=" href="mm/percprcode=dyn_size" clc">nr12536/a>       6(0)
12536/a>       6="mm/percpu.c#L1234" id="61234"63f">size_t="line" name="L1150">11506/a>       6/* 6anity6312536/5>12496/6>(id="L1yn_s
id="L1yn_s
tloua href="mm/percpsroups" class="sref">nr_groups; group=a href="mm/percp>grouss="sref">id="L1yn_s++ss="line" name="L1134">11346/7>pr_emerg>pcpu_chunk *roups" class="srgf">nr
nr_groups; grouss="sref">id="L1yn_s]="line" name="L1134">11346/a>       6pr_emergunsef">baina= 12536/9>12536/a>       611516/a>       6(roups" class="srgf">nr_groups; ;12496/a>       6nr_groups; nr[lt; 11346/a>       6 * sizelas 220" id="L1220" class12536/a>       612536/a>       612496/a>       6(( 220" id="L1220" classua href="mm/percp>roups" class="srgf">nr_groups; ;<* href="+code=unisroups" class="sref">nr_groups; 12496/a>
pr_emergkelyddr);
11346/a>       6/* 6roces64class="sref">pr_emerggggggggg href="mm/percprcode=dyn_size" clc">nr12536/a>#endif
612536/a>       6BUG();   ="line" name="L1150">11506/a>       612496/a>       612536/a>       6="mm/percpu.c#L12="L1154"61254"65      ;[lt; id="L1yn_s]
;="line" name="L1253">12536/a>       677" class="line" name="L1677">16512536/a>       6ass="comment"> *
<6 href65a href="+code=printk" href="mm/percpbase="mm/percpu.c#Lbase/a>;(;ua href="mm/percpbase="mm/percpu.c#Lbase/a>;s="line" name="L1253">12536/a>       6ass="comment"> * Initiali6e the65+code=printk"="line" name="L1150">11506/a>
 * perpcu a6ea.  65lass="line" name="L1248">12486/a>       6ass="comment"> * setup pa6h.printk(ion and build config ta class="line" name="L1249">12496/a>#endif
6ass="comment"> *
<6 href65her for the dynamic area.  Ttttttttt* Copybdatabu.c#free_un/percpartsn>
n afterref= class="line" name="L1249">12496/a> * @ai cont6ins a66 uses different area allocaTtttttttt*ref="mm/perprem/rc aplete; oticrwise, we may e.c#/p"with class="line" name="L1249">12496/a>       6ass="comment"> * chunk an6 prim66  The latter chunk is circultttttttt* everlap1quo >groupn/alass="line" name="L1235">12356/a> *
<6 href66r dynamic allocation like antttttttt*" class="line" name="L1249">12496/a> * @ai->6tatic66      (id="L1yn_s
id="L1yn_s
tloua href="mm/percpsroups" class="sref">nr_groups; group=a href="mm/percp>grouss="sref">id="L1yn_s++ss="line" name="L1249">12496/a>       6ass="comment"> *
<6 href66ref="mm/percpu.c#L114>pcpu_chunk *roups" class="srgf">nr
nr_groups; grouss="sref">id="L1yn_s]="line" name="L1249">12496/a>       6ass="comment"> * @ai->6eserv66a href="+code=printk"voida* href="mm/percpptrf="+code=KERN_EMtr/a>;;[lt; id="L1yn_s]="line" name="L1249">12496/a>       6ass="comment"> * reserve 6fter 66lass="line" name="L1127">11276/a> * the firs6 chun66class="sref">pr_emergref"y(roups" class="srgf">nr_groups; ;=a href="mm/percproups" class="sriyn_s++ua href="mm/percpptrf="+code=KERN_EMtr/a>;<+nr_groups; 12496/a>       6ass="comment"> * percpu a6locat66class="sref">pr_emergggggggggkely(nr_groups; nr[lt; 12496/a>#endif
6ass="comment"> * static a6eas o66alloc_info" class="srrrrrrrrrintk(ion and build config ta_un/percref=,#free_wholea*" class="line" name="L1249">12496/a> * limited 6ffset67="sref">BUG();   "srpercpu.c#L114<nr_groups; 12496/a> * percpu s6mbols67ef="+code=gi" class=""srpercpu.c#L114lastinue="line" name="L1253">12536/a> *
<6 href67a href="+code=printk"= class=="line" name="L1150">11506/a> * @ai->6yn_si67      12496/a>       6ass="comment"> * allocati6n in 67ref="mm/percpu.c#L114mm/percp href="mm/percpmemcpy="mm/percpu.c#Lmemcpyass=y(;ua href="mm/percp__L12_lassloaru.c#L1221" id="__L12_lassloar/a>;ua href="mm/percpsroups" class="sref">nr_groups; ai->s="line" name="L1249">12496/a>       6ass="comment"> * @ai->6eserv67a href="+code=printk"""""""""<nr_groups; 12496/a>       6ass="comment"> *
<6 href67+code=printk" class="="line" name="L1150">11506/a> * @ai->6nit_s67class="sref">="line" name="L1150">11506/a>       6ass="comment"> * and equa6 to o67s and"line" name="L1120">11206/a>#endif
6ass="comment"> * @ai->6yn_si67ss="sref">grouion and build config ta_basetedd179srf="now known,#ynamic12496/a> *
<6 href68="sref">group_sizes = nr
12536/a> * @ai->6tom_s68ef="+code=gi"ref"y(id="L1yn_s
id="L1yn_s
tloua href="mm/percpsroups" class="sref">nr_groups; group=a href="mm/percp>grouss="sref">id="L1yn_s++ss="line" name="L1253">12536/a> * for vm a6eas.<68a href="+code=printk"href="+code=groeroups" class="sref">nr_groups; grouss="sref">id="L1yn_s]._sizes = ;[lt; id="L1yn_s]
-a href="mm/percpbase="mm/percpu.c#Lbase/a>;="line" name="L1253">12536/a> *
<6 href68      nr
(nru"line" name="L1134">11346/a>       6ass="comment"> * @ai->6lloc_68ref="mm/percpu.c#L114mm/percp+code=printk"href="+code=groeroups" class="sref">nr_groups; grouss="sref">id="L1yn_s]._sizes = 12496/a> * @ai->6tom_s68a href="+code="line" name="L1150">11506/a> * @ai->6nit_s68+code=printk" nr
+nr_groups; 12496/a> *
<6 href68lass="line" name="L1248">12486/a>       6ass="comment"> * @ai->6r_gro68f">printk(ion and build config ta warn if maximum nistance f="furticr thffe75%3of vm"="srtmiccef*" class="line" name="L1249">12496/a>#endif
6ass="comment"> * percpu a6eas. 68alloc_info" ciely(nr
grouay(;ss* 3 / 4ss="line" name="L1253">12536/a> * same gro6p.  D69="sref">BUG();   ("PERCPU: max_distance=0x%zx too largehref"vm"="srt" class="line" name="L1249">12496/a> * grouping6.  If69ef="+code=gi" class=""srpercpu.c"miccef0x%lx\n" class=ua href="mm/percpmax_distance="mm/percpu.c#Lmax_distance">nru"line" name="L1134">11346/a> * all unit6 is a69a href="+code=printk"= class="u.(unsef">balong)y(;ss="line" name="L1249">12496/a> *
<6 href69mm/pe#ifdeOa href="mm/percpCONFIG_NEED_PER_CPU_d="L1FIRST_CHUNr="mm/percpu.c#LCONFIG_NEED_PER_CPU_d="L1FIRST_CHUNrass="line" name="L1248">12486/a>       6ass="comment"> * The call6r sho69ref="mm/percpu.c#L114>ion and build config tabu.c#failcielwe havehref=back *" class="line" name="L1249">12496/a> * copied s6atic 69a href="+code=printk" href="mm/percprcode=dyn_size" clc">nr12496/a> *
<6 href69+code=printk" class="goto  12536/a> * If the f6rst c69classclass="line" name="L1253">12536/a>       6ass="comment"> * is serve6 by t698 href="+code="line" name="L1150">11506/a>#endif
6ass="comment"> * areas an6 the 69her f"line" name="L1150">11507/a> * and page7map b70="sref">group_sizes = "PERCPU: Embedderc%z"rp tos/71"r@%p s%z"rr%z"rd%z"ru%z"\n" class=u"line" name="L1150">11507/1> * grouping7 othe70ef="+code=gi" class=" href="mm/percpfFN_DOW"+code=pcpu_verifFN_DOW"ass=y(;ua href="mm/percpsroups" class="sref">nr_groups; ai->ua href="mm/percpsroups" class="sref">nr_groups; dyn_su"line" name="L1150">11507/2> * all unit7able 70a href="+code=printk"href="+code=groeroups" class="sref">nr_groups; nr_groups; 12497/3> *
<7 href70     "line" name="L1249">12497/4> * The call7/span704"sref">group_sizes = nr *(nrua href="mm/percpbase="mm/percpu.c#Lbase/a>;s="line" name="L1253">12537/5> * copied s7ess, 70a href="+codegoto  12537/6> *
<7a hre70lass="line" name="L1127">11277/7> * If the f7lurcn70classe/3>grouss="sref8ass="line" name="L1253">12536/a>  7an c670s="comment"> * @ai->6tom_s68ef="+code=gi"ref"y(id="L1yn_s
id="L1yn_s
tloua href="mm/percpsroups" class="sref">nr_groups; group=a href="mm/percp>a href="+code>grouss="sref9ass="line" name="L1150">11506/a>#e7an c670s="comment"> * static67a href="+code=printk"""""""""<;[lt; classu"lin7" name="L1134">11346/a>  7    67tatic char nr_groups; grouss="sref">s="srgf">nr_groups; ;<* href="+code=unisroups" class="sref">nr_groups; 11346/a>11346/2>(11346/3>);
11206/4> *
<5 href56ref="mf="+em_nopa"""""""""<;ua h__f"defreeefn 220" id="Lse_offsetr_cp
11346/5>nr="lin7" name="L1120">11206/6>nr="lin7" name="L1120">11206/7> * allocati5n in 57f">siclass=> ion and build conabu.c#failcielwe havehref=back *"7f">nr="lin7" name="L1120">11206/a>  7    671s="comment"> * and equa6 to o7e">nr="lin7" name="L1120">11206/9> *
<6 hr tabBUILD_EMBED_FIRST_ass="sriyn_sua  tabBUILD_EMBED_FIRST_="comment"> * and equa6 to o7c">nr="lin7" name="L1120">11206/a>  7    672s="comment"> * @ai cont6ins_s57classef="mm/percpu.c#L1205"/*a72lass="lin7" name="L1151">11516/a>  7    672s="comment"> * grouping6 othe6build&quo.c#L1204" id="Lmafbthf="func/percr "+co,rILD_ESIZE>"7classef="mm/percpu.c#L1205"/*a72ass=u"lin7" name="L1134">11346/a>  7    672s="comment"> * for vm a5eas.<58her for the dynamic area.  T*c@lass="sref">d:atic_L1179of 179" id=uL1171"rem/p incbytes7ass=)="lin7" name="L1253">12536/a>  7    672s="comment"> *
<5 href58mm/percpu.c#L1204" id="L1204* @"="srefn: f,hvm"="srf="mm/peithrILD_ESIZEf 179" id=uL1171"rem/p incbytes7a     "line" name="L1120">11206/a>  7    672s="comment"> * @ai->5lloc_5812536/5> * copied s5atic 5@popul4* _pthref="mm/percpu.c#popul4*  pthf 179" id=uL1171"rem/p incbytes7a">nr="lin7" name="L1120">11206/6> *
<6a href 179" id=uL1171"rem/p incbytes7a">nr="lin7" name="L1253">12536/7> * If the f6sref"68 clasef="mm/percpu.c#L1205" * Thf="eeef-renbu.c#/er to ease_Lf 179" id=uL1171"rem/p incbytes7a">nr="lin7" name="L1253">12536/a>  7    672s="comment"> * is serve5 by t5ttquo up 58classef="mm/percpu.c#L1205" * cthebefcef=>bawhere classLetL12size ecode=()9rs expectern72s and"lin7" name="L1120">11206/9> * areas an5 the etL12size ecode=()9rs expectern7yn_ss="lin7" name="L1253">12536/a>  7    673s="comment"> * same gro5p.  D58 clasefamicbas c: minimuor.  Shref ="sref">d:aticcto LetL1ptic_fize +code=, it"f="ilpercpud7a>;ss="lin7" name="L1134">11346/a>  7    673s="comment"> * grouping6 othe6beef-by-ap1qu- emb9  The latteric_fize +code=, it"f="ilpercpud7aass=u"lin7" name="L1253">12536/a>  7    673s="comment"> * all unit6able 60r dynamic allocation like an*7e/a>;="lin7" name="L1253">12536/a>  7    673s="comment"> *
<5 href50 clasef="mm/percpu.c#L1205" * RETURNS:7t="lin7" name="L1150">11506/a>  7    673s="comment"> * @ai->5lloc_50classef="mm/percpu.c#L1205" * 0 ercsuc acc, -errno ercfailurcn7312536/5> * copied s5atic bu.c#failcielwe havehref=back *"7class="lin7" name="L1249">12496/6> * areas an6L121060allocina= nr="lin7" name="L1134">11346/7>(11346/a>  7    67a href="+code=PCPU_SET href="+cod6=cpuscpu.c#L114mm/pelass="u.c#L114rg(12536/9>((12536/a>  7    67a hrefc#LVMALLOC_START/a>;ss* 374lass="lin7" name="L1151">11516/a>  7    67a href="+code= href >
<6 href66ref="mm/percvm_
<6 hr/a>(12496/a>  7    67a href="+code=size_t" cl6ss="s61f">size_tefcpu_chunk *KERN_a* href="mm/percpsroups" class="sr7yn_s]="lin7" name="L1134">11346/a>  7    67a href="+code=char" cl6ss="s61f">si href=220rreeefnass=ygrouss="sref">id="L7/a>;s="lin7" name="L1253">12536/a>  7    674s="comment"> mment"> * areas an6; 12536/a>  7    67a href="+code=pmax_t="mm/percpu.c#Lmax_tass=y(12496/a>  7    67a href="+code=size_t" cl6ss="s61f">siquode= nr="lin7" name="L1249">12496/a>
<7 hre67"mm/percpu.c#Lmment"> * areas an6; (d="L1yn_sua href="mm/percproups" jd="L1yn_suajhref="mm/percproups" class="sriyn_sua href="mm/percprcode=dyn_size" c7a>;ss="lin7" name="L1134">11346/a>  7    674s="comment"> * and equa6 to o7M/a>;="lin7" name="L1253">12536/a>#e7dif
67a href="+code=pmax_t="mm/percpnprmmefmax_tass=y("miccef0x%lx\ILD_ESIZE class=" href="mLD_ESIZEf ce="mm/"mm/p1087="mm/percpu.c#L"m/puL1187u/a>;="lin7" name="L1253">12536/a>  7    675 href="+code=PCPU_SETUP6BUG_O7);   ="lin7" name="L1150">11506/a>  7    675t PCPU_SETUP_BUGsroups" class="sref">nr
(( * size+ ;12496/a>  7    67a href="+code=cpumask_sc6print62      (12536/a>  7    67"mm/percpu.c#L12="L115="61225"62ref="mm/percpu.c#L114return dr);
12536/a>  7    675s="comment"> * The call7/spa  * sizesroups" class="sref">nr_groups; 12536/a>  7    675 href="+code=pmax_t="mm/perc  * sizesroups" class="sref">nr_groups; grouss="sref">s="srgf">nr_groups; nr_groups; umfpossible12_l"ercpB"mm/percpu.c#LVMALLOC_START7/a>;s="lin7" name="L1253">12536/a>  7    675s="comment"> *
<7a hre7intk"="lin7" name="L1150">11506/a>
<7 hre675      nr_groups; 12486/a>  7    675s="comment"> * and equa6 to o7class="lin7" name="L1249">12496/a>#e7dif
675s="comment"> * @ai->6yn_si67ss="sreun9r dynamifferent arecaargehvm"be#L120d, ; 12496/a> * and page7map bquotef"code= ; nr_groups; umfpossible12_l"ercpB" *lcielwe havehref=back *"7c;   ="lin7" name="L1249">12496/a>  7    676s="comment"> * percpu s6mbols67ef="+roup" cpmax_t="mm/perciquotnr_groups; "7c="c0+"mm/percpu.c#LVMALLOC_START7alass="lin7" name="L1235">12356/a>; "7c="> sm/pupa" class="sref"m/pu/a>;nf"code= 12496/a> *
<7 href7s++ss="lin7" name="L1249">12496/a>  7    67ss="comment"> UP6BUG_O64a href="+codef="+code io"c bu.c#failcielwe havehref=back *"7c12496/a>  7    676 href="+code=pmax_t="mm/percjd="L1yn_suajhref="mm/percpu.c#Lmax_distance">7yn_s]="lin7" name="L1249">12496/a>  7    676href="+code=CONFIG_SMP"6class63+c; (grouss="sref">i; (grouss="sref">inr_groups; umfpossible12_l"ercpB"rouss="sref">i; (a href="+code>grouss="sre6lass="lin7" name="L1127">11276/a> * the firs6 chun66class="sref">pr_emergref"y(; ;ss* 37n_sss="lin7" name="L1249">12496/a>  7    67ss="comment"> * percpu a6locatUP6BUG_O63class="sref">pr_emergunsef">baina=  * sizesroups" class="sref">nr_groups; grouss="sref">c="srgf">nr_groups; ; (grouss="sref">id="L7a>;ss="lin7" name="L1249">12496/a>#e7dif
67ss="comment"> * static a6eas o406 id="63alloc_info" class="srvoida* href="mm/percpptrf="+code=KERN_EM7class="lin7" name="L1249">12496/a>12496/a> * percpu s6mbols="63alloc_info" class="srvoida* href="mmef="+cptrf="+code=KERN_EMtr/a>;( 220" idILD_ESIZE class=" href="mLD_ESIZEf ce;miccef0x%lx\ILD_ESIZE class=" href="mLD_ESIZEf cemm/percpu.c#LVMALLOC_START7tinue="lin7" name="L1253">12536/a> *
<6 href="61248"64class="sref">pr_emergkelyddr);
11506/a> * @ai->6yn_si s6mbols="63alloc_info" ">BUG();   (12496/a>  7    67ss="comment"> * allocati6n in ->6yn_si s6mbols=f="+code=KERN_EMr_warningars6 rt& and 37;z"rr%z"rd&" cl6ss="s61f">si href=220rreeefnass=ysc#L"="srefnass=y(12496/a>  7    67ss="comment"> * @ai->6eservvvvvvvvv* copied s7ess, 70a enoa>;nf"code= ;="mm/percpptrf="+code=KERN_EM7cn_s]="lin7" name="L1249">12496/a>  7    67ss="comment"> *>>>>>>>>* is serve6 by t698 href=7ass="="lin7" name="L1150">11506/a>="lin7" name="L1150">11506/a>  7    677pan class="comment">/* 6roces64class="sref">f="+code=printk"href="+code=grokmemleak_freeoups" class="srkmemleak_freeass=y(11206/a>#e7dif
677s="comment"> * static a6eas opmax_t="mm/perciquotnr_groups; "7c="cpmax_t="mm/percjd="L1yn_suajhref+++c>grouss="sref">i12496/a>BUG7r
12536/a>BUG7rinue="lin7" name="L1253">12536/a> UP6BUG_O64a href="+codef="+code iovmlatte,Lmafbthf="cup 58ass= href >dynam_L1171"ref="mm/perprseparcpul249"7e/a>;="lin7" name="L1253">12536/a> _flagtnr_groups; flagt="mmef="+cptrf="+code=VM_pVMALnr_groups; VM_pVMAL="mm/percpptrf="+code=KERN_EM7e">nru"lin7" name="L1134">11346/a>  7    678s="comment"> * The call7/spava"""""""""<">nr_groups; nr_groups; umfpossible12_l"ercpB" *m/percpu.c#Lmax_distance">nr
+nr_groups; 12496/a>roups" class="va"""""""""<11506/a> *
<7a hre7oyn_s="lin7" name="L1249">12496/a>; (grouss="sref">i; (grouss="sref">inr_groups; umfpossible12_l"ercpB"rouss="sref">i; (a hreptrf="+code=KERN_EMtr78lass="lin7" name="L1248">12486/a>  7    678 href="+code=PCPU_SETUP6BUG_O6de=ppercproups" clas; (12496/a>#e7dif
678s="comment"> * static a6eas o href="+code=pr* The call7/spava"""""""""<ouionr_groups; ouio>(; (nr
+nr_groups; 12536/a>12496/a> * groupirs6 chun66class="sref">pr_emergref"y(; 11346/a> * all unit6 is aps" class="srkmeopul4* _pthref/a>(; (pr_emergref"y(<"mm/"mm/percproups" clasILD_ESHIFs<-a href="mm/peILD_ESHIFsef=""mm/percpu.c#LVMALLOC_START7a>;ss="lin7" name="L1249">12496/a> *
<7 href7Nrass="lin7" name="L1248">12486/a>  7    67ss="comment"> * The call6r sho69ref="mm/percpu.pth="+lr_dy#popul4* d,bthf="o+cow* Thabu.c#argehvm"14>io_L1171"ref="mm/perprseparcpul249"7class="lin7" name="L1249">12496/a> * copied s6atic 69a href="+code=printk" href="mm/ s6atic 69a hre__p; ; __p; ; (roups" class="iquotnr_groups; "7c="cpmax_t="mm/perc; (; Lyn_s="lin7" name="L1249">12496/a> *ent"> * all unit6 is aps" class="srkm; 12536/a>12536/a>  7    679pan class="comment">/* 6roces64class="sref">1"ri="+code=printk" 1"ri=arningf="+code=KERN_EMr_warninga205"_O6id=mafb"sref">d:at, err= and d7;z"rr%z"rd&" cl6ss="s61f">sf="+code=printk" href="mm/percpu.c#LVMALLOC_START7as and"lin7" name="L1150">11506/a>#e7dif
67ss="comment"> * areas an6 the 89her f"lin8" name="L1150">11507/a> * same gr setup pa6h.printk(ion and build config ta8lass=u"lin8" name="L1150">11507/1> * chunk an6 prim66  Theeeeeeeee* FIXME: Archspeithrvirtual cacmic bu.c#fflush codesn>
n afterref=8dyn_su"lin8" name="L1150">11507/2> *
<6 href66r dyneeeeeeee* cacmicbuild co50">ar=mafhunk rcpu.- somethr_war
n afterref=8d3n_su"lin8" name="L1249">12496/a> *
<5 href66r dyneeeeeeee* equival<5 u.c#Llush_cacmi_vmafB" o buildcodes rt&0 ercsuc acc, -errno ercfailurcn80     "lin8" name="L1249">12497/4> * @ai->5llocf66r dyneeeeeeee* Llush_cacmi_vmafB" caargehvm"be#onfigas most supportr_war
n afterref=8d5    "lin8" name="L1249">12496/a> * copied s5aticf66r dyneeeeeeee* dynamsize_turerea alnot205"c apyet0 ercsuc acc, -errno ercfailurcn806    "lin8" name="L1249">12496/a> *
<6a hrf66r dyneeeeeeee*L1171"ref="mm/perprseparcpul249"80lass="lin8" name="L1127">11277/7> *
<6 href8f8ass="lin8" name="L1253">12536/a>  8an c680pan class="comment">/67      11506/a>#e8an c680s="comment"> * static67a href="+codeu.c#L114mm/percp href="mm/percpm(406 idr* The call7/spa; (;ua href="mm/percp__L12_lassloaru.c#L1221" id="__L12_lassloar/a>;ua href="mm/percpsroups" class="sref">nr_groups; ai8classu"lin8" name="L1134">11346/a>  8    68tatic char BUG87/a>;u"lin8" name="L1134">11346/a>BUG8nass=u"lin8" name="L1134">11346/2> UP6BUG_O64a href="+codef=wergehv allr_dy,llocmi_copybu.c#return tic_un/percparta*"87n_ss="lin8" name="L1134">11346/3> _group_sizes = 11206/4> *
<5 href56ref="; si href=220rreeefnass=ysva"""""""""<ouionr_groups; ouio>(;ua href="mm/percpsroups" class="sref">nr_groups; 11346/5> * copied s6atic 69a href">ai->ua href="mm/percpsroups" class="sref">nr_groups; nr_groups; ai8c6    "lin8" name="L1120">11206/6> *
<7a hre8L">nr="lin8" name="L1120">11206/7>group_sizes = nr *(ouionr_groups; ouio>(ai8c8ass="lin8" name="L1120">11206/a>  8    681pan class="co64alloc_info" class="srrrrrrrrrnr_groups; "srrrrrrrrrf=""+code=PCPU_MIN_UNIref=""+8e">nr="lin8" name="L1120">11206/9> * areas an6 the 8c">nr="lin8" name="L1120">11206/a>  8    682s="comed s7ess, 70a enoa>;nf"code= ;="mm11516/a>  8    682s="comment"> wh5"_ (-percprcode=dyn_sjd="L1yn_suajhref=ode==p0m="+code=PCPU_SETUP6BUG_O82ass=u"lin8" name="L1134">11346/a>  8    682s="comment"> * all unit7able 70a hre=printk"""""""""<12536/a>  8    682s="comment"> _11206/a>  8    682s="comc_info" class="srrrrrrrrrnr_groups; "srrrrrrrrrf=""12536/5>;ua h__f"defreeefn 220" id="L1quotnr_groups; "7c="fsudcode=PAGE_MASKbquotef"code= nr="lin8" name="L1120">11206/6> * @ai->6nit_="+cfcf="+cr
(nr="lin8" name="L1253">12536/7>nr="lin8" name="L1253">12536/a>  8    682s="coze6" cla65="sref">BUG82s and"lin8" name="L1120">11206/9> * allocati5n in 57f">siclass=>ILD_EMBED_FIRST_copybu.c#return tic_un/percparta*"8yn_ss="lin8" name="L1253">12536/a>  8    683 href="+code=PCPU_SETUP6BUG_O8a>;ss="lin8" name="L1134">11346/a>  8    683s="co#ifnnt"> *
<6 href69mm/HAV_ESETUPCHUNr="mmAREAass="sriyn_sua ef69mm/HAV_ESETUPCHUNr="mmAREAhref="+code=PCPU_SETUP6BUG_O8aass=u"lin8" name="L1253">12536/a>  8    683s="comment"> * all unit6ablrintk(ion and build config ta8e/a>;="lin8" name="L1253">12536/a>  8    683s="comment"> *
<5 href5Generic SMP="sref">d:atisize 0 ercsuc acc, -errno ercfailurcn8t="lin8" name="L1150">11506/a>  8    683s="comment"> * @ai->5lloc_ ercsuc acc, -errno ercfailurcn8t5    "lin8" name="L1253">12536/5> * copied s5atic  The eand bunk rc/percis#onfigbecaonf itsgbehavis6 rlosaticref+mbl9of 179" id=uL1171"rem/p incbytes8class="lin8" name="L1249">12496/6> *
<6a hrebuildoriginal non- d:atisize 0 58 clasef 179" id=uL1171"rem/p incbytes8c">nr="lin8" name="L1134">11346/7> * If the f6sref"6importantgbecaonf many>d:chspbuildouion aunk on tricemleakup 5mightf 179" id=uL1171"rem/p incbytes8c">nr="lin8" name="L1134">11346/a>  8    683s="comment"> * is serve5 by t514>ion auild config :aticctLetL1pt514r away fromauild revisuef 179" id=uL1171"rem/p incbytes8cs and"lin8" name="L1253">12536/9> * areas an5 the  abkmemle0 5Aakupdouifigbonus,ic_Lnon-NUMA cases,ieand bunk sef 179" id=uL1171"rem/p incbytes8r/a>;="lin8" name="L1253">12536/a>  8    684s="comment"> * same gro5p.  D5genercr
y>do64od>;=atiTLB-winf becaonf  config :ati8clapiggy fig f 179" id=uL1171"rem/p incbytes8r>;ss="lin8" name="L1151">11516/a>  8    684s="comment"> * grouping6 othe6o buildphysides 50">ar=memory=mafhunk wh5ch#onfsCPU: maiquore179" id=uL1171"rem/p incbytes8rass=u"lin8" name="L1249">12496/a>  8    684s="comment"> * for vm a5eas.<5mafhunks6o bafhlidebl9>d:chs0 ercsuc acc, -errno ercfailurcn8yn_s]="lin8" name="L1134">11346/a>  8    684s="comment"> *
<5 hrefpybu.c#return tic_un/percparta*"8/a>;s="lin8" name="L1253">12536/a>  8    684s="coUP6BUG_O6de=ppercproups" clascp__L12_lad="L1yn_s]._sizescp__L12_lad="L1y7c="cpmax_t="mm/percNNr="mSn_s]._sizesNNr="mShref]percproups" clascplr_d_mostl114mm/percp hrefcplr_d_mostl1ef="mm/percprcode=dyn_size" c8y12536/a>  8    68a hrefercprcode=dyn_siXPORT_SYMBOclc">nrcp__L12_lad="L1y7c="ercpbase="mm/percpu.c#Lbas8ylass="lin8" name="L1249">12496/a>  8    684s="comment"> *
<7a hre8y">nr="lin8" name="L1249">12496/a>
<8 hre68"mm/pe href >406 idnt"> * areas an6L121060allocina= pr_emergunsef">baina= (nr_groups; ;ss="lin8" name="L1134">11346/a>  8    684 href="+code=PCPU_SET href="+cod6=cpusent"> * @ai->6nit_pu.c#Lmax_tass=y(12536/a>#e8dif
68a href hreptrf="+code=KERN_EMtr8u/a>;="lin8" name="L1253">12536/a>  8    685atic char nr_groups; ;ua h__f"defreeefn 220" id="LMAX_DMA_ADDRESSn_s]._sizesMAX_DMA_ADDRESSef=""mm/percpu.c#LVMALLOC_START8);   ="lin8" name="L1150">11506/a>  8    685t BUG8u_s++s"lin8" name="L1249">12496/a>  8    68a hrefe6" cla65="sref">BUG8un_s]="lin8" name="L1253">12536/a>  8    68"mm/pe href >406 it"> * areas an6L121060allocina= (nr_groups; ;="lin8" name="L1253">12536/a>  8    685s="co hreptrf="+code=KERN_EMtr8u12536/a>  8    685 href="+code=pmax_t="mm/percmf="+em_nopa"""""""""<;ua h__f"defreeefn 220" id="L1mleak_freeass=y(; 12536/a>  8    685signe* is serve6 by t698 href=8intk"="lin8" name="L1150">11506/a>
<8 hre685s="comment"> *
<6 href85lass="lin8" name="L1248">12486/a>  8    685s="co406 it"> * areas an6L121060allocina= 12496/a>#e8dif
685 href hreptrf="+code=KERN_EMtr8class="lin8" name="L1249">12496/a> UP6BUG_O6de=ppercproups" clasdelt"+code=u/a>;ua hdelt"ef="mm/percprcode=dyn_size" c8c;   ="lin8" name="L1249">12496/a>  8    686s="comment"> UP6BUG_O63class="sref">pr_emergunsef">baina= 12356/a>pr_emclass="sriyn_sua href="mm/percprcode=dyn_size" c8aa>;s="lin8" name="L1249">12496/a> *
<7 href8s++ss="lin8" name="L1249">12496/a>  8    68ss="comment"> UP6BUG_O64a href="+codefybu.c#return tic_un/percparta*"8c12496/a>  8    686s="comment"> * copied s5aticf66r dyn* Am"="srref+ 12496/a>  8    686s="comment"> *
<6a hrf66r dyn* whatbuildcegacy: minimuor disLetL12size ecode=()9rs expectern86lass="lin8" name="L1127">11276/a> * If the f6sreff66r dyn*pybu.c#return tic_un/percparta*"8n_sss="lin8" name="L1249">12496/a>  8    68ss="comment"> ="+code=pr6emern704"sref">group_sizes = nr *;ss="lin8" name="L1249">12496/a>#e8dif
68ss="comment"> * static a6eas o6=cpusent"> * @ai->6nit_mcpu_i_DYNAMIC_RESERVE class=" href="mcpu_i_DYNAMIC_RESERVE="mmsudcode=PAGE_MASKILD_ESIZE class=" href="mLD_ESIZEf ce;miccef0x%lx\sref"m/pu/a>;12496/a> * @ai->6nit_u.c#Ldflc_"="sref="mm/percpMERG"builddflc_"="sreffi=Ksudcode=PAGE_MASKb.c#Ldflc_"=printk"href="+code=u.c#Ldflc_"=prindefrem/percpu.c#LVMALLOC_START8yn_ss="lin8" name="L1249">12496/a> ="61 s6atic 69a href="+code=printk" href="m"mm/p0m="+code=PCPU_SETUP6BUG_O8tinue="lin8" name="L1253">12536/a> *dcode=PAGE_MASKbqri="+code=printk" 1"ri=arningf="+code=KERN_EMr_warningaF05"_O6id=f="+i/r zf  config :ats.z"rr%z"rdem/percpu.c#LVMALLOC_START8ya>;s="lin8" name="L1150">11506/a> *
<7 href8class="lin8" name="L1249">12496/a>  8    687s="comment"> * The call7/spadelt"+code=u/a>;ua hdelt"ef="/a> href="+code=pr* The call7/spabuild+aserouionr_groups; build+aserouioef="/-> href="+code=pr* The call7/spacp__L12_la hrryn_s]._sizescp__L12_la hrryef="mm/percprcode=dyn_size" c8t12496/a>  8    687 href="+code=pmax_t="mm/percmor_eachfpossible12_l"""""""""<baina= 12496/a>  8    68ss="comment"> *ercproups" clascp__L12_lad="L1yn_s]._sizescp__L12_lad="L1y7c="cpmax_t="mm/percergunsef">baina= grouss="sref">i;ua hdelt"ef="/ef="+ptrf="+code=buildbaina= 11506/a>="lin8" name="L1150">11506/a>  8    687pan cmment">> * allocati5n in 57f">sicef69mm/HAV_ESETUPCHUNr="mmAREAn*pybu.c#return tic_un/percparta*"87s and"lin8" name="L1120">11206/a>#e8dif
687s="comment"> * areas an6 the 8class="lin8" name="L1249">12496/a>> * allocati5n in 57f">sicef69mm/SMP=*pybu.c#return tic_un/percparta*"8r
12536/a>BUG8rinue="lin8" name="L1253">12536/a> * all unit6ablrintk(ion and build config ta8e/a>;="lin8" name="L1253">12536/a> *
<5 href5UP="sref">d:atisize 0 ercsuc acc, -errno ercfailurcn8e">nru"lin8" name="L1134">11346/a>  8    688s="comment"> * @ai->5lloc_ ercsuc acc, -errno ercfailurcn8r_cps="lin8" name="L1249">12496/a> * copied s5atic  UP=vm"="sronfsCkm-+asedd config tabkmeor eithrids5aity=mafhunk0 ercsuc acc, -errno ercfailurcn8en_s]="lin8" name="L1150">11506/a> *
<6a hrebShref ="sref">variebl9sea alientstr_wuishebl9>fromauildusual  href  ercsuc acc, -errno ercfailurcn8elass="lin8" name="L1249">12496/a> * If the f6sref"6variebl9seand doargehvm"requi alany>speci/rd reracksmle0 ercsuc acc, -errno ercfailurcn8eef">="lin8" name="L1248">12486/a>  8    688s="comment"> * is serve5 by tpybu.c#return tic_un/percparta*"8rs and"lin8" name="L1249">12496/a>#e8dif
688s="co406 it"> * areas an6L121060allocina= 12536/a>;ss* 38class="lin8" name="L1249">12496/a> conslass="sref">pr_empu.c#Lmax_tass=y(nr_groups; 11346/a> * all un s6atic 69a href nr#Lmax_tass=y(;ss="lin8" name="L1249">12496/a> * @ai->6yn_si s6mbols * all un s6atic 69a hremcpu_i_DYNAMIC_RESERVE class=" href="mcpu_i_DYNAMIC_RESERVE="mm"mm/percpu.c#LVMALLOC_START8Nrass="lin8" name="L1248">12486/a>  8    68ss="comment"> size_t" cl6ss="s61f">size_tefcpu_chunk *KERN_a* href="mm/percpsroups" class="sr8class="lin8" name="L1249">12496/a> 406 id="63alloc_info"f="+code=printk" fref="mm/percprcode=dyn_size" c8Lyn_s="lin8" name="L1249">12496/a> *
<7a hre8ayn_s="lin8" name="L1253">12536/a>PCPU_SETUP_BUGsroups" class="+/a>(<efcpu_chunk *12536/a>  8    689s="comment"> ="+code=pr6emerf="+code=printk" fref="mm/ s6atic 69a hre__sref"m/pu/a>;nf"code= ;fi=KERN_EMERG"LetL12s="sref">nr_groups; ;ua h__f"defreeefn 220" id="LMAX_DMA_ADDRESSn_s]._sizesMAX_DMA_ADDRESSef=""mm/percpu.c#LVMALLOC_START8as and"lin8" name="L1150">11506/a>#e8dif
689s="comment"> ="61248"64class="sres62 class="sref">PCPU_SE|| 248"64class="sref="+code=printk" fref="e#37;z"ru%z"\n" 99her f"lin9" name="L1150">11507/a> * same grcode=PAGE_MASKbqri="+code=printk" 1"ri=arningf="+code=KERN_EMr_warningaF05"_O6id="+code iomemory=buil config :ats.z"rr%z"rdem/percpu.c#LVMALLOC_START9lass=u"lin9" name="L1150">11507/1>11507/2>12496/a> *
<7 href90     "lin9" name="L1249">12497/4> * The call7/spaf="+code=printk"href="+code=groeroups" class="sref">nr_groups; nr_groups; 12496/a>nr_groups; nr_groups; 12496/a> * @ai->6nit_" srefsudyn_sall href="mm/percperoups" clasmeomref">nr_groups; ref">nr_gf=""+m/ s6atic 69a hres="sref">nr_groups; ma>>ode=P clasmeomref">nr_groups; ref">nr_gf=""+m/ s6atic 69a hres="sref">n8_groups * sizes9* * sizesroups" class="sref">nr_groups; n9_groups * sizesroups" class="sref">nr_groups; ; 9a href href="+code=ai" cl9ss="s91rcparta*"8yn_ss="lin8" name="946/a>  8  9 68tatic char group_sizes = nr * ="61 s6atic 69a href91134">11349/a>12536/a> *dcode=PAGE_MASKbqri="+code=printk" 1"ri=arningf="+code=KERN_EMr_warningaF05"_O6id=f="+i/r z9ehv allr_d9,llocmi_copybu.c#return t9c_un/9ercpare" c8tlass="lin8" name="L9p tos/71"r9z"rr%z"rd%z"r9%9"\n&qu" c8tlass="lin8" name="L9pr_groups="lin8" name="L1150">11506/a> s8an c688 href#else >> * allocati5n i9>nr_groups9/a>; ai8c6    "lin8" name="91120">11209/6>9*12536/a>nr="lin8" name="LF=dyn96/a>href="mm size"88s="comntk" 1"rdopied te134"ary"+co 6lass=L1253">12536/a>11209/9>9* are92 incbytes8r/a>;="lin8" name="Lbuildfunf"6im3s="c+coocf6fter slab3s="br_cghied s6/a>hrplace90t osildphysides 50">ar=memory=mafhun9a>;nf"code9 ;="mm;ss="lin8" name="Lpied promorly"+co 6laocfrincomment"> * for vm a5eas.<5mafhun9d="L1y9_suajhref=ode==p0m="+code9PCPU_92 incbytes8rass=u"lin8" name="c688 href#else >> * allocati5n i9class=" hr9f="mLD_ESIZEf cemm/percpu9c#LVM9LLOC_S_s]="lin8" name="L1253">12536/a>  8    68"mm/pe href >406 it"> * arelocatmm/pe_nt">/a>psize e__L12_lase_offsetr_cp
;="lin8" name="L9_groups1248"sref">group_sizes = size" clclin8" name="L1249tthe6n_size"e=pcpurifFN_ALIGtthe6n_size"ea hre]+osref"code=PAGE_s="sref">9= nr="lin8" name="L1120rn704"sref">group_sizes = e=dyn_size" clc_u.c#Ldflc_"="sref="mmhref="mm/"sref">group_sizes = href="mm/"sref="mmsudcode=PAGE_MASKILD_ESIZE class=" href="mLD }rningaF05"_O6id=f="+i/r z9tL12size e9ode=ass=y(ss="lin8" name="L1248">1248"sref">group_sizes = size" clclin8" name="L1249"sref">group_sizes12496/_  8    682s="coze6" cla95="sr92LOC_START8as a+s"lin8" name="L12 hrouionr_groups;rningaF05"_O6id=f="+i/r z9s=>ILD_EMB9D_FIRST_copybu.c#return t9c_un/9ercparta*"8yn_ss="lin8" name="91253">12539/a>  8    683 href="+code9PCPU_93LOC_START8yn_" name="L1249">12496/a> *hfpossible12_l"""sref">group_sizes>ode=P clasmtthe6n_size"e=pcpurifFN_ALIGtthe6n_size"ea hree="L1249">12496/a> hr"nr_groups; nr_groups; 12496/a> conslass="sref* The call7/spava"""""""""<;ua hun s6atic 69a hEARLY_SLOT" clcli>12496/a>  7    676s="cs="sref">nr_groups;  * @a9->5lloc_ ercsuc acc, -9rrno 93\n" 875    "lin8" name="L1134">1206/9"L1253">12536/a>  7    06/9"L1253owrd=_twfdefreeefn 220" e call7/spava"""""""""<nr_gr"mm/percjd="L1yn_suajhref]fsudcode=PAGE_MASKILD_ESIZE9vis6 rlosa9icref+mbl9of 179" id=uL1191"rem93f">ai8c6    "lin8" name="9">d:atisiz9 0 58 clasef 179" id=uL1191"rem93LOC_START7ayn_s="lin7>  7    676s="cs="sref">nr_groups; 
<6 href8f8ass="lin8" name="L1134">121253">12536/a>  7    675 href=ment"> ="61248"6s="sref">nr_groups; spin_f="k_irqsav e call7/spava"""pin_f="k_irqsav href=G"+code=pcpu_vegroup_sizes =  scfze ecode=f="+code=KERe ="61248"6s1506/a>#e8an c680s="comment"> *ent"> ="61248"6s="sref">nr_groups; sisref">group_sizesnr_groups; s20" e call7/spava"""""""""isref">group_sizesnr_groups; nr_groups; d:chs0 ercsuc acc, -9rrno 94\n" 8a>;ss="lin8" name="L1150">spin_unf="k_irq f6sor e call7/spava"""pin_unf="k_irq f6sor href=G"+code=pcpu_vegroup_sizes =  scfze ecode=f="+code=KERe
<5 hrefpybu.c#return t9c_un/94>
<7 href90  e" c8tlass="lin8" name="L9/percp hre9cplr_d_mostl1ef="mm/percp9code=9yn_size" c8tlass="lin8" name="L9/is6 rlosa9L12_lad="L1y7c="ercpbase=9mm/pe9cpu.c#


T249">12496/6LXR softws="cby0t LXR in8"; lxr@y7cux.noze e.
lxr.y7cux.no kindly hosaocfby0 c8tlass=http://www.redpill-y7cpro.no">Redpill L7cpro ASze ecoprovider of L7cux8ss="ult"> s6/a>omorlass=65efrvice90since 1995.