linux/arch/x86/mm/numa.c
// SPDX-License-Identifier: GPL-2.0-only
/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;

static int numa_distance_cnt;
static u8 *numa_distance;

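/* Parse the "numa=" early boot parameter: off, fake=<cfg>, noacpi, nohmat. */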
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
        if (!strncmp(opt, "fake=", 5))
                return numa_emu_cmdline(opt + 5);
        if (!strncmp(opt, "noacpi", 6))
                disable_srat();
        if (!strncmp(opt, "nohmat", 6))
                disable_hmat();
        return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

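/* Return the node of @cpu based on its APIC ID, or NUMA_NO_NODE if unknown. */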
int numa_cpu_node(int cpu)
{
        int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

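/* Record @cpu's node in the early map or, once per-CPU areas exist, per-CPU. */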
void numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
                                     struct numa_meminfo *mi)
{
        /* ignore zero length blks */
        if (start == end)
                return 0;

        /* whine about and ignore invalid blks */
        if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
                pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
                        nid, start, end - 1);
                return 0;
        }

        if (mi->nr_blks >= NR_NODE_MEMBLKS) {
                pr_err("too many memblk ranges\n");
                return -EINVAL;
        }

        mi->blk[mi->nr_blks].start = start;
        mi->blk[mi->nr_blks].end = end;
        mi->blk[mi->nr_blks].nid = nid;
        mi->nr_blks++;
        return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
        mi->nr_blks--;
        memmove(&mi->blk[idx], &mi->blk[idx + 1],
                (mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
 * @dst: numa_meminfo to append block to
 * @idx: Index of memblk to remove
 * @src: numa_meminfo to remove memblk from
 */
static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
                                         struct numa_meminfo *src)
{
        dst->blk[dst->nr_blks++] = src->blk[idx];
        numa_remove_memblk_from(idx, src);
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
        return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
        const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        u64 nd_pa;
        void *nd;
        int tnid;

        /*
         * Allocate node data.  Try node-local memory and then any node.
         * Never allocate in DMA zone.
         */
        nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
        if (!nd_pa) {
                pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
                       nd_size, nid);
                return;
        }
        nd = __va(nd_pa);

        /* report and initialize */
        printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
               nd_pa, nd_pa + nd_size - 1);
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (tnid != nid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

        node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
        const u64 low = 0;
        const u64 high = PFN_PHYS(max_pfn);
        int i, j, k;

        /* first, trim all entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                /* move / save reserved memory ranges */
                if (!memblock_overlaps_region(&memblock.memory,
                                        bi->start, bi->end - bi->start)) {
                        numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
                        continue;
                }

                /* make sure all non-reserved blocks are inside the limits */
                bi->start = max(bi->start, low);

                /* preserve info for non-RAM areas above 'max_pfn': */
                if (bi->end > high) {
                        numa_add_memblk_to(bi->nid, high, bi->end,
                                           &numa_reserved_meminfo);
                        bi->end = high;
                }

                /* and there's no empty block */
                if (bi->start >= bi->end)
                        numa_remove_memblk_from(i--, mi);
        }

        /* merge neighboring / overlapping entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                for (j = i + 1; j < mi->nr_blks; j++) {
                        struct numa_memblk *bj = &mi->blk[j];
                        u64 start, end;

                        /*
                         * See whether there are overlapping blocks.  Whine
                         * about but allow overlaps of the same nid.  They
                         * will be merged below.
                         */
                        if (bi->end > bj->start && bi->start < bj->end) {
                                if (bi->nid != bj->nid) {
                                        pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
                                               bi->nid, bi->start, bi->end - 1,
                                               bj->nid, bj->start, bj->end - 1);
                                        return -EINVAL;
                                }
                                pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
                                        bi->nid, bi->start, bi->end - 1,
                                        bj->start, bj->end - 1);
                        }

                        /*
                         * Join together blocks on the same node, holes
                         * between which don't overlap with memory on other
                         * nodes.
                         */
                        if (bi->nid != bj->nid)
                                continue;
                        start = min(bi->start, bj->start);
                        end = max(bi->end, bj->end);
                        for (k = 0; k < mi->nr_blks; k++) {
                                struct numa_memblk *bk = &mi->blk[k];

                                if (bi->nid == bk->nid)
                                        continue;
                                if (start < bk->end && end > bk->start)
                                        break;
                        }
                        if (k < mi->nr_blks)
                                continue;
                        printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
                               bi->nid, bi->start, bi->end - 1, bj->start,
                               bj->end - 1, start, end - 1);
                        bi->start = start;
                        bi->end = end;
                        numa_remove_memblk_from(j--, mi);
                }
        }

        /* clear unused ones */
        for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
                mi->blk[i].start = mi->blk[i].end = 0;
                mi->blk[i].nid = NUMA_NO_NODE;
        }

        return 0;
}

/*
 * Set the nodes that have memory in @mi in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
                                              const struct numa_meminfo *mi)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
                if (mi->blk[i].start != mi->blk[i].end &&
                    mi->blk[i].nid != NUMA_NO_NODE)
                        node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
        size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

        /* numa_distance could be 1LU marking allocation failure, test cnt */
        if (numa_distance_cnt)
                memblock_free(__pa(numa_distance), size);
        numa_distance_cnt = 0;
        numa_distance = NULL;   /* enable table creation */
}

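/*
 * Allocate the distance table, sized to cover the highest node ID parsed so
 * far, and fill it with the default local/remote distances.
 */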
static int __init numa_alloc_distance(void)
{
        nodemask_t nodes_parsed;
        size_t size;
        int i, j, cnt = 0;
        u64 phys;

        /* size the new table and allocate it */
        nodes_parsed = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

        for_each_node_mask(i, nodes_parsed)
                cnt = i;
        cnt++;
        size = cnt * cnt * sizeof(numa_distance[0]);

        phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
                                      size, PAGE_SIZE);
        if (!phys) {
                pr_warn("Warning: can't allocate distance table!\n");
                /* don't retry until explicitly reset */
                numa_distance = (void *)1LU;
                return -ENOMEM;
        }
        memblock_reserve(phys, size);

        numa_distance = __va(phys);
        numa_distance_cnt = cnt;

        /* fill with the default distances */
        for (i = 0; i < cnt; i++)
                for (j = 0; j < cnt; j++)
                        numa_distance[i * cnt + j] = i == j ?
                                LOCAL_DISTANCE : REMOTE_DISTANCE;
        printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

        return 0;
}

/**
 * numa_set_distance - Set NUMA distance from one NUMA node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one which is large enough to accommodate all the
 * currently known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or if @distance doesn't make sense, the
 * call is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
        if (!numa_distance && numa_alloc_distance() < 0)
                return;

        if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
                        from < 0 || to < 0) {
                pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        if ((u8)distance != distance ||
            (from == to && distance != LOCAL_DISTANCE)) {
                pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        numa_distance[from * numa_distance_cnt + to] = distance;
}

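/*
 * Look up the distance between two nodes; fall back to the default
 * local/remote distances if the table doesn't cover them.
 */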
int __node_distance(int from, int to)
{
        if (from >= numa_distance_cnt || to >= numa_distance_cnt)
                return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
        return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
        u64 numaram, e820ram;
        int i;

        numaram = 0;
        for (i = 0; i < mi->nr_blks; i++) {
                u64 s = mi->blk[i].start >> PAGE_SHIFT;
                u64 e = mi->blk[i].end >> PAGE_SHIFT;
                numaram += e - s;
                numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
                if ((s64)numaram < 0)
                        numaram = 0;
        }

        e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
        if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
                printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
                       (numaram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return false;
        }
        return true;
}

/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as hot-unpluggable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
        nodemask_t reserved_nodemask = NODE_MASK_NONE;
        struct memblock_region *mb_region;
        int i;

        /*
         * We have to do some preprocessing of memblock regions, to
         * make them suitable for reservation.
         *
         * At this time, all memory regions reserved by memblock are
         * used by the kernel, but those regions are not split up
         * along node boundaries yet, and don't necessarily have their
         * node ID set yet either.
         *
         * So iterate over all memory known to the x86 architecture,
         * and use those ranges to set the nid in memblock.reserved.
         * This will split up the memblock regions along node
         * boundaries and will set the node IDs as well.
         */
        for (i = 0; i < numa_meminfo.nr_blks; i++) {
                struct numa_memblk *mb = numa_meminfo.blk + i;
                int ret;

                ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
                WARN_ON_ONCE(ret);
        }

        /*
         * Now go over all reserved memblock regions, to construct a
         * node mask of all kernel reserved memory areas.
         *
         * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
         *   numa_meminfo might not include all memblock.reserved
         *   memory ranges, because quirks such as trim_snb_memory()
         *   reserve specific pages for Sandy Bridge graphics. ]
         */
        for_each_reserved_mem_region(mb_region) {
                int nid = memblock_get_region_node(mb_region);

                if (nid != MAX_NUMNODES)
                        node_set(nid, reserved_nodemask);
        }

        /*
         * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
         * belonging to the reserved node mask.
         *
         * Note that this will include memory regions that reside
         * on nodes that contain kernel memory - entire nodes
         * become hot-unpluggable:
         */
        for (i = 0; i < numa_meminfo.nr_blks; i++) {
                struct numa_memblk *mb = numa_meminfo.blk + i;

                if (!node_isset(mb->nid, reserved_nodemask))
                        continue;

                memblock_clear_hotplug(mb->start, mb->end - mb->start);
        }
}

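/*
 * Register the parsed memblks with memblock, validate coverage and
 * granularity, and allocate NODE_DATA for each node with enough memory.
 */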
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
        int i, nid;

        /* Account for nodes with cpus and no memory */
        node_possible_map = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&node_possible_map, mi);
        if (WARN_ON(nodes_empty(node_possible_map)))
                return -EINVAL;

        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *mb = &mi->blk[i];
                memblock_set_node(mb->start, mb->end - mb->start,
                                  &memblock.memory, mb->nid);
        }

        /*
         * Very early on, the kernel has to use some memory, e.g. for
         * loading the kernel image. We cannot prevent this anyway. So any
         * node the kernel resides in should be un-hotpluggable.
         *
         * And by the time we get here, allocating node data won't fail.
         */
        numa_clear_kernel_node_hotplug();

        /*
         * If the sections array is going to be used for pfn -> nid mapping,
         * check whether its granularity is fine enough.
         */
        if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
                unsigned long pfn_align = node_map_pfn_alignment();

                if (pfn_align && pfn_align < PAGES_PER_SECTION) {
                        pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
                                PFN_PHYS(pfn_align) >> 20,
                                PFN_PHYS(PAGES_PER_SECTION) >> 20);
                        return -EINVAL;
                }
        }
        if (!numa_meminfo_cover_memory(mi))
                return -EINVAL;

        /* Finally register nodes. */
        for_each_node_mask(nid, node_possible_map) {
                u64 start = PFN_PHYS(max_pfn);
                u64 end = 0;

                for (i = 0; i < mi->nr_blks; i++) {
                        if (nid != mi->blk[i].nid)
                                continue;
                        start = min(mi->blk[i].start, start);
                        end = max(mi->blk[i].end, end);
                }

                if (start >= end)
                        continue;

                /*
                 * Don't confuse VM with a node that doesn't have the
                 * minimum amount of memory:
                 */
                if (end && (end - start) < NODE_MIN_SIZE)
                        continue;

                alloc_node_data(nid);
        }

        /* Dump memblock with node info and return. */
        memblock_dump_all();
        return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round-robin over the
 * existing nodes.
 */
static void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node_in(rr, node_online_map);
        }
}

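/*
 * Reset all NUMA state, run @init_func to parse the platform topology,
 * then sanitize the result and register it with the core VM.
 */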
static int __init numa_init(int (*init_func)(void))
{
        int i;
        int ret;

        for (i = 0; i < MAX_LOCAL_APIC; i++)
                set_apicid_to_node(i, NUMA_NO_NODE);

        nodes_clear(numa_nodes_parsed);
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
        memset(&numa_meminfo, 0, sizeof(numa_meminfo));
        WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
                                  MAX_NUMNODES));
        WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
                                  MAX_NUMNODES));
        /* In case that parsing SRAT failed. */
        WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
        numa_reset_distance();

        ret = init_func();
        if (ret < 0)
                return ret;

        /*
         * We reset memblock back to the top-down direction
         * here because if we configured ACPI_NUMA, we have
         * parsed SRAT in init_func(). It is OK to have the
         * reset here even if we didn't configure ACPI_NUMA
         * or ACPI NUMA init fails and falls back to dummy
         * NUMA init.
         */
        memblock_set_bottom_up(false);

        ret = numa_cleanup_meminfo(&numa_meminfo);
        if (ret < 0)
                return ret;

        numa_emulation(&numa_meminfo, numa_distance_cnt);

        ret = numa_register_memblks(&numa_meminfo);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_cpu_ids; i++) {
                int nid = early_cpu_to_node(i);

                if (nid == NUMA_NO_NODE)
                        continue;
                if (!node_online(nid))
                        numa_clear_node(i);
        }
        numa_init_array();

        return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");
        printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
               0LLU, PFN_PHYS(max_pfn) - 1);

        node_set(0, numa_nodes_parsed);
        numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

        return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is a dummy single-node config encompassing the whole of
 * memory, which never fails.
 */
void __init x86_numa_init(void)
{
        if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
                if (!numa_init(x86_acpi_numa_init))
                        return;
#endif
#ifdef CONFIG_AMD_NUMA
                if (!numa_init(amd_numa_init))
                        return;
#endif
        }

        numa_init(dummy_numa_init);
}

static void __init init_memory_less_node(int nid)
{
        /* Allocate and initialize node data. Memory-less node is now online. */
        alloc_node_data(nid);
        free_area_init_memoryless_node(nid);

        /*
         * All zonelists will be built later in start_kernel() after per cpu
         * areas are initialized.
         */
}

/*
 * A node may exist which has one or more Generic Initiators but no CPUs and no
 * memory.
 *
 * This function must be called after init_cpu_to_node(), to ensure that any
 * memoryless CPU nodes have already been brought online, and before the
 * node_data[nid] is needed for zone list setup in build_all_zonelists().
 *
 * When this function is called, any nodes containing either memory and/or CPUs
 * will already be online and there is no need to do anything extra, even if
 * they also contain one or more Generic Initiators.
 */
void __init init_gi_nodes(void)
{
        int nid;

        for_each_node_state(nid, N_GENERIC_INITIATOR)
                if (!node_online(nid))
                        init_memory_less_node(nid);
}

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[]
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for the NUMA
 * emulation and fake-node cases (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner by numa_init_array()
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node = numa_cpu_node(cpu);

                if (node == NUMA_NO_NODE)
                        continue;

                if (!node_online(node))
                        init_memory_less_node(node);

                numa_set_node(cpu, node);
        }
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else   /* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

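/* Set or clear @cpu in @node's cpumask, with verbose debugging output. */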
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
        struct cpumask *mask;

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        mask = node_to_cpumask_map[node];
        if (!mask) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu",
                cpu, node, cpumask_pr_args(mask));
        return;
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
        debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
        if ((unsigned)node >= nr_node_ids) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
        }
        if (node_to_cpumask_map[node] == NULL) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return cpu_online_mask;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif  /* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_KEEP_MEMINFO
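/* Find the node whose memblk in @mi contains @start, or NUMA_NO_NODE. */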
static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
{
        int i;

        for (i = 0; i < mi->nr_blks; i++)
                if (mi->blk[i].start <= start && mi->blk[i].end > start)
                        return mi->blk[i].nid;
        return NUMA_NO_NODE;
}

int phys_to_target_node(phys_addr_t start)
{
        int nid = meminfo_to_nid(&numa_meminfo, start);

        /*
         * Prefer online nodes, but if reserved memory might be
         * hot-added, continue the search with the reserved ranges.
         */
        if (nid != NUMA_NO_NODE)
                return nid;

        return meminfo_to_nid(&numa_reserved_meminfo, start);
}
EXPORT_SYMBOL_GPL(phys_to_target_node);

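/*
 * Map a hot-added physical address to a node, falling back to the node
 * of the first parsed memblk.
 */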
int memory_add_physaddr_to_nid(u64 start)
{
        int nid = meminfo_to_nid(&numa_meminfo, start);

        if (nid == NUMA_NO_NODE)
                nid = numa_meminfo.blk[0].nid;
        return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif