linux/arch/avr32/kernel/setup.c
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/kernel.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/sysreg.h>

#include <mach/board.h>
#include <mach/init.h>

extern int root_mountflags;

/*
 * Initialize loops_per_jiffy as 5000000 (500 MIPS).
 * Better make it too large than too small...
 */
struct avr32_cpuinfo boot_cpu_data = {
        .loops_per_jiffy = 5000000
};
EXPORT_SYMBOL(boot_cpu_data);

static char __initdata command_line[COMMAND_LINE_SIZE];

/*
 * Standard memory resources
 */
static struct resource __initdata kernel_data = {
        .name   = "Kernel data",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_MEM,
};
static struct resource __initdata kernel_code = {
        .name   = "Kernel code",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_MEM,
        .sibling = &kernel_data,
};

/*
 * Available system RAM and reserved regions are kept as singly linked
 * lists. These lists are traversed using the sibling pointer in
 * struct resource and are kept sorted at all times.
 */
static struct resource *__initdata system_ram;
static struct resource *__initdata reserved = &kernel_code;

/*
 * We need to allocate these before the bootmem allocator is up and
 * running, so we need this "cache". 32 entries are probably enough
 * for all but the most insanely complex systems.
 */
static struct resource __initdata res_cache[32];
static unsigned int __initdata res_cache_next_free;

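/*
 * Copy the early system_ram and reserved lists into the kernel's
 * iomem resource tree. The copies are made with alloc_bootmem_low()
 * because the originals live in __initdata (and in res_cache) and go
 * away once the init sections are freed.
 */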
static void __init resource_init(void)
{
        struct resource *mem, *res;
        struct resource *new;

        kernel_code.start = __pa(init_mm.start_code);

        for (mem = system_ram; mem; mem = mem->sibling) {
                new = alloc_bootmem_low(sizeof(struct resource));
                memcpy(new, mem, sizeof(struct resource));

                new->sibling = NULL;
                if (request_resource(&iomem_resource, new))
                        printk(KERN_WARNING "Bad RAM resource %08x-%08x\n",
                               mem->start, mem->end);
        }

        for (res = reserved; res; res = res->sibling) {
                new = alloc_bootmem_low(sizeof(struct resource));
                memcpy(new, res, sizeof(struct resource));

                new->sibling = NULL;
                if (insert_resource(&iomem_resource, new))
                        printk(KERN_WARNING
                               "Bad reserved resource %s (%08x-%08x)\n",
                               res->name, res->start, res->end);
        }
}

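/*
 * Insert a block of physical RAM into the sorted system_ram list.
 * Overlaps with an already registered bank indicate a broken memory
 * map from the boot loader; such blocks are dropped with a warning.
 */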
static void __init
add_physical_memory(resource_size_t start, resource_size_t end)
{
        struct resource *new, *next, **pprev;

        for (pprev = &system_ram, next = system_ram; next;
             pprev = &next->sibling, next = next->sibling) {
                if (end < next->start)
                        break;
                if (start <= next->end) {
                        printk(KERN_WARNING
                               "Warning: Physical memory map is broken\n");
                        printk(KERN_WARNING
                               "Warning: %08x-%08x overlaps %08x-%08x\n",
                               start, end, next->start, next->end);
                        return;
                }
        }

        if (res_cache_next_free >= ARRAY_SIZE(res_cache)) {
                printk(KERN_WARNING
                       "Warning: Failed to add physical memory %08x-%08x\n",
                       start, end);
                return;
        }

        new = &res_cache[res_cache_next_free++];
        new->start = start;
        new->end = end;
        new->name = "System RAM";
        new->sibling = next;
        new->flags = IORESOURCE_MEM;

        *pprev = new;
}

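/*
 * Insert a region into the sorted 'reserved' list. Returns -EINVAL
 * for a negative-sized region, -ENOMEM if res_cache is exhausted and
 * -EBUSY if the region overlaps an existing reservation.
 */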
static int __init
add_reserved_region(resource_size_t start, resource_size_t end,
                    const char *name)
{
        struct resource *new, *next, **pprev;

        if (end < start)
                return -EINVAL;

        if (res_cache_next_free >= ARRAY_SIZE(res_cache))
                return -ENOMEM;

        for (pprev = &reserved, next = reserved; next;
             pprev = &next->sibling, next = next->sibling) {
                if (end < next->start)
                        break;
                if (start <= next->end)
                        return -EBUSY;
        }

        new = &res_cache[res_cache_next_free++];
        new->start = start;
        new->end = end;
        new->name = name;
        new->sibling = next;
        new->flags = IORESOURCE_MEM;

        *pprev = new;

        return 0;
}

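/*
 * Find the lowest address inside 'mem' where a block of 'size' bytes
 * aligned to 'align' does not collide with any reserved region.
 * Returns mem->end + 1 if no such address exists.
 */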
static unsigned long __init
find_free_region(const struct resource *mem, resource_size_t size,
                 resource_size_t align)
{
        struct resource *res;
        unsigned long target;

        target = ALIGN(mem->start, align);
        for (res = reserved; res; res = res->sibling) {
                if ((target + size) <= res->start)
                        break;
                if (target <= res->end)
                        target = ALIGN(res->end + 1, align);
        }

        if ((target + size) > (mem->end + 1))
                return mem->end + 1;

        return target;
}

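/*
 * Carve a free block of the given size and alignment out of one of
 * the registered RAM banks and add it to the reserved list. On
 * success, the physical start address is returned through *start.
 */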
static int __init
alloc_reserved_region(resource_size_t *start, resource_size_t size,
                      resource_size_t align, const char *name)
{
        struct resource *mem;
        resource_size_t target;
        int ret;

        for (mem = system_ram; mem; mem = mem->sibling) {
                target = find_free_region(mem, size, align);
                if (target <= mem->end) {
                        ret = add_reserved_region(target, target + size - 1,
                                                  name);
                        if (!ret)
                                *start = target;
                        return ret;
                }
        }

        return -ENOMEM;
}

/*
 * Early framebuffer allocation. Works as follows:
 *   - If fbmem_size is zero, nothing will be allocated or reserved.
 *   - If fbmem_start is zero when setup_bootmem() is called,
 *     a block of fbmem_size bytes will be reserved before bootmem
 *     initialization. It will be aligned to the largest page size
 *     that fbmem_size is a multiple of.
 *   - If fbmem_start is nonzero, an area of size fbmem_size will be
 *     reserved at the physical address fbmem_start if possible. If
 *     it collides with other reserved memory, a different block of
 *     the same size will be allocated, just as if fbmem_start was zero.
 *
 * Board-specific code may use these variables to set up platform data
 * for the framebuffer driver if fbmem_size is nonzero.
 */
resource_size_t __initdata fbmem_start;
resource_size_t __initdata fbmem_size;

/*
 * "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for
 * use as framebuffer.
 *
 * "fbmem=xxx[kKmM]@yyy[kKmM]" defines a memory region of size xxx and
 * starting at yyy to be reserved for use as framebuffer.
 *
 * The kernel won't verify that the memory region starting at yyy
 * actually contains usable RAM.
 */
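/*
 * For example (values are illustrative only):
 *
 *   fbmem=600k              reserve 600 KiB, placed automatically
 *   fbmem=600k@0x10600000   reserve 600 KiB at physical 0x10600000
 */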
static int __init early_parse_fbmem(char *p)
{
        int ret;
        unsigned long align;

        fbmem_size = memparse(p, &p);
        if (*p == '@') {
                fbmem_start = memparse(p + 1, &p);
                ret = add_reserved_region(fbmem_start,
                                          fbmem_start + fbmem_size - 1,
                                          "Framebuffer");
                if (ret) {
                        printk(KERN_WARNING
                               "Failed to reserve framebuffer memory\n");
                        fbmem_start = 0;
                }
        }

        if (!fbmem_start) {
                if ((fbmem_size & 0x000fffffUL) == 0)
                        align = 0x100000;       /* 1 MiB */
                else if ((fbmem_size & 0x0000ffffUL) == 0)
                        align = 0x10000;        /* 64 KiB */
                else
                        align = 0x1000;         /* 4 KiB */

                ret = alloc_reserved_region(&fbmem_start, fbmem_size,
                                            align, "Framebuffer");
                if (ret) {
                        printk(KERN_WARNING
                               "Failed to allocate framebuffer memory\n");
                        fbmem_size = 0;
                } else {
                        memset(__va(fbmem_start), 0, fbmem_size);
                }
        }

        return 0;
}
early_param("fbmem", early_parse_fbmem);

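/*
 * ATAG_CORE carries the root device and its mount flags, as passed
 * by the boot loader.
 */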
static int __init parse_tag_core(struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = new_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}
__tagtable(ATAG_CORE, parse_tag_core);

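/*
 * ATAG_MEM describes one bank of physical SDRAM. Each valid (non-empty)
 * entry is added to the system_ram list.
 */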
static int __init parse_tag_mem(struct tag *tag)
{
        unsigned long start, end;

        /*
         * Ignore zero-sized entries. If we're running standalone, the
         * SDRAM code may emit such entries if something goes
         * wrong...
         */
        if (tag->u.mem_range.size == 0)
                return 0;

        start = tag->u.mem_range.addr;
        end = tag->u.mem_range.addr + tag->u.mem_range.size - 1;

        add_physical_memory(start, end);
        return 0;
}
__tagtable(ATAG_MEM, parse_tag_mem);

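/*
 * ATAG_RDIMG points at an initrd image loaded into RAM by the boot
 * loader. The image is reserved so that bootmem won't hand it out,
 * and initrd_start/initrd_end are set up for the initrd code.
 */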
static int __init parse_tag_rdimg(struct tag *tag)
{
#ifdef CONFIG_BLK_DEV_INITRD
        struct tag_mem_range *mem = &tag->u.mem_range;
        int ret;

        if (initrd_start) {
                printk(KERN_WARNING
                       "Warning: Only the first initrd image will be used\n");
                return 0;
        }

        ret = add_reserved_region(mem->addr, mem->addr + mem->size - 1,
                                  "initrd");
        if (ret) {
                printk(KERN_WARNING
                       "Warning: Failed to reserve initrd memory\n");
                return ret;
        }

        initrd_start = (unsigned long)__va(mem->addr);
        initrd_end = initrd_start + mem->size;
#else
        printk(KERN_WARNING "RAM disk image present, but "
               "no initrd support in kernel, ignoring\n");
#endif

        return 0;
}
__tagtable(ATAG_RDIMG, parse_tag_rdimg);

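/*
 * ATAG_RSVD_MEM marks a memory range that the kernel must leave
 * alone; it is simply added to the reserved list.
 */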
static int __init parse_tag_rsvd_mem(struct tag *tag)
{
        struct tag_mem_range *mem = &tag->u.mem_range;

        return add_reserved_region(mem->addr, mem->addr + mem->size - 1,
                                   "Reserved");
}
__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);

static int __init parse_tag_cmdline(struct tag *tag)
{
        strlcpy(boot_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
        return 0;
}
__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

static int __init parse_tag_clock(struct tag *tag)
{
        /*
         * We'll figure out the clocks by peeking at the system
         * manager regs directly.
         */
        return 0;
}
__tagtable(ATAG_CLOCK, parse_tag_clock);

/*
 * Scan the tag table for this tag, and call its parse function. The
 * tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list we got from the boot loader
 */
static void __init parse_tags(struct tag *t)
{
        for (; t->hdr.tag != ATAG_NONE; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                               "Ignoring unrecognised tag 0x%08x\n",
                               t->hdr.tag);
}

/*
 * Find a free memory region large enough for storing the
 * bootmem bitmap.
 */
static unsigned long __init
find_bootmap_pfn(const struct resource *mem)
{
        unsigned long bootmap_pages, bootmap_len;
        unsigned long node_pages = PFN_UP(mem->end - mem->start + 1);
        unsigned long bootmap_start;

        bootmap_pages = bootmem_bootmap_pages(node_pages);
        bootmap_len = bootmap_pages << PAGE_SHIFT;

        /*
         * Find a large enough region without reserved pages for
         * storing the bootmem bitmap. We can take advantage of the
         * fact that all lists have been sorted.
         *
         * We have to check that we don't collide with any reserved
         * regions, which includes the kernel image and any RAMDISK
         * images.
         */
        bootmap_start = find_free_region(mem, bootmap_len, PAGE_SIZE);

        return bootmap_start >> PAGE_SHIFT;
}

#define MAX_LOWMEM      HIGHMEM_START
#define MAX_LOWMEM_PFN  PFN_DOWN(MAX_LOWMEM)

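/*
 * Set up the boot-time allocator. Only the first bank of system RAM
 * is used; the bootmem bitmap is placed in a free part of that bank
 * and every region on the reserved list is marked as in use.
 */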
static void __init setup_bootmem(void)
{
        unsigned bootmap_size;
        unsigned long first_pfn, bootmap_pfn, pages;
        unsigned long max_pfn, max_low_pfn;
        unsigned node = 0;
        struct resource *res;

        printk(KERN_INFO "Physical memory:\n");
        for (res = system_ram; res; res = res->sibling)
                printk("  %08x-%08x\n", res->start, res->end);
        printk(KERN_INFO "Reserved memory:\n");
        for (res = reserved; res; res = res->sibling)
                printk("  %08x-%08x: %s\n",
                       res->start, res->end, res->name);

        nodes_clear(node_online_map);

        if (system_ram->sibling)
                printk(KERN_WARNING "Only using first memory bank\n");

        for (res = system_ram; res; res = NULL) {
                first_pfn = PFN_UP(res->start);
                max_low_pfn = max_pfn = PFN_DOWN(res->end + 1);
                bootmap_pfn = find_bootmap_pfn(res);
                if (bootmap_pfn > max_pfn)
                        panic("No space for bootmem bitmap!\n");

                if (max_low_pfn > MAX_LOWMEM_PFN) {
                        max_low_pfn = MAX_LOWMEM_PFN;
#ifndef CONFIG_HIGHMEM
                        /*
                         * Lowmem is memory that can be addressed
                         * directly through P1/P2
                         */
                        printk(KERN_WARNING
                               "Node %u: Only %ld MiB of memory will be used.\n",
                               node, MAX_LOWMEM >> 20);
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else
#error HIGHMEM is not supported by AVR32 yet
#endif
                }

                /* Initialize the boot-time allocator with low memory only. */
                bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
                                                 first_pfn, max_low_pfn);

                /*
                 * Register fully available RAM pages with the bootmem
                 * allocator.
                 */
                pages = max_low_pfn - first_pfn;
                free_bootmem_node(NODE_DATA(node), PFN_PHYS(first_pfn),
                                  PFN_PHYS(pages));

                /* Reserve space for the bootmem bitmap... */
                reserve_bootmem_node(NODE_DATA(node),
                                     PFN_PHYS(bootmap_pfn),
                                     bootmap_size,
                                     BOOTMEM_DEFAULT);

                /* ...and any other reserved regions. */
                for (res = reserved; res; res = res->sibling) {
                        if (res->start > PFN_PHYS(max_pfn))
                                break;

                        /*
                         * resource_init will complain about partial
                         * overlaps, so we'll just ignore such
                         * resources for now.
                         */
                        if (res->start >= PFN_PHYS(first_pfn)
                            && res->end < PFN_PHYS(max_pfn))
                                reserve_bootmem_node(
                                        NODE_DATA(node), res->start,
                                        res->end - res->start + 1,
                                        BOOTMEM_DEFAULT);
                }

                node_set_online(node);
        }
}

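/*
 * Main architecture-specific boot-time setup: record the kernel image
 * layout, parse the boot loader's tag list, set up the processor,
 * platform and board, read the CPU clock, and bring up the bootmem
 * allocator and paging before registering the final memory resources.
 */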
void __init setup_arch (char **cmdline_p)
{
        struct clk *cpu_clk;

        init_mm.start_code = (unsigned long)_text;
        init_mm.end_code = (unsigned long)_etext;
        init_mm.end_data = (unsigned long)_edata;
        init_mm.brk = (unsigned long)_end;

        /*
         * Include .init section to make allocations easier. It will
         * be removed before the resource is actually requested.
         */
        kernel_code.start = __pa(__init_begin);
        kernel_code.end = __pa(init_mm.end_code - 1);
        kernel_data.start = __pa(init_mm.end_code);
        kernel_data.end = __pa(init_mm.brk - 1);

        parse_tags(bootloader_tags);

        setup_processor();
        setup_platform();
        setup_board();

        cpu_clk = clk_get(NULL, "cpu");
        if (IS_ERR(cpu_clk)) {
                printk(KERN_WARNING "Warning: Unable to get CPU clock\n");
        } else {
                unsigned long cpu_hz = clk_get_rate(cpu_clk);

                /*
                 * Well, duh, but it's probably a good idea to
                 * increment the use count.
                 */
                clk_enable(cpu_clk);

                boot_cpu_data.clk = cpu_clk;
                boot_cpu_data.loops_per_jiffy = cpu_hz * 4;
                printk("CPU: Running at %lu.%03lu MHz\n",
                       ((cpu_hz + 500) / 1000) / 1000,
                       ((cpu_hz + 500) / 1000) % 1000);
        }

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;
        parse_early_param();

        setup_bootmem();

#ifdef CONFIG_VT
        conswitchp = &dummy_con;
#endif

        paging_init();
        resource_init();
}