linux-old/arch/i386/kernel/setup.c
<<
>>
Prefs
   1/*
   2 *  linux/arch/i386/kernel/setup.c
   3 *
   4 *  Copyright (C) 1995  Linus Torvalds
   5 *
   6 *  Enhanced CPU type detection by Mike Jagdis, Patrick St. Jean
   7 *  and Martin Mares, November 1997.
   8 *
   9 *  Force Cyrix 6x86(MX) and M II processors to report MTRR capability
  10 *  and Cyrix "coma bug" recognition by
  11 *      Zoltán Böszörményi <zboszor@mail.externet.hu> February 1999.
  12 * 
  13 *  Force Centaur C6 processors to report MTRR capability.
   14 *      Bart Hartgers <bart@etpmod.phys.tue.nl>, May 1999.
  15 *
  16 *  Intel Mobile Pentium II detection fix. Sean Gilley, June 1999.
  17 *
  18 *  IDT Winchip tweaks, misc clean ups.
  19 *      Dave Jones <davej@suse.de>, August 1999
  20 *
  21 *      Added proper L2 cache detection for Coppermine
  22 *      Dragan Stancevic <visitor@valinux.com>, October 1999
  23 *
  24 *      Improved Intel cache detection.
  25 *      Dave Jones <davej@suse.de>, October 1999
  26 *
  27 *      Added proper Cascades CPU and L2 cache detection for Cascades
  28 *      and 8-way type cache happy bunch from Intel:^)
  29 *      Dragan Stancevic <visitor@valinux.com>, May 2000
  30 *
  31 *      Transmeta CPU detection.  H. Peter Anvin <hpa@zytor.com>, May 2000
  32 *
  33 *      Cleaned up get_model_name(), AMD_model(), added display_cacheinfo().
  34 *      Dave Jones <davej@suse.de>, September 2000
  35 *
  36 *      Added Cyrix III initial detection code
   37 *      Alan Cox <alan@redhat.com>, September 2000
  38 *
  39 *      Improve cache size calculation
  40 *      Asit Mallick <asit.k.mallick@intel.com>, October 2000
  41 *      Andrew Ip <aip@turbolinux.com>, October 2000
  42 */
  43
  44/*
  45 * This file handles the architecture-dependent parts of initialization
  46 */
  47
  48#include <linux/errno.h>
  49#include <linux/sched.h>
  50#include <linux/kernel.h>
  51#include <linux/mm.h>
  52#include <linux/stddef.h>
  53#include <linux/unistd.h>
  54#include <linux/ptrace.h>
  55#include <linux/malloc.h>
  56#include <linux/user.h>
  57#include <linux/a.out.h>
  58#include <linux/tty.h>
  59#include <linux/ioport.h>
  60#include <linux/delay.h>
  61#include <linux/config.h>
  62#include <linux/init.h>
  63#include <linux/apm_bios.h>
  64#ifdef CONFIG_BLK_DEV_RAM
  65#include <linux/blk.h>
  66#endif
  67#include <asm/processor.h>
  68#include <linux/console.h>
  69#include <asm/uaccess.h>
  70#include <asm/system.h>
  71#include <asm/io.h>
  72#include <asm/smp.h>
  73#include <asm/cobalt.h>
  74#include <asm/msr.h>
  75#include <asm/dma.h>
  76#include <asm/e820.h>
  77
/*
 * Machine setup..
 */

char ignore_irq13 = 0;		/* set if exception 16 works */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

/*
 * Bus types ..
 */
int EISA_bus = 0;
int MCA_bus = 0;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id = 0;
unsigned int machine_submodel_id = 0;
unsigned int BIOS_revision = 0;
unsigned int mca_pentium_flag = 0;

/*
 * Setup options
 */
/* Raw BIOS hard-disk parameter data, copied verbatim from the boot params. */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct apm_info apm_info;
/* BIOS system-description table: a length word followed by raw bytes. */
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];		/* trailing variable-length data (pre-C99 idiom) */
};
/* Kernel-private copy of the BIOS memory map, filled in during setup_arch(). */
struct e820map e820 __initdata = { 0, };

unsigned char aux_device_present;	/* PS/2 pointing-device byte from the boot params */

#ifdef CONFIG_BLK_DEV_RAM
extern int rd_doload;		/* 1 = load ramdisk, 0 = don't load */
extern int rd_prompt;		/* 1 = prompt for ramdisk, 0 = don't prompt */
extern int rd_image_start;	/* starting block # of image */
#endif

extern int root_mountflags;
extern int _etext, _edata, _end;	/* linker-provided section boundaries */
extern unsigned long cpu_khz;

/*
 * This is set up by the setup-routine at boot-time.
 * PARAM is the zero page filled in by the real-mode boot code; the fixed
 * offsets below are part of the i386 boot protocol.
 */
#define PARAM	((unsigned char *)empty_zero_page)
#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
#define EXT_MEM_K (*(unsigned short *) (PARAM+2))	/* memory above 1MB, from int 15h AH=88h */
#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))	/* memory above 1MB, from int 15h AX=E801h */
#define E820_MAP_NR (*(char*) (PARAM+E820NR))		/* number of BIOS e820 entries */
#define E820_MAP  ((struct e820entry *) (PARAM+E820MAP))
#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
#define INITRD_START (*(unsigned long *) (PARAM+0x218))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
#define COMMAND_LINE ((char *) (PARAM+2048))
#define COMMAND_LINE_SIZE 256

/* Bit layout of RAMDISK_FLAGS. */
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

/* Fallback top of conventional (base) memory when the EBDA pointer is bogus. */
#define BIOS_ENDBASE	0x9F000
 149
#ifdef	CONFIG_VISWS
/* SGI Visual Workstation board identification, determined once at boot. */
char visws_board_type = -1;
char visws_board_rev = -1;

/* I/O base of the PIIX4 power-management block. */
#define	PIIX_PM_START		0x0F80

/* Where we map the SuperI/O ("307") GPIO block: right after the PIIX4 PM area. */
#define	SIO_GPIO_START		0x0FC0

/* Where we map the SuperI/O power-management block. */
#define	SIO_PM_START		0x0FC8

#define	PMBASE			PIIX_PM_START
#define	GPIREG0			(PMBASE+0x30)
#define	GPIREG(x)		(GPIREG0+((x)/8))
#define	PIIX_GPI_BD_ID1		18		/* GPI line carrying the board-type bit */
#define	PIIX_GPI_BD_REG		GPIREG(PIIX_GPI_BD_ID1)

#define	PIIX_GPI_BD_SHIFT	(PIIX_GPI_BD_ID1 % 8)

/* SuperI/O configuration index/data port pair. */
#define	SIO_INDEX	0x2e
#define	SIO_DATA	0x2f

/* SuperI/O configuration registers: device select, enable, base MSB/LSB. */
#define	SIO_DEV_SEL	0x7
#define	SIO_DEV_ENB	0x30
#define	SIO_DEV_MSB	0x60
#define	SIO_DEV_LSB	0x61

#define	SIO_GP_DEV	0x7		/* logical device number of the GPIO block */

#define	SIO_GP_BASE	SIO_GPIO_START
#define	SIO_GP_MSB	(SIO_GP_BASE>>8)
#define	SIO_GP_LSB	(SIO_GP_BASE&0xff)

#define	SIO_GP_DATA1	(SIO_GP_BASE+0)

#define	SIO_PM_DEV	0x8		/* logical device number of the PM block */

#define	SIO_PM_BASE	SIO_PM_START
#define	SIO_PM_MSB	(SIO_PM_BASE>>8)
#define	SIO_PM_LSB	(SIO_PM_BASE&0xff)
#define	SIO_PM_INDEX	(SIO_PM_BASE+0)
#define	SIO_PM_DATA	(SIO_PM_BASE+1)

#define	SIO_PM_FER2	0x1		/* PM function-enable register 2 */

#define	SIO_PM_GP_EN	0x80		/* FER2 bit that enables GPIO register access */

/*
 * Determine the Visual Workstation board type (320 vs 540) from a PIIX4
 * GPI pin, then program the SuperI/O chip so its GPIO block becomes
 * readable and fetch the 7-bit board revision from it.  Results are
 * stored in visws_board_type / visws_board_rev and printed.
 * The outb_p() sequences below are index/data register programming --
 * their order is significant; do not reorder.
 */
static void
visws_get_board_type_and_rev(void)
{
        int raw;

        /*
         * NOTE(review): the sampled port value is masked with
         * PIIX_GPI_BD_REG, which is the register *address*, not a bit
         * mask -- presumably (1 << PIIX_GPI_BD_SHIFT) was intended.
         * Confirm against SGI/PIIX4 docs before changing; this is how
         * the code has always shipped.
         */
        visws_board_type = (char)(inb_p(PIIX_GPI_BD_REG) & PIIX_GPI_BD_REG)
                                                         >> PIIX_GPI_BD_SHIFT;
/*
 * Get Board rev.
 * First, we have to initialize the 307 part to allow us access
 * to the GPIO registers.  Let's map them at 0x0fc0 which is right
 * after the PIIX4 PM section.
 */
        outb_p(SIO_DEV_SEL, SIO_INDEX);
        outb_p(SIO_GP_DEV, SIO_DATA);	/* Talk to GPIO regs. */

        outb_p(SIO_DEV_MSB, SIO_INDEX);
        outb_p(SIO_GP_MSB, SIO_DATA);	/* MSB of GPIO base address */

        outb_p(SIO_DEV_LSB, SIO_INDEX);
        outb_p(SIO_GP_LSB, SIO_DATA);	/* LSB of GPIO base address */

        outb_p(SIO_DEV_ENB, SIO_INDEX);
        outb_p(1, SIO_DATA);		/* Enable GPIO registers. */

/*
 * Now, we have to map the power management section to write
 * a bit which enables access to the GPIO registers.
 * What lunatic came up with this shit?
 */
        outb_p(SIO_DEV_SEL, SIO_INDEX);
        outb_p(SIO_PM_DEV, SIO_DATA);	/* Talk to GPIO regs. */

        outb_p(SIO_DEV_MSB, SIO_INDEX);
        outb_p(SIO_PM_MSB, SIO_DATA);	/* MSB of PM base address */

        outb_p(SIO_DEV_LSB, SIO_INDEX);
        outb_p(SIO_PM_LSB, SIO_DATA);	/* LSB of PM base address */

        outb_p(SIO_DEV_ENB, SIO_INDEX);
        outb_p(1, SIO_DATA);		/* Enable PM registers. */

/*
 * Now, write the PM register which enables the GPIO registers.
 */
        outb_p(SIO_PM_FER2, SIO_PM_INDEX);
        outb_p(SIO_PM_GP_EN, SIO_PM_DATA);

/*
 * Now, initialize the GPIO registers.
 * We want them all to be inputs which is the
 * power on default, so let's leave them alone.
 * So, let's just read the board rev!
 */
        raw = inb_p(SIO_GP_DATA1);
        raw &= 0x7f;	/* 7 bits of valid board revision ID. */

        /*
         * NOTE(review): the indentation below is misleading, but the
         * braces do balance -- the printk runs unconditionally at
         * function scope, after the if/else chain.
         */
        if (visws_board_type == VISWS_320) {
                if (raw < 0x6) {
                        visws_board_rev = 4;
                } else if (raw < 0xc) {
                        visws_board_rev = 5;
                } else {
                        visws_board_rev = 6;

                }
        } else if (visws_board_type == VISWS_540) {
                        visws_board_rev = 2;
                } else {
                        visws_board_rev = raw;
                }

                printk("Silicon Graphics %s (rev %d)\n",
                        visws_board_type == VISWS_320 ? "320" :
                        (visws_board_type == VISWS_540 ? "540" :
                                        "unknown"),
                                        visws_board_rev);
        }
 274#endif
 275
 276static void __init add_memory_region(unsigned long long start,
 277                                     unsigned long long size, int type)
 278{
 279       int x = e820.nr_map;
 280
 281       if (x == E820MAX) {
 282           printk("Ooops! Too many entries in the memory map!\n");
 283           return;
 284       }
 285
 286       e820.map[x].addr = start;
 287       e820.map[x].size = size;
 288       e820.map[x].type = type;
 289       e820.nr_map++;
 290} /* add_memory_region */
 291
 292unsigned long i386_endbase __initdata =  0;
 293
 294static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
 295{
 296       /* Only one memory region (or negative)? Ignore it */
 297       if (nr_map < 2)
 298               return -1;
 299
 300       do {
 301               unsigned long long start = biosmap->addr;
 302               unsigned long long size = biosmap->size;
 303               unsigned long long end = start + size;
 304               unsigned long type = biosmap->type;
 305
 306               /* Overflow in 64 bits? Ignore the memory map. */
 307               if (start > end)
 308                       return -1;
 309
 310               /*
 311                * Some BIOSes claim RAM in the 640k - 1M region.
 312                * Not right. Fix it up.
 313                */
 314               if (type == E820_RAM) {
 315                       if (start < 0x100000ULL && end > i386_endbase) {
 316                               if (start < i386_endbase)
 317                                       add_memory_region(start, i386_endbase-start, type);
 318                               if (end <= 0x100000ULL)
 319                                       continue;
 320                               start = 0x100000ULL;
 321                               size = end - start;
 322                       }
 323               }
 324               add_memory_region(start, size, type);
 325       } while (biosmap++,--nr_map);
 326       return 0;
 327}
 328
 329static void __init print_memory_map(char *who)
 330{
 331        int i;
 332
 333        for (i = 0; i < e820.nr_map; i++) {
 334                printk(" %s: %08lx @ %08lx ", who,
 335                       (unsigned long) e820.map[i].size,
 336                       (unsigned long) e820.map[i].addr);
 337                switch (e820.map[i].type) {
 338                case E820_RAM:  printk("(usable)\n");
 339                                break;
 340                case E820_RESERVED:
 341                                printk("(reserved)\n");
 342                                break;
 343                case E820_ACPI:
 344                                printk("(ACPI data)\n");
 345                                break;
 346                case E820_NVS:
 347                                printk("(ACPI NVS)\n");
 348                                break;
 349                default:        printk("type %lu\n", e820.map[i].type);
 350                                break;
 351                }
 352        }
 353}
 354
 355static void __init setup_memory_region(void)
 356{
 357       char *who = "BIOS-e820";
 358
 359       /*
 360        * Try to copy the BIOS-supplied E820-map.
 361        *
 362        * Otherwise fake a memory map; one section from 0k->640k,
 363        * the next section from 1mb->appropriate_mem_k
 364        */
 365       if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
 366               unsigned long mem_size;
 367
 368               /* compare results from other methods and take the greater */
 369               if (ALT_MEM_K < EXT_MEM_K) {
 370                       mem_size = EXT_MEM_K;
 371                       who = "BIOS-88";
 372               } else {
 373                       mem_size = ALT_MEM_K;
 374                       who = "BIOS-e801";
 375               }
 376
 377               e820.nr_map = 0;
 378               add_memory_region(0, i386_endbase, E820_RAM);
 379               add_memory_region(HIGH_MEMORY, (mem_size << 10), E820_RAM);
 380       }
 381       printk("BIOS-provided physical RAM map:\n");
 382       print_memory_map(who);
 383} /* setup_memory_region */
 384
 385
/* Command line after parsing (options consumed in setup_arch() removed). */
static char command_line[COMMAND_LINE_SIZE] = { 0, };
/* Verbatim copy of the boot command line, exported for /proc/cmdline. */
       char saved_command_line[COMMAND_LINE_SIZE];
 388
/*
 * setup_arch - i386-specific boot-time initialization.
 *
 * Copies the boot parameters out of the zero page, parses the kernel
 * command line (handling "mem=" and "endbase="), determines the
 * end-of-base-memory address, builds the e820 memory map, computes the
 * usable memory range (returned via *memory_start_p/*memory_end_p as
 * virtual addresses), and reserves the legacy motherboard I/O ports.
 * Statement order follows boot-protocol dependencies; do not reorder.
 */
__initfunc(void setup_arch(char **cmdline_p,
        unsigned long * memory_start_p, unsigned long * memory_end_p))
{
        unsigned long memory_start, memory_end = 0;
        char c = ' ', *to = command_line, *from = COMMAND_LINE;
        int len = 0;
        int read_endbase_from_BIOS = 1;
        int i;
        unsigned long user_mem = 0;     /* nonzero once "mem=NNN" is parsed */

#ifdef CONFIG_VISWS
        visws_get_board_type_and_rev();
#endif

        /* Copy the boot parameters out of the zero page. */
        ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
        drive_info = DRIVE_INFO;
        screen_info = SCREEN_INFO;
        apm_info.bios = APM_BIOS_INFO;
        /* A non-empty system description table means an MCA-era BIOS. */
        if( SYS_DESC_TABLE.length != 0 ) {
                MCA_bus = SYS_DESC_TABLE.table[3] &0x2;
                machine_id = SYS_DESC_TABLE.table[0];
                machine_submodel_id = SYS_DESC_TABLE.table[1];
                BIOS_revision = SYS_DESC_TABLE.table[2];
        }
        aux_device_present = AUX_DEVICE_INFO;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        /* Free memory starts right after the kernel image. */
        memory_start = (unsigned long) &_end;
        init_task.mm->start_code = PAGE_OFFSET;
        init_task.mm->end_code = (unsigned long) &_etext;
        init_task.mm->end_data = (unsigned long) &_edata;
        init_task.mm->brk = (unsigned long) &_end;

        /* Save unparsed command line copy for /proc/cmdline */
        memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
        saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

        for (;;) {
                /*
                 * "mem=nopentium" disables the 4MB page tables.
                 * "mem=XXX[kKmM]" overrides the BIOS-reported
                 * memory size
                 */
                /*
                 * The 4-byte load compares "mem=" in one shot.
                 * NOTE(review): unaligned/type-punned read -- fine on
                 * i386, not portable.  The "to--" drops the option's
                 * preceding space so the option itself is not copied
                 * into the saved command line.
                 */
                if (c == ' ' && *(const unsigned long *)from == *(const unsigned long *)"mem=") {
                        if (to != command_line) to--;
                        if (!memcmp(from+4, "nopentium", 9)) {
                                from += 9+4;
                                boot_cpu_data.x86_capability &= ~X86_FEATURE_PSE;
                        } else {
                                user_mem = simple_strtoul(from+4, &from, 0);
                                if ( *from == 'K' || *from == 'k' ) {
                                        user_mem = user_mem << 10;
                                        from++;
                                } else if ( *from == 'M' || *from == 'm' ) {
                                        user_mem = user_mem << 20;
                                        from++;
                                }
                        }
                }
                /* "endbase=NNN" overrides the BIOS EBDA-derived base-memory top. */
                else if (c == ' ' && !memcmp(from, "endbase=", 8))
                {
                        if (to != command_line) to--;
                        i386_endbase = simple_strtoul(from+8, &from, 0);
                        read_endbase_from_BIOS = 0;
                }
                c = *(from++);
                if (!c)
                        break;
                if (COMMAND_LINE_SIZE <= ++len)
                        break;
                *(to++) = c;
        }
        *to = '\0';
        *cmdline_p = command_line;

        if (read_endbase_from_BIOS)
        {
                /*
                 * The amount of available base memory is now taken from 
                 * WORD 40:13 (The BIOS EBDA pointer) in order to account for 
                 * some recent systems, where its value is smaller than the 
                 * 4K we blindly allowed before.
                 *
                 * (this was pointed out by Josef Moellers from
                 * Siemens Paderborn (Germany) ).
                 */
                i386_endbase = (*(unsigned short *)__va(0x413)*1024)&PAGE_MASK;

                if (!i386_endbase || i386_endbase > 0xA0000)
                {
                        /* Zero is valid according to the BIOS weenies */
                        if(i386_endbase)
                        {
                                printk(KERN_NOTICE "Ignoring bogus EBDA pointer %lX\n", 
                                        i386_endbase);
                        }
                        i386_endbase = BIOS_ENDBASE;
                }
        }

        if (!user_mem)
                setup_memory_region();
        else {
                /* "mem=" given: trust the user, build a two-entry map. */
                e820.nr_map = 0;
                add_memory_region(0, i386_endbase, E820_RAM);
                add_memory_region(HIGH_MEMORY, user_mem-HIGH_MEMORY, E820_RAM);
                printk("USER-provided physical RAM map:\n");
                print_memory_map("USER");
        }

#define PFN_UP(x)      (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)
        /* Find the highest usable RAM address, clipped below 4GB. */
        for (i = 0; i < e820.nr_map; i++) {
                unsigned long long start, end;
                /* RAM? */
                if (e820.map[i].type != E820_RAM)
                        continue;
                start = e820.map[i].addr;
                if (start >= 0xffffffff)
                        continue;
                end = e820.map[i].addr + e820.map[i].size;
                if (start >= end)
                        continue;
                if (end > 0xffffffff)
                        end = 0xffffffff;
                if (end > memory_end)
                        memory_end = end;
        }

#define VMALLOC_RESERVE (64 << 20)      /* 64MB for vmalloc */
#define MAXMEM  ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))

        /* Cap physical memory so the vmalloc area still fits in kernel space. */
        if (memory_end > MAXMEM)
        {
                memory_end = MAXMEM;
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                        MAXMEM>>20);
        }

        /* Report the range back as kernel virtual addresses. */
        memory_end += PAGE_OFFSET;
        *memory_start_p = memory_start;
        *memory_end_p = memory_end;

#ifdef CONFIG_SMP
        /*
         *      Save possible boot-time SMP configuration:
         */
        init_smp_config();
#endif

#ifdef CONFIG_BLK_DEV_INITRD
        /* Pick up the initrd location from the boot loader, if sane. */
        if (LOADER_TYPE) {
                initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
                initrd_end = initrd_start+INITRD_SIZE;
                if (initrd_end > memory_end) {
                        printk("initrd extends beyond end of memory "
                            "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                            initrd_end,memory_end);
                        initrd_start = 0;
                }
        }
#endif

        /* request I/O space for devices used on all i[345]86 PCs */
        request_region(0x00,0x20,"dma1");
        request_region(0x40,0x20,"timer");
        request_region(0x80,0x10,"dma page reg");
        request_region(0xc0,0x20,"dma2");
        request_region(0xf0,0x10,"fpu");

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}
 573
 574
 575__initfunc(static int get_model_name(struct cpuinfo_x86 *c))
 576{
 577        unsigned int n, dummy, *v;
 578
 579        /* 
 580         * Actually we must have cpuid or we could never have
 581         * figured out that this was AMD/Centaur/Cyrix/Transmeta
 582         * from the vendor info :-).
 583         */
 584
 585        cpuid(0x80000000, &n, &dummy, &dummy, &dummy);
 586        if (n < 0x80000004)
 587                return 0;
 588        cpuid(0x80000001, &dummy, &dummy, &dummy, &(c->x86_capability));
 589        v = (unsigned int *) c->x86_model_id;
 590        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 591        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 592        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 593        c->x86_model_id[48] = 0;
 594        
 595        return 1;
 596}
 597
 598
 599__initfunc (static void display_cacheinfo(struct cpuinfo_x86 *c))
 600{
 601        unsigned int n, dummy, ecx, edx;
 602
 603        cpuid(0x80000000, &n, &dummy, &dummy, &dummy);
 604
 605        if (n >= 0x80000005){
 606                cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
 607                printk("CPU: L1 I Cache: %dK  L1 D Cache: %dK\n",
 608                        ecx>>24, edx>>24);
 609                c->x86_cache_size=(ecx>>24)+(edx>>24);
 610        }
 611
 612        /* Yes this can occur - the CyrixIII just has a large L1 */
 613        if (n < 0x80000006)
 614                return; /* No function to get L2 info */
 615
 616        cpuid(0x80000006, &dummy, &dummy, &ecx, &edx);
 617        c->x86_cache_size = ecx>>16;
 618
 619        /* AMD errata T13 (order #21922) */
 620        if(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 621                boot_cpu_data.x86 == 6 && 
 622                boot_cpu_data.x86_model== 3 &&
 623                boot_cpu_data.x86_mask == 0)
 624        {
 625                c->x86_cache_size = 64;
 626        }
 627
 628        printk("CPU: L2 Cache: %dK\n", c->x86_cache_size);
 629}
 630
 631
 632
/*
 * AMD-specific setup: read the marketing name, then on K6-family parts
 * enable memory write allocation via the WHCR MSR (0xC0000082), whose
 * layout differs between early and late K6 steppings.  Returns
 * get_model_name()'s result (1 if a brand string was read, else 0).
 */
__initfunc(static int amd_model(struct cpuinfo_x86 *c))
{
        u32 l, h;
        unsigned long flags;
        /* Total RAM in megabytes; bounds the write-allocate window. */
        int mbytes = max_mapnr >> (20-PAGE_SHIFT);
        
        int r=get_model_name(c);
        
        /*
         *      Now do the cache operations. 
         */
         
        switch(c->x86)
        {
                case 5:
                        if( c->x86_model < 6 )
                        {
                                /* Anyone with a K5 want to fill this in */                             
                                break;
                        }

                        /* K6 with old style WHCR */
                        if( c->x86_model < 8 ||
                                (c->x86_model== 8 && c->x86_mask < 8))
                        {
                                /* We can only write allocate on the low 508Mb */
                                if(mbytes>508)
                                        mbytes=508;
                                        
                                rdmsr(0xC0000082, l, h);
                                /* Only touch WHCR if the BIOS left it disabled. */
                                if((l&0x0000FFFF)==0)
                                {               
                                        /* Bit 0 = enable, bits 1+ = limit in 4MB units. */
                                        l=(1<<0)|((mbytes/4)<<1);
                                        /* Flush caches with interrupts off before
                                           rewriting WHCR; order is mandatory. */
                                        save_flags(flags);
                                        __cli();
                                        __asm__ __volatile__ ("wbinvd": : :"memory");
                                        wrmsr(0xC0000082, l, h);
                                        restore_flags(flags);
                                        printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
                                                mbytes);

                                }
                                break;
                        }
                        if (c->x86_model == 8 || c->x86_model == 9 || c->x86_model == 13)
                        {
                                /* The more serious chips .. */
                                
                                /* New-style WHCR addresses up to 4GB in 4MB units. */
                                if(mbytes>4092)
                                        mbytes=4092;

                                rdmsr(0xC0000082, l, h);
                                /* Only touch WHCR if the BIOS left it disabled. */
                                if((l&0xFFFF0000)==0)
                                {
                                        /* Bit 16 = enable, bits 22+ = limit in 4MB units. */
                                        l=((mbytes>>2)<<22)|(1<<16);
                                        save_flags(flags);
                                        __cli();
                                        __asm__ __volatile__ ("wbinvd": : :"memory");
                                        wrmsr(0xC0000082, l, h);
                                        restore_flags(flags);
                                        printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
                                                mbytes);
                                }

                                /*  Set MTRR capability flag if appropriate  */
                                if((boot_cpu_data.x86_model == 13) ||
                                   (boot_cpu_data.x86_model == 9) ||
                                  ((boot_cpu_data.x86_model == 8) && 
                                   (boot_cpu_data.x86_mask >= 8)))
                                        c->x86_capability |= X86_FEATURE_MTRR;
                                break;
                        }
                        break;

                case 6: /* An Athlon. We can trust the BIOS probably */
                {
                        break;
                }
        }

        display_cacheinfo(c);
        return r;
}
 716
 717__initfunc(static void intel_model(struct cpuinfo_x86 *c))
 718{
 719        unsigned int *v = (unsigned int *) c->x86_model_id;
 720        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 721        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 722        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 723        c->x86_model_id[48] = 0;
 724        printk("CPU: %s\n", c->x86_model_id);
 725}
 726                        
 727
/*
 * Read Cyrix DEVID registers (DIR) to get more detailed info. about the CPU.
 * On parts without DEVID, *dir0 gets a synthetic marker (0xfd or 0xfe)
 * and *dir1 is left unmodified -- callers pre-initialize it to 0.
 * Runs with interrupts disabled: the getCx86/setCx86 index/data
 * accesses must not be interleaved with other port I/O.
 */
static inline void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
        unsigned char ccr2, ccr3;

        /* we test for DEVID by checking whether CCR3 is writable */
        cli();
        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, ccr3 ^ 0x80);        /* flip bit 7 and see if it sticks */
        getCx86(0xc0);   /* dummy to change bus */

        if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
                /* Distinguish pre-DEVID parts by whether CCR2 bit 2 is writable. */
                ccr2 = getCx86(CX86_CCR2);
                setCx86(CX86_CCR2, ccr2 ^ 0x04);
                getCx86(0xc0);  /* dummy */

                if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
                        *dir0 = 0xfd;
                else {                          /* Cx486S A step */
                        setCx86(CX86_CCR2, ccr2);       /* undo the probe write */
                        *dir0 = 0xfe;
                }
        }
        else {
                setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */

                /* read DIR0 and DIR1 CPU registers */
                *dir0 = getCx86(CX86_DIR0);
                *dir1 = getCx86(CX86_DIR1);
        }
        sti();
}
 762
/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c
 */
unsigned char Cx86_dir0_msb __initdata = 0;

/* Family-name fragment, indexed by the DIR0 high nibble in cyrix_model(). */
static char Cx86_model[][9] __initdata = {
        "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
        "M II ", "Unknown"
};
/* Cx486 SLC/DLC variants (DIR0 family 0), indexed by the low nibble. */
static char Cx486_name[][5] __initdata = {
        "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
        "SRx2", "DRx2"
};
/* Cx486S variants (DIR0 family 1, bit 3 clear). */
static char Cx486S_name[][4] __initdata = {
        "S", "S2", "Se", "S2e"
};
/* Cx486DX variants (DIR0 family 1, bit 3 set); '?' = unused encodings. */
static char Cx486D_name[][4] __initdata = {
        "DX", "DX2", "?", "?", "?", "DX4"
};
/* Scratch template: [0] may become 'L', [1]/[2] are patched with the
   clock-multiplier digit by cyrix_model(). */
static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
/* Clock-multiplier digit lookup tables; '?' marks unused encodings. */
static char cyrix_model_mult1[] __initdata = "12??43";
static char cyrix_model_mult2[] __initdata = "12233445";
 786
 787__initfunc(static void cyrix_model(struct cpuinfo_x86 *c))
 788{
 789        unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
 790        char *buf = c->x86_model_id;
 791        const char *p = NULL;
 792
 793        do_cyrix_devid(&dir0, &dir1);
 794
 795        Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
 796        dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */
 797
 798        /* common case step number/rev -- exceptions handled below */
 799        c->x86_model = (dir1 >> 4) + 1;
 800        c->x86_mask = dir1 & 0xf;
 801
 802        /* Now cook; the original recipe is by Channing Corn, from Cyrix.
 803         * We do the same thing for each generation: we work out
 804         * the model, multiplier and stepping.  Black magic included,
 805         * to make the silicon step/rev numbers match the printed ones.
 806         */
 807         
 808        switch (dir0_msn) {
 809                unsigned char tmp;
 810
 811        case 0: /* Cx486SLC/DLC/SRx/DRx */
 812                p = Cx486_name[dir0_lsn & 7];
 813                break;
 814
 815        case 1: /* Cx486S/DX/DX2/DX4 */
 816                p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
 817                        : Cx486S_name[dir0_lsn & 3];
 818                break;
 819
 820        case 2: /* 5x86 */
 821                Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
 822                p = Cx86_cb+2;
 823                break;
 824
 825        case 3: /* 6x86/6x86L */
 826                Cx86_cb[1] = ' ';
 827                Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
 828                if (dir1 > 0x21) { /* 686L */
 829                        Cx86_cb[0] = 'L';
 830                        p = Cx86_cb;
 831                        (c->x86_model)++;
 832                } else             /* 686 */
 833                        p = Cx86_cb+1;
 834                /* Emulate MTRRs using Cyrix's ARRs. */
 835                c->x86_capability |= X86_FEATURE_MTRR;
 836                /* 6x86's contain this bug */
 837                c->coma_bug = 1;
 838                break;
 839
 840        case 4: /* MediaGX/GXm */
 841                /*
 842                 *      Life sometimes gets weiiiiiiiird if we use this
 843                 *      on the MediaGX. So we turn it off for now. 
 844                 */
 845                
 846#ifdef CONFIG_PCI_QUIRKS
 847                /* It isnt really a PCI quirk directly, but the cure is the
 848                   same. The MediaGX has deep magic SMM stuff that handles the
 849                   SB emulation. It thows away the fifo on disable_dma() which
 850                   is wrong and ruins the audio. 
 851                   
 852                   Bug2: VSA1 has a wrap bug so that using maximum sized DMA 
 853                   causes bad things. According to NatSemi VSA2 has another
 854                   bug to do with 'hlt'. I've not seen any boards using VSA2
 855                   and X doesn't seem to support it either so who cares 8).
 856                   VSA1 we work around however.
 857                */
 858
 859                printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bug.\n");
 860                isa_dma_bridge_buggy = 2;
 861                                                                                                        
 862#endif
 863                /* GXm supports extended cpuid levels 'ala' AMD */
 864                if (c->cpuid_level == 2) {
 865                        get_model_name(c);  /* get CPU marketing name */
 866                        c->x86_capability&=~X86_FEATURE_TSC;
 867                        return;
 868                }
 869                else {  /* MediaGX */
 870                        Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
 871                        p = Cx86_cb+2;
 872                        c->x86_model = (dir1 & 0x20) ? 1 : 2;
 873                        c->x86_capability&=~X86_FEATURE_TSC;
 874                }
 875                break;
 876
 877        case 5: /* 6x86MX/M II */
 878                if (dir1 > 7) dir0_msn++;  /* M II */
 879                else c->coma_bug = 1;      /* 6x86MX, it has the bug. */
 880                tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
 881                Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
 882                p = Cx86_cb+tmp;
 883                if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
 884                        (c->x86_model)++;
 885                /* Emulate MTRRs using Cyrix's ARRs. */
 886                c->x86_capability |= X86_FEATURE_MTRR;
 887                break;
 888
 889        case 0xf:  /* Cyrix 486 without DEVID registers */
 890                switch (dir0_lsn) {
 891                case 0xd:  /* either a 486SLC or DLC w/o DEVID */
 892                        dir0_msn = 0;
 893                        p = Cx486_name[(c->hard_math) ? 1 : 0];
 894                        break;
 895
 896                case 0xe:  /* a 486S A step */
 897                        dir0_msn = 0;
 898                        p = Cx486S_name[0];
 899                        break;
 900                break;
 901                }
 902
 903        default:  /* unknown (shouldn't happen, we know everyone ;-) */
 904                dir0_msn = 7;
 905                break;
 906        }
 907        strcpy(buf, Cx86_model[dir0_msn & 7]);
 908        if (p) strcat(buf, p);
 909        return;
 910}
 911
/*
 * Report Transmeta-specific information from the 0x8086000x extended CPUID
 * leaves: CPU revision/frequency, Code Morphing Software revision, and the
 * 64-byte marketing info string.  Finally re-reads the standard feature
 * flags with the vendor capability-mask MSR lifted, so deliberately hidden
 * flags become visible in c->x86_capability.
 */
__initfunc(static void transmeta_model(struct cpuinfo_x86 *c))
{
        unsigned int cap_mask, uk, max, dummy;
        unsigned int cms_rev1, cms_rev2;
        unsigned int cpu_rev, cpu_freq, cpu_flags;
        char cpu_info[65];      /* 64 bytes of CPUID text + NUL */

        get_model_name(c);      /* Same as AMD/Cyrix */
        display_cacheinfo(c);

        /* Print CMS and CPU revision */
        cpuid(0x80860000, &max, &dummy, &dummy, &dummy);
        if ( max >= 0x80860001 ) {
                cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); 
                printk("CPU: Processor revision %u.%u.%u.%u, %u MHz%s%s\n",
                       (cpu_rev >> 24) & 0xff,
                       (cpu_rev >> 16) & 0xff,
                       (cpu_rev >> 8) & 0xff,
                       cpu_rev & 0xff,
                       cpu_freq,
                       (cpu_flags & 1) ? " [recovery]" : "",
                       (cpu_flags & 2) ? " [longrun]" : "");
        }
        if ( max >= 0x80860002 ) {
                cpuid(0x80860002, &dummy, &cms_rev1, &cms_rev2, &dummy);
                printk("CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
                       (cms_rev1 >> 24) & 0xff,
                       (cms_rev1 >> 16) & 0xff,
                       (cms_rev1 >> 8) & 0xff,
                       cms_rev1 & 0xff,
                       cms_rev2);
        }
        /* Leaves 0x80860003..6 each return 16 ASCII bytes of the CPU
         * information string; assemble them directly into cpu_info[]. */
        if ( max >= 0x80860006 ) {
                cpuid(0x80860003,
                      (void *)&cpu_info[0],
                      (void *)&cpu_info[4],
                      (void *)&cpu_info[8],
                      (void *)&cpu_info[12]);
                cpuid(0x80860004,
                      (void *)&cpu_info[16],
                      (void *)&cpu_info[20],
                      (void *)&cpu_info[24],
                      (void *)&cpu_info[28]);
                cpuid(0x80860005,
                      (void *)&cpu_info[32],
                      (void *)&cpu_info[36],
                      (void *)&cpu_info[40],
                      (void *)&cpu_info[44]);
                cpuid(0x80860006,
                      (void *)&cpu_info[48],
                      (void *)&cpu_info[52],
                      (void *)&cpu_info[56],
                      (void *)&cpu_info[60]);
                cpu_info[64] = '\0';
                printk("CPU: %s\n", cpu_info);
        }

        /* Unhide possibly hidden flags */
        /* NOTE(review): MSR 0x80860004 is read unconditionally here, i.e.
           presumably present on every Transmeta part this runs on; confirm
           against the Transmeta MSR documentation. */
        rdmsr(0x80860004, cap_mask, uk);
        wrmsr(0x80860004, ~0, uk);
        cpuid(0x00000001, &dummy, &dummy, &dummy, &c->x86_capability);
        wrmsr(0x80860004, cap_mask, uk);        /* restore the original mask */
}
 975
 976
 977__initfunc(void get_cpu_vendor(struct cpuinfo_x86 *c))
 978{
 979        char *v = c->x86_vendor_id;
 980
 981        if (!strcmp(v, "GenuineIntel"))
 982                c->x86_vendor = X86_VENDOR_INTEL;
 983        else if (!strcmp(v, "AuthenticAMD"))
 984                c->x86_vendor = X86_VENDOR_AMD;
 985        else if (!strcmp(v, "CyrixInstead"))
 986                c->x86_vendor = X86_VENDOR_CYRIX;
 987        else if (!strcmp(v, "UMC UMC UMC "))
 988                c->x86_vendor = X86_VENDOR_UMC;
 989        else if (!strcmp(v, "CentaurHauls"))
 990                c->x86_vendor = X86_VENDOR_CENTAUR;
 991        else if (!strcmp(v, "NexGenDriven"))
 992                c->x86_vendor = X86_VENDOR_NEXGEN;
 993        else if (!strcmp(v, "RiseRiseRise"))
 994                c->x86_vendor = X86_VENDOR_RISE;
 995        else if (!strcmp(v, "GenuineTMx86"))
 996                c->x86_vendor = X86_VENDOR_TRANSMETA;
 997        else
 998                c->x86_vendor = X86_VENDOR_UNKNOWN;
 999}
1000
/* One row of the (vendor, family) -> model-name lookup table below. */
struct cpu_model_info {
        int vendor;             /* X86_VENDOR_* code */
        int x86;                /* CPU family number */
        char *model_names[16];  /* indexed by c->x86_model; NULL = unknown */
};
1006
/* Model-name lookup table used by identify_cpu() for CPUs whose names are
   not fetched via the extended CPUID naming leaves.  Each (vendor, family)
   pair appears at most once; NULL entries fall back to a numeric id. */
static struct cpu_model_info cpu_models[] __initdata = {
        { X86_VENDOR_INTEL,     4,
          { "486 DX-25/33", "486 DX-50", "486 SX", "486 DX/2", "486 SL", 
            "486 SX/2", NULL, "486 DX/2-WB", "486 DX/4", "486 DX/4-WB", NULL, 
            NULL, NULL, NULL, NULL, NULL }},
        { X86_VENDOR_INTEL,     5,
          { "Pentium 60/66 A-step", "Pentium 60/66", "Pentium 75 - 200",
            "OverDrive PODP5V83", "Pentium MMX", NULL, NULL,
            "Mobile Pentium 75 - 200", "Mobile Pentium MMX", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL }},
        { X86_VENDOR_INTEL,     6,
          { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)", 
            NULL, "Pentium II (Deschutes)", "Mobile Pentium II", 
            "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
            "Pentium III (Cascades)", NULL, NULL, NULL, NULL, NULL }},
        { X86_VENDOR_AMD,       4,
          { NULL, NULL, NULL, "486 DX/2", NULL, NULL, NULL, "486 DX/2-WB",
            "486 DX/4", "486 DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT",
            "Am5x86-WB" }},
        { X86_VENDOR_AMD,       5,
          { "K5/SSA5", "K5",
            "K5", "K5", NULL, NULL,
            "K6", "K6", "K6-2",
            "K6-3", NULL, NULL, NULL, NULL, NULL, NULL }},
        { X86_VENDOR_AMD,       6,
          { "Athlon", "Athlon",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL,
            NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
        { X86_VENDOR_UMC,       4,
          { NULL, "U5D", "U5S", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL, NULL, NULL }},
        { X86_VENDOR_CENTAUR,   5,
          { NULL, NULL, NULL, NULL, "C6", NULL, NULL, NULL, "C6-2", NULL, NULL,
            NULL, NULL, NULL, NULL, NULL }},
        { X86_VENDOR_NEXGEN,    5,
          { "Nx586", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
};
1046
1047__initfunc(void identify_cpu(struct cpuinfo_x86 *c))
1048{
1049        int i;
1050        char *p = NULL;
1051        extern void mcheck_init(void);
1052        
1053        c->loops_per_jiffy = loops_per_jiffy;
1054        c->x86_cache_size = -1;
1055
1056        get_cpu_vendor(c);
1057
1058        if (c->x86_vendor == X86_VENDOR_UNKNOWN &&
1059            c->cpuid_level < 0)
1060                return;
1061
1062        /* It should be possible for the user to override this. */
1063        if(c->cpuid_level > 0 && 
1064           (c->x86_vendor == X86_VENDOR_INTEL || c->x86_vendor == X86_VENDOR_TRANSMETA) &&
1065           c->x86_capability&(1<<18)) {
1066                /* Disable processor serial number */
1067                unsigned long lo,hi;
1068                rdmsr(0x119,lo,hi);
1069                lo |= 0x200000;
1070                wrmsr(0x119,lo,hi);
1071                printk(KERN_INFO "CPU serial number disabled.\n");
1072        }
1073
1074        mcheck_init();
1075        
1076        if (c->x86_vendor == X86_VENDOR_CYRIX) {
1077                cyrix_model(c);
1078                return;
1079        }
1080
1081        if (c->x86_vendor == X86_VENDOR_AMD && amd_model(c))
1082                return;
1083                
1084        if (c->x86_vendor == X86_VENDOR_TRANSMETA) {
1085                transmeta_model(c);
1086                return;
1087        }
1088        
1089        if(c->x86_vendor == X86_VENDOR_CENTAUR && c->x86==6)
1090        {
1091                /* The Cyrix III supports model naming and cache queries */
1092                get_model_name(c);
1093                display_cacheinfo(c);
1094                return;
1095        }
1096
1097        if (c->cpuid_level > 1) {
1098                /* supports eax=2  call */
1099                int regs[4];
1100                int l1c=0, l1d=0, l2=0, l3=0;   /* Cache sizes */
1101
1102                cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
1103
1104                /* Least significant byte of eax says how many times
1105                 * to call cpuid with value 2 to get cache and TLB
1106                 * info.
1107                 */
1108                if ((regs[0] & 0xFF) != 1 )
1109                        printk(KERN_WARNING "Multiple cache reports are not supported yet\n");
1110
1111                c->x86_cache_size = 0;
1112
1113                for ( i = 0 ; i < 4 ; i++ ) {
1114
1115                        int j;
1116
1117                        if ( regs[i] < 0 )
1118                                continue; /* no useful data */
1119
1120                        /* look at all the bytes returned */
1121
1122                        for ( j = ( i == 0 ? 8:0 ) ; j < 25 ; j+=8 ) {
1123
1124                                unsigned char rh = regs[i]>>j;
1125                                unsigned char rl;
1126
1127                                rl = rh & 0x0F;
1128                                rh >>=4;
1129
1130                                switch(rh) {
1131
1132                                        case 2:
1133                                                if(rl) {
1134                                                        printk("%dK L3 cache\n", (rl-1)*512);
1135                                                        l3 += (rl-1)*512;
1136                                                }
1137                                                break;
1138
1139                                        case 4:
1140                                        case 8:
1141                                                if(rl) {
1142                                                        printk("%dK L2 cache (%d way)\n",128<<(rl-1), rh);
1143                                                        l2 += 128<<(rl-1);
1144                                                }
1145                                                break;
1146
1147                                        /*
1148                                         *      L1 caches do not count for SMP switching weights,
1149                                         *      they are shadowed by L2.
1150                                         */
1151
1152                                        case 6:
1153                                                if(rh==6 && rl > 5) {
1154                                                        printk("%dK L1 data cache\n", 8<<(rl - 6));
1155                                                        l1d+=8<<(rl-6);
1156                                                }
1157                                                break;
1158
1159                                        case 7:
1160                                                printk("%dK L1 instruction cache\n",
1161                                                rl?(16<<(rl-1)):12);
1162                                                l1c+=rl?(16<<(rl-1)):12;
1163                                                break;
1164                                }                     
1165                        }                       
1166                }
1167
1168                if(l1c && l1d)
1169                        printk("CPU: L1 I Cache: %dK  L1 D Cache: %dK\n", l1c, l1d);
1170                if(l2)
1171                        printk("CPU: L2 Cache: %dK\n", l2);
1172                if(l3)
1173                        printk("CPU: L3 Cache: %dK\n", l3);
1174
1175                /*
1176                 *      Assuming L3 is shared. The L1 cache is shadowed by L2
1177                 *      so doesn't need to be included.
1178                 */
1179
1180                c->x86_cache_size += l2;
1181        }
1182
1183        /*
1184         *      Intel finally adopted the AMD/Cyrix extended id naming
1185         *      stuff for the 'Pentium IV'
1186         */
1187
1188        if(c->x86_vendor ==X86_VENDOR_INTEL && c->x86 == 15)
1189        {
1190                intel_model(c);
1191                return;
1192        }
1193
1194        for (i = 0; i < sizeof(cpu_models)/sizeof(struct cpu_model_info); i++) {
1195                if (cpu_models[i].vendor == c->x86_vendor &&
1196                    cpu_models[i].x86 == c->x86) {
1197                        if (c->x86_model <= 16)
1198                                p = cpu_models[i].model_names[c->x86_model];
1199
1200                        /* Names for the Pentium II Celeron processors
1201                           detectable only by also checking the cache size */
1202                        if ((cpu_models[i].vendor == X86_VENDOR_INTEL)
1203                            && (cpu_models[i].x86 == 6)){ 
1204                                if(c->x86_model == 6 && c->x86_cache_size == 128) {
1205                                        p = "Celeron (Mendocino)"; 
1206                                } else { 
1207                                        if (c->x86_model == 5 && c->x86_cache_size == 0) {
1208                                                p = "Celeron (Covington)";
1209                                        }
1210                                }
1211                        }
1212                }
1213        }
1214
1215        if (p) {
1216                strcpy(c->x86_model_id, p);
1217                return;
1218        }
1219
1220        sprintf(c->x86_model_id, "%02x/%02x", c->x86_vendor, c->x86_model);
1221}
1222
1223/*
1224 *      Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
1225 */
1226 
1227__initfunc(void dodgy_tsc(void))
1228{
1229        get_cpu_vendor(&boot_cpu_data);
1230        
1231        if(boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)
1232        {
1233                return;
1234        }
1235        cyrix_model(&boot_cpu_data);
1236}
1237        
1238        
1239
/* Printable vendor names, indexed by c->x86_vendor -- the order therefore
   has to match the X86_VENDOR_* constant values; NOTE(review): confirm the
   ordering against the header that defines X86_VENDOR_*. */
static char *cpu_vendor_names[] __initdata = {
        "Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise", "Transmeta" };
1242
1243
/*
 * Centaur/VIA-specific setup: tweaks MSRs and the capability flags for the
 * WinChip C6 family (family 5) and the Cyrix III 'Samuel' (family 6,
 * model 6).  Called from print_cpu_info().
 */
__initfunc(void setup_centaur(struct cpuinfo_x86 *c))
{
        u32 hv,lv;
        
        /* Centaur C6 Series */
        if(c->x86==5)
        {
                /* Set recommended bits 1, 2 and 7 in MSR 0x107 (the
                   Centaur FSR, per the printk below). */
                rdmsr(0x107, lv, hv);
                printk("Centaur FSR was 0x%X ",lv);
                lv|=(1<<1 | 1<<2 | 1<<7);
                /* lv|=(1<<6);  - may help too if the board can cope */
                printk("now 0x%X\n", lv);
                wrmsr(0x107, lv, hv);
                /* Emulate MTRRs using Centaur's MCR. */
                c->x86_capability |= X86_FEATURE_MTRR;

                /* Disable TSC on C6 as per errata. */
                if (c->x86_model ==4) {
                        printk ("Disabling bugged TSC.\n");
                        c->x86_capability &= ~X86_FEATURE_TSC;
                }

                /* Set 3DNow! on Winchip 2 and above. */
                if (c->x86_model >=8)
                    c->x86_capability |= X86_FEATURE_AMD3D;

                /* All C6 parts support CMPXCHG8B even if CPUID hides it. */
                c->x86_capability |=X86_FEATURE_CX8;
        }
        /* Cyrix III 'Samuel' CPU */
        if(c->x86 == 6 && c->x86_model == 6)
        {
                rdmsr(0x1107, lv, hv);
                lv|=(1<<1);     /* Report CX8 */
                lv|=(1<<7);     /* PGE enable */
                wrmsr(0x1107, lv, hv);
                /* Cyrix III */
                c->x86_capability |= X86_FEATURE_CX8;
                
                /* Check for 3dnow */
                cpuid(0x80000001, &lv, &lv, &lv, &hv);
                if(hv&(1<<31))
                        c->x86_capability |= X86_FEATURE_AMD3D;
        }       
}
1288
1289__initfunc(void print_cpu_info(struct cpuinfo_x86 *c))
1290{
1291        char *vendor = NULL;
1292
1293        if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
1294                vendor = cpu_vendor_names[c->x86_vendor];
1295        else if (c->cpuid_level >= 0)
1296                vendor = c->x86_vendor_id;
1297
1298        if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
1299                printk("%s ", vendor);
1300
1301        if (!c->x86_model_id[0])
1302                printk("%d86", c->x86);
1303        else
1304                printk("%s", c->x86_model_id);
1305
1306        if (c->x86_mask || c->cpuid_level>=0) 
1307                printk(" stepping %02x\n", c->x86_mask);
1308        else
1309                printk("\n");
1310
1311        if(c->x86_vendor == X86_VENDOR_CENTAUR) {
1312                setup_centaur(c);
1313        }
1314}
1315
1316/*
1317 *      Get CPU information for use by the procfs.
1318 */
1319
/*
 * Format the /proc/cpuinfo text for every online CPU into 'buffer' and
 * return the number of bytes written.  The caller provides a buffer large
 * enough for all CPUs.
 */
int get_cpuinfo(char * buffer)
{
        char *p = buffer;
        int sep_bug;
        /* Default (Intel-documented) names for the 32 CPUID feature bits;
           the vendor switch below overrides bits whose meaning differs
           per vendor.  Numeric strings mark bits with no known name. */
        static char *x86_cap_flags[] = {
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", "10", "sep", "mtrr", "pge", "mca", "cmov",
                "16", "pse36", "psn", "19", "20", "21", "22", "mmx",
                "24", "xmm", "26", "27", "28", "29", "30", "31"
        };
        struct cpuinfo_x86 *c = cpu_data;
        int i, n;

        for(n=0; n<NR_CPUS; n++, c++) {
#ifdef CONFIG_SMP
                /* Skip CPU slots that never came online. */
                if (!(cpu_online_map & (1<<n)))
                        continue;
#endif
                p += sprintf(p,"processor\t: %d\n"
                               "vendor_id\t: %s\n"
                               "cpu family\t: %d\n"
                               "model\t\t: %d\n"
                               "model name\t: %s\n",
                               n,
                               c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                               c->x86,
                               c->x86_model,
                               c->x86_model_id[0] ? c->x86_model_id : "unknown");

                if (c->x86_mask || c->cpuid_level >= 0)
                        p += sprintf(p, "stepping\t: %d\n", c->x86_mask);
                else
                        p += sprintf(p, "stepping\t: unknown\n");

                /* Clock speed is only meaningful when a TSC is present. */
                if (c->x86_capability & X86_FEATURE_TSC) {
                        p += sprintf(p, "cpu MHz\t\t: %lu.%03lu\n",
                                cpu_khz / 1000, cpu_khz % 1000);
                }

                /* Cache size */
                if (c->x86_cache_size >= 0)
                        p += sprintf(p, "cache size\t: %d KB\n", c->x86_cache_size);
                
                /* Modify the capabilities according to chip type */
                switch (c->x86_vendor) {

                    case X86_VENDOR_CYRIX:
                        x86_cap_flags[24] = "cxmmx";
                        break;

                    case X86_VENDOR_AMD:
                        /* K6 (family 5 model 6) reports SEP at bit 10. */
                        if (c->x86 == 5 && c->x86_model == 6)
                                x86_cap_flags[10] = "sep";
                        if (c->x86 < 6)
                                x86_cap_flags[16] = "fcmov";
                        else
                                x86_cap_flags[16] = "pat";
                        x86_cap_flags[22] = "mmxext";
                        x86_cap_flags[24] = "fxsr";
                        x86_cap_flags[30] = "3dnowext";
                        x86_cap_flags[31] = "3dnow";
                        break;

                    case X86_VENDOR_INTEL:
                        x86_cap_flags[16] = "pat";
                        x86_cap_flags[19] = "cflush";
                        x86_cap_flags[21] = "dtrace";
                        x86_cap_flags[22] = "acpi";
                        x86_cap_flags[24] = "fxsr";
                        x86_cap_flags[26] = "xmm2";
                        x86_cap_flags[27] = "ssnp";
                        x86_cap_flags[29] = "acc";
                        break;

                    case X86_VENDOR_CENTAUR:
                        if (c->x86_model >=8)   /* Only Winchip2 and above */
                                x86_cap_flags[31] = "3dnow";
                        break;

                    default:
                        /* Unknown CPU manufacturer or no special action needed */
                        break;
                }

                /* Early Pentium II (model < 3, stepping < 3) advertise SEP
                   but it is unusable -- flag it so userland can tell. */
                sep_bug = c->x86_vendor == X86_VENDOR_INTEL &&
                          c->x86 == 0x06 &&
                          c->cpuid_level >= 0 &&
                          (c->x86_capability & X86_FEATURE_SEP) &&
                          c->x86_model < 3 &&
                          c->x86_mask < 3;
        
                p += sprintf(p, "fdiv_bug\t: %s\n"
                                "hlt_bug\t\t: %s\n"
                                "sep_bug\t\t: %s\n"
                                "f00f_bug\t: %s\n"
                                "coma_bug\t: %s\n"
                                "fpu\t\t: %s\n"
                                "fpu_exception\t: %s\n"
                                "cpuid level\t: %d\n"
                                "wp\t\t: %s\n"
                                "flags\t\t:",
                             c->fdiv_bug ? "yes" : "no",
                             c->hlt_works_ok ? "no" : "yes",
                             sep_bug ? "yes" : "no",
                             c->f00f_bug ? "yes" : "no",
                             c->coma_bug ? "yes" : "no",
                             c->hard_math ? "yes" : "no",
                             (c->hard_math && ignore_irq13) ? "yes" : "no",
                             c->cpuid_level,
                             c->wp_works_ok ? "yes" : "no");

                /* Emit the name of every set capability bit. */
                for ( i = 0 ; i < 32 ; i++ )
                        if ( c->x86_capability & (1 << i) )
                                p += sprintf(p, " %s", x86_cap_flags[i]);
                p += sprintf(p, "\nbogomips\t: %lu.%02lu\n\n",
                             c->loops_per_jiffy/(500000/HZ),
                             (c->loops_per_jiffy/(5000/HZ)) % 100);
        }
        return p - buffer;
}
1440
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.