linux/arch/sparc/kernel/smp.c
/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>

#include "irq.h"

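/* cpu_callin_map is the boot-time handshake area: a freshly started
 * cpu marks its slot to say it is alive, and __cpu_up() below then
 * releases it via smp_commenced_mask (the other side of the handshake
 * lives in the sun4m/sun4d boot code).
 */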
volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * loads the current byte at the effective address into dest_reg and
 * stores 0xff there afterwards.  A pretty lame locking primitive
 * compared to the Alpha and the Intel ones, no?  Most Sparcs have the
 * 'swap' instruction, which is much better...
 */
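
/* A minimal sketch of the ldstub-based test-and-set lock described
 * above.  Illustrative only and not used by this file: the names
 * example_ldstub_lock/unlock are hypothetical, and the real lock
 * implementation lives in asm/spinlock.h.  Assumes a byte lock that
 * is 0 when free.
 */
static inline void example_ldstub_lock(volatile unsigned char *lock)
{
        unsigned char prev;

        do {
                /* Atomically load the byte at *lock and store 0xff there. */
                __asm__ __volatile__("ldstub [%1], %0"
                                     : "=&r" (prev)
                                     : "r" (lock)
                                     : "memory");
        } while (prev);         /* Nonzero means someone else held it; spin. */
}

static inline void example_ldstub_unlock(volatile unsigned char *lock)
{
        /* Store zero to release; %g0 always reads as zero on Sparc. */
        __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}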

void __cpuinit smp_store_cpu_info(int id)
{
        int cpu_node;

        cpu_data(id).udelay_val = loops_per_jiffy;

        cpu_find_by_mid(id, &cpu_node);
        cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                     "clock-frequency", 0);
        cpu_data(id).prom_node = cpu_node;
        cpu_data(id).mid = cpu_get_hwmid(cpu_node);

        if (cpu_data(id).mid < 0)
                panic("No MID found for CPU%d at node 0x%08x", id, cpu_node);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        extern void smp4m_smp_done(void);
        extern void smp4d_smp_done(void);
        unsigned long bogosum = 0;
        int cpu, num;

        for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online(cpu)) {
                        num++;
                        bogosum += cpu_data(cpu).udelay_val;
                }

        /* udelay_val is loops_per_jiffy, so BogoMIPS works out to
         * loops_per_jiffy * HZ / 500000: the first term below is the
         * integer part, the second the two decimal places.
         */
        printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                num, bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                smp4m_smp_done();
                break;
        case sun4d:
                smp4d_smp_done();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }
}

void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };

void smp_send_reschedule(int cpu)
{
        /* See sparc64 */
}

void smp_send_stop(void)
{
}

void smp_flush_cache_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
        local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
        local_flush_tlb_all();
}

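/* The smp_flush_*() helpers below all follow the same pattern: take a
 * copy of mm->cpu_vm_mask, clear the calling cpu from it, broadcast
 * the flush to the remaining cpus with an xcN() cross-call (N is the
 * number of arguments passed along), and then run the local flush
 * variant directly rather than sending an IPI to ourselves.
 */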
void smp_flush_cache_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask)) {
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
                        /* We are the sole remaining user and the other
                         * cpus have just been flushed, so shrink the
                         * mask to this cpu only.
                         */
                        if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
                                mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
                }
                local_flush_tlb_mm(mm);
        }
}

void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
                local_flush_cache_range(vma, start, end);
        }
}

void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                         unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
                local_flush_tlb_range(vma, start, end);
        }
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
                local_flush_cache_page(vma, page);
        }
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
                local_flush_tlb_page(vma, page);
        }
}

void smp_reschedule_irq(void)
{
        set_need_resched();
}

void smp_flush_page_to_ram(unsigned long page)
{
        /* Current theory is that those who call this are the ones
         * who have just dirtied their cache with the page's contents
         * in kernel space, therefore we only run this on the local cpu.
         *
         * XXX This experiment failed, research further... -DaveM
         */
#if 1
        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
        local_flush_page_to_ram(page);
}

void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
        cpumask_t cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);
        if (!cpus_empty(cpu_mask))
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
        local_flush_sig_insns(mm, insn_addr);
}

extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
        int i;
        unsigned long flags;

        /* Prevent level14 ticker IRQ flooding. */
        if (!multiplier || (lvl14_resolution / multiplier) < 500)
                return -EINVAL;

        spin_lock_irqsave(&prof_setup_lock, flags);
        for_each_possible_cpu(i) {
                load_profile_irq(i, lvl14_resolution / multiplier);
                prof_multiplier(i) = multiplier;
        }
        spin_unlock_irqrestore(&prof_setup_lock, flags);

        return 0;
}

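/* Hedged userspace sketch of how setup_profiling_timer() usually gets
 * invoked: a write of exactly sizeof(int) bytes to /proc/profile sets
 * the multiplier (see write_profile() in kernel/profile.c).  Assumes
 * the kernel was booted with profiling enabled ("profile=" on the
 * command line); set_profile_multiplier is a hypothetical helper name.
 */
#if 0   /* example only, userspace code, never compiled into the kernel */
#include <fcntl.h>
#include <unistd.h>

int set_profile_multiplier(int multiplier)
{
        int fd = open("/proc/profile", O_WRONLY);
        int ret = -1;

        if (fd >= 0) {
                /* A write of exactly sizeof(int) sets the multiplier. */
                if (write(fd, &multiplier, sizeof(multiplier)) == sizeof(multiplier))
                        ret = 0;
                close(fd);
        }
        return ret;
}
#endif
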
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        extern void __init smp4m_boot_cpus(void);
        extern void __init smp4d_boot_cpus(void);
        int i, cpuid, extra;

        printk("Entering SMP Mode...\n");

        extra = 0;
        for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
                if (cpuid >= NR_CPUS)
                        extra++;
        }
        /* i is now the total number of cpus probed; 'extra' of them
         * have ids that do not fit below NR_CPUS.
         */
        if (extra && max_cpus > i - extra)
                printk("Warning: NR_CPUS is too low to start all cpus\n");

        smp_store_cpu_info(boot_cpu_id);

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                smp4m_boot_cpus();
                break;
        case sun4d:
                smp4d_boot_cpus();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
        int instance, mid;

        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
                if (mid < NR_CPUS) {
                        cpu_set(mid, phys_cpu_present_map);
                        cpu_set(mid, cpu_present_map);
                }
                instance++;
        }
}

void __init smp_prepare_boot_cpu(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpuid >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
                prom_halt();
        }
        if (cpuid != 0)
                printk("boot cpu id != 0, this could work but is untested\n");

        current_thread_info()->cpu = cpuid;
        cpu_set(cpuid, cpu_online_map);
        cpu_set(cpuid, phys_cpu_present_map);
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        extern int __cpuinit smp4m_boot_one_cpu(int);
        extern int __cpuinit smp4d_boot_one_cpu(int);
        int ret = 0;

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                ret = smp4m_boot_one_cpu(cpu);
                break;
        case sun4d:
                ret = smp4d_boot_one_cpu(cpu);
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }

        if (!ret) {
                /* Release the new cpu from its wait loop, then spin
                 * until it has marked itself online.
                 */
                cpu_set(cpu, smp_commenced_mask);
                while (!cpu_online(cpu))
                        mb();
        }
        return ret;
}

void smp_bogo(struct seq_file *m)
{
        int i;

        for_each_online_cpu(i) {
                seq_printf(m,
                           "Cpu%dBogo\t: %lu.%02lu\n",
                           i,
                           cpu_data(i).udelay_val/(500000/HZ),
                           (cpu_data(i).udelay_val/(5000/HZ))%100);
        }
}

void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for_each_online_cpu(i)
                seq_printf(m, "CPU%d\t\t: online\n", i);
}