linux/arch/cris/arch-v32/kernel/smp.c
#include <linux/types.h>
#include <asm/delay.h>
#include <irq.h>
#include <hwregs/intr_vect.h>
#include <hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <hwregs/asm/mmu_defs_asm.h>
#include <hwregs/supp_reg.h>
#include <asm/atomic.h>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>

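/* IPI types; each is a bit flag that send_ipi() ORs into a target CPU's
 * rw_ipi.vector register. */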
#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4
#define IPI_BOOT 8

#define FLUSH_ALL (void*)0xffffffff

/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};

/* CPU masks */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
EXPORT_SYMBOL(phys_cpu_present_map);

/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;

/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        int wait;
};

static struct call_data_struct *call_data;

static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static unsigned long flush_addr;

extern int setup_irq(int, struct irqaction *);

/* Per-CPU base addresses of the interrupt controller register blocks */
static unsigned long irq_regs[NR_CPUS] = {
        regi_irq,
        regi_irq2
};

static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
static struct irqaction irq_ipi = {
        .handler = crisv32_ipi_interrupt,
        .flags = IRQF_DISABLED,
        .name = "ipi",
};

extern void cris_mmu_init(void);
extern void cris_timer_init(void);

/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int i;

        /* From now on we can expect IPIs, so set them up. */
        setup_irq(IPI_INTR_VECT, &irq_ipi);

        /* Mark all possible CPUs as present. */
        for (i = 0; i < max_cpus; i++)
                cpu_set(i, phys_cpu_present_map);
}

void __devinit smp_prepare_boot_cpu(void)
{
        /* The PGD pointer has moved after the per-cpu initialization, so
         * update the MMU.
         */
        pgd_t **pgd;
        pgd = (pgd_t **)&per_cpu(current_pgd, smp_processor_id());

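        /* Write the new PGD address to both supervisor register banks;
         * banks 1 and 2 should be the instruction and data MMU on CRISv32. */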
        SUPP_BANK_SEL(1);
        SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
        SUPP_BANK_SEL(2);
        SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

        set_cpu_online(0, true);
        cpu_set(0, phys_cpu_present_map);
        set_cpu_possible(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* Bring one CPU online. */
static int __init
smp_boot_one_cpu(int cpuid)
{
        unsigned timeout;
        struct task_struct *idle;
        cpumask_t cpu_mask = CPU_MASK_NONE;

        idle = fork_idle(cpuid);
        if (IS_ERR(idle))
                panic("SMP: fork failed for CPU:%d", cpuid);

        task_thread_info(idle)->cpu = cpuid;

        /* Information to the CPU that is about to boot */
        smp_init_current_idle_thread = task_thread_info(idle);
        cpu_now_booting = cpuid;

        /* Kick it */
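        /* The target goes into cpu_online_map only for the duration of
         * the IPI, since send_ipi() delivers to online CPUs only; the
         * flag is cleared again right below and set for real by the new
         * CPU itself in smp_callin(). */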
        cpu_set(cpuid, cpu_online_map);
        cpu_set(cpuid, cpu_mask);
        send_ipi(IPI_BOOT, 0, cpu_mask);
        cpu_clear(cpuid, cpu_online_map);

        /* Wait for CPU to come online */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (cpu_online(cpuid)) {
                        cpu_now_booting = 0;
                        smp_init_current_idle_thread = NULL;
                        return 0; /* CPU online */
                }
                udelay(100);
                barrier();
        }

        put_task_struct(idle);
        idle = NULL;

        printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
        return -1;
}

/* Secondary CPUs start executing C code here. We need to set up
 * CPU-specific state such as the local timer and the MMU. */
void __init smp_callin(void)
{
        extern void cpu_idle(void);

        int cpu = cpu_now_booting;
        reg_intr_vect_rw_mask vect_mask = {0};

        /* Initialise the idle task for this CPU */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* Set up the MMU. */
        cris_mmu_init();
        __flush_tlb_all();

        /* Set up the local timer. */
        cris_timer_init();

        /* Enable IRQs and idle: start from an all-zero interrupt mask,
         * then unmask only the IPI and local-timer vectors. */
        REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
        unmask_irq(IPI_INTR_VECT);
        unmask_irq(TIMER0_INTR_VECT);
        preempt_disable();
        notify_cpu_starting(cpu);
        local_irq_enable();

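        /* Going into cpu_online_map is what releases the boot CPU from
         * its wait loop in smp_boot_one_cpu(). */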
        cpu_set(cpu, cpu_online_map);
        cpu_idle();
}

/* Stop execution on this CPU. */
void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        asm volatile("halt");
}

/* Other calls */
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

/* cache_decay_ticks is used by the scheduler to decide if a process
 * is "hot" on one CPU. A higher value means a higher penalty to move
 * a process to another CPU. Our cache is rather small so we report
 * 1 tick.
 */
unsigned long cache_decay_ticks = 1;

int __cpuinit __cpu_up(unsigned int cpu)
{
        smp_boot_one_cpu(cpu);
        return cpu_online(cpu) ? 0 : -ENOSYS;
}

void smp_send_reschedule(int cpu)
{
        cpumask_t cpu_mask = CPU_MASK_NONE;
        cpu_set(cpu, cpu_mask);
        send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}

/* TLB flushing
 *
 * A flush needs to be done on the local CPU and on any other CPU that
 * may have the same mapping. mm->cpu_vm_mask is used to keep track
 * of which CPUs a given process has executed on.
 */
void flush_tlb_common(struct mm_struct *mm,
                      struct vm_area_struct *vma, unsigned long addr)
{
        unsigned long flags;
        cpumask_t cpu_mask;

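        /* tlbstate_lock is held across the IPI so that the flush_*
         * globals below cannot change until every receiving CPU has
         * done its flush (send_ipi() is called with wait = 1). */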
        spin_lock_irqsave(&tlbstate_lock, flags);
        cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
        cpu_clear(smp_processor_id(), cpu_mask);
        flush_mm = mm;
        flush_vma = vma;
        flush_addr = addr;
        send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
        spin_unlock_irqrestore(&tlbstate_lock, flags);
}

void flush_tlb_all(void)
{
        __flush_tlb_all();
        flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        __flush_tlb_mm(mm);
        flush_tlb_common(mm, FLUSH_ALL, 0);
        /* No more mappings in other CPUs */
        cpumask_clear(mm_cpumask(mm));
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

void flush_tlb_page(struct vm_area_struct *vma,
                    unsigned long addr)
{
        __flush_tlb_page(vma, addr);
        flush_tlb_common(vma->vm_mm, vma, addr);
}

/* Inter-processor interrupts
 *
 * The IPIs are used to:
 *   * Force a schedule on a CPU
 *   * Flush the TLB on other CPUs
 *   * Call a function on other CPUs
 */

int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
        int i;
        reg_intr_vect_rw_ipi ipi;
        int ret = 0;

        /* Calculate CPUs to send to. */
        cpus_and(cpu_mask, cpu_mask, cpu_online_map);

        /* Send the IPI: read-modify-write each target CPU's own rw_ipi
         * register so that a vector already pending there is not lost. */
        for_each_cpu_mask(i, cpu_mask) {
                ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
                ipi.vector |= vector;
                REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
        }

        /* Wait for the IPI to finish on the other CPUs. */
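        /* Each receiver acks by clearing its rw_ipi vector at the end of
         * crisv32_ipi_interrupt(), so poll until the register reads zero. */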
        if (wait) {
                for_each_cpu_mask(i, cpu_mask) {
                        int j;
                        for (j = 0; j < 1000; j++) {
                                ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
                                if (!ipi.vector)
                                        break;
                                udelay(100);
                        }

                        /* Timeout? */
                        if (ipi.vector) {
                                printk(KERN_WARNING "SMP call timeout from %d to %d\n",
                                       smp_processor_id(), i);
                                ret = -ETIMEDOUT;
                                dump_stack();
                        }
                }
        }
        return ret;
}

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
        cpumask_t cpu_mask = CPU_MASK_ALL;
        struct call_data_struct data;
        int ret;

        cpu_clear(smp_processor_id(), cpu_mask);

        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        data.wait = wait;

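        /* call_lock serializes use of the single shared call_data slot;
         * with wait set it is held until every target has run func. */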
        spin_lock(&call_lock);
        call_data = &data;
        ret = send_ipi(IPI_CALL, wait, cpu_mask);
        spin_unlock(&call_lock);

        return ret;
}

irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
{
        void (*func)(void *info);
        void *info;
        reg_intr_vect_rw_ipi ipi;

        ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);

        if (ipi.vector & IPI_CALL) {
                /* call_data is NULL until the first smp_call_function(),
                 * so only dereference it when IPI_CALL is pending. */
                func = call_data->func;
                info = call_data->info;
                func(info);
        }
        if (ipi.vector & IPI_FLUSH_TLB) {
                if (flush_mm == FLUSH_ALL)
                        __flush_tlb_all();
                else if (flush_vma == FLUSH_ALL)
                        __flush_tlb_mm(flush_mm);
                else
                        __flush_tlb_page(flush_vma, flush_addr);
        }

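        /* Ack the IPI by clearing our rw_ipi vector; a sender that asked
         * to wait polls for this in send_ipi(). */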
        ipi.vector = 0;
        REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);

        return IRQ_HANDLED;
}