/* linux/arch/cris/arch-v32/kernel/irq.c */
   1/*
   2 * Copyright (C) 2003, Axis Communications AB.
   3 */
   4
   5#include <asm/irq.h>
   6#include <linux/irq.h>
   7#include <linux/interrupt.h>
   8#include <linux/smp.h>
   9#include <linux/kernel.h>
  10#include <linux/errno.h>
  11#include <linux/init.h>
  12#include <linux/profile.h>
  13#include <linux/proc_fs.h>
  14#include <linux/seq_file.h>
  15#include <linux/threads.h>
  16#include <linux/spinlock.h>
  17#include <linux/kernel_stat.h>
  18#include <hwregs/reg_map.h>
  19#include <hwregs/reg_rdwr.h>
  20#include <hwregs/intr_vect.h>
  21#include <hwregs/intr_vect_defs.h>
  22
  23#define CPU_FIXED -1
  24
  25/* IRQ masks (refer to comment for crisv32_do_multiple) */
  26#if TIMER0_INTR_VECT - FIRST_IRQ < 32
  27#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ))
  28#undef TIMER_VECT1
  29#else
  30#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ - 32))
  31#define TIMER_VECT1
  32#endif
  33#ifdef CONFIG_ETRAX_KGDB
  34#if defined(CONFIG_ETRAX_KGDB_PORT0)
  35#define IGNOREMASK (1 << (SER0_INTR_VECT - FIRST_IRQ))
  36#elif defined(CONFIG_ETRAX_KGDB_PORT1)
  37#define IGNOREMASK (1 << (SER1_INTR_VECT - FIRST_IRQ))
  38#elif defined(CONFIG_ETRAX_KGB_PORT2)
  39#define IGNOREMASK (1 << (SER2_INTR_VECT - FIRST_IRQ))
  40#elif defined(CONFIG_ETRAX_KGDB_PORT3)
  41#define IGNOREMASK (1 << (SER3_INTR_VECT - FIRST_IRQ))
  42#endif
  43#endif
  44
/* Protects irq_allocations and the rw_mask registers. */
DEFINE_SPINLOCK(irq_lock);

struct cris_irq_allocation
{
  int cpu; /* The CPU to which the IRQ is currently allocated. */
  cpumask_t mask; /* The CPUs to which the IRQ may be allocated. */
};

/* Per-IRQ allocation state: every IRQ starts on CPU 0 and may be
 * moved to any CPU (see irq_cpu() / set_affinity_crisv32_irq()). */
struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] =
  { [0 ... NR_REAL_IRQS - 1] = {0, CPU_MASK_ALL} };

/* Interrupt-controller register-bank instance per CPU. */
static unsigned long irq_regs[NR_CPUS] =
{
  regi_irq,
#ifdef CONFIG_SMP
  regi_irq2,
#endif
};

/* Number of 32-bit mask/vector registers needed to cover all IRQs. */
#if NR_REAL_IRQS > 32
#define NBR_REGS 2
#else
#define NBR_REGS 1
#endif

unsigned long cpu_irq_counters[NR_CPUS];
unsigned long irq_counters[NR_REAL_IRQS];
  72
  73/* From irq.c. */
  74extern void weird_irq(void);
  75
  76/* From entry.S. */
  77extern void system_call(void);
  78extern void nmi_interrupt(void);
  79extern void multiple_interrupt(void);
  80extern void gdb_handle_exception(void);
  81extern void i_mmu_refill(void);
  82extern void i_mmu_invalid(void);
  83extern void i_mmu_access(void);
  84extern void i_mmu_execute(void);
  85extern void d_mmu_refill(void);
  86extern void d_mmu_invalid(void);
  87extern void d_mmu_access(void);
  88extern void d_mmu_write(void);
  89
  90/* From kgdb.c. */
  91extern void kgdb_init(void);
  92extern void breakpoint(void);
  93
  94/* From traps.c.  */
  95extern void breakh_BUG(void);
  96
  97/*
  98 * Build the IRQ handler stubs using macros from irq.h.
  99 */
/* First bank: vectors 0x31..0x50 (the 32 base IRQs). */
BUILD_IRQ(0x31)
BUILD_IRQ(0x32)
BUILD_IRQ(0x33)
BUILD_IRQ(0x34)
BUILD_IRQ(0x35)
BUILD_IRQ(0x36)
BUILD_IRQ(0x37)
BUILD_IRQ(0x38)
BUILD_IRQ(0x39)
BUILD_IRQ(0x3a)
BUILD_IRQ(0x3b)
BUILD_IRQ(0x3c)
BUILD_IRQ(0x3d)
BUILD_IRQ(0x3e)
BUILD_IRQ(0x3f)
BUILD_IRQ(0x40)
BUILD_IRQ(0x41)
BUILD_IRQ(0x42)
BUILD_IRQ(0x43)
BUILD_IRQ(0x44)
BUILD_IRQ(0x45)
BUILD_IRQ(0x46)
BUILD_IRQ(0x47)
BUILD_IRQ(0x48)
BUILD_IRQ(0x49)
BUILD_IRQ(0x4a)
BUILD_IRQ(0x4b)
BUILD_IRQ(0x4c)
BUILD_IRQ(0x4d)
BUILD_IRQ(0x4e)
BUILD_IRQ(0x4f)
BUILD_IRQ(0x50)
#if MACH_IRQS > 32
/* Second bank: vectors 0x51..0x70, present only on machines with
 * more than 32 IRQs. */
BUILD_IRQ(0x51)
BUILD_IRQ(0x52)
BUILD_IRQ(0x53)
BUILD_IRQ(0x54)
BUILD_IRQ(0x55)
BUILD_IRQ(0x56)
BUILD_IRQ(0x57)
BUILD_IRQ(0x58)
BUILD_IRQ(0x59)
BUILD_IRQ(0x5a)
BUILD_IRQ(0x5b)
BUILD_IRQ(0x5c)
BUILD_IRQ(0x5d)
BUILD_IRQ(0x5e)
BUILD_IRQ(0x5f)
BUILD_IRQ(0x60)
BUILD_IRQ(0x61)
BUILD_IRQ(0x62)
BUILD_IRQ(0x63)
BUILD_IRQ(0x64)
BUILD_IRQ(0x65)
BUILD_IRQ(0x66)
BUILD_IRQ(0x67)
BUILD_IRQ(0x68)
BUILD_IRQ(0x69)
BUILD_IRQ(0x6a)
BUILD_IRQ(0x6b)
BUILD_IRQ(0x6c)
BUILD_IRQ(0x6d)
BUILD_IRQ(0x6e)
BUILD_IRQ(0x6f)
BUILD_IRQ(0x70)
#endif
 166
/* Pointers to the low-level handler stubs built by BUILD_IRQ above,
 * indexed by IRQ number relative to FIRST_IRQ (vector 0x31). */
static void (*interrupt[MACH_IRQS])(void) = {
	IRQ0x31_interrupt, IRQ0x32_interrupt, IRQ0x33_interrupt,
	IRQ0x34_interrupt, IRQ0x35_interrupt, IRQ0x36_interrupt,
	IRQ0x37_interrupt, IRQ0x38_interrupt, IRQ0x39_interrupt,
	IRQ0x3a_interrupt, IRQ0x3b_interrupt, IRQ0x3c_interrupt,
	IRQ0x3d_interrupt, IRQ0x3e_interrupt, IRQ0x3f_interrupt,
	IRQ0x40_interrupt, IRQ0x41_interrupt, IRQ0x42_interrupt,
	IRQ0x43_interrupt, IRQ0x44_interrupt, IRQ0x45_interrupt,
	IRQ0x46_interrupt, IRQ0x47_interrupt, IRQ0x48_interrupt,
	IRQ0x49_interrupt, IRQ0x4a_interrupt, IRQ0x4b_interrupt,
	IRQ0x4c_interrupt, IRQ0x4d_interrupt, IRQ0x4e_interrupt,
	IRQ0x4f_interrupt, IRQ0x50_interrupt,
#if MACH_IRQS > 32
	IRQ0x51_interrupt, IRQ0x52_interrupt, IRQ0x53_interrupt,
	IRQ0x54_interrupt, IRQ0x55_interrupt, IRQ0x56_interrupt,
	IRQ0x57_interrupt, IRQ0x58_interrupt, IRQ0x59_interrupt,
	IRQ0x5a_interrupt, IRQ0x5b_interrupt, IRQ0x5c_interrupt,
	IRQ0x5d_interrupt, IRQ0x5e_interrupt, IRQ0x5f_interrupt,
	IRQ0x60_interrupt, IRQ0x61_interrupt, IRQ0x62_interrupt,
	IRQ0x63_interrupt, IRQ0x64_interrupt, IRQ0x65_interrupt,
	IRQ0x66_interrupt, IRQ0x67_interrupt, IRQ0x68_interrupt,
	IRQ0x69_interrupt, IRQ0x6a_interrupt, IRQ0x6b_interrupt,
	IRQ0x6c_interrupt, IRQ0x6d_interrupt, IRQ0x6e_interrupt,
	IRQ0x6f_interrupt, IRQ0x70_interrupt,
#endif
};
 194
 195void
 196block_irq(int irq, int cpu)
 197{
 198        int intr_mask;
 199        unsigned long flags;
 200
 201        spin_lock_irqsave(&irq_lock, flags);
 202        if (irq - FIRST_IRQ < 32)
 203                intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
 204                        rw_mask, 0);
 205        else
 206                intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
 207                        rw_mask, 1);
 208
 209        /* Remember; 1 let thru, 0 block. */
 210        if (irq - FIRST_IRQ < 32)
 211                intr_mask &= ~(1 << (irq - FIRST_IRQ));
 212        else
 213                intr_mask &= ~(1 << (irq - FIRST_IRQ - 32));
 214
 215        if (irq - FIRST_IRQ < 32)
 216                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
 217                        0, intr_mask);
 218        else
 219                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
 220                        1, intr_mask);
 221        spin_unlock_irqrestore(&irq_lock, flags);
 222}
 223
 224void
 225unblock_irq(int irq, int cpu)
 226{
 227        int intr_mask;
 228        unsigned long flags;
 229
 230        spin_lock_irqsave(&irq_lock, flags);
 231        if (irq - FIRST_IRQ < 32)
 232                intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
 233                        rw_mask, 0);
 234        else
 235                intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
 236                        rw_mask, 1);
 237
 238        /* Remember; 1 let thru, 0 block. */
 239        if (irq - FIRST_IRQ < 32)
 240                intr_mask |= (1 << (irq - FIRST_IRQ));
 241        else
 242                intr_mask |= (1 << (irq - FIRST_IRQ - 32));
 243
 244        if (irq - FIRST_IRQ < 32)
 245                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
 246                        0, intr_mask);
 247        else
 248                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
 249                        1, intr_mask);
 250
 251        spin_unlock_irqrestore(&irq_lock, flags);
 252}
 253
 254/* Find out which CPU the irq should be allocated to. */
 255static int irq_cpu(int irq)
 256{
 257        int cpu;
 258        unsigned long flags;
 259
 260        spin_lock_irqsave(&irq_lock, flags);
 261        cpu = irq_allocations[irq - FIRST_IRQ].cpu;
 262
 263        /* Fixed interrupts stay on the local CPU. */
 264        if (cpu == CPU_FIXED)
 265        {
 266                spin_unlock_irqrestore(&irq_lock, flags);
 267                return smp_processor_id();
 268        }
 269
 270
 271        /* Let the interrupt stay if possible */
 272        if (cpu_isset(cpu, irq_allocations[irq - FIRST_IRQ].mask))
 273                goto out;
 274
 275        /* IRQ must be moved to another CPU. */
 276        cpu = first_cpu(irq_allocations[irq - FIRST_IRQ].mask);
 277        irq_allocations[irq - FIRST_IRQ].cpu = cpu;
 278out:
 279        spin_unlock_irqrestore(&irq_lock, flags);
 280        return cpu;
 281}
 282
 283void
 284mask_irq(int irq)
 285{
 286        int cpu;
 287
 288        for (cpu = 0; cpu < NR_CPUS; cpu++)
 289                block_irq(irq, cpu);
 290}
 291
 292void
 293unmask_irq(int irq)
 294{
 295        unblock_irq(irq, irq_cpu(irq));
 296}
 297
 298
/* irq_chip .startup: enable the IRQ; returns 0 (no pending edge to report). */
static unsigned int startup_crisv32_irq(unsigned int irq)
{
	unmask_irq(irq);
	return 0;
}
 304
/* irq_chip .shutdown: block the IRQ on all CPUs. */
static void shutdown_crisv32_irq(unsigned int irq)
{
	mask_irq(irq);
}
 309
/* irq_chip .enable: unblock the IRQ on its allocated CPU. */
static void enable_crisv32_irq(unsigned int irq)
{
	unmask_irq(irq);
}
 314
/* irq_chip .disable: block the IRQ on all CPUs. */
static void disable_crisv32_irq(unsigned int irq)
{
	mask_irq(irq);
}
 319
/* irq_chip .ack: intentionally empty — no explicit acknowledge is
 * performed here (blocking/unblocking is done in crisv32_do_IRQ). */
static void ack_crisv32_irq(unsigned int irq)
{
}
 323
/* irq_chip .end: intentionally empty — unblocking is handled by
 * crisv32_do_IRQ after do_IRQ returns. */
static void end_crisv32_irq(unsigned int irq)
{
}
 327
 328void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
 329{
 330        unsigned long flags;
 331        spin_lock_irqsave(&irq_lock, flags);
 332        irq_allocations[irq - FIRST_IRQ].mask = *dest;
 333        spin_unlock_irqrestore(&irq_lock, flags);
 334}
 335
/* The irq_chip shared by all CRISv32 interrupts. */
static struct irq_chip crisv32_irq_type = {
	.typename =    "CRISv32",
	.startup =     startup_crisv32_irq,
	.shutdown =    shutdown_crisv32_irq,
	.enable =      enable_crisv32_irq,
	.disable =     disable_crisv32_irq,
	.ack =         ack_crisv32_irq,
	.end =         end_crisv32_irq,
	.set_affinity = set_affinity_crisv32_irq
};
 346
 347void
 348set_exception_vector(int n, irqvectptr addr)
 349{
 350        etrax_irv->v[n] = (irqvectptr) addr;
 351}
 352
 353extern void do_IRQ(int irq, struct pt_regs * regs);
 354
 355void
 356crisv32_do_IRQ(int irq, int block, struct pt_regs* regs)
 357{
 358        /* Interrupts that may not be moved to another CPU and
 359         * are IRQF_DISABLED may skip blocking. This is currently
 360         * only valid for the timer IRQ and the IPI and is used
 361         * for the timer interrupt to avoid watchdog starvation.
 362         */
 363        if (!block) {
 364                do_IRQ(irq, regs);
 365                return;
 366        }
 367
 368        block_irq(irq, smp_processor_id());
 369        do_IRQ(irq, regs);
 370
 371        unblock_irq(irq, irq_cpu(irq));
 372}
 373
/* If multiple interrupts occur simultaneously we get a multiple
 * interrupt from the CPU and software has to sort out which
 * interrupts that happened. There are two special cases here:
 *
 * 1. Timer interrupts may never be blocked because of the
 *    watchdog (refer to comment in include/asm/arch/irq.h)
 * 2. GDB serial port IRQs are unhandled here and will be handled
 *    as a single IRQ when it strikes again because the GDB
 *    stub wants to save the registers in its own fashion.
 */
 384void
 385crisv32_do_multiple(struct pt_regs* regs)
 386{
 387        int cpu;
 388        int mask;
 389        int masked[NBR_REGS];
 390        int bit;
 391        int i;
 392
 393        cpu = smp_processor_id();
 394
 395        /* An extra irq_enter here to prevent softIRQs to run after
 396         * each do_IRQ. This will decrease the interrupt latency.
 397         */
 398        irq_enter();
 399
 400        for (i = 0; i < NBR_REGS; i++) {
 401                /* Get which IRQs that happend. */
 402                masked[i] = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
 403                        r_masked_vect, i);
 404
 405                /* Calculate new IRQ mask with these IRQs disabled. */
 406                mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
 407                mask &= ~masked[i];
 408
 409        /* Timer IRQ is never masked */
 410#ifdef TIMER_VECT1
 411                if ((i == 1) && (masked[0] & TIMER_MASK))
 412                        mask |= TIMER_MASK;
 413#else
 414                if ((i == 0) && (masked[0] & TIMER_MASK))
 415                        mask |= TIMER_MASK;
 416#endif
 417                /* Block all the IRQs */
 418                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
 419
 420        /* Check for timer IRQ and handle it special. */
 421#ifdef TIMER_VECT1
 422                if ((i == 1) && (masked[i] & TIMER_MASK)) {
 423                        masked[i] &= ~TIMER_MASK;
 424                        do_IRQ(TIMER0_INTR_VECT, regs);
 425                }
 426#else
 427                if ((i == 0) && (masked[i] & TIMER_MASK)) {
 428                         masked[i] &= ~TIMER_MASK;
 429                         do_IRQ(TIMER0_INTR_VECT, regs);
 430                }
 431        }
 432#endif
 433
 434#ifdef IGNORE_MASK
 435        /* Remove IRQs that can't be handled as multiple. */
 436        masked[0] &= ~IGNORE_MASK;
 437#endif
 438
 439        /* Handle the rest of the IRQs. */
 440        for (i = 0; i < NBR_REGS; i++) {
 441                for (bit = 0; bit < 32; bit++) {
 442                        if (masked[i] & (1 << bit))
 443                                do_IRQ(bit + FIRST_IRQ + i*32, regs);
 444                }
 445        }
 446
 447        /* Unblock all the IRQs. */
 448        for (i = 0; i < NBR_REGS; i++) {
 449                mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
 450                mask |= masked[i];
 451                REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
 452        }
 453
 454        /* This irq_exit() will trigger the soft IRQs. */
 455        irq_exit();
 456}
 457
/*
 * This is called by start_kernel. It clears the interrupt masks,
 * installs the CRISv32 irq_chip and handler stubs for every IRQ,
 * and points the remaining exception vectors at their handlers.
 */
void __init
init_IRQ(void)
{
	int i;
	int j;
	reg_intr_vect_rw_mask vect_mask = {0};

	/* Clear all interrupts masks (all zero = everything blocked). */
	for (i = 0; i < NBR_REGS; i++)
		REG_WR_VECT(intr_vect, regi_irq, rw_mask, i, vect_mask);

	/* Default every exception vector to the catch-all handler. */
	for (i = 0; i < 256; i++)
		etrax_irv->v[i] = weird_irq;

	/* Install the shared irq_chip and the BUILD_IRQ stubs for each IRQ.
	 * NOTE(review): indexes interrupt[] up to NR_IRQS - assumes
	 * NR_IRQS <= MACH_IRQS; verify against the machine headers. */
	for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
		irq_desc[j].chip = &crisv32_irq_type;
		set_exception_vector(i, interrupt[j]);
	}

	/* Mark Timer and IPI IRQs as CPU local (never migrated). */
	irq_allocations[TIMER0_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
	irq_desc[TIMER0_INTR_VECT].status |= IRQ_PER_CPU;
	irq_allocations[IPI_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
	irq_desc[IPI_INTR_VECT].status |= IRQ_PER_CPU;

	set_exception_vector(0x00, nmi_interrupt);
	set_exception_vector(0x30, multiple_interrupt);

	/* Set up handler for various MMU bus faults. */
	set_exception_vector(0x04, i_mmu_refill);
	set_exception_vector(0x05, i_mmu_invalid);
	set_exception_vector(0x06, i_mmu_access);
	set_exception_vector(0x07, i_mmu_execute);
	set_exception_vector(0x08, d_mmu_refill);
	set_exception_vector(0x09, d_mmu_invalid);
	set_exception_vector(0x0a, d_mmu_access);
	set_exception_vector(0x0b, d_mmu_write);

#ifdef CONFIG_BUG
	/* Break 14 handler, used to implement cheap BUG().  */
	set_exception_vector(0x1e, breakh_BUG);
#endif

	/* The system-call trap is reached by "break 13". */
	set_exception_vector(0x1d, system_call);

	/* Exception handlers for debugging, both user-mode and kernel-mode. */

	/* Break 8. */
	set_exception_vector(0x18, gdb_handle_exception);
	/* Hardware single step. */
	set_exception_vector(0x3, gdb_handle_exception);
	/* Hardware breakpoint. */
	set_exception_vector(0xc, gdb_handle_exception);

#ifdef CONFIG_ETRAX_KGDB
	kgdb_init();
	/* Everything is set up; now trap the kernel. */
	breakpoint();
#endif
}
 524
 525