linux/arch/mips/lantiq/irq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2010 John Crispin <john@phrozen.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions - internal irqs */
#define LTQ_ICU_ISR             0x0000
#define LTQ_ICU_IER             0x0008
#define LTQ_ICU_IOSR            0x0010
#define LTQ_ICU_IRSR            0x0018
#define LTQ_ICU_IMR             0x0020

#define LTQ_ICU_IM_SIZE         0x28

/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C          0x0000
#define LTQ_EIU_EXIN_INIC       0x0004
#define LTQ_EIU_EXIN_INC        0x0008
#define LTQ_EIU_EXIN_INEN       0x000C

/* number of external interrupts */
#define MAX_EIU                 6

/* the performance counter */
#define LTQ_PERF_IRQ            (INT_NUM_IM4_IRL0 + 31)

/*
 * irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ         22

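/*
 * register accessors: every VPE has its own ICU register block, and
 * consecutive interrupt modules (IM) inside a block are spaced
 * LTQ_ICU_IM_SIZE bytes apart
 */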
#define ltq_icu_w32(vpe, m, x, y)       \
        ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))

#define ltq_icu_r32(vpe, m, x)          \
        ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))

#define ltq_eiu_w32(x, y)       ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)          ltq_r32(ltq_eiu_membase + (x))

/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE            8

static int exin_avail;
static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[NR_CPUS];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
static DEFINE_SPINLOCK(ltq_eiu_lock);
static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
static int ltq_perfcount_irq;

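/*
 * map an external interrupt pin (EXIN) index to the hwirq listed in the
 * "lantiq,eiu-irqs" devicetree property, or -1 if out of range
 */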
int ltq_eiu_get_irq(int exin)
{
        if (exin < exin_avail)
                return ltq_eiu_irq[exin];
        return -1;
}

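/* mask an irq by clearing its enable bit (IER) on every present VPE */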
void ltq_disable_irq(struct irq_data *d)
{
        unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        unsigned long im = offset / INT_NUM_IM_OFFSET;
        unsigned long flags;
        int vpe;

        offset %= INT_NUM_IM_OFFSET;

        raw_spin_lock_irqsave(&ltq_icu_lock, flags);
        for_each_present_cpu(vpe) {
                ltq_icu_w32(vpe, im,
                            ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
                            LTQ_ICU_IER);
        }
        raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

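/*
 * mask an irq and acknowledge any pending instance of it: clear the IER
 * bit and write the ISR bit on every present VPE
 */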
void ltq_mask_and_ack_irq(struct irq_data *d)
{
        unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        unsigned long im = offset / INT_NUM_IM_OFFSET;
        unsigned long flags;
        int vpe;

        offset %= INT_NUM_IM_OFFSET;

        raw_spin_lock_irqsave(&ltq_icu_lock, flags);
        for_each_present_cpu(vpe) {
                ltq_icu_w32(vpe, im,
                            ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
                            LTQ_ICU_IER);
                ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
        }
        raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

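/* acknowledge an irq by writing its ISR bit on every present VPE */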
static void ltq_ack_irq(struct irq_data *d)
{
        unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        unsigned long im = offset / INT_NUM_IM_OFFSET;
        unsigned long flags;
        int vpe;

        offset %= INT_NUM_IM_OFFSET;

        raw_spin_lock_irqsave(&ltq_icu_lock, flags);
        for_each_present_cpu(vpe) {
                ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
        }
        raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

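/*
 * unmask an irq, but only on the VPE selected by the effective affinity
 * mask; this is how ltq_icu_irq_set_affinity() takes effect
 */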
void ltq_enable_irq(struct irq_data *d)
{
        unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        unsigned long im = offset / INT_NUM_IM_OFFSET;
        unsigned long flags;
        int vpe;

        offset %= INT_NUM_IM_OFFSET;

        vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));

        /* should never happen, except perhaps during CPU hotplug */
        if (unlikely(vpe >= nr_cpu_ids))
                vpe = smp_processor_id();

        raw_spin_lock_irqsave(&ltq_icu_lock, flags);

        ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
                    LTQ_ICU_IER);

        raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

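/*
 * program the trigger mode of an external interrupt pin; each pin owns a
 * 3-bit field at a 4-bit stride in the EXIN_C register
 */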
static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
        int i;
        unsigned long flags;

        for (i = 0; i < exin_avail; i++) {
                if (d->hwirq == ltq_eiu_irq[i]) {
                        int val = 0;
                        int edge = 0;

                        switch (type) {
                        case IRQF_TRIGGER_NONE:
                                break;
                        case IRQF_TRIGGER_RISING:
                                val = 1;
                                edge = 1;
                                break;
                        case IRQF_TRIGGER_FALLING:
                                val = 2;
                                edge = 1;
                                break;
                        case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
                                val = 3;
                                edge = 1;
                                break;
                        case IRQF_TRIGGER_HIGH:
                                val = 5;
                                break;
                        case IRQF_TRIGGER_LOW:
                                val = 6;
                                break;
                        default:
                                pr_err("invalid type %d for irq %ld\n",
                                        type, d->hwirq);
                                return -EINVAL;
                        }

                        if (edge)
                                irq_set_handler(d->hwirq, handle_edge_irq);

                        spin_lock_irqsave(&ltq_eiu_lock, flags);
                        ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
                                    (~(7 << (i * 4)))) | (val << (i * 4)),
                                    LTQ_EIU_EXIN_C);
                        spin_unlock_irqrestore(&ltq_eiu_lock, flags);
                }
        }

        return 0;
}

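/*
 * startup for EIU irqs: unmask the irq in the ICU, default the pin to
 * low-level trigger, clear any stale pending bit (INC) and enable the
 * pin (INEN)
 */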
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
        int i;

        ltq_enable_irq(d);
        for (i = 0; i < exin_avail; i++) {
                if (d->hwirq == ltq_eiu_irq[i]) {
                        /* by default we are low level triggered */
                        ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
                        /* clear all pending */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
                                LTQ_EIU_EXIN_INC);
                        /* enable */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
                                LTQ_EIU_EXIN_INEN);
                        break;
                }
        }

        return 0;
}

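/* shutdown for EIU irqs: mask the irq in the ICU and disable the pin */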
static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
        int i;

        ltq_disable_irq(d);
        for (i = 0; i < exin_avail; i++) {
                if (d->hwirq == ltq_eiu_irq[i]) {
                        /* disable */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
                                LTQ_EIU_EXIN_INEN);
                        break;
                }
        }
}

#if defined(CONFIG_SMP)
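/*
 * only record the requested affinity here; ltq_enable_irq() applies it
 * when the irq is next unmasked
 */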
static int ltq_icu_irq_set_affinity(struct irq_data *d,
                                    const struct cpumask *cpumask, bool force)
{
        struct cpumask tmask;

        if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
                return -EINVAL;

        irq_data_update_effective_affinity(d, &tmask);

        return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip ltq_irq_type = {
        .name = "icu",
        .irq_enable = ltq_enable_irq,
        .irq_disable = ltq_disable_irq,
        .irq_unmask = ltq_enable_irq,
        .irq_ack = ltq_ack_irq,
        .irq_mask = ltq_disable_irq,
        .irq_mask_ack = ltq_mask_and_ack_irq,
#if defined(CONFIG_SMP)
        .irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};

static struct irq_chip ltq_eiu_type = {
        .name = "eiu",
        .irq_startup = ltq_startup_eiu_irq,
        .irq_shutdown = ltq_shutdown_eiu_irq,
        .irq_enable = ltq_enable_irq,
        .irq_disable = ltq_disable_irq,
        .irq_unmask = ltq_enable_irq,
        .irq_ack = ltq_ack_irq,
        .irq_mask = ltq_disable_irq,
        .irq_mask_ack = ltq_mask_and_ack_irq,
        .irq_set_type = ltq_eiu_settype,
#if defined(CONFIG_SMP)
        .irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};

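/*
 * chained handler for the cascaded MIPS CPU irqs: each CPU irq line
 * carries one 32-irq interrupt module; read the IOSR to find the
 * highest pending bit and dispatch it through the irq domain
 */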
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
        unsigned int module = irq_desc_get_irq(desc) - 2;
        u32 irq;
        irq_hw_number_t hwirq;
        int vpe = smp_processor_id();

        irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
        if (irq == 0)
                return;

        /*
         * a silicon bug causes only the most significant set bit to be
         * valid; all other bits may be bogus
         */
        irq = __fls(irq);
        hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
        generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));

        /* if this is an EBU irq, we need to ack it or get a deadlock */
        if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
                ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
                        LTQ_EBU_PCC_ISTAT);
}

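/*
 * domain map callback: hwirqs listed as external pins get the EIU chip,
 * all others the plain ICU chip; effective affinity defaults to CPU 0
 */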
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
        struct irq_chip *chip = &ltq_irq_type;
        struct irq_data *data;
        int i;

        if (hw < MIPS_CPU_IRQ_CASCADE)
                return 0;

        for (i = 0; i < exin_avail; i++)
                if (hw == ltq_eiu_irq[i])
                        chip = &ltq_eiu_type;

        data = irq_get_irq_data(irq);

        irq_data_update_effective_affinity(data, cpumask_of(0));

        irq_set_chip_and_handler(irq, chip, handle_level_irq);

        return 0;
}

static const struct irq_domain_ops irq_domain_ops = {
        .xlate = irq_domain_xlate_onetwocell,
        .map = icu_map,
};

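/*
 * probe the ICU from the devicetree: map one register block per VPE,
 * mask and clear all irqs, install the chained handlers and the irq
 * domain, and set up the optional (xway-only) EIU block
 */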
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
        struct device_node *eiu_node;
        struct resource res;
        int i, ret, vpe;

        /* load register regions of available ICUs */
        for_each_possible_cpu(vpe) {
                if (of_address_to_resource(node, vpe, &res))
                        panic("Failed to get icu%i memory range", vpe);

                if (!request_mem_region(res.start, resource_size(&res),
                                        res.name))
                        pr_err("Failed to request icu%i memory\n", vpe);

                ltq_icu_membase[vpe] = ioremap(res.start,
                                        resource_size(&res));

                if (!ltq_icu_membase[vpe])
                        panic("Failed to remap icu%i memory", vpe);
        }

        /* turn off all irqs by default */
        for_each_possible_cpu(vpe) {
                for (i = 0; i < MAX_IM; i++) {
                        /* make sure all irqs are turned off by default */
                        ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);

                        /* clear all possibly pending interrupts */
                        ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
                        ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);

                        /* clear resend */
                        ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
                }
        }

        mips_cpu_irq_init();

        for (i = 0; i < MAX_IM; i++)
                irq_set_chained_handler(i + 2, ltq_hw_irq_handler);

        ltq_domain = irq_domain_add_linear(node,
                (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
                &irq_domain_ops, 0);

        /* tell oprofile which irq to use */
        ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

        /* the external interrupts are optional and xway only */
        eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
        if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
                /* find out how many external irq sources we have */
                exin_avail = of_property_count_u32_elems(eiu_node,
                                                         "lantiq,eiu-irqs");

                if (exin_avail > MAX_EIU)
                        exin_avail = MAX_EIU;

                ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
                                                ltq_eiu_irq, exin_avail);
                if (ret)
                        panic("failed to load external irq resources");

                if (!request_mem_region(res.start, resource_size(&res),
                                                        res.name))
                        pr_err("Failed to request eiu memory\n");

                ltq_eiu_membase = ioremap(res.start,
                                                        resource_size(&res));
                if (!ltq_eiu_membase)
                        panic("Failed to remap eiu memory");
        }

        return 0;
}

int get_c0_perfcount_int(void)
{
        return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{
        return CP0_LEGACY_COMPARE_IRQ;
}

IRQCHIP_DECLARE(lantiq_icu, "lantiq,icu", icu_of_init);

void __init arch_init_irq(void)
{
        irqchip_init();
}
 432