linux/drivers/irqchip/irq-gic-v3.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
   4 * Author: Marc Zyngier <marc.zyngier@arm.com>
   5 */
   6
   7#define pr_fmt(fmt)     "GICv3: " fmt
   8
   9#include <linux/acpi.h>
  10#include <linux/cpu.h>
  11#include <linux/cpu_pm.h>
  12#include <linux/delay.h>
  13#include <linux/interrupt.h>
  14#include <linux/irqdomain.h>
  15#include <linux/of.h>
  16#include <linux/of_address.h>
  17#include <linux/of_irq.h>
  18#include <linux/percpu.h>
  19#include <linux/refcount.h>
  20#include <linux/slab.h>
  21
  22#include <linux/irqchip.h>
  23#include <linux/irqchip/arm-gic-common.h>
  24#include <linux/irqchip/arm-gic-v3.h>
  25#include <linux/irqchip/irq-partition-percpu.h>
  26
  27#include <asm/cputype.h>
  28#include <asm/exception.h>
  29#include <asm/smp_plat.h>
  30#include <asm/virt.h>
  31
  32#include "irq-gic-common.h"
  33
  34#define GICD_INT_NMI_PRI        (GICD_INT_DEF_PRI & ~0x80)
  35
  36#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996     (1ULL << 0)
  37#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539   (1ULL << 1)
  38
  39#define GIC_IRQ_TYPE_PARTITION  (GIC_IRQ_TYPE_LPI + 1)
  40
  41struct redist_region {
  42        void __iomem            *redist_base;
  43        phys_addr_t             phys_base;
  44        bool                    single_redist;
  45};
  46
  47struct gic_chip_data {
  48        struct fwnode_handle    *fwnode;
  49        void __iomem            *dist_base;
  50        struct redist_region    *redist_regions;
  51        struct rdists           rdists;
  52        struct irq_domain       *domain;
  53        u64                     redist_stride;
  54        u32                     nr_redist_regions;
  55        u64                     flags;
  56        bool                    has_rss;
  57        unsigned int            ppi_nr;
  58        struct partition_desc   **ppi_descs;
  59};
  60
  61static struct gic_chip_data gic_data __read_mostly;
  62static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
  63
  64#define GIC_ID_NR       (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
  65#define GIC_LINE_NR     min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
  66#define GIC_ESPI_NR     GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
  67
  68/*
  69 * The behaviours of RPR and PMR registers differ depending on the value of
  70 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
  71 * distributor and redistributors depends on whether security is enabled in the
  72 * GIC.
  73 *
  74 * When security is enabled, non-secure priority values from the (re)distributor
  75 * are presented to the GIC CPUIF as follows:
  76 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
  77 *
  78 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
  79 * EL1 are subject to a similar operation thus matching the priorities presented
  80 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
  81 * these values are unchanged by the GIC.
  82 *
  83 * see GICv3/GICv4 Architecture Specification (IHI0069D):
  84 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
  85 *   priorities.
  86 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
  87 *   interrupt.
  88 */
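    /*
     * Illustrative example of the presentation above: with two security
     * states, a priority of 0xa0 programmed into GICD_IPRIORITYR<n> from
     * non-secure EL1 reaches the CPU interface as (0xa0 >> 1) | 0x80 = 0xd0.
     */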
  89static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
  90
  91/*
  92 * Global static key controlling whether an update to PMR allowing more
  93 * interrupts has to be propagated to the redistributor (DSB SY).
  94 * And this needs to be exported for modules to be able to enable
  95 * interrupts...
  96 */
  97DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
  98EXPORT_SYMBOL(gic_pmr_sync);
  99
 100DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
 101EXPORT_SYMBOL(gic_nonsecure_priorities);
 102
 103/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
 104static refcount_t *ppi_nmi_refs;
 105
 106static struct gic_kvm_info gic_v3_kvm_info __initdata;
 107static DEFINE_PER_CPU(bool, has_rss);
 108
 109#define MPIDR_RS(mpidr)                 (((mpidr) & 0xF0UL) >> 4)
 110#define gic_data_rdist()                (this_cpu_ptr(gic_data.rdists.rdist))
 111#define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
 112#define gic_data_rdist_sgi_base()       (gic_data_rdist_rd_base() + SZ_64K)
 113
 114/* Our default, arbitrary priority value. Linux only uses one anyway. */
 115#define DEFAULT_PMR_VALUE       0xf0
 116
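    /*
     * INTID ranges as carved up below: SGIs (0-15), PPIs (16-31),
     * SPIs (32-1019), extended PPIs/SPIs, and LPIs from 8192 upwards.
     */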
 117enum gic_intid_range {
 118        SGI_RANGE,
 119        PPI_RANGE,
 120        SPI_RANGE,
 121        EPPI_RANGE,
 122        ESPI_RANGE,
 123        LPI_RANGE,
 124        __INVALID_RANGE__
 125};
 126
 127static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
 128{
 129        switch (hwirq) {
 130        case 0 ... 15:
 131                return SGI_RANGE;
 132        case 16 ... 31:
 133                return PPI_RANGE;
 134        case 32 ... 1019:
 135                return SPI_RANGE;
 136        case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
 137                return EPPI_RANGE;
 138        case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
 139                return ESPI_RANGE;
 140        case 8192 ... GENMASK(23, 0):
 141                return LPI_RANGE;
 142        default:
 143                return __INVALID_RANGE__;
 144        }
 145}
 146
 147static enum gic_intid_range get_intid_range(struct irq_data *d)
 148{
 149        return __get_intid_range(d->hwirq);
 150}
 151
 152static inline unsigned int gic_irq(struct irq_data *d)
 153{
 154        return d->hwirq;
 155}
 156
 157static inline bool gic_irq_in_rdist(struct irq_data *d)
 158{
 159        switch (get_intid_range(d)) {
 160        case SGI_RANGE:
 161        case PPI_RANGE:
 162        case EPPI_RANGE:
 163                return true;
 164        default:
 165                return false;
 166        }
 167}
 168
 169static inline void __iomem *gic_dist_base(struct irq_data *d)
 170{
 171        switch (get_intid_range(d)) {
 172        case SGI_RANGE:
 173        case PPI_RANGE:
 174        case EPPI_RANGE:
 175                /* SGI+PPI -> SGI_base for this CPU */
 176                return gic_data_rdist_sgi_base();
 177
 178        case SPI_RANGE:
 179        case ESPI_RANGE:
 180                /* SPI -> dist_base */
 181                return gic_data.dist_base;
 182
 183        default:
 184                return NULL;
 185        }
 186}
 187
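    /*
     * Poll the (re)distributor's CTLR.RWP bit until the previous register
     * write has been digested, giving up after roughly one second of 1us
     * delays.
     */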
 188static void gic_do_wait_for_rwp(void __iomem *base)
 189{
 190        u32 count = 1000000;    /* 1s! */
 191
 192        while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
 193                count--;
 194                if (!count) {
 195                        pr_err_ratelimited("RWP timeout, gone fishing\n");
 196                        return;
 197                }
 198                cpu_relax();
 199                udelay(1);
 200        }
 201}
 202
 203/* Wait for completion of a distributor change */
 204static void gic_dist_wait_for_rwp(void)
 205{
 206        gic_do_wait_for_rwp(gic_data.dist_base);
 207}
 208
 209/* Wait for completion of a redistributor change */
 210static void gic_redist_wait_for_rwp(void)
 211{
 212        gic_do_wait_for_rwp(gic_data_rdist_rd_base());
 213}
 214
 215#ifdef CONFIG_ARM64
 216
 217static u64 __maybe_unused gic_read_iar(void)
 218{
 219        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
 220                return gic_read_iar_cavium_thunderx();
 221        else
 222                return gic_read_iar_common();
 223}
 224#endif
 225
 226static void gic_enable_redist(bool enable)
 227{
 228        void __iomem *rbase;
 229        u32 count = 1000000;    /* 1s! */
 230        u32 val;
 231
 232        if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
 233                return;
 234
 235        rbase = gic_data_rdist_rd_base();
 236
 237        val = readl_relaxed(rbase + GICR_WAKER);
 238        if (enable)
 239                /* Wake up this CPU redistributor */
 240                val &= ~GICR_WAKER_ProcessorSleep;
 241        else
 242                val |= GICR_WAKER_ProcessorSleep;
 243        writel_relaxed(val, rbase + GICR_WAKER);
 244
 245        if (!enable) {          /* Check that GICR_WAKER is writeable */
 246                val = readl_relaxed(rbase + GICR_WAKER);
 247                if (!(val & GICR_WAKER_ProcessorSleep))
 248                        return; /* No PM support in this redistributor */
 249        }
 250
 251        while (--count) {
 252                val = readl_relaxed(rbase + GICR_WAKER);
 253                if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
 254                        break;
 255                cpu_relax();
 256                udelay(1);
 257        }
 258        if (!count)
 259                pr_err_ratelimited("redistributor failed to %s...\n",
 260                                   enable ? "wakeup" : "sleep");
 261}
 262
 263/*
 264 * Routines to disable, enable, EOI and route interrupts
 265 */
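    /*
     * Translate a (register offset, irq_data) pair into the offset and index
     * actually used for the access: extended PPIs share the regular PPI
     * registers with a shifted index, while extended SPIs live in their own
     * GICD_*nE register frames.
     */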
 266static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
 267{
 268        switch (get_intid_range(d)) {
 269        case SGI_RANGE:
 270        case PPI_RANGE:
 271        case SPI_RANGE:
 272                *index = d->hwirq;
 273                return offset;
 274        case EPPI_RANGE:
 275                /*
 276                 * Contrary to the ESPI range, the EPPI range is contiguous
 277                 * to the PPI range in the registers, so let's adjust the
 278                 * displacement accordingly. Consistency is overrated.
 279                 */
 280                *index = d->hwirq - EPPI_BASE_INTID + 32;
 281                return offset;
 282        case ESPI_RANGE:
 283                *index = d->hwirq - ESPI_BASE_INTID;
 284                switch (offset) {
 285                case GICD_ISENABLER:
 286                        return GICD_ISENABLERnE;
 287                case GICD_ICENABLER:
 288                        return GICD_ICENABLERnE;
 289                case GICD_ISPENDR:
 290                        return GICD_ISPENDRnE;
 291                case GICD_ICPENDR:
 292                        return GICD_ICPENDRnE;
 293                case GICD_ISACTIVER:
 294                        return GICD_ISACTIVERnE;
 295                case GICD_ICACTIVER:
 296                        return GICD_ICACTIVERnE;
 297                case GICD_IPRIORITYR:
 298                        return GICD_IPRIORITYRnE;
 299                case GICD_ICFGR:
 300                        return GICD_ICFGRnE;
 301                case GICD_IROUTER:
 302                        return GICD_IROUTERnE;
 303                default:
 304                        break;
 305                }
 306                break;
 307        default:
 308                break;
 309        }
 310
 311        WARN_ON(1);
 312        *index = d->hwirq;
 313        return offset;
 314}
 315
 316static int gic_peek_irq(struct irq_data *d, u32 offset)
 317{
 318        void __iomem *base;
 319        u32 index, mask;
 320
 321        offset = convert_offset_index(d, offset, &index);
 322        mask = 1 << (index % 32);
 323
 324        if (gic_irq_in_rdist(d))
 325                base = gic_data_rdist_sgi_base();
 326        else
 327                base = gic_data.dist_base;
 328
 329        return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
 330}
 331
 332static void gic_poke_irq(struct irq_data *d, u32 offset)
 333{
 334        void (*rwp_wait)(void);
 335        void __iomem *base;
 336        u32 index, mask;
 337
 338        offset = convert_offset_index(d, offset, &index);
 339        mask = 1 << (index % 32);
 340
 341        if (gic_irq_in_rdist(d)) {
 342                base = gic_data_rdist_sgi_base();
 343                rwp_wait = gic_redist_wait_for_rwp;
 344        } else {
 345                base = gic_data.dist_base;
 346                rwp_wait = gic_dist_wait_for_rwp;
 347        }
 348
 349        writel_relaxed(mask, base + offset + (index / 32) * 4);
 350        rwp_wait();
 351}
 352
 353static void gic_mask_irq(struct irq_data *d)
 354{
 355        gic_poke_irq(d, GICD_ICENABLER);
 356}
 357
 358static void gic_eoimode1_mask_irq(struct irq_data *d)
 359{
 360        gic_mask_irq(d);
 361        /*
 362         * When masking a forwarded interrupt, make sure it is
 363         * deactivated as well.
 364         *
 365         * This ensures that an interrupt that is getting
 366         * disabled/masked will not get "stuck", because there is
 367         * no one to deactivate it (guest is being terminated).
 368         */
 369        if (irqd_is_forwarded_to_vcpu(d))
 370                gic_poke_irq(d, GICD_ICACTIVER);
 371}
 372
 373static void gic_unmask_irq(struct irq_data *d)
 374{
 375        gic_poke_irq(d, GICD_ISENABLER);
 376}
 377
 378static inline bool gic_supports_nmi(void)
 379{
 380        return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
 381               static_branch_likely(&supports_pseudo_nmis);
 382}
 383
 384static int gic_irq_set_irqchip_state(struct irq_data *d,
 385                                     enum irqchip_irq_state which, bool val)
 386{
 387        u32 reg;
 388
 389        if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
 390                return -EINVAL;
 391
 392        switch (which) {
 393        case IRQCHIP_STATE_PENDING:
 394                reg = val ? GICD_ISPENDR : GICD_ICPENDR;
 395                break;
 396
 397        case IRQCHIP_STATE_ACTIVE:
 398                reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
 399                break;
 400
 401        case IRQCHIP_STATE_MASKED:
 402                reg = val ? GICD_ICENABLER : GICD_ISENABLER;
 403                break;
 404
 405        default:
 406                return -EINVAL;
 407        }
 408
 409        gic_poke_irq(d, reg);
 410        return 0;
 411}
 412
 413static int gic_irq_get_irqchip_state(struct irq_data *d,
 414                                     enum irqchip_irq_state which, bool *val)
 415{
 416        if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
 417                return -EINVAL;
 418
 419        switch (which) {
 420        case IRQCHIP_STATE_PENDING:
 421                *val = gic_peek_irq(d, GICD_ISPENDR);
 422                break;
 423
 424        case IRQCHIP_STATE_ACTIVE:
 425                *val = gic_peek_irq(d, GICD_ISACTIVER);
 426                break;
 427
 428        case IRQCHIP_STATE_MASKED:
 429                *val = !gic_peek_irq(d, GICD_ISENABLER);
 430                break;
 431
 432        default:
 433                return -EINVAL;
 434        }
 435
 436        return 0;
 437}
 438
 439static void gic_irq_set_prio(struct irq_data *d, u8 prio)
 440{
 441        void __iomem *base = gic_dist_base(d);
 442        u32 offset, index;
 443
 444        offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
 445
 446        writeb_relaxed(prio, base + offset + index);
 447}
 448
 449static u32 gic_get_ppi_index(struct irq_data *d)
 450{
 451        switch (get_intid_range(d)) {
 452        case PPI_RANGE:
 453                return d->hwirq - 16;
 454        case EPPI_RANGE:
 455                return d->hwirq - EPPI_BASE_INTID + 16;
 456        default:
 457                unreachable();
 458        }
 459}
 460
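    /*
     * Promote an interrupt to pseudo-NMI: switch its flow handler to the NMI
     * variant and raise its priority to GICD_INT_NMI_PRI. PPIs are refcounted
     * per INTID, as the same PPI may be requested as NMI on several CPUs.
     */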
 461static int gic_irq_nmi_setup(struct irq_data *d)
 462{
 463        struct irq_desc *desc = irq_to_desc(d->irq);
 464
 465        if (!gic_supports_nmi())
 466                return -EINVAL;
 467
 468        if (gic_peek_irq(d, GICD_ISENABLER)) {
 469                pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
 470                return -EINVAL;
 471        }
 472
 473        /*
 474         * A secondary irq_chip should be in charge of LPI requests;
 475         * it should not be possible to get here
 476         */
 477        if (WARN_ON(gic_irq(d) >= 8192))
 478                return -EINVAL;
 479
 480        /* desc lock should already be held */
 481        if (gic_irq_in_rdist(d)) {
 482                u32 idx = gic_get_ppi_index(d);
 483
 484                /* Setting up PPI as NMI, only switch handler for first NMI */
 485                if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
 486                        refcount_set(&ppi_nmi_refs[idx], 1);
 487                        desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
 488                }
 489        } else {
 490                desc->handle_irq = handle_fasteoi_nmi;
 491        }
 492
 493        gic_irq_set_prio(d, GICD_INT_NMI_PRI);
 494
 495        return 0;
 496}
 497
 498static void gic_irq_nmi_teardown(struct irq_data *d)
 499{
 500        struct irq_desc *desc = irq_to_desc(d->irq);
 501
 502        if (WARN_ON(!gic_supports_nmi()))
 503                return;
 504
 505        if (gic_peek_irq(d, GICD_ISENABLER)) {
 506                pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
 507                return;
 508        }
 509
 510        /*
 511         * A secondary irq_chip should be in charge of LPI requests;
 512         * it should not be possible to get here
 513         */
 514        if (WARN_ON(gic_irq(d) >= 8192))
 515                return;
 516
 517        /* desc lock should already be held */
 518        if (gic_irq_in_rdist(d)) {
 519                u32 idx = gic_get_ppi_index(d);
 520
 521                /* Tearing down NMI, only switch handler for last NMI */
 522                if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
 523                        desc->handle_irq = handle_percpu_devid_irq;
 524        } else {
 525                desc->handle_irq = handle_fasteoi_irq;
 526        }
 527
 528        gic_irq_set_prio(d, GICD_INT_DEF_PRI);
 529}
 530
 531static void gic_eoi_irq(struct irq_data *d)
 532{
 533        gic_write_eoir(gic_irq(d));
 534}
 535
 536static void gic_eoimode1_eoi_irq(struct irq_data *d)
 537{
 538        /*
 539         * No need to deactivate an LPI, or an interrupt that
 540         * is getting forwarded to a vcpu.
 541         */
 542        if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
 543                return;
 544        gic_write_dir(gic_irq(d));
 545}
 546
 547static int gic_set_type(struct irq_data *d, unsigned int type)
 548{
 549        enum gic_intid_range range;
 550        unsigned int irq = gic_irq(d);
 551        void (*rwp_wait)(void);
 552        void __iomem *base;
 553        u32 offset, index;
 554        int ret;
 555
 556        range = get_intid_range(d);
 557
 558        /* Interrupt configuration for SGIs can't be changed */
 559        if (range == SGI_RANGE)
 560                return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
 561
 562        /* SPIs have restrictions on the supported types */
 563        if ((range == SPI_RANGE || range == ESPI_RANGE) &&
 564            type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
 565                return -EINVAL;
 566
 567        if (gic_irq_in_rdist(d)) {
 568                base = gic_data_rdist_sgi_base();
 569                rwp_wait = gic_redist_wait_for_rwp;
 570        } else {
 571                base = gic_data.dist_base;
 572                rwp_wait = gic_dist_wait_for_rwp;
 573        }
 574
 575        offset = convert_offset_index(d, GICD_ICFGR, &index);
 576
 577        ret = gic_configure_irq(index, type, base + offset, rwp_wait);
 578        if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
 579                /* Misconfigured PPIs are usually not fatal */
 580                pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
 581                ret = 0;
 582        }
 583
 584        return ret;
 585}
 586
 587static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 588{
 589        if (get_intid_range(d) == SGI_RANGE)
 590                return -EINVAL;
 591
 592        if (vcpu)
 593                irqd_set_forwarded_to_vcpu(d);
 594        else
 595                irqd_clr_forwarded_to_vcpu(d);
 596        return 0;
 597}
 598
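    /*
     * Pack MPIDR affinity levels into the GICD_IROUTER layout:
     * Aff3 in bits [39:32], Aff2 in [23:16], Aff1 in [15:8], Aff0 in [7:0].
     */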
 599static u64 gic_mpidr_to_affinity(unsigned long mpidr)
 600{
 601        u64 aff;
 602
 603        aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
 604               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
 605               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
 606               MPIDR_AFFINITY_LEVEL(mpidr, 0));
 607
 608        return aff;
 609}
 610
 611static void gic_deactivate_unhandled(u32 irqnr)
 612{
 613        if (static_branch_likely(&supports_deactivate_key)) {
 614                if (irqnr < 8192)
 615                        gic_write_dir(irqnr);
 616        } else {
 617                gic_write_eoir(irqnr);
 618        }
 619}
 620
 621static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
 622{
 623        bool irqs_enabled = interrupts_enabled(regs);
 624        int err;
 625
 626        if (irqs_enabled)
 627                nmi_enter();
 628
 629        if (static_branch_likely(&supports_deactivate_key))
 630                gic_write_eoir(irqnr);
 631        /*
 632         * Leave the PSR.I bit set to prevent other NMIs to be
 633         * received while handling this one.
 634         * PSR.I will be restored when we ERET to the
 635         * interrupted context.
 636         */
 637        err = handle_domain_nmi(gic_data.domain, irqnr, regs);
 638        if (err)
 639                gic_deactivate_unhandled(irqnr);
 640
 641        if (irqs_enabled)
 642                nmi_exit();
 643}
 644
 645static u32 do_read_iar(struct pt_regs *regs)
 646{
 647        u32 iar;
 648
 649        if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
 650                u64 pmr;
 651
 652                /*
 653                 * We were in a context with IRQs disabled. However, the
 654                 * entry code has set PMR to a value that allows any
 655                 * interrupt to be acknowledged, and not just NMIs. This can
 656                 * lead to surprising effects if the NMI has been retired in
 657                 * the meantime, and that there is an IRQ pending. The IRQ
 658                 * would then be taken in NMI context, something that nobody
 659                 * wants to debug twice.
 660                 *
 661                 * Until we sort this, drop PMR again to a level that will
 662                 * actually only allow NMIs before reading IAR, and then
 663                 * restore it to what it was.
 664                 */
 665                pmr = gic_read_pmr();
 666                gic_pmr_mask_irqs();
 667                isb();
 668
 669                iar = gic_read_iar();
 670
 671                gic_write_pmr(pmr);
 672        } else {
 673                iar = gic_read_iar();
 674        }
 675
 676        return iar;
 677}
 678
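    /*
     * Main interrupt entry point: acknowledge via IAR, discard the special
     * 1020-1023 INTIDs, divert pseudo-NMIs, optionally re-enable IRQs behind
     * PMR masking, then hand the INTID over to the IRQ domain.
     */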
 679static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 680{
 681        u32 irqnr;
 682
 683        irqnr = do_read_iar(regs);
 684
 685        /* Check for special IDs first */
 686        if (irqnr >= 1020 && irqnr <= 1023)
 687                return;
 688
 689        if (gic_supports_nmi() &&
 690            unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
 691                gic_handle_nmi(irqnr, regs);
 692                return;
 693        }
 694
 695        if (gic_prio_masking_enabled()) {
 696                gic_pmr_mask_irqs();
 697                gic_arch_enable_irqs();
 698        }
 699
 700        if (static_branch_likely(&supports_deactivate_key))
 701                gic_write_eoir(irqnr);
 702        else
 703                isb();
 704
 705        if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
 706                WARN_ONCE(true, "Unexpected interrupt received!\n");
 707                gic_deactivate_unhandled(irqnr);
 708        }
 709}
 710
 711static u32 gic_get_pribits(void)
 712{
 713        u32 pribits;
 714
 715        pribits = gic_read_ctlr();
 716        pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
 717        pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
 718        pribits++;
 719
 720        return pribits;
 721}
 722
 723static bool gic_has_group0(void)
 724{
 725        u32 val;
 726        u32 old_pmr;
 727
 728        old_pmr = gic_read_pmr();
 729
 730        /*
 731         * Let's find out if Group0 is under control of EL3 or not by
 732         * setting the highest possible, non-zero priority in PMR.
 733         *
 734         * If SCR_EL3.FIQ is set, the priority gets shifted down in
 735         * order for the CPU interface to set bit 7, and keep the
 736         * actual priority in the non-secure range. In the process, it
 737         * loses the least significant bit and the actual priority
 738         * becomes 0x80. Reading it back returns 0, indicating that
 739         * we don't have access to Group0.
 740         */
 741        gic_write_pmr(BIT(8 - gic_get_pribits()));
 742        val = gic_read_pmr();
 743
 744        gic_write_pmr(old_pmr);
 745
 746        return val != 0;
 747}
 748
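    /*
     * Boot-time distributor setup: group and default configuration for SPIs
     * and extended SPIs, enabling affinity routing and Group1, and routing
     * every SPI to the boot CPU.
     */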
 749static void __init gic_dist_init(void)
 750{
 751        unsigned int i;
 752        u64 affinity;
 753        void __iomem *base = gic_data.dist_base;
 754        u32 val;
 755
 756        /* Disable the distributor */
 757        writel_relaxed(0, base + GICD_CTLR);
 758        gic_dist_wait_for_rwp();
 759
 760        /*
 761         * Configure SPIs as non-secure Group-1. This will only matter
 762         * if the GIC only has a single security state. This will not
 763         * do the right thing if the kernel is running in secure mode,
 764         * but that's not the intended use case anyway.
 765         */
 766        for (i = 32; i < GIC_LINE_NR; i += 32)
 767                writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
 768
 769        /* Extended SPI range, not handled by the GICv2/GICv3 common code */
 770        for (i = 0; i < GIC_ESPI_NR; i += 32) {
 771                writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
 772                writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
 773        }
 774
 775        for (i = 0; i < GIC_ESPI_NR; i += 32)
 776                writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
 777
 778        for (i = 0; i < GIC_ESPI_NR; i += 16)
 779                writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
 780
 781        for (i = 0; i < GIC_ESPI_NR; i += 4)
 782                writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
 783
 784        /* Now do the common stuff, and wait for the distributor to drain */
 785        gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
 786
 787        val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
 788        if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
 789                pr_info("Enabling SGIs without active state\n");
 790                val |= GICD_CTLR_nASSGIreq;
 791        }
 792
 793        /* Enable distributor with ARE, Group1 */
 794        writel_relaxed(val, base + GICD_CTLR);
 795
 796        /*
 797         * Set all global interrupts to the boot CPU only. ARE must be
 798         * enabled.
 799         */
 800        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
 801        for (i = 32; i < GIC_LINE_NR; i++)
 802                gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
 803
 804        for (i = 0; i < GIC_ESPI_NR; i++)
 805                gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
 806}
 807
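    /*
     * Walk every redistributor frame in every region, calling fn() on each
     * one until it returns 0. Frames are either redist_stride apart or
     * RD_base + SGI_base (plus two more pages when GICR_TYPER.VLPIS is set).
     */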
 808static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
 809{
 810        int ret = -ENODEV;
 811        int i;
 812
 813        for (i = 0; i < gic_data.nr_redist_regions; i++) {
 814                void __iomem *ptr = gic_data.redist_regions[i].redist_base;
 815                u64 typer;
 816                u32 reg;
 817
 818                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
 819                if (reg != GIC_PIDR2_ARCH_GICv3 &&
 820                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
 821                        pr_warn("No redistributor present @%p\n", ptr);
 822                        break;
 823                }
 824
 825                do {
 826                        typer = gic_read_typer(ptr + GICR_TYPER);
 827                        ret = fn(gic_data.redist_regions + i, ptr);
 828                        if (!ret)
 829                                return 0;
 830
 831                        if (gic_data.redist_regions[i].single_redist)
 832                                break;
 833
 834                        if (gic_data.redist_stride) {
 835                                ptr += gic_data.redist_stride;
 836                        } else {
 837                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
 838                                if (typer & GICR_TYPER_VLPIS)
 839                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
 840                        }
 841                } while (!(typer & GICR_TYPER_LAST));
 842        }
 843
 844        return ret ? -ENODEV : 0;
 845}
 846
 847static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 848{
 849        unsigned long mpidr = cpu_logical_map(smp_processor_id());
 850        u64 typer;
 851        u32 aff;
 852
 853        /*
 854         * Convert affinity to a 32bit value that can be matched to
 855         * GICR_TYPER bits [63:32].
 856         */
 857        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
 858               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
 859               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
 860               MPIDR_AFFINITY_LEVEL(mpidr, 0));
 861
 862        typer = gic_read_typer(ptr + GICR_TYPER);
 863        if ((typer >> 32) == aff) {
 864                u64 offset = ptr - region->redist_base;
 865                raw_spin_lock_init(&gic_data_rdist()->rd_lock);
 866                gic_data_rdist_rd_base() = ptr;
 867                gic_data_rdist()->phys_base = region->phys_base + offset;
 868
 869                pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
 870                        smp_processor_id(), mpidr,
 871                        (int)(region - gic_data.redist_regions),
 872                        &gic_data_rdist()->phys_base);
 873                return 0;
 874        }
 875
 876        /* Try next one */
 877        return 1;
 878}
 879
 880static int gic_populate_rdist(void)
 881{
 882        if (gic_iterate_rdists(__gic_populate_rdist) == 0)
 883                return 0;
 884
 885        /* We couldn't even deal with ourselves... */
 886        WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
 887             smp_processor_id(),
 888             (unsigned long)cpu_logical_map(smp_processor_id()));
 889        return -ENODEV;
 890}
 891
 892static int __gic_update_rdist_properties(struct redist_region *region,
 893                                         void __iomem *ptr)
 894{
 895        u64 typer = gic_read_typer(ptr + GICR_TYPER);
 896
 897        gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
 898
 899        /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
 900        gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
 901        gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
 902                                           gic_data.rdists.has_rvpeid);
 903        gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
 904
 905        /* Detect non-sensical configurations */
 906        if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
 907                gic_data.rdists.has_direct_lpi = false;
 908                gic_data.rdists.has_vlpis = false;
 909                gic_data.rdists.has_rvpeid = false;
 910        }
 911
 912        gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
 913
 914        return 1;
 915}
 916
 917static void gic_update_rdist_properties(void)
 918{
 919        gic_data.ppi_nr = UINT_MAX;
 920        gic_iterate_rdists(__gic_update_rdist_properties);
 921        if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
 922                gic_data.ppi_nr = 0;
 923        pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
 924        if (gic_data.rdists.has_vlpis)
 925                pr_info("GICv4 features: %s%s%s\n",
 926                        gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
 927                        gic_data.rdists.has_rvpeid ? "RVPEID " : "",
 928                        gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
 929}
 930
 931/* Check whether it's a single security state view */
 932static inline bool gic_dist_security_disabled(void)
 933{
 934        return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
 935}
 936
 937static void gic_cpu_sys_reg_init(void)
 938{
 939        int i, cpu = smp_processor_id();
 940        u64 mpidr = cpu_logical_map(cpu);
 941        u64 need_rss = MPIDR_RS(mpidr);
 942        bool group0;
 943        u32 pribits;
 944
 945        /*
 946         * Need to check that the SRE bit has actually been set. If
 947         * not, it means that SRE is disabled at EL2. We're going to
 948         * die painfully, and there is nothing we can do about it.
 949         *
 950         * Kindly inform the luser.
 951         */
 952        if (!gic_enable_sre())
 953                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
 954
 955        pribits = gic_get_pribits();
 956
 957        group0 = gic_has_group0();
 958
 959        /* Set priority mask register */
 960        if (!gic_prio_masking_enabled()) {
 961                write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
 962        } else if (gic_supports_nmi()) {
 963                /*
 964                 * Mismatched configuration with the boot CPU: the system is likely
 965                 * to die, as interrupt masking will not work properly on all
 966                 * CPUs.
 967                 *
 968                 * The boot CPU calls this function before enabling NMI support,
 969                 * and as a result we'll never see this warning in the boot path
 970                 * for that CPU.
 971                 */
 972                if (static_branch_unlikely(&gic_nonsecure_priorities))
 973                        WARN_ON(!group0 || gic_dist_security_disabled());
 974                else
 975                        WARN_ON(group0 && !gic_dist_security_disabled());
 976        }
 977
 978        /*
 979         * Some firmwares hand over to the kernel with the BPR changed from
 980         * its reset value (and with a value large enough to prevent
 981         * any pre-emptive interrupts from working at all). Writing a zero
 982         * to BPR restores its reset value.
 983         */
 984        gic_write_bpr1(0);
 985
 986        if (static_branch_likely(&supports_deactivate_key)) {
 987                /* EOI drops priority only (mode 1) */
 988                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
 989        } else {
 990                /* EOI deactivates interrupt too (mode 0) */
 991                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
 992        }
 993
 994        /* Always whack Group0 before Group1 */
 995        if (group0) {
 996                switch(pribits) {
 997                case 8:
 998                case 7:
 999                        write_gicreg(0, ICC_AP0R3_EL1);
1000                        write_gicreg(0, ICC_AP0R2_EL1);
1001                        fallthrough;
1002                case 6:
1003                        write_gicreg(0, ICC_AP0R1_EL1);
1004                        fallthrough;
1005                case 5:
1006                case 4:
1007                        write_gicreg(0, ICC_AP0R0_EL1);
1008                }
1009
1010                isb();
1011        }
1012
1013        switch(pribits) {
1014        case 8:
1015        case 7:
1016                write_gicreg(0, ICC_AP1R3_EL1);
1017                write_gicreg(0, ICC_AP1R2_EL1);
1018                fallthrough;
1019        case 6:
1020                write_gicreg(0, ICC_AP1R1_EL1);
1021                fallthrough;
1022        case 5:
1023        case 4:
1024                write_gicreg(0, ICC_AP1R0_EL1);
1025        }
1026
1027        isb();
1028
1029        /* ... and let's hit the road... */
1030        gic_write_grpen1(1);
1031
1032        /* Keep the RSS capability status in per_cpu variable */
1033        per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
1034
1035        /* Check that all CPUs are capable of sending SGIs to other CPUs */
1036        for_each_online_cpu(i) {
1037                bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
1038
1039                need_rss |= MPIDR_RS(cpu_logical_map(i));
1040                if (need_rss && (!have_rss))
1041                        pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
1042                                cpu, (unsigned long)mpidr,
1043                                i, (unsigned long)cpu_logical_map(i));
1044        }
1045
1046        /*
1047         * The GIC spec says that when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
1048         * writing the ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
1049         * UNPREDICTABLE choice of:
1050         *   - The write is ignored.
1051         *   - The RS field is treated as 0.
1052         */
1053        if (need_rss && (!gic_data.has_rss))
1054                pr_crit_once("RSS is required but GICD doesn't support it\n");
1055}
1056
1057static bool gicv3_nolpi;
1058
1059static int __init gicv3_nolpi_cfg(char *buf)
1060{
1061        return strtobool(buf, &gicv3_nolpi);
1062}
1063early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
1064
1065static int gic_dist_supports_lpis(void)
1066{
1067        return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
1068                !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
1069                !gicv3_nolpi);
1070}
1071
1072static void gic_cpu_init(void)
1073{
1074        void __iomem *rbase;
1075        int i;
1076
1077        /* Register ourselves with the rest of the world */
1078        if (gic_populate_rdist())
1079                return;
1080
1081        gic_enable_redist(true);
1082
1083        WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
1084             !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
1085             "Distributor has extended ranges, but CPU%d doesn't\n",
1086             smp_processor_id());
1087
1088        rbase = gic_data_rdist_sgi_base();
1089
1090        /* Configure SGIs/PPIs as non-secure Group-1 */
1091        for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
1092                writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
1093
1094        gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
1095
1096        /* initialise system registers */
1097        gic_cpu_sys_reg_init();
1098}
1099
1100#ifdef CONFIG_SMP
1101
1102#define MPIDR_TO_SGI_RS(mpidr)  (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
1103#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)  ((mpidr) & ~0xFUL)
1104
1105static int gic_starting_cpu(unsigned int cpu)
1106{
1107        gic_cpu_init();
1108
1109        if (gic_dist_supports_lpis())
1110                its_cpu_init();
1111
1112        return 0;
1113}
1114
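    /*
     * Build the 16-bit SGI target list for CPUs in @mask that share
     * @cluster_id, updating *base_cpu to the last CPU covered.
     */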
1115static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
1116                                   unsigned long cluster_id)
1117{
1118        int next_cpu, cpu = *base_cpu;
1119        unsigned long mpidr = cpu_logical_map(cpu);
1120        u16 tlist = 0;
1121
1122        while (cpu < nr_cpu_ids) {
1123                tlist |= 1 << (mpidr & 0xf);
1124
1125                next_cpu = cpumask_next(cpu, mask);
1126                if (next_cpu >= nr_cpu_ids)
1127                        goto out;
1128                cpu = next_cpu;
1129
1130                mpidr = cpu_logical_map(cpu);
1131
1132                if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
1133                        cpu--;
1134                        goto out;
1135                }
1136        }
1137out:
1138        *base_cpu = cpu;
1139        return tlist;
1140}
1141
1142#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
1143        (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
1144                << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
1145
1146static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
1147{
1148        u64 val;
1149
1150        val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)     |
1151               MPIDR_TO_SGI_AFFINITY(cluster_id, 2)     |
1152               irq << ICC_SGI1R_SGI_ID_SHIFT            |
1153               MPIDR_TO_SGI_AFFINITY(cluster_id, 1)     |
1154               MPIDR_TO_SGI_RS(cluster_id)              |
1155               tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
1156
1157        pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
1158        gic_write_sgi1r(val);
1159}
1160
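    /*
     * Send an IPI (SGI 0-15) to every CPU in @mask, batching the targets one
     * cluster (MPIDR with the low 4 bits of Aff0 masked off) at a time.
     */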
1161static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
1162{
1163        int cpu;
1164
1165        if (WARN_ON(d->hwirq >= 16))
1166                return;
1167
1168        /*
1169         * Ensure that stores to Normal memory are visible to the
1170         * other CPUs before issuing the IPI.
1171         */
1172        wmb();
1173
1174        for_each_cpu(cpu, mask) {
1175                u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
1176                u16 tlist;
1177
1178                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
1179                gic_send_sgi(cluster_id, tlist, d->hwirq);
1180        }
1181
1182        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
1183        isb();
1184}
1185
1186static void __init gic_smp_init(void)
1187{
1188        struct irq_fwspec sgi_fwspec = {
1189                .fwnode         = gic_data.fwnode,
1190                .param_count    = 1,
1191        };
1192        int base_sgi;
1193
1194        cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
1195                                  "irqchip/arm/gicv3:starting",
1196                                  gic_starting_cpu, NULL);
1197
1198        /* Register all 8 non-secure SGIs */
1199        base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
1200                                           NUMA_NO_NODE, &sgi_fwspec,
1201                                           false, NULL);
1202        if (WARN_ON(base_sgi <= 0))
1203                return;
1204
1205        set_smp_ipi_range(base_sgi, 8);
1206}
1207
1208static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1209                            bool force)
1210{
1211        unsigned int cpu;
1212        u32 offset, index;
1213        void __iomem *reg;
1214        int enabled;
1215        u64 val;
1216
1217        if (force)
1218                cpu = cpumask_first(mask_val);
1219        else
1220                cpu = cpumask_any_and(mask_val, cpu_online_mask);
1221
1222        if (cpu >= nr_cpu_ids)
1223                return -EINVAL;
1224
1225        if (gic_irq_in_rdist(d))
1226                return -EINVAL;
1227
1228        /* If interrupt was enabled, disable it first */
1229        enabled = gic_peek_irq(d, GICD_ISENABLER);
1230        if (enabled)
1231                gic_mask_irq(d);
1232
1233        offset = convert_offset_index(d, GICD_IROUTER, &index);
1234        reg = gic_dist_base(d) + offset + (index * 8);
1235        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
1236
1237        gic_write_irouter(val, reg);
1238
1239        /*
1240         * If the interrupt was enabled, enable it again. Otherwise,
1241         * just wait for the distributor to have digested our changes.
1242         */
1243        if (enabled)
1244                gic_unmask_irq(d);
1245        else
1246                gic_dist_wait_for_rwp();
1247
1248        irq_data_update_effective_affinity(d, cpumask_of(cpu));
1249
1250        return IRQ_SET_MASK_OK_DONE;
1251}
1252#else
1253#define gic_set_affinity        NULL
1254#define gic_ipi_send_mask       NULL
1255#define gic_smp_init()          do { } while(0)
1256#endif
1257
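    /* Retrigger by making the interrupt pending again from software. */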
1258static int gic_retrigger(struct irq_data *data)
1259{
1260        return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
1261}
1262
1263#ifdef CONFIG_CPU_PM
1264static int gic_cpu_pm_notifier(struct notifier_block *self,
1265                               unsigned long cmd, void *v)
1266{
1267        if (cmd == CPU_PM_EXIT) {
1268                if (gic_dist_security_disabled())
1269                        gic_enable_redist(true);
1270                gic_cpu_sys_reg_init();
1271        } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
1272                gic_write_grpen1(0);
1273                gic_enable_redist(false);
1274        }
1275        return NOTIFY_OK;
1276}
1277
1278static struct notifier_block gic_cpu_pm_notifier_block = {
1279        .notifier_call = gic_cpu_pm_notifier,
1280};
1281
1282static void gic_cpu_pm_init(void)
1283{
1284        cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
1285}
1286
1287#else
1288static inline void gic_cpu_pm_init(void) { }
1289#endif /* CONFIG_CPU_PM */
1290
1291static struct irq_chip gic_chip = {
1292        .name                   = "GICv3",
1293        .irq_mask               = gic_mask_irq,
1294        .irq_unmask             = gic_unmask_irq,
1295        .irq_eoi                = gic_eoi_irq,
1296        .irq_set_type           = gic_set_type,
1297        .irq_set_affinity       = gic_set_affinity,
1298        .irq_retrigger          = gic_retrigger,
1299        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
1300        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
1301        .irq_nmi_setup          = gic_irq_nmi_setup,
1302        .irq_nmi_teardown       = gic_irq_nmi_teardown,
1303        .ipi_send_mask          = gic_ipi_send_mask,
1304        .flags                  = IRQCHIP_SET_TYPE_MASKED |
1305                                  IRQCHIP_SKIP_SET_WAKE |
1306                                  IRQCHIP_MASK_ON_SUSPEND,
1307};
1308
1309static struct irq_chip gic_eoimode1_chip = {
1310        .name                   = "GICv3",
1311        .irq_mask               = gic_eoimode1_mask_irq,
1312        .irq_unmask             = gic_unmask_irq,
1313        .irq_eoi                = gic_eoimode1_eoi_irq,
1314        .irq_set_type           = gic_set_type,
1315        .irq_set_affinity       = gic_set_affinity,
1316        .irq_retrigger          = gic_retrigger,
1317        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
1318        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
1319        .irq_set_vcpu_affinity  = gic_irq_set_vcpu_affinity,
1320        .irq_nmi_setup          = gic_irq_nmi_setup,
1321        .irq_nmi_teardown       = gic_irq_nmi_teardown,
1322        .ipi_send_mask          = gic_ipi_send_mask,
1323        .flags                  = IRQCHIP_SET_TYPE_MASKED |
1324                                  IRQCHIP_SKIP_SET_WAKE |
1325                                  IRQCHIP_MASK_ON_SUSPEND,
1326};
1327
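    /*
     * irqdomain map callback: pick the flow handler and irq_chip for a hwirq
     * based on the INTID range it falls in.
     */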
1328static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
1329                              irq_hw_number_t hw)
1330{
1331        struct irq_chip *chip = &gic_chip;
1332        struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
1333
1334        if (static_branch_likely(&supports_deactivate_key))
1335                chip = &gic_eoimode1_chip;
1336
1337        switch (__get_intid_range(hw)) {
1338        case SGI_RANGE:
1339        case PPI_RANGE:
1340        case EPPI_RANGE:
1341                irq_set_percpu_devid(irq);
1342                irq_domain_set_info(d, irq, hw, chip, d->host_data,
1343                                    handle_percpu_devid_irq, NULL, NULL);
1344                break;
1345
1346        case SPI_RANGE:
1347        case ESPI_RANGE:
1348                irq_domain_set_info(d, irq, hw, chip, d->host_data,
1349                                    handle_fasteoi_irq, NULL, NULL);
1350                irq_set_probe(irq);
1351                irqd_set_single_target(irqd);
1352                break;
1353
1354        case LPI_RANGE:
1355                if (!gic_dist_supports_lpis())
1356                        return -EPERM;
1357                irq_domain_set_info(d, irq, hw, chip, d->host_data,
1358                                    handle_fasteoi_irq, NULL, NULL);
1359                break;
1360
1361        default:
1362                return -EPERM;
1363        }
1364
1365        /* Prevents SW retriggers which mess up the ACK/EOI ordering */
1366        irqd_set_handle_enforce_irqctx(irqd);
1367        return 0;
1368}
1369
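    /*
     * Decode a firmware interrupt specifier: single-cell specifiers name raw
     * SGIs, DT specifiers use the usual (type, number, flags) triplet, and
     * other fwnode specifiers carry a raw (hwirq, type) pair.
     */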
1370static int gic_irq_domain_translate(struct irq_domain *d,
1371                                    struct irq_fwspec *fwspec,
1372                                    unsigned long *hwirq,
1373                                    unsigned int *type)
1374{
1375        if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
1376                *hwirq = fwspec->param[0];
1377                *type = IRQ_TYPE_EDGE_RISING;
1378                return 0;
1379        }
1380
1381        if (is_of_node(fwspec->fwnode)) {
1382                if (fwspec->param_count < 3)
1383                        return -EINVAL;
1384
1385                switch (fwspec->param[0]) {
1386                case 0:                 /* SPI */
1387                        *hwirq = fwspec->param[1] + 32;
1388                        break;
1389                case 1:                 /* PPI */
1390                        *hwirq = fwspec->param[1] + 16;
1391                        break;
1392                case 2:                 /* ESPI */
1393                        *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
1394                        break;
1395                case 3:                 /* EPPI */
1396                        *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
1397                        break;
1398                case GIC_IRQ_TYPE_LPI:  /* LPI */
1399                        *hwirq = fwspec->param[1];
1400                        break;
1401                case GIC_IRQ_TYPE_PARTITION:
1402                        *hwirq = fwspec->param[1];
1403                        if (fwspec->param[1] >= 16)
1404                                *hwirq += EPPI_BASE_INTID - 16;
1405                        else
1406                                *hwirq += 16;
1407                        break;
1408                default:
1409                        return -EINVAL;
1410                }
1411
1412                *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1413
1414                /*
1415                 * Make it clear that broken DTs are... broken.
1416                 * Partitioned PPIs are an unfortunate exception.
1417                 */
1418                WARN_ON(*type == IRQ_TYPE_NONE &&
1419                        fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
1420                return 0;
1421        }
1422
1423        if (is_fwnode_irqchip(fwspec->fwnode)) {
1424                if (fwspec->param_count != 2)
1425                        return -EINVAL;
1426
1427                *hwirq = fwspec->param[0];
1428                *type = fwspec->param[1];
1429
1430                WARN_ON(*type == IRQ_TYPE_NONE);
1431                return 0;
1432        }
1433
1434        return -EINVAL;
1435}
1436
1437static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1438                                unsigned int nr_irqs, void *arg)
1439{
1440        int i, ret;
1441        irq_hw_number_t hwirq;
1442        unsigned int type = IRQ_TYPE_NONE;
1443        struct irq_fwspec *fwspec = arg;
1444
1445        ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
1446        if (ret)
1447                return ret;
1448
1449        for (i = 0; i < nr_irqs; i++) {
1450                ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
1451                if (ret)
1452                        return ret;
1453        }
1454
1455        return 0;
1456}
1457
1458static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1459                                unsigned int nr_irqs)
1460{
1461        int i;
1462
1463        for (i = 0; i < nr_irqs; i++) {
1464                struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1465                irq_set_handler(virq + i, NULL);
1466                irq_domain_reset_irq_data(d);
1467        }
1468}
1469
1470static int gic_irq_domain_select(struct irq_domain *d,
1471                                 struct irq_fwspec *fwspec,
1472                                 enum irq_domain_bus_token bus_token)
1473{
1474        /* Not for us */
1475        if (fwspec->fwnode != d->fwnode)
1476                return 0;
1477
1478        /* If this is not DT, then we have a single domain */
1479        if (!is_of_node(fwspec->fwnode))
1480                return 1;
1481
1482        /*
1483         * If this is a PPI and we have a 4th (non-null) parameter,
1484         * then we need to match the partition domain.
1485         */
1486        if (fwspec->param_count >= 4 &&
1487            fwspec->param[0] == 1 && fwspec->param[3] != 0 &&
1488            gic_data.ppi_descs)
1489                return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
1490
1491        return d == gic_data.domain;
1492}
1493
1494static const struct irq_domain_ops gic_irq_domain_ops = {
1495        .translate = gic_irq_domain_translate,
1496        .alloc = gic_irq_domain_alloc,
1497        .free = gic_irq_domain_free,
1498        .select = gic_irq_domain_select,
1499};
1500
1501static int partition_domain_translate(struct irq_domain *d,
1502                                      struct irq_fwspec *fwspec,
1503                                      unsigned long *hwirq,
1504                                      unsigned int *type)
1505{
1506        struct device_node *np;
1507        int ret;
1508
1509        if (!gic_data.ppi_descs)
1510                return -ENOMEM;
1511
1512        np = of_find_node_by_phandle(fwspec->param[3]);
1513        if (WARN_ON(!np))
1514                return -EINVAL;
1515
1516        ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
1517                                     of_node_to_fwnode(np));
1518        if (ret < 0)
1519                return ret;
1520
1521        *hwirq = ret;
1522        *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1523
1524        return 0;
1525}
1526
1527static const struct irq_domain_ops partition_domain_ops = {
1528        .translate = partition_domain_translate,
1529        .select = gic_irq_domain_select,
1530};
1531
1532static bool gic_enable_quirk_msm8996(void *data)
1533{
1534        struct gic_chip_data *d = data;
1535
1536        d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1537
1538        return true;
1539}
1540
1541static bool gic_enable_quirk_cavium_38539(void *data)
1542{
1543        struct gic_chip_data *d = data;
1544
1545        d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1546
1547        return true;
1548}
1549
1550static bool gic_enable_quirk_hip06_07(void *data)
1551{
1552        struct gic_chip_data *d = data;
1553
1554        /*
1555         * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1556         * not being an actual ARM implementation). The saving grace is
1557         * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1558         * HIP07 doesn't even have a proper IIDR, and still pretends to
1559         * have ESPI. In both cases, put them right.
1560         */
1561        if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1562                /* Zero both ESPI and the RES0 field next to it... */
1563                d->rdists.gicd_typer &= ~GENMASK(9, 8);
1564                return true;
1565        }
1566
1567        return false;
1568}
1569
1570static const struct gic_quirk gic_quirks[] = {
1571        {
1572                .desc   = "GICv3: Qualcomm MSM8996 broken firmware",
1573                .compatible = "qcom,msm8996-gic-v3",
1574                .init   = gic_enable_quirk_msm8996,
1575        },
1576        {
1577                .desc   = "GICv3: HIP06 erratum 161010803",
1578                .iidr   = 0x0204043b,
1579                .mask   = 0xffffffff,
1580                .init   = gic_enable_quirk_hip06_07,
1581        },
1582        {
1583                .desc   = "GICv3: HIP07 erratum 161010803",
1584                .iidr   = 0x00000000,
1585                .mask   = 0xffffffff,
1586                .init   = gic_enable_quirk_hip06_07,
1587        },
1588        {
1589                /*
1590                 * Reserved register accesses generate a Synchronous
1591                 * External Abort. This erratum applies to:
1592                 * - ThunderX: CN88xx
1593                 * - OCTEON TX: CN83xx, CN81xx
1594                 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
1595                 */
1596                .desc   = "GICv3: Cavium erratum 38539",
1597                .iidr   = 0xa000034c,
1598                .mask   = 0xe8f00fff,
1599                .init   = gic_enable_quirk_cavium_38539,
1600        },
1601        {
1602        }
1603};
1604
1605static void gic_enable_nmi_support(void)
1606{
1607        int i;
1608
1609        if (!gic_prio_masking_enabled())
1610                return;
1611
1612        ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
1613        if (!ppi_nmi_refs)
1614                return;
1615
1616        for (i = 0; i < gic_data.ppi_nr; i++)
1617                refcount_set(&ppi_nmi_refs[i], 0);
1618
1619        /*
1620         * Linux itself doesn't use 1:N distribution, so has no need to
1621         * set PMHE. The only reason to have it set is if EL3 requires it
1622         * (and we can't change it).
1623         */
1624        if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
1625                static_branch_enable(&gic_pmr_sync);
1626
1627        pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
1628                static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
1629
1630        /*
1631         * How priority values are used by the GIC depends on two things:
1632         * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
1633         * and whether Group 0 interrupts can be delivered to Linux in the non-secure
1634         * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect
1635         * the ICC_PMR_EL1 register and the priority that software assigns to
1636         * interrupts:
1637         *
1638         * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
1639         * -----------------------------------------------------------
1640         *      1       |      -      |  unchanged  |    unchanged
1641         * -----------------------------------------------------------
1642         *      0       |      1      |  non-secure |    non-secure
1643         * -----------------------------------------------------------
1644         *      0       |      0      |  unchanged  |    non-secure
1645         *
1646         * where non-secure means that the value is right-shifted by one and the
1647         * MSB set, to make it fit in the non-secure priority range.
1648         *
1649         * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
1650         * are both either modified or unchanged, we can use the same set of
1651         * priorities.
1652         *
1653         * In the last case, where only the interrupt priorities are modified to
1654         * be in the non-secure range, we use a different PMR value to mask IRQs
1655         * and the rest of the values that we use remain unchanged.
1656         */
1657        if (gic_has_group0() && !gic_dist_security_disabled())
1658                static_branch_enable(&gic_nonsecure_priorities);
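        /*
         * For a concrete feel of the "non-secure" transformation above
         * (assuming the usual GICD_INT_DEF_PRI of 0xa0): a Group 1 priority
         * of 0xa0 is presented to the CPU interface as (0xa0 >> 1) | 0x80 ==
         * 0xd0, while the NMI priority GICD_INT_NMI_PRI (0x20) becomes 0x90,
         * i.e. NMIs stay at a strictly higher priority (lower value) than
         * normal interrupts in either view.
         */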
1659
1660        static_branch_enable(&supports_pseudo_nmis);
1661
1662        if (static_branch_likely(&supports_deactivate_key))
1663                gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1664        else
1665                gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1666}
1667
1668static int __init gic_init_bases(void __iomem *dist_base,
1669                                 struct redist_region *rdist_regs,
1670                                 u32 nr_redist_regions,
1671                                 u64 redist_stride,
1672                                 struct fwnode_handle *handle)
1673{
1674        u32 typer;
1675        int err;
1676
1677        if (!is_hyp_mode_available())
1678                static_branch_disable(&supports_deactivate_key);
1679
1680        if (static_branch_likely(&supports_deactivate_key))
1681                pr_info("GIC: Using split EOI/Deactivate mode\n");
1682
1683        gic_data.fwnode = handle;
1684        gic_data.dist_base = dist_base;
1685        gic_data.redist_regions = rdist_regs;
1686        gic_data.nr_redist_regions = nr_redist_regions;
1687        gic_data.redist_stride = redist_stride;
1688
1689        /*
1690         * Find out how many interrupts are supported.
1691         */
1692        typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
1693        gic_data.rdists.gicd_typer = typer;
1694
1695        gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
1696                          gic_quirks, &gic_data);
1697
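        /*
         * SPIs occupy INTIDs 32-1019, so the 32 SGI/PPI IDs are subtracted
         * from GIC_LINE_NR when reporting the SPI count below.
         */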
1698        pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
1699        pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
1700
1701        /*
1702         * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
1703         * architecture spec (which says that reserved registers are RES0).
1704         */
1705        if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
1706                gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
1707
1708        gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
1709                                                 &gic_data);
1710        gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
1711        gic_data.rdists.has_rvpeid = true;
1712        gic_data.rdists.has_vlpis = true;
1713        gic_data.rdists.has_direct_lpi = true;
1714        gic_data.rdists.has_vpend_valid_dirty = true;
1715
1716        if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
1717                err = -ENOMEM;
1718                goto out_free;
1719        }
1720
1721        irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
1722
1723        gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
1724        pr_info("Distributor has %sRange Selector support\n",
1725                gic_data.has_rss ? "" : "no ");
1726
1727        if (typer & GICD_TYPER_MBIS) {
1728                err = mbi_init(handle, gic_data.domain);
1729                if (err)
1730                        pr_err("Failed to initialize MBIs\n");
1731        }
1732
1733        set_handle_irq(gic_handle_irq);
1734
1735        gic_update_rdist_properties();
1736
1737        gic_dist_init();
1738        gic_cpu_init();
1739        gic_smp_init();
1740        gic_cpu_pm_init();
1741
1742        if (gic_dist_supports_lpis()) {
1743                its_init(handle, &gic_data.rdists, gic_data.domain);
1744                its_cpu_init();
1745        } else {
1746                if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
1747                        gicv2m_init(handle, gic_data.domain);
1748        }
1749
1750        gic_enable_nmi_support();
1751
1752        return 0;
1753
1754out_free:
1755        if (gic_data.domain)
1756                irq_domain_remove(gic_data.domain);
1757        free_percpu(gic_data.rdists.rdist);
1758        return err;
1759}
1760
1761static int __init gic_validate_dist_version(void __iomem *dist_base)
1762{
1763        u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1764
1765        if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
1766                return -ENODEV;
1767
1768        return 0;
1769}
1770
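/*
 * A ppi-partitions node, as consumed below, looks something like this
 * (sketch based on the arm,gic-v3 binding; labels and CPU phandles are
 * purely illustrative):
 *
 *	ppi-partitions {
 *		part0: interrupt-partition-0 {
 *			affinity = <&cpu0 &cpu2>;
 *		};
 *		part1: interrupt-partition-1 {
 *			affinity = <&cpu1 &cpu3>;
 *		};
 *	};
 *
 * A device then refers to a partition via the fourth interrupt cell,
 * e.g. interrupts = <1 8 4 &part0>; (PPI 8, level-high, partition part0).
 */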
1771/* Create all possible partitions at boot time */
1772static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1773{
1774        struct device_node *parts_node, *child_part;
1775        int part_idx = 0, i;
1776        int nr_parts;
1777        struct partition_affinity *parts;
1778
1779        parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
1780        if (!parts_node)
1781                return;
1782
1783        gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
1784        if (!gic_data.ppi_descs)
1785                return;
1786
1787        nr_parts = of_get_child_count(parts_node);
1788
1789        if (!nr_parts)
1790                goto out_put_node;
1791
1792        parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
1793        if (WARN_ON(!parts))
1794                goto out_put_node;
1795
1796        for_each_child_of_node(parts_node, child_part) {
1797                struct partition_affinity *part;
1798                int n;
1799
1800                part = &parts[part_idx];
1801
1802                part->partition_id = of_node_to_fwnode(child_part);
1803
1804                pr_info("GIC: PPI partition %pOFn[%d] { ",
1805                        child_part, part_idx);
1806
1807                n = of_property_count_elems_of_size(child_part, "affinity",
1808                                                    sizeof(u32));
1809                WARN_ON(n <= 0);
1810
1811                for (i = 0; i < n; i++) {
1812                        int err, cpu;
1813                        u32 cpu_phandle;
1814                        struct device_node *cpu_node;
1815
1816                        err = of_property_read_u32_index(child_part, "affinity",
1817                                                         i, &cpu_phandle);
1818                        if (WARN_ON(err))
1819                                continue;
1820
1821                        cpu_node = of_find_node_by_phandle(cpu_phandle);
1822                        if (WARN_ON(!cpu_node))
1823                                continue;
1824
1825                        cpu = of_cpu_node_to_id(cpu_node);
1826                        if (WARN_ON(cpu < 0))
1827                                continue;
1828
1829                        pr_cont("%pOF[%d] ", cpu_node, cpu);
1830
1831                        cpumask_set_cpu(cpu, &part->mask);
1832                }
1833
1834                pr_cont("}\n");
1835                part_idx++;
1836        }
1837
1838        for (i = 0; i < gic_data.ppi_nr; i++) {
1839                unsigned int irq;
1840                struct partition_desc *desc;
1841                struct irq_fwspec ppi_fwspec = {
1842                        .fwnode         = gic_data.fwnode,
1843                        .param_count    = 3,
1844                        .param          = {
1845                                [0]     = GIC_IRQ_TYPE_PARTITION,
1846                                [1]     = i,
1847                                [2]     = IRQ_TYPE_NONE,
1848                        },
1849                };
1850
1851                irq = irq_create_fwspec_mapping(&ppi_fwspec);
1852                if (WARN_ON(!irq))
1853                        continue;
1854                desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
1855                                             irq, &partition_domain_ops);
1856                if (WARN_ON(!desc))
1857                        continue;
1858
1859                gic_data.ppi_descs[i] = desc;
1860        }
1861
1862out_put_node:
1863        of_node_put(parts_node);
1864}
1865
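/*
 * With the DT layout assumed here, the reg entries are ordered GICD, then
 * one entry per redistributor region, then GICC, GICH and finally GICV;
 * hence GICV's index is #redistributor-regions + 3, which is what the
 * gicv_idx arithmetic below computes.
 */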
1866static void __init gic_of_setup_kvm_info(struct device_node *node)
1867{
1868        int ret;
1869        struct resource r;
1870        u32 gicv_idx;
1871
1872        gic_v3_kvm_info.type = GIC_V3;
1873
1874        gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
1875        if (!gic_v3_kvm_info.maint_irq)
1876                return;
1877
1878        if (of_property_read_u32(node, "#redistributor-regions",
1879                                 &gicv_idx))
1880                gicv_idx = 1;
1881
1882        gicv_idx += 3;  /* Also skip GICD, GICC, GICH */
1883        ret = of_address_to_resource(node, gicv_idx, &r);
1884        if (!ret)
1885                gic_v3_kvm_info.vcpu = r;
1886
1887        gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
1888        gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
1889        vgic_set_kvm_info(&gic_v3_kvm_info);
1890}
1891
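/*
 * Sketch of the kind of node this init path expects (addresses, sizes and
 * the maintenance interrupt below are purely illustrative):
 *
 *	intc: interrupt-controller@2f000000 {
 *		compatible = "arm,gic-v3";
 *		#interrupt-cells = <3>;
 *		interrupt-controller;
 *		reg = <0x2f000000 0x10000>,	// GICD
 *		      <0x2f100000 0x200000>;	// GICR region(s)
 *		interrupts = <1 9 4>;
 *	};
 *
 * Optional properties parsed below: #redistributor-regions (defaults to 1)
 * and redistributor-stride (defaults to 0).
 */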
1892static int __init gic_of_init(struct device_node *node, struct device_node *parent)
1893{
1894        void __iomem *dist_base;
1895        struct redist_region *rdist_regs;
1896        u64 redist_stride;
1897        u32 nr_redist_regions;
1898        int err, i;
1899
1900        dist_base = of_iomap(node, 0);
1901        if (!dist_base) {
1902                pr_err("%pOF: unable to map gic dist registers\n", node);
1903                return -ENXIO;
1904        }
1905
1906        err = gic_validate_dist_version(dist_base);
1907        if (err) {
1908                pr_err("%pOF: no distributor detected, giving up\n", node);
1909                goto out_unmap_dist;
1910        }
1911
1912        if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
1913                nr_redist_regions = 1;
1914
1915        rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
1916                             GFP_KERNEL);
1917        if (!rdist_regs) {
1918                err = -ENOMEM;
1919                goto out_unmap_dist;
1920        }
1921
1922        for (i = 0; i < nr_redist_regions; i++) {
1923                struct resource res;
1924                int ret;
1925
1926                ret = of_address_to_resource(node, 1 + i, &res);
1927                rdist_regs[i].redist_base = of_iomap(node, 1 + i);
1928                if (ret || !rdist_regs[i].redist_base) {
1929                        pr_err("%pOF: couldn't map region %d\n", node, i);
1930                        err = -ENODEV;
1931                        goto out_unmap_rdist;
1932                }
1933                rdist_regs[i].phys_base = res.start;
1934        }
1935
1936        if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
1937                redist_stride = 0;
1938
1939        gic_enable_of_quirks(node, gic_quirks, &gic_data);
1940
1941        err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
1942                             redist_stride, &node->fwnode);
1943        if (err)
1944                goto out_unmap_rdist;
1945
1946        gic_populate_ppi_partitions(node);
1947
1948        if (static_branch_likely(&supports_deactivate_key))
1949                gic_of_setup_kvm_info(node);
1950        return 0;
1951
1952out_unmap_rdist:
1953        for (i = 0; i < nr_redist_regions; i++)
1954                if (rdist_regs[i].redist_base)
1955                        iounmap(rdist_regs[i].redist_base);
1956        kfree(rdist_regs);
1957out_unmap_dist:
1958        iounmap(dist_base);
1959        return err;
1960}
1961
1962IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
1963
1964#ifdef CONFIG_ACPI
1965static struct
1966{
1967        void __iomem *dist_base;
1968        struct redist_region *redist_regs;
1969        u32 nr_redist_regions;
1970        bool single_redist;
1971        int enabled_rdists;
1972        u32 maint_irq;
1973        int maint_irq_mode;
1974        phys_addr_t vcpu_base;
1975} acpi_data __initdata;
1976
1977static void __init
1978gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
1979{
1980        static int count = 0;
1981
1982        acpi_data.redist_regs[count].phys_base = phys_base;
1983        acpi_data.redist_regs[count].redist_base = redist_base;
1984        acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
1985        count++;
1986}
1987
1988static int __init
1989gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
1990                           const unsigned long end)
1991{
1992        struct acpi_madt_generic_redistributor *redist =
1993                        (struct acpi_madt_generic_redistributor *)header;
1994        void __iomem *redist_base;
1995
1996        redist_base = ioremap(redist->base_address, redist->length);
1997        if (!redist_base) {
1998                pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
1999                return -ENOMEM;
2000        }
2001
2002        gic_acpi_register_redist(redist->base_address, redist_base);
2003        return 0;
2004}
2005
2006static int __init
2007gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
2008                         const unsigned long end)
2009{
2010        struct acpi_madt_generic_interrupt *gicc =
2011                                (struct acpi_madt_generic_interrupt *)header;
2012        u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
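        /*
         * A GICv4 redistributor exposes four 64K frames (RD, SGI, VLPI and a
         * reserved frame) against GICv3's two (RD, SGI), hence the size below.
         */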
2013        u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
2014        void __iomem *redist_base;
2015
2016        /* A GICC entry which is not marked ACPI_MADT_ENABLED is unusable, so skip it */
2017        if (!(gicc->flags & ACPI_MADT_ENABLED))
2018                return 0;
2019
2020        redist_base = ioremap(gicc->gicr_base_address, size);
2021        if (!redist_base)
2022                return -ENOMEM;
2023
2024        gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
2025        return 0;
2026}
2027
2028static int __init gic_acpi_collect_gicr_base(void)
2029{
2030        acpi_tbl_entry_handler redist_parser;
2031        enum acpi_madt_type type;
2032
2033        if (acpi_data.single_redist) {
2034                type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
2035                redist_parser = gic_acpi_parse_madt_gicc;
2036        } else {
2037                type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
2038                redist_parser = gic_acpi_parse_madt_redist;
2039        }
2040
2041        /* Collect redistributor base addresses in GICR entries */
2042        if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
2043                return 0;
2044
2045        pr_info("No valid GICR entries exist\n");
2046        return -ENODEV;
2047}
2048
2049static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
2050                                  const unsigned long end)
2051{
2052        /* Subtable presence means that a redistributor exists; that's all we need */
2053        return 0;
2054}
2055
2056static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
2057                                      const unsigned long end)
2058{
2059        struct acpi_madt_generic_interrupt *gicc =
2060                                (struct acpi_madt_generic_interrupt *)header;
2061
2062        /*
2063         * If the GICC entry is enabled and has a valid GICR base address, then
2064         * the GICR base is presented via the GICC subtable
2065         */
2066        if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
2067                acpi_data.enabled_rdists++;
2068                return 0;
2069        }
2070
2071        /*
2072         * Firmware may validly pass a disabled GICC entry; don't treat it as an
2073         * error, just skip the entry instead of failing the probe.
2074         */
2075        if (!(gicc->flags & ACPI_MADT_ENABLED))
2076                return 0;
2077
2078        return -ENODEV;
2079}
2080
2081static int __init gic_acpi_count_gicr_regions(void)
2082{
2083        int count;
2084
2085        /*
2086         * Count how many redistributor regions we have. Mixing redistributor
2087         * descriptions is not allowed: GICR and GICC subtables have to be
2088         * mutually exclusive.
2089         */
2090        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
2091                                      gic_acpi_match_gicr, 0);
2092        if (count > 0) {
2093                acpi_data.single_redist = false;
2094                return count;
2095        }
2096
2097        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2098                                      gic_acpi_match_gicc, 0);
2099        if (count > 0) {
2100                acpi_data.single_redist = true;
2101                count = acpi_data.enabled_rdists;
2102        }
2103
2104        return count;
2105}
2106
2107static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
2108                                           struct acpi_probe_entry *ape)
2109{
2110        struct acpi_madt_generic_distributor *dist;
2111        int count;
2112
2113        dist = (struct acpi_madt_generic_distributor *)header;
2114        if (dist->version != ape->driver_data)
2115                return false;
2116
2117        /* We need to do that exercise anyway; the sooner, the better */
2118        count = gic_acpi_count_gicr_regions();
2119        if (count <= 0)
2120                return false;
2121
2122        acpi_data.nr_redist_regions = count;
2123        return true;
2124}
2125
2126static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
2127                                                const unsigned long end)
2128{
2129        struct acpi_madt_generic_interrupt *gicc =
2130                (struct acpi_madt_generic_interrupt *)header;
2131        int maint_irq_mode;
2132        static int first_madt = true;
2133
2134        /* Skip unusable CPUs */
2135        if (!(gicc->flags & ACPI_MADT_ENABLED))
2136                return 0;
2137
2138        maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
2139                ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
2140
2141        if (first_madt) {
2142                first_madt = false;
2143
2144                acpi_data.maint_irq = gicc->vgic_interrupt;
2145                acpi_data.maint_irq_mode = maint_irq_mode;
2146                acpi_data.vcpu_base = gicc->gicv_base_address;
2147
2148                return 0;
2149        }
2150
2151        /*
2152         * The maintenance interrupt and GICV should be the same for every CPU
2153         */
2154        if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
2155            (acpi_data.maint_irq_mode != maint_irq_mode) ||
2156            (acpi_data.vcpu_base != gicc->gicv_base_address))
2157                return -EINVAL;
2158
2159        return 0;
2160}
2161
2162static bool __init gic_acpi_collect_virt_info(void)
2163{
2164        int count;
2165
2166        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2167                                      gic_acpi_parse_virt_madt_gicc, 0);
2168
2169        return (count > 0);
2170}
2171
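/*
 * GICH/GICV are the (GICv2-compatible) virtual interface control and virtual
 * CPU interface frames used by KVM guests, which is why GICv2-sized constants
 * show up below in a GICv3 driver.
 */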
2172#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
2173#define ACPI_GICV2_VCTRL_MEM_SIZE       (SZ_4K)
2174#define ACPI_GICV2_VCPU_MEM_SIZE        (SZ_8K)
2175
2176static void __init gic_acpi_setup_kvm_info(void)
2177{
2178        int irq;
2179
2180        if (!gic_acpi_collect_virt_info()) {
2181                pr_warn("Unable to get hardware information used for virtualization\n");
2182                return;
2183        }
2184
2185        gic_v3_kvm_info.type = GIC_V3;
2186
2187        irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
2188                                acpi_data.maint_irq_mode,
2189                                ACPI_ACTIVE_HIGH);
2190        if (irq <= 0)
2191                return;
2192
2193        gic_v3_kvm_info.maint_irq = irq;
2194
2195        if (acpi_data.vcpu_base) {
2196                struct resource *vcpu = &gic_v3_kvm_info.vcpu;
2197
2198                vcpu->flags = IORESOURCE_MEM;
2199                vcpu->start = acpi_data.vcpu_base;
2200                vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
2201        }
2202
2203        gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2204        gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2205        vgic_set_kvm_info(&gic_v3_kvm_info);
2206}
2207
2208static int __init
2209gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
2210{
2211        struct acpi_madt_generic_distributor *dist;
2212        struct fwnode_handle *domain_handle;
2213        size_t size;
2214        int i, err;
2215
2216        /* Get distributor base address */
2217        dist = (struct acpi_madt_generic_distributor *)header;
2218        acpi_data.dist_base = ioremap(dist->base_address,
2219                                      ACPI_GICV3_DIST_MEM_SIZE);
2220        if (!acpi_data.dist_base) {
2221                pr_err("Unable to map GICD registers\n");
2222                return -ENOMEM;
2223        }
2224
2225        err = gic_validate_dist_version(acpi_data.dist_base);
2226        if (err) {
2227                pr_err("No distributor detected at @%p, giving up\n",
2228                       acpi_data.dist_base);
2229                goto out_dist_unmap;
2230        }
2231
2232        size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
2233        acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
2234        if (!acpi_data.redist_regs) {
2235                err = -ENOMEM;
2236                goto out_dist_unmap;
2237        }
2238
2239        err = gic_acpi_collect_gicr_base();
2240        if (err)
2241                goto out_redist_unmap;
2242
2243        domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
2244        if (!domain_handle) {
2245                err = -ENOMEM;
2246                goto out_redist_unmap;
2247        }
2248
2249        err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
2250                             acpi_data.nr_redist_regions, 0, domain_handle);
2251        if (err)
2252                goto out_fwhandle_free;
2253
2254        acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
2255
2256        if (static_branch_likely(&supports_deactivate_key))
2257                gic_acpi_setup_kvm_info();
2258
2259        return 0;
2260
2261out_fwhandle_free:
2262        irq_domain_free_fwnode(domain_handle);
2263out_redist_unmap:
2264        for (i = 0; i < acpi_data.nr_redist_regions; i++)
2265                if (acpi_data.redist_regs[i].redist_base)
2266                        iounmap(acpi_data.redist_regs[i].redist_base);
2267        kfree(acpi_data.redist_regs);
2268out_dist_unmap:
2269        iounmap(acpi_data.dist_base);
2270        return err;
2271}
2272IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2273                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
2274                     gic_acpi_init);
2275IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2276                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
2277                     gic_acpi_init);
2278IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2279                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
2280                     gic_acpi_init);
2281#endif
2282