/* linux/drivers/pci/controller/pcie-mediatek-gen3.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * MediaTek PCIe host controller driver.
   4 *
   5 * Copyright (c) 2020 MediaTek Inc.
   6 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
   7 */
   8
   9#include <linux/clk.h>
  10#include <linux/delay.h>
  11#include <linux/iopoll.h>
  12#include <linux/irq.h>
  13#include <linux/irqchip/chained_irq.h>
  14#include <linux/irqdomain.h>
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/msi.h>
  18#include <linux/pci.h>
  19#include <linux/phy/phy.h>
  20#include <linux/platform_device.h>
  21#include <linux/pm_domain.h>
  22#include <linux/pm_runtime.h>
  23#include <linux/reset.h>
  24
  25#include "../pci.h"
  26
  27#define PCIE_SETTING_REG                0x80
  28#define PCIE_PCI_IDS_1                  0x9c
  29#define PCI_CLASS(class)                (class << 8)
  30#define PCIE_RC_MODE                    BIT(0)
  31
  32#define PCIE_CFGNUM_REG                 0x140
  33#define PCIE_CFG_DEVFN(devfn)           ((devfn) & GENMASK(7, 0))
  34#define PCIE_CFG_BUS(bus)               (((bus) << 8) & GENMASK(15, 8))
  35#define PCIE_CFG_BYTE_EN(bytes)         (((bytes) << 16) & GENMASK(19, 16))
  36#define PCIE_CFG_FORCE_BYTE_EN          BIT(20)
  37#define PCIE_CFG_OFFSET_ADDR            0x1000
  38#define PCIE_CFG_HEADER(bus, devfn) \
  39        (PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))
  40
  41#define PCIE_RST_CTRL_REG               0x148
  42#define PCIE_MAC_RSTB                   BIT(0)
  43#define PCIE_PHY_RSTB                   BIT(1)
  44#define PCIE_BRG_RSTB                   BIT(2)
  45#define PCIE_PE_RSTB                    BIT(3)
  46
  47#define PCIE_LTSSM_STATUS_REG           0x150
  48#define PCIE_LTSSM_STATE_MASK           GENMASK(28, 24)
  49#define PCIE_LTSSM_STATE(val)           ((val & PCIE_LTSSM_STATE_MASK) >> 24)
  50#define PCIE_LTSSM_STATE_L2_IDLE        0x14
  51
  52#define PCIE_LINK_STATUS_REG            0x154
  53#define PCIE_PORT_LINKUP                BIT(8)
  54
  55#define PCIE_MSI_SET_NUM                8
  56#define PCIE_MSI_IRQS_PER_SET           32
  57#define PCIE_MSI_IRQS_NUM \
  58        (PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)
  59
  60#define PCIE_INT_ENABLE_REG             0x180
  61#define PCIE_MSI_ENABLE                 GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
  62#define PCIE_MSI_SHIFT                  8
  63#define PCIE_INTX_SHIFT                 24
  64#define PCIE_INTX_ENABLE \
  65        GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)
  66
  67#define PCIE_INT_STATUS_REG             0x184
  68#define PCIE_MSI_SET_ENABLE_REG         0x190
  69#define PCIE_MSI_SET_ENABLE             GENMASK(PCIE_MSI_SET_NUM - 1, 0)
  70
  71#define PCIE_MSI_SET_BASE_REG           0xc00
  72#define PCIE_MSI_SET_OFFSET             0x10
  73#define PCIE_MSI_SET_STATUS_OFFSET      0x04
  74#define PCIE_MSI_SET_ENABLE_OFFSET      0x08
  75
  76#define PCIE_MSI_SET_ADDR_HI_BASE       0xc80
  77#define PCIE_MSI_SET_ADDR_HI_OFFSET     0x04
  78
  79#define PCIE_ICMD_PM_REG                0x198
  80#define PCIE_TURN_OFF_LINK              BIT(4)
  81
  82#define PCIE_TRANS_TABLE_BASE_REG       0x800
  83#define PCIE_ATR_SRC_ADDR_MSB_OFFSET    0x4
  84#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET   0x8
  85#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET   0xc
  86#define PCIE_ATR_TRSL_PARAM_OFFSET      0x10
  87#define PCIE_ATR_TLB_SET_OFFSET         0x20
  88
  89#define PCIE_MAX_TRANS_TABLES           8
  90#define PCIE_ATR_EN                     BIT(0)
  91#define PCIE_ATR_SIZE(size) \
  92        (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
  93#define PCIE_ATR_ID(id)                 ((id) & GENMASK(3, 0))
  94#define PCIE_ATR_TYPE_MEM               PCIE_ATR_ID(0)
  95#define PCIE_ATR_TYPE_IO                PCIE_ATR_ID(1)
  96#define PCIE_ATR_TLP_TYPE(type)         (((type) << 16) & GENMASK(18, 16))
  97#define PCIE_ATR_TLP_TYPE_MEM           PCIE_ATR_TLP_TYPE(0)
  98#define PCIE_ATR_TLP_TYPE_IO            PCIE_ATR_TLP_TYPE(2)
  99
/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base of this set's register block
 * @msg_addr: MSI message (doorbell) address; the physical address of this
 *            set's register block, which endpoints write to raise an MSI
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};
 111
/**
 * struct mtk_pcie_port - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_reset: PHY reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access (PCIE_INT_ENABLE_REG and
 *            the per-set MSI enable registers)
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control *phy_reset;
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;

	int irq;
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};
 152
 153/**
 154 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 155 * @bus: PCI bus to query
 156 * @devfn: device/function number
 157 * @where: offset in config space
 158 * @size: data size in TLP header
 159 *
 160 * Set byte enable field and device information in configuration TLP header.
 161 */
 162static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
 163                                        int where, int size)
 164{
 165        struct mtk_pcie_port *port = bus->sysdata;
 166        int bytes;
 167        u32 val;
 168
 169        bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
 170
 171        val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
 172              PCIE_CFG_HEADER(bus->number, devfn);
 173
 174        writel_relaxed(val, port->base + PCIE_CFGNUM_REG);
 175}
 176
 177static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
 178                                      int where)
 179{
 180        struct mtk_pcie_port *port = bus->sysdata;
 181
 182        return port->base + PCIE_CFG_OFFSET_ADDR + where;
 183}
 184
/*
 * Read PCI config space: program the TLP header (target device and byte
 * enables) first, then perform a 32-bit read through the shared window.
 */
static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	return pci_generic_config_read32(bus, devfn, where, size, val);
}
 192
/*
 * Write PCI config space: program the TLP header first, then issue a full
 * 32-bit write; the byte enables set in the header confine the effect to
 * the requested bytes.
 */
static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	/* Shift sub-word data into its byte lane within the 32-bit word */
	if (size <= 2)
		val <<= (where & 0x3) * 8;

	return pci_generic_config_write32(bus, devfn, where, 4, val);
}
 203
/* Config accessors registered with the PCI core via host->ops */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
 209
/*
 * Program one address translation (ATR) table entry mapping a CPU address
 * range to a PCI address range.
 *
 * NOTE(review): the size is encoded as fls(size) - 1, so the window is
 * assumed to be a power of two; fls() takes an int, so sizes of 4 GiB or
 * more would be truncated — presumably ranges are smaller; verify against
 * the DT ranges used with this controller.
 */
static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int num)
{
	void __iomem *table;
	u32 val;

	/* Only PCIE_MAX_TRANS_TABLES entries exist in hardware */
	if (num >= PCIE_MAX_TRANS_TABLES) {
		dev_err(port->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
			(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
		return -ENODEV;
	}

	table = port->base + PCIE_TRANS_TABLE_BASE_REG +
		num * PCIE_ATR_TLB_SET_OFFSET;

	/* Source (CPU) address and window size, with the enable bit set */
	writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
		       table);
	writel_relaxed(upper_32_bits(cpu_addr),
		       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
	/* Translated (PCI) address */
	writel_relaxed(lower_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
	writel_relaxed(upper_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

	if (type == IORESOURCE_IO)
		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
	else
		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;

	writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

	return 0;
}
 246
 247static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
 248{
 249        int i;
 250        u32 val;
 251
 252        for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
 253                struct mtk_msi_set *msi_set = &port->msi_sets[i];
 254
 255                msi_set->base = port->base + PCIE_MSI_SET_BASE_REG +
 256                                i * PCIE_MSI_SET_OFFSET;
 257                msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG +
 258                                    i * PCIE_MSI_SET_OFFSET;
 259
 260                /* Configure the MSI capture address */
 261                writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
 262                writel_relaxed(upper_32_bits(msi_set->msg_addr),
 263                               port->base + PCIE_MSI_SET_ADDR_HI_BASE +
 264                               i * PCIE_MSI_SET_ADDR_HI_OFFSET);
 265        }
 266
 267        val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG);
 268        val |= PCIE_MSI_SET_ENABLE;
 269        writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG);
 270
 271        val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
 272        val |= PCIE_MSI_ENABLE;
 273        writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
 274}
 275
/*
 * Bring the port up: configure RC mode and class code, release the reset
 * signals, wait for link-up, then enable MSI and program the address
 * translation windows from the host bridge's resource list.
 *
 * Returns 0 on success or a negative errno (link-down or too many
 * translation windows).
 */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl_relaxed(port->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel_relaxed(val, port->base + PCIE_SETTING_REG);

	/* Set class code */
	val = readl_relaxed(port->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
	writel_relaxed(val, port->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);

	/* Assert all reset signals */
	val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
	 * for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);

	/* Check if the link is up or not */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG);
		dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val);
		return err;
	}

	mtk_pcie_enable_msi(port);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;
		const char *range_type;

		if (type == IORESOURCE_IO) {
			cpu_addr = pci_pio_to_address(res->start);
			range_type = "IO";
		} else if (type == IORESOURCE_MEM) {
			cpu_addr = res->start;
			range_type = "MEM";
		} else {
			continue;
		}

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
					       type, table_index);
		if (err)
			return err;

		dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, table_index, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)size);

		table_index++;
	}

	return 0;
}
 364
/* Changing CPU affinity of the port's internal hwirqs is not supported */
static int mtk_pcie_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
 370
/* Mask at the PCI device (MSI capability) and in the parent (bottom) domain */
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}

/* Unmask at the PCI device first, then in the parent (bottom) domain */
static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}

/* Top-level MSI chip; ack is forwarded straight to the bottom domain */
static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};

/* MSI-X and multi-vector MSI are both supported */
static struct msi_domain_info mtk_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &mtk_msi_irq_chip,
};
 395
 396static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 397{
 398        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 399        struct mtk_pcie_port *port = data->domain->host_data;
 400        unsigned long hwirq;
 401
 402        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
 403
 404        msg->address_hi = upper_32_bits(msi_set->msg_addr);
 405        msg->address_lo = lower_32_bits(msi_set->msg_addr);
 406        msg->data = hwirq;
 407        dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
 408                hwirq, msg->address_hi, msg->address_lo, msg->data);
 409}
 410
 411static void mtk_msi_bottom_irq_ack(struct irq_data *data)
 412{
 413        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 414        unsigned long hwirq;
 415
 416        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
 417
 418        writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
 419}
 420
 421static void mtk_msi_bottom_irq_mask(struct irq_data *data)
 422{
 423        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 424        struct mtk_pcie_port *port = data->domain->host_data;
 425        unsigned long hwirq, flags;
 426        u32 val;
 427
 428        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
 429
 430        raw_spin_lock_irqsave(&port->irq_lock, flags);
 431        val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 432        val &= ~BIT(hwirq);
 433        writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 434        raw_spin_unlock_irqrestore(&port->irq_lock, flags);
 435}
 436
 437static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
 438{
 439        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
 440        struct mtk_pcie_port *port = data->domain->host_data;
 441        unsigned long hwirq, flags;
 442        u32 val;
 443
 444        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
 445
 446        raw_spin_lock_irqsave(&port->irq_lock, flags);
 447        val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 448        val |= BIT(hwirq);
 449        writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 450        raw_spin_unlock_irqrestore(&port->irq_lock, flags);
 451}
 452
/* Bottom-domain chip controlling the per-set MSI hardware registers */
static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack		= mtk_msi_bottom_irq_ack,
	.irq_mask		= mtk_msi_bottom_irq_mask,
	.irq_unmask		= mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.irq_set_affinity	= mtk_pcie_set_affinity,
	.name			= "MSI",
};
 461
/*
 * Allocate nr_irqs contiguous MSI hwirqs from the shared bitmap and bind
 * each virq to the bottom chip with its owning MSI set as chip data.
 */
static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_pcie_port *port = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

	mutex_lock(&port->lock);

	/* Power-of-two, order-aligned region, as multi-MSI requires */
	hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(&port->lock);

	if (hwirq < 0)
		return -ENOSPC;

	/*
	 * An order-aligned region of at most 32 vectors cannot cross a
	 * 32-vector set boundary, so the whole block shares one set.
	 */
	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &port->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mtk_msi_bottom_irq_chip, msi_set,
				    handle_edge_irq, NULL, NULL);

	return 0;
}
 490
/*
 * Return the hwirq region of virq to the bitmap and free the virqs.
 * The region size mirrors the order used at allocation time.
 */
static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_pcie_port *port = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&port->lock);

	bitmap_release_region(port->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(&port->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};
 511
 512static void mtk_intx_mask(struct irq_data *data)
 513{
 514        struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
 515        unsigned long flags;
 516        u32 val;
 517
 518        raw_spin_lock_irqsave(&port->irq_lock, flags);
 519        val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
 520        val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
 521        writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
 522        raw_spin_unlock_irqrestore(&port->irq_lock, flags);
 523}
 524
 525static void mtk_intx_unmask(struct irq_data *data)
 526{
 527        struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
 528        unsigned long flags;
 529        u32 val;
 530
 531        raw_spin_lock_irqsave(&port->irq_lock, flags);
 532        val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
 533        val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
 534        writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
 535        raw_spin_unlock_irqrestore(&port->irq_lock, flags);
 536}
 537
 538/**
 539 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 540 * @data: pointer to chip specific data
 541 *
 542 * As an emulated level IRQ, its interrupt status will remain
 543 * until the corresponding de-assert message is received; hence that
 544 * the status can only be cleared when the interrupt has been serviced.
 545 */
 546static void mtk_intx_eoi(struct irq_data *data)
 547{
 548        struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
 549        unsigned long hwirq;
 550
 551        hwirq = data->hwirq + PCIE_INTX_SHIFT;
 552        writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
 553}
 554
/* INTx chip: fasteoi flow, status cleared in the eoi callback */
static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask		= mtk_intx_mask,
	.irq_unmask		= mtk_intx_unmask,
	.irq_eoi		= mtk_intx_eoi,
	.irq_set_affinity	= mtk_pcie_set_affinity,
	.name			= "INTx",
};

/* Bind a newly-mapped INTx virq to the chip with the port as chip data */
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
				      handle_fasteoi_irq, "INTx");
	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
 575
 576static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
 577{
 578        struct device *dev = port->dev;
 579        struct device_node *intc_node, *node = dev->of_node;
 580        int ret;
 581
 582        raw_spin_lock_init(&port->irq_lock);
 583
 584        /* Setup INTx */
 585        intc_node = of_get_child_by_name(node, "interrupt-controller");
 586        if (!intc_node) {
 587                dev_err(dev, "missing interrupt-controller node\n");
 588                return -ENODEV;
 589        }
 590
 591        port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
 592                                                  &intx_domain_ops, port);
 593        if (!port->intx_domain) {
 594                dev_err(dev, "failed to create INTx IRQ domain\n");
 595                return -ENODEV;
 596        }
 597
 598        /* Setup MSI */
 599        mutex_init(&port->lock);
 600
 601        port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
 602                                  &mtk_msi_bottom_domain_ops, port);
 603        if (!port->msi_bottom_domain) {
 604                dev_err(dev, "failed to create MSI bottom domain\n");
 605                ret = -ENODEV;
 606                goto err_msi_bottom_domain;
 607        }
 608
 609        port->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
 610                                                     &mtk_msi_domain_info,
 611                                                     port->msi_bottom_domain);
 612        if (!port->msi_domain) {
 613                dev_err(dev, "failed to create MSI domain\n");
 614                ret = -ENODEV;
 615                goto err_msi_domain;
 616        }
 617
 618        return 0;
 619
 620err_msi_domain:
 621        irq_domain_remove(port->msi_bottom_domain);
 622err_msi_bottom_domain:
 623        irq_domain_remove(port->intx_domain);
 624
 625        return ret;
 626}
 627
/*
 * Undo mtk_pcie_setup_irq(): detach the chained handler, remove whichever
 * IRQ domains were created (NULL checks tolerate partial init), and
 * dispose of the controller interrupt mapping.
 */
static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
{
	irq_set_chained_handler_and_data(port->irq, NULL, NULL);

	if (port->intx_domain)
		irq_domain_remove(port->intx_domain);

	if (port->msi_domain)
		irq_domain_remove(port->msi_domain);

	if (port->msi_bottom_domain)
		irq_domain_remove(port->msi_bottom_domain);

	irq_dispose_mapping(port->irq);
}
 643
/*
 * Drain all pending, enabled MSI vectors of one set, re-reading the status
 * register until no enabled vector remains pending.
 */
static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
{
	struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
	unsigned long msi_enable, msi_status;
	unsigned int virq;
	irq_hw_number_t bit, hwirq;

	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);

	do {
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		/* Only service vectors that are currently enabled */
		msi_status &= msi_enable;
		if (!msi_status)
			break;

		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			/* Translate the set-local bit to a domain-wide hwirq */
			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
			virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
			generic_handle_irq(virq);
		}
	} while (true);
}
 667
/*
 * Chained handler for the controller interrupt: demultiplex INTx (status
 * bits 27:24) and per-set MSI group interrupts (status bits 15:8).
 */
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	unsigned int virq;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT) {
		virq = irq_find_mapping(port->intx_domain,
					irq_bit - PCIE_INTX_SHIFT);
		generic_handle_irq(virq);
	}

	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT);

		/* Clear the group status only after the set was drained */
		writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(irqchip, desc);
}
 696
 697static int mtk_pcie_setup_irq(struct mtk_pcie_port *port)
 698{
 699        struct device *dev = port->dev;
 700        struct platform_device *pdev = to_platform_device(dev);
 701        int err;
 702
 703        err = mtk_pcie_init_irq_domains(port);
 704        if (err)
 705                return err;
 706
 707        port->irq = platform_get_irq(pdev, 0);
 708        if (port->irq < 0)
 709                return port->irq;
 710
 711        irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
 712
 713        return 0;
 714}
 715
/*
 * Gather this port's resources from the platform device: the "pcie-mac"
 * register block, optional PHY/MAC resets, an optional PHY, and all clocks.
 * Returns 0 on success or a negative errno (including -EPROBE_DEFER, which
 * is deliberately not logged).
 */
static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	int ret;

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
	if (!regs)
		return -EINVAL;
	port->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(port->base)) {
		dev_err(dev, "failed to map register base\n");
		return PTR_ERR(port->base);
	}

	/* Physical base is kept for building MSI doorbell addresses */
	port->reg_base = regs->start;

	port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
	if (IS_ERR(port->phy_reset)) {
		ret = PTR_ERR(port->phy_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY reset\n");

		return ret;
	}

	port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
	if (IS_ERR(port->mac_reset)) {
		ret = PTR_ERR(port->mac_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get MAC reset\n");

		return ret;
	}

	port->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(port->phy)) {
		ret = PTR_ERR(port->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY\n");

		return ret;
	}

	port->num_clks = devm_clk_bulk_get_all(dev, &port->clks);
	if (port->num_clks < 0) {
		dev_err(dev, "failed to get clocks\n");
		return port->num_clks;
	}

	return 0;
}
 769
/*
 * Power up the port: PHY first (reset deassert, init, power on), then MAC
 * (reset deassert, runtime PM, clocks). On failure, unwind in reverse
 * order via the goto cleanup chain.
 */
static int mtk_pcie_power_up(struct mtk_pcie_port *port)
{
	struct device *dev = port->dev;
	int err;

	/* PHY power on and enable pipe clock */
	reset_control_deassert(port->phy_reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(port->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(port->num_clks, port->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(port->mac_reset);
	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	reset_control_assert(port->phy_reset);

	return err;
}
 816
/* Reverse of mtk_pcie_power_up(): MAC side first, then the PHY side */
static void mtk_pcie_power_down(struct mtk_pcie_port *port)
{
	clk_bulk_disable_unprepare(port->num_clks, port->clks);

	pm_runtime_put_sync(port->dev);
	pm_runtime_disable(port->dev);
	reset_control_assert(port->mac_reset);

	phy_power_off(port->phy);
	phy_exit(port->phy);
	reset_control_assert(port->phy_reset);
}
 829
 830static int mtk_pcie_setup(struct mtk_pcie_port *port)
 831{
 832        int err;
 833
 834        err = mtk_pcie_parse_port(port);
 835        if (err)
 836                return err;
 837
 838        /* Don't touch the hardware registers before power up */
 839        err = mtk_pcie_power_up(port);
 840        if (err)
 841                return err;
 842
 843        /* Try link up */
 844        err = mtk_pcie_startup_port(port);
 845        if (err)
 846                goto err_setup;
 847
 848        err = mtk_pcie_setup_irq(port);
 849        if (err)
 850                goto err_setup;
 851
 852        return 0;
 853
 854err_setup:
 855        mtk_pcie_power_down(port);
 856
 857        return err;
 858}
 859
 860static int mtk_pcie_probe(struct platform_device *pdev)
 861{
 862        struct device *dev = &pdev->dev;
 863        struct mtk_pcie_port *port;
 864        struct pci_host_bridge *host;
 865        int err;
 866
 867        host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
 868        if (!host)
 869                return -ENOMEM;
 870
 871        port = pci_host_bridge_priv(host);
 872
 873        port->dev = dev;
 874        platform_set_drvdata(pdev, port);
 875
 876        err = mtk_pcie_setup(port);
 877        if (err)
 878                return err;
 879
 880        host->ops = &mtk_pcie_ops;
 881        host->sysdata = port;
 882
 883        err = pci_host_probe(host);
 884        if (err) {
 885                mtk_pcie_irq_teardown(port);
 886                mtk_pcie_power_down(port);
 887                return err;
 888        }
 889
 890        return 0;
 891}
 892
/*
 * mtk_pcie_remove() - Platform driver remove callback
 * @pdev: platform device being removed
 *
 * Stop and remove the root bus under the rescan/remove lock so no
 * concurrent rescan can see a half-torn-down bus, then tear down the
 * interrupt infrastructure and power the port down.
 *
 * Return: always 0.
 */
static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_pcie_port *port = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(port);

	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	/* Devices are gone; safe to tear down IRQs and cut power */
	mtk_pcie_irq_teardown(port);
	mtk_pcie_power_down(port);

	return 0;
}
 908
 909static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port)
 910{
 911        int i;
 912
 913        raw_spin_lock(&port->irq_lock);
 914
 915        port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
 916
 917        for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
 918                struct mtk_msi_set *msi_set = &port->msi_sets[i];
 919
 920                msi_set->saved_irq_state = readl_relaxed(msi_set->base +
 921                                           PCIE_MSI_SET_ENABLE_OFFSET);
 922        }
 923
 924        raw_spin_unlock(&port->irq_lock);
 925}
 926
 927static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port)
 928{
 929        int i;
 930
 931        raw_spin_lock(&port->irq_lock);
 932
 933        writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG);
 934
 935        for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
 936                struct mtk_msi_set *msi_set = &port->msi_sets[i];
 937
 938                writel_relaxed(msi_set->saved_irq_state,
 939                               msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
 940        }
 941
 942        raw_spin_unlock(&port->irq_lock);
 943}
 944
 945static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
 946{
 947        u32 val;
 948
 949        val = readl_relaxed(port->base + PCIE_ICMD_PM_REG);
 950        val |= PCIE_TURN_OFF_LINK;
 951        writel_relaxed(val, port->base + PCIE_ICMD_PM_REG);
 952
 953        /* Check the link is L2 */
 954        return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
 955                                  (PCIE_LTSSM_STATE(val) ==
 956                                   PCIE_LTSSM_STATE_L2_IDLE), 20,
 957                                   50 * USEC_PER_MSEC);
 958}
 959
 960static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
 961{
 962        struct mtk_pcie_port *port = dev_get_drvdata(dev);
 963        int err;
 964        u32 val;
 965
 966        /* Trigger link to L2 state */
 967        err = mtk_pcie_turn_off_link(port);
 968        if (err) {
 969                dev_err(port->dev, "cannot enter L2 state\n");
 970                return err;
 971        }
 972
 973        /* Pull down the PERST# pin */
 974        val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
 975        val |= PCIE_PE_RSTB;
 976        writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
 977
 978        dev_dbg(port->dev, "entered L2 states successfully");
 979
 980        mtk_pcie_irq_save(port);
 981        mtk_pcie_power_down(port);
 982
 983        return 0;
 984}
 985
 986static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
 987{
 988        struct mtk_pcie_port *port = dev_get_drvdata(dev);
 989        int err;
 990
 991        err = mtk_pcie_power_up(port);
 992        if (err)
 993                return err;
 994
 995        err = mtk_pcie_startup_port(port);
 996        if (err) {
 997                mtk_pcie_power_down(port);
 998                return err;
 999        }
1000
1001        mtk_pcie_irq_restore(port);
1002
1003        return 0;
1004}
1005
/* Only the noirq phase is handled: the link must be quiesced after all
 * device interrupts are disabled, and restored before they come back.
 */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				      mtk_pcie_resume_noirq)
};
1010
/* Devicetree match table; exported for module autoloading */
static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "mediatek,mt8192-pcie" },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
1016
/* Platform driver glue: probe/remove plus PM ops and DT matching */
static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};
1026
1027module_platform_driver(mtk_pcie_driver);
1028MODULE_LICENSE("GPL v2");
1029