linux/arch/powerpc/platforms/wsp/wsp_pci.c
/*
 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#include <asm/io-workarounds.h>

#include "wsp.h"
#include "wsp_pci.h"
#include "msi.h"


/* Max number of TVTs for one table. Only 32-bit tables can use
 * multiple TVTs, so the max currently supported is 8 since only
 * 2G of DMA space is supported.
 */
#define MAX_TABLE_TVT_COUNT             8

struct wsp_dma_table {
        struct list_head        link;
        struct iommu_table      table;
        struct wsp_phb  *phb;
        struct page             *tces[MAX_TABLE_TVT_COUNT];
};

/* We support DMA regions from 0..2G in 32-bit space (no support for
 * 64-bit DMA just yet). Each device gets a separate TCE table (TVT
 * entry) with validation enabled (though not supported by Simics
 * just yet).
 *
 * To simplify things, we divide this 2G space into N regions based
 * on the constant below, which could be turned into a tunable
 * eventually.
 *
 * We then dynamically assign those regions to devices as they show up.
 *
 * We use a bitmap as an allocator for these.
 *
 * Tables are allocated/created dynamically as devices are discovered,
 * and multiple TVT entries are used if needed.
 *
 * When 64-bit DMA support is added we should simply use a separate set
 * of larger regions (the HW supports 64 TVT entries). We could
 * additionally create a bypass region in 64-bit space for performance,
 * though that would have a cost in terms of security.
 *
 * If you set NUM_DMA32_REGIONS to 1, then a single table is shared
 * for all devices and bus/dev/fn validation is disabled.
 *
 * Note that a DMA32 region cannot be smaller than 256M so the max
 * supported here for now is 8. We don't yet support sharing regions
 * between multiple devices so the max number of devices supported
 * is MAX_TABLE_TVT_COUNT.
 */
#define NUM_DMA32_REGIONS       1
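
/*
 * For illustration: with NUM_DMA32_REGIONS == 8, each region covers
 * 2G / 8 == 256M and is backed by a single TVT entry; with the default
 * of 1, the single 2G region needs 2G / 256M == 8 TVT entries, which
 * is where MAX_TABLE_TVT_COUNT comes from.
 */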

struct wsp_phb {
        struct pci_controller   *hose;

        /* Lock controlling access to the list of dma tables.
         * It does -not- protect against dma_* operations on
         * those tables; those should be stopped before an entry
         * is removed from the list.
         *
         * The lock is also used for error handling operations.
         */
        spinlock_t              lock;
        struct list_head        dma_tables;
        unsigned long           dma32_map;
        unsigned long           dma32_base;
        unsigned int            dma32_num_regions;
        unsigned long           dma32_region_size;

        /* Debugfs stuff */
        struct dentry           *ddir;

        struct list_head        all;
};
static LIST_HEAD(wsp_phbs);

//#define cfg_debug(fmt...)     pr_debug(fmt)
#define cfg_debug(fmt...)


static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
                                  int offset, int len, u32 *val)
{
        struct pci_controller *hose;
        int suboff;
        u64 addr;

        hose = pci_bus_to_host(bus);
        if (hose == NULL)
                return PCIBIOS_DEVICE_NOT_FOUND;
        if (offset >= 0x1000)
                return PCIBIOS_BAD_REGISTER_NUMBER;
        addr = PCIE_REG_CA_ENABLE |
                ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
                ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
                ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
        suboff = offset & 3;

        /*
         * Note: the caller has already checked that offset is
         * suitably aligned and that len is 1, 2 or 4.
         */
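        /*
         * For illustration, how the byte-enable nibble and shift work
         * out (derived from the code below): a 1-byte read at offset
         * 0x41 gives suboff = 1, BE = 0x8 >> 1 = 0x4, and the value is
         * extracted from bits 8..15 of the little-endian data word; a
         * 2-byte read at offset 0x42 gives suboff = 2, BE = 0xc >> 2 =
         * 0x3, extracting bits 16..31.
         */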

        switch (len) {
        case 1:
                addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
                out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
                *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
                        >> (suboff << 3)) & 0xff;
                cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
                          bus->number, devfn >> 3, devfn & 7,
                          offset, suboff, addr, *val);
                break;
        case 2:
                addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
                out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
                *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
                        >> (suboff << 3)) & 0xffff;
                cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
                          bus->number, devfn >> 3, devfn & 7,
                          offset, suboff, addr, *val);
                break;
        default:
                addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
                out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
                *val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA);
                cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
                          bus->number, devfn >> 3, devfn & 7,
                          offset, suboff, addr, *val);
                break;
        }
        return PCIBIOS_SUCCESSFUL;
}

static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
                                   int offset, int len, u32 val)
{
        struct pci_controller *hose;
        int suboff;
        u64 addr;

        hose = pci_bus_to_host(bus);
        if (hose == NULL)
                return PCIBIOS_DEVICE_NOT_FOUND;
        if (offset >= 0x1000)
                return PCIBIOS_BAD_REGISTER_NUMBER;
        addr = PCIE_REG_CA_ENABLE |
                ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
                ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
                ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
        suboff = offset & 3;

        /*
         * Note: the caller has already checked that offset is
         * suitably aligned and that len is 1, 2 or 4.
         */
        switch (len) {
        case 1:
                addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
                val <<= suboff << 3;
                out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
                out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
                cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
                          bus->number, devfn >> 3, devfn & 7,
                          offset, suboff, addr, val);
                break;
        case 2:
                addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
                val <<= suboff << 3;
                out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
                out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
                cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
                          bus->number, devfn >> 3, devfn & 7,
                          offset, suboff, addr, val);
                break;
        default:
                addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
                out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
                out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
                cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
                          bus->number, devfn >> 3, devfn & 7,
                          offset, suboff, addr, val);
                break;
        }
        return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops wsp_pcie_pci_ops =
{
        .read = wsp_pcie_read_config,
        .write = wsp_pcie_write_config,
};
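
/*
 * For context: these ops are not called directly; the PCI core invokes
 * them through the pci_bus_read_config_*() / pci_bus_write_config_*()
 * wrappers, which enforce the alignment and length guarantees that the
 * "Note:" comments above rely on.
 */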

#define TCE_SHIFT               12
#define TCE_PAGE_SIZE           (1 << TCE_SHIFT)
#define TCE_PCI_WRITE           0x2              /* write from PCI allowed */
#define TCE_PCI_READ            0x1              /* read from PCI allowed */
#define TCE_RPN_MASK            0x3fffffffffful  /* 42-bit RPN (4K pages) */
#define TCE_RPN_SHIFT           12
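
/*
 * Illustrative TCE encoding example (derived from the masks above and
 * the code in tce_build_wsp() below): for a page at physical address
 * 0x12345000, rpn = 0x12345000 >> TCE_SHIFT = 0x12345, and a read/write
 * entry is (0x12345 << TCE_RPN_SHIFT) | TCE_PCI_READ | TCE_PCI_WRITE
 * == 0x12345003.
 */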

//#define dma_debug(fmt...)     pr_debug(fmt)
#define dma_debug(fmt...)

static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
                           unsigned long uaddr, enum dma_data_direction direction,
                           struct dma_attrs *attrs)
{
        struct wsp_dma_table *ptbl = container_of(tbl,
                                                    struct wsp_dma_table,
                                                    table);
        u64 proto_tce;
        u64 *tcep;
        u64 rpn;

        proto_tce = TCE_PCI_READ;
#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
        proto_tce |= TCE_PCI_WRITE;
#else
        if (direction != DMA_TO_DEVICE)
                proto_tce |= TCE_PCI_WRITE;
#endif

        /* XXX Make this faster by factoring out the page address for
         * entries within the same TCE table page
         */
        while (npages--) {
                /* We don't use it->base as the table can be scattered */
                tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
                tcep += (index & 0xffff);
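                /*
                 * For illustration: each tces[] chunk is 512K, i.e.
                 * 0x80000 / sizeof(u64) == 0x10000 TCEs, so index >> 16
                 * selects the chunk and index & 0xffff the entry
                 * within it.
                 */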

                /* can't move this out since we might cross LMB boundary */
                rpn = __pa(uaddr) >> TCE_SHIFT;
                *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

                dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
                          tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);

                uaddr += TCE_PAGE_SIZE;
                index++;
        }
        return 0;
}

static void tce_free_wsp(struct iommu_table *tbl, long index, long npages)
{
        struct wsp_dma_table *ptbl = container_of(tbl,
                                                    struct wsp_dma_table,
                                                    table);
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
        struct pci_controller *hose = ptbl->phb->hose;
#endif
        u64 *tcep;

        /* XXX Make this faster by factoring out the page address for
         * entries within the same TCE table page. Also use the line-kill
         * option to kill multiple TCEs at once
         */
        while (npages--) {
                /* We don't use it->base as the table can be scattered */
                tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
                tcep += (index & 0xffff);
                dma_debug("[DMA] TCE %p cleared\n", tcep);
                *tcep = 0;
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
                /* Don't write there on DD1, it would pollute other MMIO accesses */
                out_be64(hose->cfg_data + PCIE_REG_TCE_KILL,
                         PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K |
                         (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK));
#endif
                index++;
        }
}

static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
                                                            unsigned int region,
                                                            struct pci_dev *validate)
{
        struct pci_controller *hose = phb->hose;
        unsigned long size = phb->dma32_region_size;
        unsigned long addr = phb->dma32_region_size * region + phb->dma32_base;
        struct wsp_dma_table *tbl;
        int tvts_per_table, i, tvt, nid;
        unsigned long flags;

        nid = of_node_to_nid(phb->hose->dn);

        /* Calculate how many TVTs are needed */
        tvts_per_table = size / 0x10000000;
        if (tvts_per_table == 0)
                tvts_per_table = 1;

        /* Calculate the base TVT index. We know all tables have the same
         * size so we just do a simple multiply here
         */
        tvt = region * tvts_per_table;
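
        /*
         * For illustration: with the default single 2G region,
         * tvts_per_table = 0x80000000 / 0x10000000 = 8 and region 0
         * uses TVTs 0..7; with eight 256M regions, each region uses
         * exactly one TVT whose index equals the region number.
         */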

        pr_debug("         Region : %d\n", region);
        pr_debug("      DMA range : 0x%08lx..0x%08lx\n", addr, addr + size - 1);
        pr_debug(" Number of TVTs : %d\n", tvts_per_table);
        pr_debug("       Base TVT : %d\n", tvt);
        pr_debug("         Node   : %d\n", nid);

        tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid);
        if (!tbl)
                return ERR_PTR(-ENOMEM);
        tbl->phb = phb;

        /* Create as many TVTs as needed, each represents 256M at most */
        for (i = 0; i < tvts_per_table; i++) {
                u64 tvt_data1, tvt_data0;

                /* Allocate table. We always use a 4K TCE size for now, so
                 * one table is always 8 * (256M / 4K) == 512K
                 */
                tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000));
                if (tbl->tces[i] == NULL)
                        goto fail;
                memset(page_address(tbl->tces[i]), 0, 0x80000);

                pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i]));

                /* Table size. We currently set it to be the whole 256M region */
                tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT;
                /* IO page size set to 4K */
                tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT;
                /* Shift in the address */
                tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT;

                /* Validation stuff. We only do full bus/dev/fn validation
                 * for now; maybe one day we can group devices, but that
                 * isn't the case at the moment
                 */
                if (validate) {
                        tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK;
                        tvt_data0 |= validate->bus->number;
                        tvt_data1 |= IODA_TVT1_DEVNUM_VALID;
                        tvt_data1 |= ((u64)PCI_SLOT(validate->devfn))
                                << IODA_TVT1_DEVNUM_VALUE_SHIFT;
                        tvt_data1 |= IODA_TVT1_FUNCNUM_VALID;
                        tvt_data1 |= ((u64)PCI_FUNC(validate->devfn))
                                << IODA_TVT1_FUNCNUM_VALUE_SHIFT;
                }

                /* XXX PE number is always 0 for now */

                /* Program the values using the PHB lock */
                spin_lock_irqsave(&phb->lock, flags);
                out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
                         (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT);
                out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1);
                out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0);
                spin_unlock_irqrestore(&phb->lock, flags);
        }

        /* Init bits and pieces */
        tbl->table.it_blocksize = 16;
        tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
        tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;

        /*
         * It's already blank but we clear it anyway.
         * Consider an additional interface that makes clearing optional
         */
        iommu_init_table(&tbl->table, nid);

        list_add(&tbl->link, &phb->dma_tables);
        return tbl;

 fail:
        pr_debug("  Failed to allocate a 256M TCE table!\n");
        for (i = 0; i < tvts_per_table; i++)
                if (tbl->tces[i])
                        __free_pages(tbl->tces[i], get_order(0x80000));
        kfree(tbl);
        return ERR_PTR(-ENOMEM);
}

static void __devinit wsp_pci_dma_dev_setup(struct pci_dev *pdev)
{
        struct dev_archdata *archdata = &pdev->dev.archdata;
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct wsp_phb *phb = hose->private_data;
        struct wsp_dma_table *table = NULL;
        unsigned long flags;
        int i;

        /* Don't assign an iommu table to a bridge */
        if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
                return;

        pr_debug("%s: Setting up DMA...\n", pci_name(pdev));

        spin_lock_irqsave(&phb->lock, flags);

        /* If there is only one region, check if it already exists */
        if (phb->dma32_num_regions == 1) {
                spin_unlock_irqrestore(&phb->lock, flags);
                if (list_empty(&phb->dma_tables))
                        table = wsp_pci_create_dma32_table(phb, 0, NULL);
                else
                        table = list_first_entry(&phb->dma_tables,
                                                 struct wsp_dma_table,
                                                 link);
        } else {
                /* else find a free region */
                for (i = 0; i < phb->dma32_num_regions && !table; i++) {
                        if (__test_and_set_bit(i, &phb->dma32_map))
                                continue;
                        spin_unlock_irqrestore(&phb->lock, flags);
                        table = wsp_pci_create_dma32_table(phb, i, pdev);
                }
        }

        /* Check if we got an error */
        if (IS_ERR(table)) {
                pr_err("%s: Failed to create DMA table, err %ld!\n",
                       pci_name(pdev), PTR_ERR(table));
                return;
        }

        /* Or a valid table */
        if (table) {
                pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
                        pci_name(pdev),
                        table->table.it_offset << IOMMU_PAGE_SHIFT,
                        (table->table.it_offset << IOMMU_PAGE_SHIFT)
                        + phb->dma32_region_size - 1);
                archdata->dma_data.iommu_table_base = &table->table;
                return;
        }

        /* Or no room */
        spin_unlock_irqrestore(&phb->lock, flags);
        pr_err("%s: Out of DMA space!\n", pci_name(pdev));
}
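
/*
 * For context: this hook runs once per device via
 * ppc_md.pci_dma_dev_setup (registered in wsp_setup_pci() below), so
 * each new device either shares the single table or claims a free
 * region bit in phb->dma32_map before a table is created for it.
 */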

static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
{
        u64 val;
        int i;

#define DUMP_REG(x) \
        pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))

        /*
         * Some WSP variants have a bogus class code by default in the PCI-E
         * root complex's built-in P2P bridge
         */
        val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
        pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
        out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
                 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
        pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));

#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
        /* XXX Disable TCE caching, it doesn't work on DD1 */
        out_be64(hose->cfg_data + 0xe50,
                 in_be64(hose->cfg_data + 0xe50) | (3ull << 62));
        printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50));
#endif

        /* Configure M32A and IO. IO is hard-wired to be 1M for now */
        out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys);
        out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK,
                 (~(hose->io_resource.end - hose->io_resource.start)) &
                 0x3fffffff000ul);
        out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1);

        out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR,
                 hose->mem_resources[0].start);
        printk("Want to write to M32A_BASE_MASK : 0x%llx\n",
                 (~(hose->mem_resources[0].end -
                    hose->mem_resources[0].start)) & 0x3ffffff0000ul);
        out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK,
                 (~(hose->mem_resources[0].end -
                    hose->mem_resources[0].start)) & 0x3ffffff0000ul);
        out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
                 (hose->mem_resources[0].start - hose->pci_mem_offset) | 1);

        /* Clear all TVT entries
         *
         * XXX Might get TVT count from device-tree
         */
        for (i = 0; i < IODA_TVT_COUNT; i++) {
                out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
                         PCIE_REG_IODA_AD_TBL_TVT | i);
                out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0);
                out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0);
        }

        /* Kill the TCE cache */
        out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG,
                 in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) |
                 PCIE_REG_PHBC_64B_TCE_EN);

        /* Enable 32 & 64-bit MSIs, IO space and M32A */
        val = PCIE_REG_PHBC_32BIT_MSI_EN |
              PCIE_REG_PHBC_IO_EN |
              PCIE_REG_PHBC_64BIT_MSI_EN |
              PCIE_REG_PHBC_M32A_EN;
        if (iommu_is_off)
                val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS;
        pr_debug("Will write config: 0x%llx\n", val);
        out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val);

        /* Enable error reporting */
        out_be64(hose->cfg_data + 0xe00,
                 in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull);

        /* Mask an error that's generated when doing config space probing
         *
         * XXX Maybe we should only mask it around config space cycles, or
         * ignore it when we know we had a config space cycle recently?
         */
        out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull);
        out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull);

        /* Enable UTL errors; for now, all of them go to UTL irq 1
         *
         * We similarly mask one UTL error apparently caused during normal
         * probing. We also mask the link-up error
         */
        out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0);
        out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0);
        out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0);
        out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull);
        out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull);
        out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull);

        DUMP_REG(PCIE_REG_IO_BASE_ADDR);
        DUMP_REG(PCIE_REG_IO_BASE_MASK);
        DUMP_REG(PCIE_REG_IO_START_ADDR);
        DUMP_REG(PCIE_REG_M32A_BASE_ADDR);
        DUMP_REG(PCIE_REG_M32A_BASE_MASK);
        DUMP_REG(PCIE_REG_M32A_START_ADDR);
        DUMP_REG(PCIE_REG_M32B_BASE_ADDR);
        DUMP_REG(PCIE_REG_M32B_BASE_MASK);
        DUMP_REG(PCIE_REG_M32B_START_ADDR);
        DUMP_REG(PCIE_REG_M64_BASE_ADDR);
        DUMP_REG(PCIE_REG_M64_BASE_MASK);
        DUMP_REG(PCIE_REG_M64_START_ADDR);
        DUMP_REG(PCIE_REG_PHB_CONFIG);
}

static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port)
{
        u64 val;
        int i;

        for (i = 0; i < 10000; i++) {
                val = in_be64(phb->hose->cfg_data + 0xe08);
                if ((val & 0x1900000000000000ull) == 0x0100000000000000ull)
                        return;
                udelay(1);
        }
        pr_warning("PCI IO timeout on domain %d port 0x%lx\n",
                   phb->hose->global_number, port);
}

#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa)               \
static ret wsp_pci_##name at                                    \
{                                                               \
        struct iowa_bus *bus;                                   \
        struct wsp_phb *phb;                                    \
        unsigned long flags;                                    \
        ret rval;                                               \
        bus = iowa_pio_find_bus(aa);                            \
        WARN_ON(!bus);                                          \
        phb = bus->private;                                     \
        spin_lock_irqsave(&phb->lock, flags);                   \
        wsp_pci_wait_io_idle(phb, aa);                          \
        rval = __do_##name al;                                  \
        spin_unlock_irqrestore(&phb->lock, flags);              \
        return rval;                                            \
}

#define DEF_PCI_AC_NORET_pio(name, at, al, aa)                  \
static void wsp_pci_##name at                                   \
{                                                               \
        struct iowa_bus *bus;                                   \
        struct wsp_phb *phb;                                    \
        unsigned long flags;                                    \
        bus = iowa_pio_find_bus(aa);                            \
        WARN_ON(!bus);                                          \
        phb = bus->private;                                     \
        spin_lock_irqsave(&phb->lock, flags);                   \
        wsp_pci_wait_io_idle(phb, aa);                          \
        __do_##name al;                                         \
        spin_unlock_irqrestore(&phb->lock, flags);              \
}
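
/*
 * For illustration (a sketch, assuming the usual DEF_PCI_AC_RET
 * argument order in <asm/io-defs.h>), the inb entry expands to
 * roughly:
 *
 *      static u8 wsp_pci_inb(unsigned long port)
 *      {
 *              ...
 *              wsp_pci_wait_io_idle(phb, port);
 *              rval = __do_inb(port);
 *              ...
 *      }
 *
 * i.e. every PIO accessor is serialized under the PHB lock and waits
 * for the bridge to report IO idle before issuing the access.
 */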

#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa)
#define DEF_PCI_AC_NORET_mem(name, at, al, aa)

#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)            \
        DEF_PCI_AC_RET_##space(name, ret, at, al, aa)

#define DEF_PCI_AC_NORET(name, at, al, space, aa)               \
        DEF_PCI_AC_NORET_##space(name, at, al, aa)


#include <asm/io-defs.h>

#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET

static struct ppc_pci_io wsp_pci_iops = {
        .inb = wsp_pci_inb,
        .inw = wsp_pci_inw,
        .inl = wsp_pci_inl,
        .outb = wsp_pci_outb,
        .outw = wsp_pci_outw,
        .outl = wsp_pci_outl,
        .insb = wsp_pci_insb,
        .insw = wsp_pci_insw,
        .insl = wsp_pci_insl,
        .outsb = wsp_pci_outsb,
        .outsw = wsp_pci_outsw,
        .outsl = wsp_pci_outsl,
};

static int __init wsp_setup_one_phb(struct device_node *np)
{
        struct pci_controller *hose;
        struct wsp_phb *phb;

        pr_info("PCI: Setting up PCIe host bridge %s\n", np->full_name);

        phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL);
        if (!phb)
                return -ENOMEM;
        hose = pcibios_alloc_controller(np);
        if (!hose) {
                /* Can't really free the phb */
                return -ENOMEM;
        }
        hose->private_data = phb;
        phb->hose = hose;

        INIT_LIST_HEAD(&phb->dma_tables);
        spin_lock_init(&phb->lock);

        /* XXX Use the bus-range property? */
        hose->first_busno = 0;
        hose->last_busno = 0xff;

        /* We use cfg_data as the address for the whole bridge MMIO space
         */
        hose->cfg_data = of_iomap(hose->dn, 0);

        pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data);

        /* Get the ranges from the device-tree */
        pci_process_bridge_OF_ranges(hose, np, 0);

        /* XXX Force re-assigning of everything for now */
        pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
                      PCI_ENABLE_PROC_DOMAINS);
        pci_probe_only = 0;

        /* Calculate how the TCE space is divided */
        phb->dma32_base         = 0;
        phb->dma32_num_regions  = NUM_DMA32_REGIONS;
        if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) {
                pr_warning("IOMMU: Clamped to %d DMA32 regions\n",
                           MAX_TABLE_TVT_COUNT);
                phb->dma32_num_regions = MAX_TABLE_TVT_COUNT;
        }
        phb->dma32_region_size  = 0x80000000 / phb->dma32_num_regions;

        BUG_ON(!is_power_of_2(phb->dma32_region_size));

        /* Setup config ops */
        hose->ops = &wsp_pcie_pci_ops;

        /* Configure the HW */
        wsp_pcie_configure_hw(hose);

        /* Instantiate IO workarounds */
        iowa_register_bus(hose, &wsp_pci_iops, NULL, phb);
#ifdef CONFIG_PCI_MSI
        wsp_setup_phb_msi(hose);
#endif

        /* Add to global list */
        list_add(&phb->all, &wsp_phbs);

        return 0;
}

void __init wsp_setup_pci(void)
{
        struct device_node *np;
        int rc;

        /* Find host bridges */
        for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) {
                rc = wsp_setup_one_phb(np);
                if (rc)
                        pr_err("Failed to setup PCIe bridge %s, rc=%d\n",
                               np->full_name, rc);
        }

        /* Establish device-tree linkage */
        pci_devs_phb_init();

        /* Set DMA ops to use TCEs */
        if (iommu_is_off) {
                pr_info("PCI-E: Disabled TCEs, using direct DMA\n");
                set_pci_dma_ops(&dma_direct_ops);
        } else {
                ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup;
                ppc_md.tce_build = tce_build_wsp;
                ppc_md.tce_free = tce_free_wsp;
                set_pci_dma_ops(&dma_iommu_ops);
        }
}

#define err_debug(fmt...)       pr_debug(fmt)
//#define err_debug(fmt...)

static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np)
{
        const u32 *prop;
        int hw_irq;

        /* Ok, no interrupts property, let's try to find our child P2P */
        np = of_get_next_child(np, NULL);
        if (np == NULL)
                return 0;

        /* Grab its interrupt map */
        prop = of_get_property(np, "interrupt-map", NULL);
        if (prop == NULL)
                return 0;

        /* Grab one of the interrupts in there, keep the low 4 bits */
        hw_irq = prop[5] & 0xf;

        /* 0..4 for PHB 0 and 5..9 for PHB 1 */
        if (hw_irq < 5)
                hw_irq = 4;
        else
                hw_irq = 9;
        hw_irq |= prop[5] & ~0xf;
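
        /*
         * For illustration: if prop[5] were 0x23, the low nibble 3
         * (< 5, so PHB 0) is replaced with 4, the PHB 0 error
         * interrupt, giving hw_irq = 0x24. (0x23 is a hypothetical
         * value, not one taken from a real device-tree.)
         */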

        err_debug("PCI: Using 0x%x as error IRQ for %s\n",
                  hw_irq, np->parent->full_name);
        return irq_create_mapping(NULL, hw_irq);
}

static const struct {
        u32 offset;
        const char *name;
} wsp_pci_regs[] = {
#define DREG(x) { PCIE_REG_##x, #x }
#define DUTL(x) { PCIE_UTL_##x, "UTL_" #x }
        /* Architected registers except CONFIG_ and IODA
         * to avoid side effects
         */
        DREG(DMA_CHAN_STATUS),
        DREG(CPU_LOADSTORE_STATUS),
        DREG(LOCK0),
        DREG(LOCK1),
        DREG(PHB_CONFIG),
        DREG(IO_BASE_ADDR),
        DREG(IO_BASE_MASK),
        DREG(IO_START_ADDR),
        DREG(M32A_BASE_ADDR),
        DREG(M32A_BASE_MASK),
        DREG(M32A_START_ADDR),
        DREG(M32B_BASE_ADDR),
        DREG(M32B_BASE_MASK),
        DREG(M32B_START_ADDR),
        DREG(M64_BASE_ADDR),
        DREG(M64_BASE_MASK),
        DREG(M64_START_ADDR),
        DREG(TCE_KILL),
        DREG(LOCK2),
        DREG(PHB_GEN_CAP),
        DREG(PHB_TCE_CAP),
        DREG(PHB_IRQ_CAP),
        DREG(PHB_EEH_CAP),
        DREG(PAPR_ERR_INJ_CONTROL),
        DREG(PAPR_ERR_INJ_ADDR),
        DREG(PAPR_ERR_INJ_MASK),

        /* UTL core regs */
        DUTL(SYS_BUS_CONTROL),
        DUTL(STATUS),
        DUTL(SYS_BUS_AGENT_STATUS),
        DUTL(SYS_BUS_AGENT_ERR_SEV),
        DUTL(SYS_BUS_AGENT_IRQ_EN),
        DUTL(SYS_BUS_BURST_SZ_CONF),
        DUTL(REVISION_ID),
        DUTL(OUT_POST_HDR_BUF_ALLOC),
        DUTL(OUT_POST_DAT_BUF_ALLOC),
        DUTL(IN_POST_HDR_BUF_ALLOC),
        DUTL(IN_POST_DAT_BUF_ALLOC),
        DUTL(OUT_NP_BUF_ALLOC),
        DUTL(IN_NP_BUF_ALLOC),
        DUTL(PCIE_TAGS_ALLOC),
        DUTL(GBIF_READ_TAGS_ALLOC),

        DUTL(PCIE_PORT_CONTROL),
        DUTL(PCIE_PORT_STATUS),
        DUTL(PCIE_PORT_ERROR_SEV),
        DUTL(PCIE_PORT_IRQ_EN),
        DUTL(RC_STATUS),
        DUTL(RC_ERR_SEVERITY),
        DUTL(RC_IRQ_EN),
        DUTL(EP_STATUS),
        DUTL(EP_ERR_SEVERITY),
        DUTL(EP_ERR_IRQ_EN),
        DUTL(PCI_PM_CTRL1),
        DUTL(PCI_PM_CTRL2),

        /* PCIe stack regs */
        DREG(SYSTEM_CONFIG1),
        DREG(SYSTEM_CONFIG2),
        DREG(EP_SYSTEM_CONFIG),
        DREG(EP_FLR),
        DREG(EP_BAR_CONFIG),
        DREG(LINK_CONFIG),
        DREG(PM_CONFIG),
        DREG(DLP_CONTROL),
        DREG(DLP_STATUS),
        DREG(ERR_REPORT_CONTROL),
        DREG(SLOT_CONTROL1),
        DREG(SLOT_CONTROL2),
        DREG(UTL_CONFIG),
        DREG(BUFFERS_CONFIG),
        DREG(ERROR_INJECT),
        DREG(SRIOV_CONFIG),
        DREG(PF0_SRIOV_STATUS),
        DREG(PF1_SRIOV_STATUS),
        DREG(PORT_NUMBER),
        DREG(POR_SYSTEM_CONFIG),

        /* Internal logic regs */
        DREG(PHB_VERSION),
        DREG(RESET),
        DREG(PHB_CONTROL),
        DREG(PHB_TIMEOUT_CONTROL1),
        DREG(PHB_QUIESCE_DMA),
        DREG(PHB_DMA_READ_TAG_ACTV),
        DREG(PHB_TCE_READ_TAG_ACTV),

        /* FIR registers */
        DREG(LEM_FIR_ACCUM),
        DREG(LEM_FIR_AND_MASK),
        DREG(LEM_FIR_OR_MASK),
        DREG(LEM_ACTION0),
        DREG(LEM_ACTION1),
        DREG(LEM_ERROR_MASK),
        DREG(LEM_ERROR_AND_MASK),
        DREG(LEM_ERROR_OR_MASK),

        /* Error traps registers */
        DREG(PHB_ERR_STATUS),
        DREG(PHB_ERR1_STATUS),
        DREG(PHB_ERR_INJECT),
        DREG(PHB_ERR_LEM_ENABLE),
        DREG(PHB_ERR_IRQ_ENABLE),
        DREG(PHB_ERR_FREEZE_ENABLE),
        DREG(PHB_ERR_SIDE_ENABLE),
        DREG(PHB_ERR_LOG_0),
        DREG(PHB_ERR_LOG_1),
        DREG(PHB_ERR_STATUS_MASK),
        DREG(PHB_ERR1_STATUS_MASK),
        DREG(MMIO_ERR_STATUS),
        DREG(MMIO_ERR1_STATUS),
        DREG(MMIO_ERR_INJECT),
        DREG(MMIO_ERR_LEM_ENABLE),
        DREG(MMIO_ERR_IRQ_ENABLE),
        DREG(MMIO_ERR_FREEZE_ENABLE),
        DREG(MMIO_ERR_SIDE_ENABLE),
        DREG(MMIO_ERR_LOG_0),
        DREG(MMIO_ERR_LOG_1),
        DREG(MMIO_ERR_STATUS_MASK),
        DREG(MMIO_ERR1_STATUS_MASK),
        DREG(DMA_ERR_STATUS),
        DREG(DMA_ERR1_STATUS),
        DREG(DMA_ERR_INJECT),
        DREG(DMA_ERR_LEM_ENABLE),
        DREG(DMA_ERR_IRQ_ENABLE),
        DREG(DMA_ERR_FREEZE_ENABLE),
        DREG(DMA_ERR_SIDE_ENABLE),
        DREG(DMA_ERR_LOG_0),
        DREG(DMA_ERR_LOG_1),
        DREG(DMA_ERR_STATUS_MASK),
        DREG(DMA_ERR1_STATUS_MASK),

        /* Debug and Trace registers */
        DREG(PHB_DEBUG_CONTROL0),
        DREG(PHB_DEBUG_STATUS0),
        DREG(PHB_DEBUG_CONTROL1),
        DREG(PHB_DEBUG_STATUS1),
        DREG(PHB_DEBUG_CONTROL2),
        DREG(PHB_DEBUG_STATUS2),
        DREG(PHB_DEBUG_CONTROL3),
        DREG(PHB_DEBUG_STATUS3),
        DREG(PHB_DEBUG_CONTROL4),
        DREG(PHB_DEBUG_STATUS4),
        DREG(PHB_DEBUG_CONTROL5),
        DREG(PHB_DEBUG_STATUS5),

        /* Don't seem to exist ...
        DREG(PHB_DEBUG_CONTROL6),
        DREG(PHB_DEBUG_STATUS6),
        */
};

static int wsp_pci_regs_show(struct seq_file *m, void *private)
{
        struct wsp_phb *phb = m->private;
        struct pci_controller *hose = phb->hose;
        int i;

        for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
                /* Skip write-only regs */
                if (wsp_pci_regs[i].offset == 0xc08 ||
                    wsp_pci_regs[i].offset == 0xc10 ||
                    wsp_pci_regs[i].offset == 0xc38 ||
                    wsp_pci_regs[i].offset == 0xc40)
                        continue;
                seq_printf(m, "0x%03x: 0x%016llx %s\n",
                           wsp_pci_regs[i].offset,
                           in_be64(hose->cfg_data + wsp_pci_regs[i].offset),
                           wsp_pci_regs[i].name);
        }
        return 0;
}

static int wsp_pci_regs_open(struct inode *inode, struct file *file)
{
        return single_open(file, wsp_pci_regs_show, inode->i_private);
}

static const struct file_operations wsp_pci_regs_fops = {
        .open = wsp_pci_regs_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int wsp_pci_reg_set(void *data, u64 val)
{
        out_be64((void __iomem *)data, val);
        return 0;
}

static int wsp_pci_reg_get(void *data, u64 *val)
{
        *val = in_be64((void __iomem *)data);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n");
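
/*
 * Usage sketch (assuming the usual /sys/kernel/debug mount point for
 * debugfs; the exact path comes from powerpc_debugfs_root):
 *
 *      # cat /sys/kernel/debug/powerpc/phb0/regs/<off>_<name>
 *      # echo 0x0 > /sys/kernel/debug/powerpc/phb0/regs/<off>_<name>
 *
 * i.e. each register gets a read/write debugfs file, plus an "all_regs"
 * dump, created in wsp_setup_pci_err_reporting() below.
 */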

static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id)
{
        struct wsp_phb *phb = dev_id;
        struct pci_controller *hose = phb->hose;
        irqreturn_t handled = IRQ_NONE;
        struct wsp_pcie_err_log_data ed;

        pr_err("PCI: Error interrupt on %s (PHB %d)\n",
               hose->dn->full_name, hose->global_number);
 again:
        memset(&ed, 0, sizeof(ed));

        /* Read and clear UTL errors */
        ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS);
        if (ed.utl_sys_err)
                out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err);
        ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS);
        if (ed.utl_port_err)
                out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err);
        ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS);
        if (ed.utl_rc_err)
                out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err);

        /* Read and clear main trap errors */
        ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS);
        if (ed.phb_err) {
                ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS);
                ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0);
                ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1);
                out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0);
                out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0);
        }
        ed.mmio_err = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS);
        if (ed.mmio_err) {
                ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS);
                ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0);
                ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1);
                out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0);
                out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0);
        }
        ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS);
        if (ed.dma_err) {
                ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS);
                ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0);
                ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1);
                out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0);
                out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0);
        }

        /* Now print things out */
        if (ed.phb_err) {
                pr_err("   PHB Error Status      : 0x%016llx\n", ed.phb_err);
                pr_err("   PHB First Error Status: 0x%016llx\n", ed.phb_err1);
                pr_err("   PHB Error Log 0       : 0x%016llx\n", ed.phb_log0);
                pr_err("   PHB Error Log 1       : 0x%016llx\n", ed.phb_log1);
        }
        if (ed.mmio_err) {
                pr_err("  MMIO Error Status      : 0x%016llx\n", ed.mmio_err);
                pr_err("  MMIO First Error Status: 0x%016llx\n", ed.mmio_err1);
                pr_err("  MMIO Error Log 0       : 0x%016llx\n", ed.mmio_log0);
                pr_err("  MMIO Error Log 1       : 0x%016llx\n", ed.mmio_log1);
        }
        if (ed.dma_err) {
                pr_err("   DMA Error Status      : 0x%016llx\n", ed.dma_err);
                pr_err("   DMA First Error Status: 0x%016llx\n", ed.dma_err1);
                pr_err("   DMA Error Log 0       : 0x%016llx\n", ed.dma_log0);
                pr_err("   DMA Error Log 1       : 0x%016llx\n", ed.dma_log1);
        }
        if (ed.utl_sys_err)
                pr_err("   UTL Sys Error Status  : 0x%016llx\n", ed.utl_sys_err);
        if (ed.utl_port_err)
                pr_err("   UTL Port Error Status : 0x%016llx\n", ed.utl_port_err);
        if (ed.utl_rc_err)
                pr_err("   UTL RC Error Status   : 0x%016llx\n", ed.utl_rc_err);

        /* Interrupts are caused by the error traps. If we saw any error
         * there, we loop again in case the UTL buffered new errors
         * between our reading the UTL registers and the trap registers
         */
        if (ed.dma_err || ed.mmio_err || ed.phb_err) {
                handled = IRQ_HANDLED;
                goto again;
        }
        return handled;
}

static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb)
{
        struct pci_controller *hose = phb->hose;
        int err_irq, i, rc;
        char fname[16];

        /* Create a debugfs directory for that PHB */
        sprintf(fname, "phb%d", phb->hose->global_number);
        phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root);

        /* Some useful debug output */
        if (phb->ddir) {
                struct dentry *d = debugfs_create_dir("regs", phb->ddir);
                char tmp[64];

                for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
                        sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset,
                                wsp_pci_regs[i].name);
                        debugfs_create_file(tmp, 0600, d,
                                            hose->cfg_data + wsp_pci_regs[i].offset,
                                            &wsp_pci_reg_fops);
                }
                debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops);
        }

        /* Find the IRQ number for that PHB */
        err_irq = irq_of_parse_and_map(hose->dn, 0);
        if (err_irq == 0)
                /* XXX Error IRQ missing from the device-tree */
                err_irq = wsp_pci_get_err_irq_no_dt(hose->dn);
        if (err_irq == 0) {
                pr_err("PCI: Failed to fetch error interrupt for %s\n",
                       hose->dn->full_name);
                return;
        }
        /* Request it */
        rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb);
        if (rc) {
                pr_err("PCI: Failed to request interrupt for %s\n",
                       hose->dn->full_name);
        }
        /* Enable interrupts for all errors for now */
        out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
        out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
        out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
}

/*
 * This is called later to hook up with the error interrupt
 */
static int __init wsp_setup_pci_late(void)
{
        struct wsp_phb *phb;

        list_for_each_entry(phb, &wsp_phbs, all)
                wsp_setup_pci_err_reporting(phb);

        return 0;
}
arch_initcall(wsp_setup_pci_late);