linux/drivers/bcma/host_pci.c
/*
 * Broadcom specific AMBA
 * PCI Host
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
#include <linux/pci.h>
#include <linux/module.h>

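/* Reprogram the sliding BAR0 windows so that the requested core's register
 * space and its agent/wrapper space become visible through MMIO, and remember
 * which core is currently mapped. */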
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
        int win2 = core->bus->host_is_pcie2 ?
                BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2;

        pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
                               core->addr);
        pci_write_config_dword(core->bus->host_pci, win2, core->wrap);
        core->bus->mapped_core = core;
        bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
}

/* Provide access to the requested core and return the base offset that must
 * be added to register offsets. Fixed windows are used where possible. */
static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
{
        switch (core->id.id) {
        case BCMA_CORE_CHIPCOMMON:
                return 3 * BCMA_CORE_SIZE;
        case BCMA_CORE_PCIE:
                return 2 * BCMA_CORE_SIZE;
        }

        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        return 0;
}

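/* Single register accessors: translate the caller's offset through the fixed
 * or sliding BAR0 window and perform the MMIO access. */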
static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        return ioread8(core->bus->mmio + offset);
}

static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        return ioread16(core->bus->mmio + offset);
}

static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        return ioread32(core->bus->mmio + offset);
}

static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
                                 u8 value)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        iowrite8(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
                                  u16 value)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        iowrite16(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
                                  u32 value)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        iowrite32(value, core->bus->mmio + offset);
}

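/* Bulk MMIO transfers: point the sliding window at the core once and move the
 * whole buffer with the repeating ioreadN_rep/iowriteN_rep helpers. The byte
 * count must be a multiple of the register width. */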
#ifdef CONFIG_BCMA_BLOCKIO
static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
                                     size_t count, u16 offset, u8 reg_width)
{
        void __iomem *addr = core->bus->mmio + offset;
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        switch (reg_width) {
        case sizeof(u8):
                ioread8_rep(addr, buffer, count);
                break;
        case sizeof(u16):
                WARN_ON(count & 1);
                ioread16_rep(addr, buffer, count >> 1);
                break;
        case sizeof(u32):
                WARN_ON(count & 3);
                ioread32_rep(addr, buffer, count >> 2);
                break;
        default:
                WARN_ON(1);
        }
}

static void bcma_host_pci_block_write(struct bcma_device *core,
                                      const void *buffer, size_t count,
                                      u16 offset, u8 reg_width)
{
        void __iomem *addr = core->bus->mmio + offset;
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        switch (reg_width) {
        case sizeof(u8):
                iowrite8_rep(addr, buffer, count);
                break;
        case sizeof(u16):
                WARN_ON(count & 1);
                iowrite16_rep(addr, buffer, count >> 1);
                break;
        case sizeof(u32):
                WARN_ON(count & 3);
                iowrite32_rep(addr, buffer, count >> 2);
                break;
        default:
                WARN_ON(1);
        }
}
#endif

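/* Agent/wrapper register access: bcma_host_pci_switch_core() maps the core's
 * wrapper space at offset 1 * BCMA_CORE_SIZE within BAR0. */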
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
                                   u32 value)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

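/* Host operations hooked into the bus; all register access performed by core
 * drivers goes through these callbacks. */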
static const struct bcma_host_ops bcma_host_pci_ops = {
        .read8          = bcma_host_pci_read8,
        .read16         = bcma_host_pci_read16,
        .read32         = bcma_host_pci_read32,
        .write8         = bcma_host_pci_write8,
        .write16        = bcma_host_pci_write16,
        .write32        = bcma_host_pci_write32,
#ifdef CONFIG_BCMA_BLOCKIO
        .block_read     = bcma_host_pci_block_read,
        .block_write    = bcma_host_pci_block_write,
#endif
        .aread32        = bcma_host_pci_aread32,
        .awrite32       = bcma_host_pci_awrite32,
};

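/* Enable the PCI device, map BAR0, attach a bcma bus to it, scan for cores
 * (PCIe Gen2 hosts are recognized by the presence of a PCIE2 core) and
 * register the bus. */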
static int bcma_host_pci_probe(struct pci_dev *dev,
                               const struct pci_device_id *id)
{
        struct bcma_bus *bus;
        int err = -ENOMEM;
        const char *name;
        u32 val;

        /* Alloc */
        bus = kzalloc(sizeof(*bus), GFP_KERNEL);
        if (!bus)
                goto out;

        /* Basic PCI configuration */
        err = pci_enable_device(dev);
        if (err)
                goto err_kfree_bus;

        name = dev_name(&dev->dev);
        if (dev->driver && dev->driver->name)
                name = dev->driver->name;
        err = pci_request_regions(dev, name);
        if (err)
                goto err_pci_disable;
        pci_set_master(dev);

        /* Disable the RETRY_TIMEOUT register (0x41) to keep
         * PCI Tx retries from interfering with C3 CPU state */
        pci_read_config_dword(dev, 0x40, &val);
        if ((val & 0x0000ff00) != 0)
                pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

        /* SSB needed additional powering up; do we have any AMBA PCI cards? */
        if (!pci_is_pcie(dev)) {
                bcma_err(bus, "PCI card detected, they are not supported.\n");
                err = -ENXIO;
                goto err_pci_release_regions;
        }

        bus->dev = &dev->dev;

        /* Map MMIO */
        err = -ENOMEM;
        bus->mmio = pci_iomap(dev, 0, ~0UL);
        if (!bus->mmio)
                goto err_pci_release_regions;

        /* Host specific */
        bus->host_pci = dev;
        bus->hosttype = BCMA_HOSTTYPE_PCI;
        bus->ops = &bcma_host_pci_ops;

        bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
        bus->boardinfo.type = bus->host_pci->subsystem_device;

        /* Initialize struct, detect chip */
        bcma_init_bus(bus);

        /* Scan bus to find out generation of PCIe core */
        err = bcma_bus_scan(bus);
        if (err)
                goto err_pci_unmap_mmio;

        if (bcma_find_core(bus, BCMA_CORE_PCIE2))
                bus->host_is_pcie2 = true;

        /* Register */
        err = bcma_bus_register(bus);
        if (err)
                goto err_unregister_cores;

        pci_set_drvdata(dev, bus);

out:
        return err;

err_unregister_cores:
        bcma_unregister_cores(bus);
err_pci_unmap_mmio:
        pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
        pci_release_regions(dev);
err_pci_disable:
        pci_disable_device(dev);
err_kfree_bus:
        kfree(bus);
        return err;
}

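/* Tear everything down in the reverse order of probe. */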
static void bcma_host_pci_remove(struct pci_dev *dev)
{
        struct bcma_bus *bus = pci_get_drvdata(dev);

        bcma_bus_unregister(bus);
        pci_iounmap(dev, bus->mmio);
        pci_release_regions(dev);
        pci_disable_device(dev);
        kfree(bus);
}

#ifdef CONFIG_PM_SLEEP
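/* Drop the cached BAR0 window mapping on suspend so the first register access
 * after resume reprograms the window. */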
static int bcma_host_pci_suspend(struct device *dev)
{
        struct bcma_bus *bus = dev_get_drvdata(dev);

        bus->mapped_core = NULL;

        return bcma_bus_suspend(bus);
}

static int bcma_host_pci_resume(struct device *dev)
{
        struct bcma_bus *bus = dev_get_drvdata(dev);

        return bcma_bus_resume(bus);
}

static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
                         bcma_host_pci_resume);
#define BCMA_PM_OPS     (&bcma_pm_ops)

#else /* CONFIG_PM_SLEEP */

#define BCMA_PM_OPS     NULL

#endif /* CONFIG_PM_SLEEP */

static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },  /* 0xa8d8 */
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0018) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_HP, 0x804a) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },  /* 0xa8db, BCM43217 (sic!) */
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },  /* 0xa8dc */
        { 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);

static struct pci_driver bcma_pci_bridge_driver = {
        .name = "bcma-pci-bridge",
        .id_table = bcma_pci_bridge_tbl,
        .probe = bcma_host_pci_probe,
        .remove = bcma_host_pci_remove,
        .driver.pm = BCMA_PM_OPS,
};

int __init bcma_host_pci_init(void)
{
        return pci_register_driver(&bcma_pci_bridge_driver);
}

void __exit bcma_host_pci_exit(void)
{
        pci_unregister_driver(&bcma_pci_bridge_driver);
}

/**************************************************
 * Runtime ops for drivers.
 **************************************************/

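/* Exported helpers that let drivers of cores on the bus bring the host PCI(e)
 * core up or down at runtime. */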
/* See also pcicore_up */
void bcma_host_pci_up(struct bcma_bus *bus)
{
        if (bus->hosttype != BCMA_HOSTTYPE_PCI)
                return;

        if (bus->host_is_pcie2)
                bcma_core_pcie2_up(&bus->drv_pcie2);
        else
                bcma_core_pci_up(&bus->drv_pci[0]);
}
EXPORT_SYMBOL_GPL(bcma_host_pci_up);

/* See also pcicore_down */
void bcma_host_pci_down(struct bcma_bus *bus)
{
        if (bus->hosttype != BCMA_HOSTTYPE_PCI)
                return;

        if (!bus->host_is_pcie2)
                bcma_core_pci_down(&bus->drv_pci[0]);
}
EXPORT_SYMBOL_GPL(bcma_host_pci_down);

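/* Route (or stop routing) a core's backplane interrupt to the PCI host by
 * setting or clearing its bit in the BCMA_PCI_IRQMASK config word. */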
/* See also si_pci_setup */
int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
                          bool enable)
{
        struct pci_dev *pdev;
        u32 coremask, tmp;
        int err = 0;

        if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
                /* This bcma device is not on a PCI host bus, so its IRQs are
                 * not routed through the PCI core and we must not enable
                 * routing through it. */
                goto out;
        }

        pdev = bus->host_pci;

        err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
        if (err)
                goto out;

        coremask = BIT(core->core_index) << 8;
        if (enable)
                tmp |= coremask;
        else
                tmp &= ~coremask;

        err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);

out:
        return err;
}
EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);