linux/drivers/pci/pci.c
/*
 *      PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *      Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *      David Mosberger-Tang
 *
 *      Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
        "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
        struct list_head list;
        struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
        unsigned int delay = dev->d3_delay;

        if (delay < pci_pm_d3_delay)
                delay = pci_pm_d3_delay;

        msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE         (256)
#define DEFAULT_CARDBUS_MEM_SIZE        (64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE         (256)
#define DEFAULT_HOTPLUG_MEM_SIZE        (2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  The arch can override either
 * the dfl or the actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
        struct list_head *tmp;
        unsigned char max, n;

        max = bus->busn_res.end;
        list_for_each(tmp, &bus->children) {
                n = pci_bus_max_busnr(pci_bus_b(tmp));
                if (n > max)
                        max = n;
        }
        return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
        /*
         * Make sure the BAR is actually a memory resource, not an IO resource
         */
        if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
                WARN_ON(1);
                return NULL;
        }
        return ioremap_nocache(pci_resource_start(pdev, bar),
                                     pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
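
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * would typically call pci_ioremap_bar() from probe to map a register
 * BAR; the function itself rejects non-memory BARs and returns NULL on
 * failure.  The "foo" name is hypothetical.
 *
 *        static void __iomem *foo_map_regs(struct pci_dev *pdev)
 *        {
 *                return pci_ioremap_bar(pdev, 0);
 *        }
 */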

#define PCI_FIND_CAP_TTL        48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
                                   u8 pos, int cap, int *ttl)
{
        u8 id;

        while ((*ttl)--) {
                pci_bus_read_config_byte(bus, devfn, pos, &pos);
                if (pos < 0x40)
                        break;
                pos &= ~3;
                pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
                                         &id);
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos += PCI_CAP_LIST_NEXT;
        }
        return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
                               u8 pos, int cap)
{
        int ttl = PCI_FIND_CAP_TTL;

        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
        return __pci_find_next_cap(dev->bus, dev->devfn,
                                   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
                                    unsigned int devfn, u8 hdr_type)
{
        u16 status;

        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        switch (hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
                return PCI_CAPABILITY_LIST;
        case PCI_HEADER_TYPE_CARDBUS:
                return PCI_CB_CAPABILITY_LIST;
        default:
                return 0;
        }

        return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
        int pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

        return pos;
}

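/*
 * Usage sketch (illustrative only, not part of this file): locating the
 * power management capability and reading its PMC register from it.
 *
 *        u16 pmc;
 *        int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *
 *        if (pm)
 *                pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 */
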
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
        int pos;
        u8 hdr_type;

        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
        if (pos)
                pos = __pci_find_next_cap(bus, devfn, pos, cap);

        return pos;
}

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
        u32 header;
        int ttl;
        int pos = PCI_CFG_SPACE_SIZE;

        /* minimum 8 bytes per capability */
        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
                return 0;

        if (start)
                pos = start;

        if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                return 0;

        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                        break;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
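
/*
 * Usage sketch (illustrative only, not part of this file): because some
 * extended capabilities may appear more than once, a caller can walk
 * all instances by feeding each result back in as @start.
 *
 *        int pos = 0;
 *
 *        while ((pos = pci_find_next_ext_capability(dev, pos,
 *                                                   PCI_EXT_CAP_ID_VNDR)))
 *                dev_info(&dev->dev, "vendor-specific cap at %#x\n", pos);
 */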

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR         Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC          Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN         Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR         Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
        return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
        int rc, ttl = PCI_FIND_CAP_TTL;
        u8 cap, mask;

        if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
                mask = HT_3BIT_CAP_MASK;
        else
                mask = HT_5BIT_CAP_MASK;

        pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
                                      PCI_CAP_ID_HT, &ttl);
        while (pos) {
                rc = pci_read_config_byte(dev, pos + 3, &cap);
                if (rc != PCIBIOS_SUCCESSFUL)
                        return 0;

                if ((cap & mask) == ht_cap)
                        return pos;

                pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
                                              pos + PCI_CAP_LIST_NEXT,
                                              PCI_CAP_ID_HT, &ttl);
        }

        return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
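
/*
 * Usage sketch (illustrative only, not part of this file): bounding the
 * walk explicitly, as the comment above recommends, so a broken device
 * with a looping capability list cannot hang the caller.
 *
 *        int guard = PCI_FIND_CAP_TTL;
 *        int pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
 *
 *        while (pos && guard--)
 *                pos = pci_find_next_ht_capability(dev, pos,
 *                                                  HT_CAPTYPE_MSI_MAPPING);
 */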

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
        int pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

        return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure whose resources are being searched
 * @res: child resource record for which parent is sought
 *
 *  For the given resource region of the given device, return the resource
 *  region of the parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
        const struct pci_bus *bus = dev->bus;
        int i;
        struct resource *best = NULL, *r;

        pci_bus_for_each_resource(bus, r, i) {
                if (!r)
                        continue;
                if (res->start && !(res->start >= r->start && res->end <= r->end))
                        continue;       /* Not contained */
                if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
                        continue;       /* Wrong type */
                if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
                        return r;       /* Exact match */
                /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
                if (r->flags & IORESOURCE_PREFETCH)
                        continue;
                /* .. but we can put a prefetchable resource inside a non-prefetchable one */
                if (!best)
                        best = r;
        }
        return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
                pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
        if (!ops->is_manageable || !ops->set_state || !ops->choose_state
            || !ops->sleep_wake || !ops->can_wakeup)
                return -EINVAL;
        pci_platform_pm = ops;
        return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
                                                pci_power_t t)
{
        return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
        return pci_platform_pm ?
                        pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
        return pci_platform_pm ?
                        pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
        return pci_platform_pm ?
                        pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        u16 pmcsr;
        bool need_restore = false;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        if (!dev->pm_cap)
                return -EIO;

        if (state < PCI_D0 || state > PCI_D3hot)
                return -EINVAL;

        /* Validate the requested transition: we can enter D0 from any
         * state, but otherwise we may only go deeper to sleep, never
         * from a deeper low-power state to a shallower one
         */
        if (state != PCI_D0 && dev->current_state <= PCI_D3cold
            && dev->current_state > state) {
                dev_err(&dev->dev, "invalid power transition "
                        "(from state %d to %d)\n", dev->current_state, state);
                return -EINVAL;
        }

        /* check if this device supports the desired state */
        if ((state == PCI_D1 && !dev->d1_support)
           || (state == PCI_D2 && !dev->d2_support))
                return -EIO;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

        /* If we're (effectively) in D3, force entire word to 0.
         * This doesn't affect PME_Status, disables PME_En, and
         * sets PowerState to 0.
         */
        switch (dev->current_state) {
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= state;
                break;
        case PCI_D3hot:
        case PCI_D3cold:
        case PCI_UNKNOWN: /* Boot-up */
                if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
                 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
                        need_restore = true;
                /* Fall-through: force to D0 */
        default:
                pmcsr = 0;
                break;
        }

        /* enter specified state */
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /* Mandatory power management transition delays */
        /* see PCI PM 1.1 5.6.1 table 18 */
        if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2 || dev->current_state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        if (dev->current_state != state && printk_ratelimit())
                dev_info(&dev->dev, "Refused to change power state, "
                        "currently in D%d\n", dev->current_state);

        /*
         * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
         * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
         * from D3hot to D0 _may_ perform an internal reset, thereby
         * going to "D0 Uninitialized" rather than "D0 Initialized".
         * For example, at least some versions of the 3c905B and the
         * 3c556B exhibit this behaviour.
         *
         * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
         * devices in a D3hot state at boot.  Consequently, we need to
         * restore at least the BARs so that the device will be
         * accessible to its driver.
         */
        if (need_restore)
                pci_restore_bars(dev);

        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self);

        return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
        if (dev->pm_cap) {
                u16 pmcsr;

                /*
                 * Configuration space is not accessible for a device in
                 * D3cold, so just keep or set D3cold for safety
                 */
                if (dev->current_state == PCI_D3cold)
                        return;
                if (state == PCI_D3cold) {
                        dev->current_state = PCI_D3cold;
                        return;
                }
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        } else {
                dev->current_state = state;
        }
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
        if (platform_pci_power_manageable(dev))
                platform_pci_set_power_state(dev, PCI_D0);

        pci_raw_set_power_state(dev, PCI_D0);
        pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int error;

        if (platform_pci_power_manageable(dev)) {
                error = platform_pci_set_power_state(dev, state);
                if (!error)
                        pci_update_current_state(dev, state);
                /* Fall back to PCI_D0 if native PM is not supported */
                if (!dev->pm_cap)
                        dev->current_state = PCI_D0;
        } else {
                error = -ENODEV;
                /* Fall back to PCI_D0 if native PM is not supported */
                if (!dev->pm_cap)
                        dev->current_state = PCI_D0;
        }

        return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
        if (state == PCI_D0) {
                pci_platform_power_transition(dev, PCI_D0);
                /*
                 * Mandatory power management transition delays, see
                 * PCI Express Base Specification Revision 2.0 Section
                 * 6.6.1: Conventional Reset.  Do not delay for
                 * devices powered on/off by the corresponding bridge,
                 * because we have already delayed for the bridge.
                 */
                if (dev->runtime_d3cold) {
                        msleep(dev->d3cold_delay);
                        /*
                         * When powering on a bridge from D3cold, the
                         * whole hierarchy may be powered on into the
                         * D0uninitialized state; resume the devices to
                         * give them a chance to suspend again
                         */
                        pci_wakeup_bus(dev->subordinate);
                }
        }
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
        pci_power_t state = *(pci_power_t *)data;

        dev->current_state = state;
        return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
        if (bus)
                pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int ret;

        if (state <= PCI_D0)
                return -EINVAL;
        ret = pci_platform_power_transition(dev, state);
        /* Powering off a bridge may power off the whole hierarchy below it */
        if (!ret && state == PCI_D3cold)
                __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
        return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        int error;

        /* bound the state we're entering */
        if (state > PCI_D3cold)
                state = PCI_D3cold;
        else if (state < PCI_D0)
                state = PCI_D0;
        else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
                /*
                 * If the device or the parent bridge do not support PCI PM,
                 * ignore the request if we're doing anything other than putting
                 * it into D0 (which would only happen on boot).
                 */
                return 0;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        __pci_start_power_transition(dev, state);

        /* This device is quirked not to be put into D3, so
           don't put it in D3 */
        if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
                return 0;

        /*
         * To put the device in D3cold, we put the device into D3hot in the
         * native way, then put the device into D3cold with platform ops
         */
        error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
                                        PCI_D3hot : state);

        if (!__pci_complete_power_transition(dev, state))
                error = 0;
        /*
         * When aspm_policy is "powersave" this call ensures
         * that ASPM is configured.
         */
        if (!error && dev->bus->self)
                pcie_aspm_powersave_config_link(dev->bus->self);

        return error;
}

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *      that is passed to the suspend() function.
 *
 * Returns a PCI power state suitable for the given device and the given
 * system message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
        pci_power_t ret;

        if (!pci_find_capability(dev, PCI_CAP_ID_PM))
                return PCI_D0;

        ret = platform_pci_choose_state(dev);
        if (ret != PCI_POWER_ERROR)
                return ret;

        switch (state.event) {
        case PM_EVENT_ON:
                return PCI_D0;
        case PM_EVENT_FREEZE:
        case PM_EVENT_PRETHAW:
                /* REVISIT both freeze and pre-thaw "should" use D0 */
        case PM_EVENT_SUSPEND:
        case PM_EVENT_HIBERNATE:
                return PCI_D3hot;
        default:
                dev_info(&dev->dev, "unrecognized suspend event %d\n",
                         state.event);
                BUG();
        }
        return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
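
/*
 * Usage sketch (illustrative only, not part of this file): the classic
 * legacy suspend sequence combining pci_choose_state() with
 * pci_set_power_state().  "foo_suspend" is hypothetical.
 *
 *        static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *        {
 *                pci_save_state(pdev);
 *                pci_disable_device(pdev);
 *                return pci_set_power_state(pdev,
 *                                           pci_choose_state(pdev, mesg));
 *        }
 */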

#define PCI_EXP_SAVE_REGS       7

static struct pci_cap_saved_state *pci_find_saved_cap(
        struct pci_dev *pci_dev, char cap)
{
        struct pci_cap_saved_state *tmp;
        struct hlist_node *pos;

        hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
                if (tmp->cap.cap_nr == cap)
                        return tmp;
        }
        return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state) {
                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

        return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
        int pos;
        struct pci_cap_saved_state *save_state;

        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (pos <= 0)
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        if (!save_state) {
                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        pci_read_config_word(dev, pos + PCI_X_CMD,
                             (u16 *)save_state->cap.data);

        return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
        int i = 0, pos;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!save_state || pos <= 0)
                return;
        cap = (u16 *)&save_state->cap.data[0];

        pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
        int i;
        /* XXX: 100% dword access ok here? */
        for (i = 0; i < 16; i++)
                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
        dev->state_saved = true;
        if ((i = pci_save_pcie_state(dev)) != 0)
                return i;
        if ((i = pci_save_pcix_state(dev)) != 0)
                return i;
        return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
                                     u32 saved_val, int retry)
{
        u32 val;

        pci_read_config_dword(pdev, offset, &val);
        if (val == saved_val)
                return;

        for (;;) {
                dev_dbg(&pdev->dev, "restoring config space at offset "
                        "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
                pci_write_config_dword(pdev, offset, saved_val);
                if (retry-- <= 0)
                        return;

                pci_read_config_dword(pdev, offset, &val);
                if (val == saved_val)
                        return;

                mdelay(1);
        }
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
                                           int start, int end, int retry)
{
        int index;

        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
                                         retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
                pci_restore_config_space_range(pdev, 10, 15, 0);
                /* Restore BARs before the command register. */
                pci_restore_config_space_range(pdev, 4, 9, 10);
                pci_restore_config_space_range(pdev, 0, 3, 0);
        } else {
                pci_restore_config_space_range(pdev, 0, 15, 0);
        }
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
        if (!dev->state_saved)
                return;

        /* PCI Express register must be restored first */
        pci_restore_pcie_state(dev);
        pci_restore_ats_state(dev);

        pci_restore_config_space(dev);

        pci_restore_pcix_state(dev);
        pci_restore_msi_state(dev);
        pci_restore_iov_state(dev);

        dev->state_saved = false;
}
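
/*
 * Usage sketch (illustrative only, not part of this file): the usual
 * resume-side counterpart to pci_save_state(), restoring power state
 * first so config space is accessible.  "foo_resume" is hypothetical.
 *
 *        static int foo_resume(struct pci_dev *pdev)
 *        {
 *                pci_set_power_state(pdev, PCI_D0);
 *                pci_restore_state(pdev);
 *                return pci_enable_device(pdev);
 *        }
 */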

struct pci_saved_state {
        u32 config_space[16];
        struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *                         the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state was saved or on error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
        struct pci_saved_state *state;
        struct pci_cap_saved_state *tmp;
        struct pci_cap_saved_data *cap;
        struct hlist_node *pos;
        size_t size;

        if (!dev->state_saved)
                return NULL;

        size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

        hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
                size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

        state = kzalloc(size, GFP_KERNEL);
        if (!state)
                return NULL;

        memcpy(state->config_space, dev->saved_config_space,
               sizeof(state->config_space));

        cap = state->cap;
        hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
                size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
                memcpy(cap, &tmp->cap, len);
                cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
        }
        /* Empty cap_save terminates list */

        return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
        struct pci_cap_saved_data *cap;

        dev->state_saved = false;

        if (!state)
                return 0;

        memcpy(dev->saved_config_space, state->config_space,
               sizeof(state->config_space));

        cap = state->cap;
        while (cap->size) {
                struct pci_cap_saved_state *tmp;

                tmp = pci_find_saved_cap(dev, cap->cap_nr);
                if (!tmp || tmp->cap.size != cap->size)
                        return -EINVAL;

                memcpy(tmp->cap.data, cap->data, tmp->cap.size);
                cap = (struct pci_cap_saved_data *)((u8 *)cap +
                       sizeof(struct pci_cap_saved_data) + cap->size);
        }

        dev->state_saved = true;
        return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *                                 and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
                                  struct pci_saved_state **state)
{
        int ret = pci_load_saved_state(dev, *state);
        kfree(*state);
        *state = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
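
/*
 * Usage sketch (illustrative only, not part of this file): snapshotting
 * config space around an operation that clobbers it, using the
 * store/load pair above.  The middle step stands in for whatever
 * disturbs the device (a reset, for instance).
 *
 *        struct pci_saved_state *state;
 *
 *        pci_save_state(pdev);
 *        state = pci_store_saved_state(pdev);
 *        (reset or otherwise disturb the device here)
 *        pci_load_and_free_saved_state(pdev, &state);
 *        pci_restore_state(pdev);
 */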

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
        int err;

        err = pci_set_power_state(dev, PCI_D0);
        if (err < 0 && err != -EIO)
                return err;
        err = pcibios_enable_device(dev, bars);
        if (err < 0)
                return err;
        pci_fixup_device(pci_fixup_enable, dev);

        return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use that
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
        return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
                                     resource_size_t flags)
{
        int err;
        int i, bars = 0;

        /*
         * Power state could be unknown at this point, either due to a fresh
         * boot or a device removal call.  So get the current power state
         * so that things like MSI message writing will behave as expected
         * (e.g. if the device really is in D0 at enable time).
         */
        if (dev->pm_cap) {
                u16 pmcsr;
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        }

        if (atomic_add_return(1, &dev->enable_cnt) > 1)
                return 0;               /* already enabled */

        /* only skip sriov related */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
        for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);

        err = do_pci_enable_device(dev, bars);
        if (err < 0)
                atomic_dec(&dev->enable_cnt);
        return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
        return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
        return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
        return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
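
/*
 * Usage sketch (illustrative only, not part of this file): the classic
 * unmanaged probe sequence built on pci_enable_device().  The "foo"
 * names are hypothetical.
 *
 *        static int foo_probe(struct pci_dev *pdev,
 *                             const struct pci_device_id *id)
 *        {
 *                int err = pci_enable_device(pdev);
 *
 *                if (err)
 *                        return err;
 *                err = pci_request_regions(pdev, "foo");
 *                if (err)
 *                        pci_disable_device(pdev);
 *                return err;
 *        }
 */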

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
        unsigned int enabled:1;
        unsigned int pinned:1;
        unsigned int orig_intx:1;
        unsigned int restore_intx:1;
        u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
        struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
        struct pci_devres *this = res;
        int i;

        if (dev->msi_enabled)
                pci_disable_msi(dev);
        if (dev->msix_enabled)
                pci_disable_msix(dev);

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
                if (this->region_mask & (1 << i))
                        pci_release_region(dev, i);

        if (this->restore_intx)
                pci_intx(dev, this->orig_intx);

        if (this->enabled && !this->pinned)
                pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
        struct pci_devres *dr, *new_dr;

        dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
        if (dr)
                return dr;

        new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
        if (!new_dr)
                return NULL;
        return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
        if (pci_is_managed(pdev))
                return devres_find(&pdev->dev, pcim_release, NULL, NULL);
        return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;
        int rc;

        dr = get_pci_dr(pdev);
        if (unlikely(!dr))
                return -ENOMEM;
        if (dr->enabled)
                return 0;

        rc = pci_enable_device(pdev);
        if (!rc) {
                pdev->is_managed = 1;
                dr->enabled = 1;
        }
        return rc;
}
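
/*
 * Usage sketch (illustrative only, not part of this file): with the
 * managed variant the error paths and remove() need no explicit
 * disable; devres unwinds everything on driver detach.  "foo_probe"
 * is hypothetical.
 *
 *        static int foo_probe(struct pci_dev *pdev,
 *                             const struct pci_device_id *id)
 *        {
 *                int err = pcim_enable_device(pdev);
 *
 *                if (err)
 *                        return err;
 *                return 0;
 *        }
 */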

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(pdev);
        WARN_ON(!dr || !dr->enabled);
        if (dr)
                dr->pinned = 1;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
        u16 pci_command;

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        if (pci_command & PCI_COMMAND_MASTER) {
                pci_command &= ~PCI_COMMAND_MASTER;
                pci_write_config_word(dev, PCI_COMMAND, pci_command);
        }

        pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(dev);
        if (dr)
                dr->enabled = 0;

        if (atomic_sub_return(1, &dev->enable_cnt) != 0)
                return;

        do_pci_disable_device(dev);

        dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
                                        enum pcie_reset_state state)
{
        return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
        return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
        int pmcsr_pos;
        u16 pmcsr;
        bool ret = false;

        if (!dev->pm_cap)
                return false;

        pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
        pci_read_config_word(dev, pmcsr_pos, &pmcsr);
        if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
                return false;

        /* Clear PME status. */
        pmcsr |= PCI_PM_CTRL_PME_STATUS;
        if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
                /* Disable PME to avoid interrupt flood. */
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
                ret = true;
        }

        pci_write_config_word(dev, pmcsr_pos, pmcsr);

        return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
        if (pme_poll_reset && dev->pme_poll)
                dev->pme_poll = false;

        if (pci_check_pme_status(dev)) {
                pci_wakeup_event(dev);
                pm_request_resume(&dev->dev);
        }
        return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
        pci_wakeup_event(pci_dev);
        pm_request_resume(&pci_dev->dev);
        return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
        if (!dev->pm_cap)
                return false;

        return !!(dev->pme_support & (1 << state));
}
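
/*
 * Usage sketch (illustrative only, not part of this file): checking
 * PME# capability for the target sleep state before arming it, as
 * pci_pme_active() below requires of its callers.
 *
 *        if (pci_pme_capable(dev, PCI_D3hot))
 *                pci_pme_active(dev, true);
 */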
1526
1527static void pci_pme_list_scan(struct work_struct *work)
1528{
1529        struct pci_pme_device *pme_dev, *n;
1530
1531        mutex_lock(&pci_pme_list_mutex);
1532        if (!list_empty(&pci_pme_list)) {
1533                list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1534                        if (pme_dev->dev->pme_poll) {
1535                                struct pci_dev *bridge;
1536
1537                                bridge = pme_dev->dev->bus->self;
1538                                /*
1539                                 * If bridge is in low power state, the
1540                                 * configuration space of subordinate devices
1541                                 * may be not accessible
1542                                 */
1543                                if (bridge && bridge->current_state != PCI_D0)
1544                                        continue;
1545                                pci_pme_wakeup(pme_dev->dev, NULL);
1546                        } else {
1547                                list_del(&pme_dev->list);
1548                                kfree(pme_dev);
1549                        }
1550                }
1551                if (!list_empty(&pci_pme_list))
1552                        schedule_delayed_work(&pci_pme_work,
1553                                              msecs_to_jiffies(PME_TIMEOUT));
1554        }
1555        mutex_unlock(&pci_pme_list_mutex);
1556}
1557
1558/**
1559 * pci_pme_active - enable or disable PCI device's PME# function
1560 * @dev: PCI device to handle.
1561 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1562 *
1563 * The caller must verify that the device is capable of generating PME# before
1564 * calling this function with @enable equal to 'true'.
1565 */
1566void pci_pme_active(struct pci_dev *dev, bool enable)
1567{
1568        u16 pmcsr;
1569
1570        if (!dev->pm_cap)
1571                return;
1572
1573        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1574        /* Clear PME_Status by writing 1 to it and enable PME# */
1575        pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1576        if (!enable)
1577                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1578
1579        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1580
1581        /* PCI (as opposed to PCIe) PME requires that the device have
1582           its PME# line hooked up correctly. Not all hardware vendors
1583           do this, so the PME never gets delivered and the device
1584           remains asleep. The easiest way around this is to
1585           periodically walk the list of suspended devices and check
1586           whether any have their PME flag set. The assumption is that
1587           we'll wake up often enough anyway that this won't be a huge
1588           hit, and the power savings from the devices will still be a
1589           win. */
1590
1591        if (dev->pme_poll) {
1592                struct pci_pme_device *pme_dev;
1593                if (enable) {
1594                        pme_dev = kmalloc(sizeof(struct pci_pme_device),
1595                                          GFP_KERNEL);
1596                        if (!pme_dev)
1597                                goto out;
1598                        pme_dev->dev = dev;
1599                        mutex_lock(&pci_pme_list_mutex);
1600                        list_add(&pme_dev->list, &pci_pme_list);
1601                        if (list_is_singular(&pci_pme_list))
1602                                schedule_delayed_work(&pci_pme_work,
1603                                                      msecs_to_jiffies(PME_TIMEOUT));
1604                        mutex_unlock(&pci_pme_list_mutex);
1605                } else {
1606                        mutex_lock(&pci_pme_list_mutex);
1607                        list_for_each_entry(pme_dev, &pci_pme_list, list) {
1608                                if (pme_dev->dev == dev) {
1609                                        list_del(&pme_dev->list);
1610                                        kfree(pme_dev);
1611                                        break;
1612                                }
1613                        }
1614                        mutex_unlock(&pci_pme_list_mutex);
1615                }
1616        }
1617
1618out:
1619        dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1620}
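/*
 * Usage sketch (added for illustration, not part of the original file):
 * how the two helpers above combine.  Most drivers should prefer the
 * higher-level pci_enable_wake()/pci_wake_from_d3() wrappers; the "foo"
 * name is hypothetical.
 *
 *	static void foo_arm_pme(struct pci_dev *pdev)
 *	{
 *		// Only enable PME# if the device can assert it from D3hot.
 *		if (pci_pme_capable(pdev, PCI_D3hot))
 *			pci_pme_active(pdev, true);
 *	}
 */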
1621
1622/**
1623 * __pci_enable_wake - enable PCI device as wakeup event source
1624 * @dev: PCI device affected
1625 * @state: PCI state from which device will issue wakeup events
1626 * @runtime: True if the events are to be generated at run time
1627 * @enable: True to enable event generation; false to disable
1628 *
1629 * This enables the device as a wakeup event source, or disables it.
1630 * When such events involve platform-specific hooks, those hooks are
1631 * called automatically by this routine.
1632 *
1633 * Devices with legacy power management (no standard PCI PM capabilities)
1634 * always require such platform hooks.
1635 *
1636 * RETURN VALUE:
1637 * 0 is returned on success
1638 * -EINVAL is returned if device is not supposed to wake up the system
1639 * Error code depending on the platform is returned if both the platform and
1640 * the native mechanism fail to enable the generation of wake-up events
1641 */
1642int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1643                      bool runtime, bool enable)
1644{
1645        int ret = 0;
1646
1647        if (enable && !runtime && !device_may_wakeup(&dev->dev))
1648                return -EINVAL;
1649
1650        /* Don't do the same thing twice in a row for one device. */
1651        if (!!enable == !!dev->wakeup_prepared)
1652                return 0;
1653
1654        /*
1655         * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1656         * Anderson we should be doing PME# wake enable followed by ACPI wake
1657         * enable.  To disable wake-up we call the platform first, for symmetry.
1658         */
1659
1660        if (enable) {
1661                int error;
1662
1663                if (pci_pme_capable(dev, state))
1664                        pci_pme_active(dev, true);
1665                else
1666                        ret = 1;
1667                error = runtime ? platform_pci_run_wake(dev, true) :
1668                                        platform_pci_sleep_wake(dev, true);
1669                if (ret)
1670                        ret = error;
1671                if (!ret)
1672                        dev->wakeup_prepared = true;
1673        } else {
1674                if (runtime)
1675                        platform_pci_run_wake(dev, false);
1676                else
1677                        platform_pci_sleep_wake(dev, false);
1678                pci_pme_active(dev, false);
1679                dev->wakeup_prepared = false;
1680        }
1681
1682        return ret;
1683}
1684EXPORT_SYMBOL(__pci_enable_wake);
1685
1686/**
1687 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1688 * @dev: PCI device to prepare
1689 * @enable: True to enable wake-up event generation; false to disable
1690 *
1691 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1692 * and this function allows them to set that up cleanly - pci_enable_wake()
1693 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1694 * ordering constraints.
1695 *
1696 * This function only returns an error code if the device is not capable of
1697 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1698 * enable wake-up power for it.
1699 */
1700int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1701{
1702        return pci_pme_capable(dev, PCI_D3cold) ?
1703                        pci_enable_wake(dev, PCI_D3cold, enable) :
1704                        pci_enable_wake(dev, PCI_D3hot, enable);
1705}
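/*
 * Usage sketch (illustrative, not part of the original file): a legacy
 * suspend hook arming wake-up from D3.  "foo_suspend" is hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
 *		return pci_set_power_state(pdev,
 *					   pci_choose_state(pdev, state));
 *	}
 */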
1706
1707/**
1708 * pci_target_state - find an appropriate low power state for a given PCI dev
1709 * @dev: PCI device
1710 *
1711 * Use underlying platform code to find a supported low power state for @dev.
1712 * If the platform can't manage @dev, return the deepest state from which it
1713 * can generate wake events, based on any available PME info.
1714 */
1715pci_power_t pci_target_state(struct pci_dev *dev)
1716{
1717        pci_power_t target_state = PCI_D3hot;
1718
1719        if (platform_pci_power_manageable(dev)) {
1720                /*
1721                 * Call the platform to choose the target state of the device
1722                 * and enable wake-up from this state if supported.
1723                 */
1724                pci_power_t state = platform_pci_choose_state(dev);
1725
1726                switch (state) {
1727                case PCI_POWER_ERROR:
1728                case PCI_UNKNOWN:
1729                        break;
1730                case PCI_D1:
1731                case PCI_D2:
1732                        if (pci_no_d1d2(dev))
1733                                break;
1734                default:
1735                        target_state = state;
1736                }
1737        } else if (!dev->pm_cap) {
1738                target_state = PCI_D0;
1739        } else if (device_may_wakeup(&dev->dev)) {
1740                /*
1741                 * Find the deepest state from which the device can generate
1742                 * wake-up events, make it the target state and enable device
1743                 * to generate PME#.
1744                 */
1745                if (dev->pme_support) {
1746                        while (target_state
1747                              && !(dev->pme_support & (1 << target_state)))
1748                                target_state--;
1749                }
1750        }
1751
1752        return target_state;
1753}
1754
1755/**
1756 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1757 * @dev: Device to handle.
1758 *
1759 * Choose the power state appropriate for the device depending on whether
1760 * it can wake up the system and/or is power manageable by the platform
1761 * (PCI_D3hot is the default) and put the device into that state.
1762 */
1763int pci_prepare_to_sleep(struct pci_dev *dev)
1764{
1765        pci_power_t target_state = pci_target_state(dev);
1766        int error;
1767
1768        if (target_state == PCI_POWER_ERROR)
1769                return -EIO;
1770
1771        /* D3cold during system suspend/hibernate is not supported */
1772        if (target_state > PCI_D3hot)
1773                target_state = PCI_D3hot;
1774
1775        pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1776
1777        error = pci_set_power_state(dev, target_state);
1778
1779        if (error)
1780                pci_enable_wake(dev, target_state, false);
1781
1782        return error;
1783}
1784
1785/**
1786 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1787 * @dev: Device to handle.
1788 *
1789 * Disable device's system wake-up capability and put it into D0.
1790 */
1791int pci_back_from_sleep(struct pci_dev *dev)
1792{
1793        pci_enable_wake(dev, PCI_D0, false);
1794        return pci_set_power_state(dev, PCI_D0);
1795}
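/*
 * Sketch (illustrative, not part of the original file): the kind of
 * pairing the PCI PM core performs around system sleep with the two
 * helpers above.  The "foo" hooks are hypothetical.
 *
 *	static int foo_suspend_noirq(struct pci_dev *pci_dev)
 *	{
 *		return pci_prepare_to_sleep(pci_dev);
 *	}
 *
 *	static int foo_resume_noirq(struct pci_dev *pci_dev)
 *	{
 *		return pci_back_from_sleep(pci_dev);
 *	}
 */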
1796
1797/**
1798 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1799 * @dev: PCI device being suspended.
1800 *
1801 * Prepare @dev to generate wake-up events at run time and put it into a low
1802 * power state.
1803 */
1804int pci_finish_runtime_suspend(struct pci_dev *dev)
1805{
1806        pci_power_t target_state = pci_target_state(dev);
1807        int error;
1808
1809        if (target_state == PCI_POWER_ERROR)
1810                return -EIO;
1811
1812        dev->runtime_d3cold = target_state == PCI_D3cold;
1813
1814        __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1815
1816        error = pci_set_power_state(dev, target_state);
1817
1818        if (error) {
1819                __pci_enable_wake(dev, target_state, true, false);
1820                dev->runtime_d3cold = false;
1821        }
1822
1823        return error;
1824}
1825
1826/**
1827 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1828 * @dev: Device to check.
1829 *
1830 * Return true if the device itself is capable of generating wake-up events
1831 * (through the platform or using the native PCIe PME) or if the device supports
1832 * PME and one of its upstream bridges can generate wake-up events.
1833 */
1834bool pci_dev_run_wake(struct pci_dev *dev)
1835{
1836        struct pci_bus *bus = dev->bus;
1837
1838        if (device_run_wake(&dev->dev))
1839                return true;
1840
1841        if (!dev->pme_support)
1842                return false;
1843
1844        while (bus->parent) {
1845                struct pci_dev *bridge = bus->self;
1846
1847                if (device_run_wake(&bridge->dev))
1848                        return true;
1849
1850                bus = bus->parent;
1851        }
1852
1853        /* We have reached the root bus. */
1854        if (bus->bridge)
1855                return device_run_wake(bus->bridge);
1856
1857        return false;
1858}
1859EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1860
1861void pci_config_pm_runtime_get(struct pci_dev *pdev)
1862{
1863        struct device *dev = &pdev->dev;
1864        struct device *parent = dev->parent;
1865
1866        if (parent)
1867                pm_runtime_get_sync(parent);
1868        pm_runtime_get_noresume(dev);
1869        /*
1870         * pdev->current_state is set to PCI_D3cold during suspending,
1871         * so wait until suspending completes
1872         */
1873        pm_runtime_barrier(dev);
1874        /*
1875         * Only need to resume devices in D3cold, because config
1876         * registers are still accessible for devices suspended but
1877         * not in D3cold.
1878         */
1879        if (pdev->current_state == PCI_D3cold)
1880                pm_runtime_resume(dev);
1881}
1882
1883void pci_config_pm_runtime_put(struct pci_dev *pdev)
1884{
1885        struct device *dev = &pdev->dev;
1886        struct device *parent = dev->parent;
1887
1888        pm_runtime_put(dev);
1889        if (parent)
1890                pm_runtime_put_sync(parent);
1891}
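/*
 * Sketch (illustrative, not part of the original file): callers such as
 * the PCI sysfs config accessors are expected to bracket user-triggered
 * config reads with this get/put pair so a device parked in D3cold is
 * resumed first.  "foo_read_vendor" is hypothetical.
 *
 *	static int foo_read_vendor(struct pci_dev *pdev, u16 *vendor)
 *	{
 *		int ret;
 *
 *		pci_config_pm_runtime_get(pdev);
 *		ret = pci_read_config_word(pdev, PCI_VENDOR_ID, vendor);
 *		pci_config_pm_runtime_put(pdev);
 *		return ret;
 *	}
 */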
1892
1893/**
1894 * pci_pm_init - Initialize PM functions of given PCI device
1895 * @dev: PCI device to handle.
1896 */
1897void pci_pm_init(struct pci_dev *dev)
1898{
1899        int pm;
1900        u16 pmc;
1901
1902        pm_runtime_forbid(&dev->dev);
1903        device_enable_async_suspend(&dev->dev);
1904        dev->wakeup_prepared = false;
1905
1906        dev->pm_cap = 0;
1907
1908        /* find PCI PM capability in list */
1909        pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1910        if (!pm)
1911                return;
1912        /* Check device's ability to generate PME# */
1913        pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1914
1915        if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1916                dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1917                        pmc & PCI_PM_CAP_VER_MASK);
1918                return;
1919        }
1920
1921        dev->pm_cap = pm;
1922        dev->d3_delay = PCI_PM_D3_WAIT;
1923        dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
1924        dev->d3cold_allowed = true;
1925
1926        dev->d1_support = false;
1927        dev->d2_support = false;
1928        if (!pci_no_d1d2(dev)) {
1929                if (pmc & PCI_PM_CAP_D1)
1930                        dev->d1_support = true;
1931                if (pmc & PCI_PM_CAP_D2)
1932                        dev->d2_support = true;
1933
1934                if (dev->d1_support || dev->d2_support)
1935                        dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1936                                   dev->d1_support ? " D1" : "",
1937                                   dev->d2_support ? " D2" : "");
1938        }
1939
1940        pmc &= PCI_PM_CAP_PME_MASK;
1941        if (pmc) {
1942                dev_printk(KERN_DEBUG, &dev->dev,
1943                         "PME# supported from%s%s%s%s%s\n",
1944                         (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1945                         (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1946                         (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1947                         (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1948                         (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1949                dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1950                dev->pme_poll = true;
1951                /*
1952                 * Make device's PM flags reflect the wake-up capability, but
1953                 * let user space enable it to wake up the system as needed.
1954                 */
1955                device_set_wakeup_capable(&dev->dev, true);
1956                /* Disable the PME# generation functionality */
1957                pci_pme_active(dev, false);
1958        } else {
1959                dev->pme_support = 0;
1960        }
1961}
1962
1963/**
1964 * platform_pci_wakeup_init - init platform wakeup if present
1965 * @dev: PCI device
1966 *
1967 * Some devices don't have PCI PM caps but can still generate wakeup
1968 * events through platform methods (like ACPI events).  If @dev supports
1969 * platform wakeup events, set the device flag to indicate as much.  This
1970 * may be redundant if the device also supports PCI PM caps, but double
1971 * initialization should be safe in that case.
1972 */
1973void platform_pci_wakeup_init(struct pci_dev *dev)
1974{
1975        if (!platform_pci_can_wakeup(dev))
1976                return;
1977
1978        device_set_wakeup_capable(&dev->dev, true);
1979        platform_pci_sleep_wake(dev, false);
1980}
1981
1982static void pci_add_saved_cap(struct pci_dev *pci_dev,
1983        struct pci_cap_saved_state *new_cap)
1984{
1985        hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1986}
1987
1988/**
1989 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1990 * @dev: the PCI device
1991 * @cap: the capability to allocate the buffer for
1992 * @size: requested size of the buffer
1993 */
1994static int pci_add_cap_save_buffer(
1995        struct pci_dev *dev, char cap, unsigned int size)
1996{
1997        int pos;
1998        struct pci_cap_saved_state *save_state;
1999
2000        pos = pci_find_capability(dev, cap);
2001        if (pos <= 0)
2002                return 0;
2003
2004        save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2005        if (!save_state)
2006                return -ENOMEM;
2007
2008        save_state->cap.cap_nr = cap;
2009        save_state->cap.size = size;
2010        pci_add_saved_cap(dev, save_state);
2011
2012        return 0;
2013}
2014
2015/**
2016 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2017 * @dev: the PCI device
2018 */
2019void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2020{
2021        int error;
2022
2023        error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2024                                        PCI_EXP_SAVE_REGS * sizeof(u16));
2025        if (error)
2026                dev_err(&dev->dev,
2027                        "unable to preallocate PCI Express save buffer\n");
2028
2029        error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2030        if (error)
2031                dev_err(&dev->dev,
2032                        "unable to preallocate PCI-X save buffer\n");
2033}
2034
2035void pci_free_cap_save_buffers(struct pci_dev *dev)
2036{
2037        struct pci_cap_saved_state *tmp;
2038        struct hlist_node *pos, *n;
2039
2040        hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
2041                kfree(tmp);
2042}
2043
2044/**
2045 * pci_enable_ari - enable ARI forwarding if hardware supports it
2046 * @dev: the PCI device
2047 */
2048void pci_enable_ari(struct pci_dev *dev)
2049{
2050        u32 cap;
2051        struct pci_dev *bridge;
2052
2053        if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2054                return;
2055
2056        if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI))
2057                return;
2058
2059        bridge = dev->bus->self;
2060        if (!bridge)
2061                return;
2062
2063        pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2064        if (!(cap & PCI_EXP_DEVCAP2_ARI))
2065                return;
2066
2067        pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI);
2068        bridge->ari_enabled = 1;
2069}
2070
2071/**
2072 * pci_enable_ido - enable ID-based Ordering on a device
2073 * @dev: the PCI device
2074 * @type: which types of IDO to enable
2075 *
2076 * Enable ID-based ordering on @dev.  @type can contain the bits
2077 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2078 * which types of transactions are allowed to be re-ordered.
2079 */
2080void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2081{
2082        u16 ctrl = 0;
2083
2084        if (type & PCI_EXP_IDO_REQUEST)
2085                ctrl |= PCI_EXP_IDO_REQ_EN;
2086        if (type & PCI_EXP_IDO_COMPLETION)
2087                ctrl |= PCI_EXP_IDO_CMP_EN;
2088        if (ctrl)
2089                pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
2090}
2091EXPORT_SYMBOL(pci_enable_ido);
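/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * opting in to IDO for both request and completion TLPs; pci_disable_ido()
 * takes the same mask on teardown.
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 */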
2092
2093/**
2094 * pci_disable_ido - disable ID-based ordering on a device
2095 * @dev: the PCI device
2096 * @type: which types of IDO to disable
2097 */
2098void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2099{
2100        u16 ctrl = 0;
2101
2102        if (type & PCI_EXP_IDO_REQUEST)
2103                ctrl |= PCI_EXP_IDO_REQ_EN;
2104        if (type & PCI_EXP_IDO_COMPLETION)
2105                ctrl |= PCI_EXP_IDO_CMP_EN;
2106        if (ctrl)
2107                pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
2108}
2109EXPORT_SYMBOL(pci_disable_ido);
2110
2111/**
2112 * pci_enable_obff - enable optimized buffer flush/fill
2113 * @dev: PCI device
2114 * @type: type of signaling to use
2115 *
2116 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
2117 * signaling if possible, falling back to message signaling only if
2118 * WAKE# isn't supported.  @type should indicate whether the PCIe link
2119 * should be brought out of L0s or L1 to send the message.  It should be
2120 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2121 *
2122 * If your device can benefit from receiving all messages, even at the
2123 * power cost of bringing the link back up from a low power state, use
2124 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2125 * preferred type).
2126 *
2127 * RETURNS:
2128 * Zero on success, appropriate error number on failure.
2129 */
2130int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2131{
2132        u32 cap;
2133        u16 ctrl;
2134        int ret;
2135
2136        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
2137        if (!(cap & PCI_EXP_OBFF_MASK))
2138                return -ENOTSUPP; /* no OBFF support at all */
2139
2140        /* Make sure the topology supports OBFF as well */
2141        if (dev->bus->self) {
2142                ret = pci_enable_obff(dev->bus->self, type);
2143                if (ret)
2144                        return ret;
2145        }
2146
2147        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
2148        if (cap & PCI_EXP_OBFF_WAKE)
2149                ctrl |= PCI_EXP_OBFF_WAKE_EN;
2150        else {
2151                switch (type) {
2152                case PCI_EXP_OBFF_SIGNAL_L0:
2153                        if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2154                                ctrl |= PCI_EXP_OBFF_MSGA_EN;
2155                        break;
2156                case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2157                        ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2158                        ctrl |= PCI_EXP_OBFF_MSGB_EN;
2159                        break;
2160                default:
2161                        WARN(1, "bad OBFF signal type\n");
2162                        return -ENOTSUPP;
2163                }
2164        }
2165        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl);
2166
2167        return 0;
2168}
2169EXPORT_SYMBOL(pci_enable_obff);
2170
2171/**
2172 * pci_disable_obff - disable optimized buffer flush/fill
2173 * @dev: PCI device
2174 *
2175 * Disable OBFF on @dev.
2176 */
2177void pci_disable_obff(struct pci_dev *dev)
2178{
2179        pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
2180}
2181EXPORT_SYMBOL(pci_disable_obff);
2182
2183/**
2184 * pci_ltr_supported - check whether a device supports LTR
2185 * @dev: PCI device
2186 *
2187 * RETURNS:
2188 * True if @dev supports latency tolerance reporting, false otherwise.
2189 */
2190static bool pci_ltr_supported(struct pci_dev *dev)
2191{
2192        u32 cap;
2193
2194        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
2195
2196        return cap & PCI_EXP_DEVCAP2_LTR;
2197}
2198
2199/**
2200 * pci_enable_ltr - enable latency tolerance reporting
2201 * @dev: PCI device
2202 *
2203 * Enable LTR on @dev if possible, which means enabling it first on
2204 * upstream ports.
2205 *
2206 * RETURNS:
2207 * Zero on success, errno on failure.
2208 */
2209int pci_enable_ltr(struct pci_dev *dev)
2210{
2211        int ret;
2212
2213        /* Only primary function can enable/disable LTR */
2214        if (PCI_FUNC(dev->devfn) != 0)
2215                return -EINVAL;
2216
2217        if (!pci_ltr_supported(dev))
2218                return -ENOTSUPP;
2219
2220        /* Enable upstream ports first */
2221        if (dev->bus->self) {
2222                ret = pci_enable_ltr(dev->bus->self);
2223                if (ret)
2224                        return ret;
2225        }
2226
2227        return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
2228}
2229EXPORT_SYMBOL(pci_enable_ltr);
2230
2231/**
2232 * pci_disable_ltr - disable latency tolerance reporting
2233 * @dev: PCI device
2234 */
2235void pci_disable_ltr(struct pci_dev *dev)
2236{
2237        /* Only primary function can enable/disable LTR */
2238        if (PCI_FUNC(dev->devfn) != 0)
2239                return;
2240
2241        if (!pci_ltr_supported(dev))
2242                return;
2243
2244        pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
2245}
2246EXPORT_SYMBOL(pci_disable_ltr);
2247
2248static int __pci_ltr_scale(int *val)
2249{
2250        int scale = 0;
2251
2252        while (*val > 1023) {
2253                *val = (*val + 31) / 32;
2254                scale++;
2255        }
2256        return scale;
2257}
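/*
 * Worked example (added for illustration): LTR latencies are encoded as a
 * latency value of at most 1023 plus a scale, where each scale step
 * multiplies the value by 32.  For 70000 ns the loop above rounds up
 * twice: (70000 + 31) / 32 = 2188, then (2188 + 31) / 32 = 69, yielding
 * value 69 with scale 2, i.e. 69 * 32 * 32 = 70656 ns >= 70000 ns.
 */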
2258
2259/**
2260 * pci_set_ltr - set LTR latency values
2261 * @dev: PCI device
2262 * @snoop_lat_ns: snoop latency in nanoseconds
2263 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2264 *
2265 * Figure out the scale and set the LTR values accordingly.
2266 */
2267int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2268{
2269        int pos, ret, snoop_scale, nosnoop_scale;
2270        u16 val;
2271
2272        if (!pci_ltr_supported(dev))
2273                return -ENOTSUPP;
2274
2275        snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2276        nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2277
2278        if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2279            nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2280                return -EINVAL;
2281
2282        if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2283            (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2284                return -EINVAL;
2285
2286        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2287        if (!pos)
2288                return -ENOTSUPP;
2289
2290        val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2291        ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2292        if (ret)
2293                return -EIO;
2294
2295        val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2296        ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2297        if (ret)
2298                return -EIO;
2299
2300        return 0;
2301}
2302EXPORT_SYMBOL(pci_set_ltr);
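/*
 * Usage sketch (illustrative, not part of the original file; the latency
 * figures are arbitrary): enable LTR on the hierarchy, then program the
 * maximum snoop/no-snoop latencies the device can tolerate.
 *
 *	if (!pci_enable_ltr(pdev))
 *		pci_set_ltr(pdev, 70000, 30000);
 */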
2303
2304static int pci_acs_enable;
2305
2306/**
2307 * pci_request_acs - ask for ACS to be enabled if supported
2308 */
2309void pci_request_acs(void)
2310{
2311        pci_acs_enable = 1;
2312}
2313
2314/**
2315 * pci_enable_acs - enable ACS if hardware supports it
2316 * @dev: the PCI device
2317 */
2318void pci_enable_acs(struct pci_dev *dev)
2319{
2320        int pos;
2321        u16 cap;
2322        u16 ctrl;
2323
2324        if (!pci_acs_enable)
2325                return;
2326
2327        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2328        if (!pos)
2329                return;
2330
2331        pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2332        pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2333
2334        /* Source Validation */
2335        ctrl |= (cap & PCI_ACS_SV);
2336
2337        /* P2P Request Redirect */
2338        ctrl |= (cap & PCI_ACS_RR);
2339
2340        /* P2P Completion Redirect */
2341        ctrl |= (cap & PCI_ACS_CR);
2342
2343        /* Upstream Forwarding */
2344        ctrl |= (cap & PCI_ACS_UF);
2345
2346        pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2347}
2348
2349/**
2350 * pci_acs_enabled - test ACS against required flags for a given device
2351 * @pdev: device to test
2352 * @acs_flags: required PCI ACS flags
2353 *
2354 * Return true if the device supports the provided flags.  Automatically
2355 * filters out flags that are not implemented on multifunction devices.
2356 */
2357bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2358{
2359        int pos, ret;
2360        u16 ctrl;
2361
2362        ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2363        if (ret >= 0)
2364                return ret > 0;
2365
2366        if (!pci_is_pcie(pdev))
2367                return false;
2368
2369        /* Filter out flags not applicable to multifunction */
2370        if (pdev->multifunction)
2371                acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
2372                              PCI_ACS_EC | PCI_ACS_DT);
2373
2374        if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
2375            pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
2376            pdev->multifunction) {
2377                pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2378                if (!pos)
2379                        return false;
2380
2381                pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2382                if ((ctrl & acs_flags) != acs_flags)
2383                        return false;
2384        }
2385
2386        return true;
2387}
2388
2389/**
2390 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
2391 * @start: starting downstream device
2392 * @end: ending upstream device or NULL to search to the root bus
2393 * @acs_flags: required flags
2394 *
2395 * Walk up a device tree from start to end testing PCI ACS support.  If
2396 * any step along the way does not support the required flags, return false.
2397 */
2398bool pci_acs_path_enabled(struct pci_dev *start,
2399                          struct pci_dev *end, u16 acs_flags)
2400{
2401        struct pci_dev *pdev, *parent = start;
2402
2403        do {
2404                pdev = parent;
2405
2406                if (!pci_acs_enabled(pdev, acs_flags))
2407                        return false;
2408
2409                if (pci_is_root_bus(pdev->bus))
2410                        return (end == NULL);
2411
2412                parent = pdev->bus->self;
2413        } while (pdev != end);
2414
2415        return true;
2416}
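/*
 * Usage sketch (illustrative, not part of the original file): an
 * IOMMU-group style check that peer-to-peer requests from a device are
 * redirected upstream the whole way to the root complex.
 *
 *	static bool foo_dev_isolated(struct pci_dev *pdev)
 *	{
 *		return pci_acs_path_enabled(pdev, NULL,
 *					    PCI_ACS_RR | PCI_ACS_CR);
 *	}
 */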
2417
2418/**
2419 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2420 * @dev: the PCI device
2421 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2422 *
2423 * Perform INTx swizzling for a device behind one level of bridge.  This is
2424 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2425 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2426 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2427 * the PCI Express Base Specification, Revision 2.1)
2428 */
2429u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2430{
2431        int slot;
2432
2433        if (pci_ari_enabled(dev->bus))
2434                slot = 0;
2435        else
2436                slot = PCI_SLOT(dev->devfn);
2437
2438        return (((pin - 1) + slot) % 4) + 1;
2439}
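/*
 * Worked example (added for illustration): a device in slot 3 raising
 * INTB (pin 2) behind one bridge yields (((2 - 1) + 3) % 4) + 1 = 1,
 * i.e. the bridge sees the interrupt on its INTA.
 */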
2440
2441int
2442pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2443{
2444        u8 pin;
2445
2446        pin = dev->pin;
2447        if (!pin)
2448                return -1;
2449
2450        while (!pci_is_root_bus(dev->bus)) {
2451                pin = pci_swizzle_interrupt_pin(dev, pin);
2452                dev = dev->bus->self;
2453        }
2454        *bridge = dev;
2455        return pin;
2456}
2457
2458/**
2459 * pci_common_swizzle - swizzle INTx all the way to root bridge
2460 * @dev: the PCI device
2461 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2462 *
2463 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2464 * bridges all the way up to a PCI root bus.
2465 */
2466u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2467{
2468        u8 pin = *pinp;
2469
2470        while (!pci_is_root_bus(dev->bus)) {
2471                pin = pci_swizzle_interrupt_pin(dev, pin);
2472                dev = dev->bus->self;
2473        }
2474        *pinp = pin;
2475        return PCI_SLOT(dev->devfn);
2476}
2477
2478/**
2479 *      pci_release_region - Release a PCI BAR
2480 *      @pdev: PCI device whose resources were previously reserved by pci_request_region
2481 *      @bar: BAR to release
2482 *
2483 *      Releases the PCI I/O and memory resources previously reserved by a
2484 *      successful call to pci_request_region.  Call this function only
2485 *      after all use of the PCI regions has ceased.
2486 */
2487void pci_release_region(struct pci_dev *pdev, int bar)
2488{
2489        struct pci_devres *dr;
2490
2491        if (pci_resource_len(pdev, bar) == 0)
2492                return;
2493        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2494                release_region(pci_resource_start(pdev, bar),
2495                                pci_resource_len(pdev, bar));
2496        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2497                release_mem_region(pci_resource_start(pdev, bar),
2498                                pci_resource_len(pdev, bar));
2499
2500        dr = find_pci_dr(pdev);
2501        if (dr)
2502                dr->region_mask &= ~(1 << bar);
2503}
2504
2505/**
2506 *      __pci_request_region - Reserve PCI I/O and memory resource
2507 *      @pdev: PCI device whose resources are to be reserved
2508 *      @bar: BAR to be reserved
2509 *      @res_name: Name to be associated with resource.
2510 *      @exclusive: whether the region access is exclusive or not
2511 *
2512 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
2513 *      being reserved by owner @res_name.  Do not access any
2514 *      address inside the PCI regions unless this call returns
2515 *      successfully.
2516 *
2517 *      If @exclusive is set, then the region is marked so that userspace
2518 *      is explicitly not allowed to map the resource via /dev/mem or
2519 *      sysfs MMIO access.
2520 *
2521 *      Returns 0 on success, or %EBUSY on error.  A warning
2522 *      message is also printed on failure.
2523 */
2524static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2525                                                                        int exclusive)
2526{
2527        struct pci_devres *dr;
2528
2529        if (pci_resource_len(pdev, bar) == 0)
2530                return 0;
2531
2532        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2533                if (!request_region(pci_resource_start(pdev, bar),
2534                            pci_resource_len(pdev, bar), res_name))
2535                        goto err_out;
2536        }
2537        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2538                if (!__request_mem_region(pci_resource_start(pdev, bar),
2539                                        pci_resource_len(pdev, bar), res_name,
2540                                        exclusive))
2541                        goto err_out;
2542        }
2543
2544        dr = find_pci_dr(pdev);
2545        if (dr)
2546                dr->region_mask |= 1 << bar;
2547
2548        return 0;
2549
2550err_out:
2551        dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2552                 &pdev->resource[bar]);
2553        return -EBUSY;
2554}
2555
2556/**
2557 *      pci_request_region - Reserve PCI I/O and memory resource
2558 *      @pdev: PCI device whose resources are to be reserved
2559 *      @bar: BAR to be reserved
2560 *      @res_name: Name to be associated with resource
2561 *
2562 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
2563 *      being reserved by owner @res_name.  Do not access any
2564 *      address inside the PCI regions unless this call returns
2565 *      successfully.
2566 *
2567 *      Returns 0 on success, or %EBUSY on error.  A warning
2568 *      message is also printed on failure.
2569 */
2570int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2571{
2572        return __pci_request_region(pdev, bar, res_name, 0);
2573}
2574
2575/**
2576 *      pci_request_region_exclusive - Reserve PCI I/O and memory resource
2577 *      @pdev: PCI device whose resources are to be reserved
2578 *      @bar: BAR to be reserved
2579 *      @res_name: Name to be associated with resource.
2580 *
2581 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
2582 *      being reserved by owner @res_name.  Do not access any
2583 *      address inside the PCI regions unless this call returns
2584 *      successfully.
2585 *
2586 *      Returns 0 on success, or %EBUSY on error.  A warning
2587 *      message is also printed on failure.
2588 *
2589 *      The key difference _exclusive makes is that userspace is
2590 *      explicitly not allowed to map the resource via /dev/mem or
2591 *      sysfs.
2592 */
2593int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2594{
2595        return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2596}
2597/**
2598 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2599 * @pdev: PCI device whose resources were previously reserved
2600 * @bars: Bitmask of BARs to be released
2601 *
2602 * Release selected PCI I/O and memory resources previously reserved.
2603 * Call this function only after all use of the PCI regions has ceased.
2604 */
2605void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2606{
2607        int i;
2608
2609        for (i = 0; i < 6; i++)
2610                if (bars & (1 << i))
2611                        pci_release_region(pdev, i);
2612}
2613
2614int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2615                                 const char *res_name, int excl)
2616{
2617        int i;
2618
2619        for (i = 0; i < 6; i++)
2620                if (bars & (1 << i))
2621                        if (__pci_request_region(pdev, i, res_name, excl))
2622                                goto err_out;
2623        return 0;
2624
2625err_out:
2626        while (--i >= 0)
2627                if (bars & (1 << i))
2628                        pci_release_region(pdev, i);
2629
2630        return -EBUSY;
2631}
2632
2633
2634/**
2635 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2636 * @pdev: PCI device whose resources are to be reserved
2637 * @bars: Bitmask of BARs to be requested
2638 * @res_name: Name to be associated with resource
2639 */
2640int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2641                                 const char *res_name)
2642{
2643        return __pci_request_selected_regions(pdev, bars, res_name, 0);
2644}
2645
2646int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2647                                 int bars, const char *res_name)
2648{
2649        return __pci_request_selected_regions(pdev, bars, res_name,
2650                        IORESOURCE_EXCLUSIVE);
2651}
2652
2653/**
2654 *      pci_release_regions - Release reserved PCI I/O and memory resources
2655 *      @pdev: PCI device whose resources were previously reserved by pci_request_regions
2656 *
2657 *      Releases all PCI I/O and memory resources previously reserved by a
2658 *      successful call to pci_request_regions.  Call this function only
2659 *      after all use of the PCI regions has ceased.
2660 */
2662void pci_release_regions(struct pci_dev *pdev)
2663{
2664        pci_release_selected_regions(pdev, (1 << 6) - 1);
2665}
2666
2667/**
2668 *      pci_request_regions - Reserve PCI I/O and memory resources
2669 *      @pdev: PCI device whose resources are to be reserved
2670 *      @res_name: Name to be associated with resource.
2671 *
2672 *      Mark all PCI regions associated with PCI device @pdev as
2673 *      being reserved by owner @res_name.  Do not access any
2674 *      address inside the PCI regions unless this call returns
2675 *      successfully.
2676 *
2677 *      Returns 0 on success, or %EBUSY on error.  A warning
2678 *      message is also printed on failure.
2679 */
2680int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2681{
2682        return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2683}
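/*
 * Usage sketch (illustrative, not part of the original file): the
 * canonical probe-time sequence built around pci_request_regions().
 * The "foo" names are hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc)
 *			goto err_disable;
 *		regs = pci_iomap(pdev, 0, 0);
 *		if (!regs) {
 *			rc = -ENOMEM;
 *			goto err_release;
 *		}
 *		pci_set_master(pdev);
 *		return 0;
 *
 *	err_release:
 *		pci_release_regions(pdev);
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return rc;
 *	}
 */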
2684
2685/**
2686 *      pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2687 *      @pdev: PCI device whose resources are to be reserved
2688 *      @res_name: Name to be associated with resource.
2689 *
2690 *      Mark all PCI regions associated with PCI device @pdev as
2691 *      being reserved by owner @res_name.  Do not access any
2692 *      address inside the PCI regions unless this call returns
2693 *      successfully.
2694 *
2695 *      pci_request_regions_exclusive() will mark the region so that
2696 *      /dev/mem and the sysfs MMIO access will not be allowed.
2697 *
2698 *      Returns 0 on success, or %EBUSY on error.  A warning
2699 *      message is also printed on failure.
2700 */
2701int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2702{
2703        return pci_request_selected_regions_exclusive(pdev,
2704                                        ((1 << 6) - 1), res_name);
2705}
2706
2707static void __pci_set_master(struct pci_dev *dev, bool enable)
2708{
2709        u16 old_cmd, cmd;
2710
2711        pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2712        if (enable)
2713                cmd = old_cmd | PCI_COMMAND_MASTER;
2714        else
2715                cmd = old_cmd & ~PCI_COMMAND_MASTER;
2716        if (cmd != old_cmd) {
2717                dev_dbg(&dev->dev, "%s bus mastering\n",
2718                        enable ? "enabling" : "disabling");
2719                pci_write_config_word(dev, PCI_COMMAND, cmd);
2720        }
2721        dev->is_busmaster = enable;
2722}
2723
2724/**
2725 * pcibios_setup - process "pci=" kernel boot arguments
2726 * @str: string used to pass in "pci=" kernel boot arguments
2727 *
2728 * Process kernel boot arguments.  This is the default implementation.
2729 * Architecture specific implementations can override this as necessary.
2730 */
2731char * __weak __init pcibios_setup(char *str)
2732{
2733        return str;
2734}
2735
2736/**
2737 * pcibios_set_master - enable PCI bus-mastering for device dev
2738 * @dev: the PCI device to enable
2739 *
2740 * Enables PCI bus-mastering for the device.  This is the default
2741 * implementation.  Architecture specific implementations can override
2742 * this if necessary.
2743 */
2744void __weak pcibios_set_master(struct pci_dev *dev)
2745{
2746        u8 lat;
2747
2748        /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2749        if (pci_is_pcie(dev))
2750                return;
2751
2752        pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2753        if (lat < 16)
2754                lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2755        else if (lat > pcibios_max_latency)
2756                lat = pcibios_max_latency;
2757        else
2758                return;
2759        dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2760        pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2761}
2762
2763/**
2764 * pci_set_master - enables bus-mastering for device dev
2765 * @dev: the PCI device to enable
2766 *
2767 * Enables bus-mastering on the device and calls pcibios_set_master()
2768 * to do the needed arch specific settings.
2769 */
2770void pci_set_master(struct pci_dev *dev)
2771{
2772        __pci_set_master(dev, true);
2773        pcibios_set_master(dev);
2774}
2775
2776/**
2777 * pci_clear_master - disables bus-mastering for device dev
2778 * @dev: the PCI device to disable
2779 */
2780void pci_clear_master(struct pci_dev *dev)
2781{
2782        __pci_set_master(dev, false);
2783}
2784
2785/**
2786 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2787 * @dev: the PCI device for which MWI is to be enabled
2788 *
2789 * Helper function for pci_set_mwi.
2790 * Originally copied from drivers/net/acenic.c.
2791 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2792 *
2793 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2794 */
2795int pci_set_cacheline_size(struct pci_dev *dev)
2796{
2797        u8 cacheline_size;
2798
2799        if (!pci_cache_line_size)
2800                return -EINVAL;
2801
2802        /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2803           equal to or a multiple of the right value. */
2804        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2805        if (cacheline_size >= pci_cache_line_size &&
2806            (cacheline_size % pci_cache_line_size) == 0)
2807                return 0;
2808
2809        /* Write the correct value. */
2810        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2811        /* Read it back. */
2812        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2813        if (cacheline_size == pci_cache_line_size)
2814                return 0;
2815
2816        dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2817                   "supported\n", pci_cache_line_size << 2);
2818
2819        return -EINVAL;
2820}
2821EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2822
2823#ifdef PCI_DISABLE_MWI
2824int pci_set_mwi(struct pci_dev *dev)
2825{
2826        return 0;
2827}
2828
2829int pci_try_set_mwi(struct pci_dev *dev)
2830{
2831        return 0;
2832}
2833
2834void pci_clear_mwi(struct pci_dev *dev)
2835{
2836}
2837
2838#else
2839
2840/**
2841 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2842 * @dev: the PCI device for which MWI is enabled
2843 *
2844 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2845 *
2846 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2847 */
2848int
2849pci_set_mwi(struct pci_dev *dev)
2850{
2851        int rc;
2852        u16 cmd;
2853
2854        rc = pci_set_cacheline_size(dev);
2855        if (rc)
2856                return rc;
2857
2858        pci_read_config_word(dev, PCI_COMMAND, &cmd);
2859        if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2860                dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2861                cmd |= PCI_COMMAND_INVALIDATE;
2862                pci_write_config_word(dev, PCI_COMMAND, cmd);
2863        }
2864
2865        return 0;
2866}
2867
2868/**
2869 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2870 * @dev: the PCI device for which MWI is enabled
2871 *
2872 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2873 * Callers are not required to check the return value.
2874 *
2875 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2876 */
2877int pci_try_set_mwi(struct pci_dev *dev)
2878{
2879        int rc = pci_set_mwi(dev);
2880        return rc;
2881}
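/*
 * Usage note (added for illustration): most callers treat MWI as an
 * optional optimization and use the _try_ variant so that probe does not
 * fail when the cache line size cannot be programmed, e.g.:
 *
 *	pci_try_set_mwi(pdev);	// best effort; failure is not fatal
 */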
2882
2883/**
2884 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2885 * @dev: the PCI device to disable
2886 *
2887 * Disables PCI Memory-Write-Invalidate transaction on the device
2888 */
2889void
2890pci_clear_mwi(struct pci_dev *dev)
2891{
2892        u16 cmd;
2893
2894        pci_read_config_word(dev, PCI_COMMAND, &cmd);
2895        if (cmd & PCI_COMMAND_INVALIDATE) {
2896                cmd &= ~PCI_COMMAND_INVALIDATE;
2897                pci_write_config_word(dev, PCI_COMMAND, cmd);
2898        }
2899}
2900#endif /* ! PCI_DISABLE_MWI */
2901
2902/**
2903 * pci_intx - enables/disables PCI INTx for device dev
2904 * @pdev: the PCI device to operate on
2905 * @enable: boolean: whether to enable or disable PCI INTx
2906 *
2907 * Enables/disables PCI INTx for device dev
2908 */
2909void
2910pci_intx(struct pci_dev *pdev, int enable)
2911{
2912        u16 pci_command, new;
2913
2914        pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2915
2916        if (enable) {
2917                new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2918        } else {
2919                new = pci_command | PCI_COMMAND_INTX_DISABLE;
2920        }
2921
2922        if (new != pci_command) {
2923                struct pci_devres *dr;
2924
2925                pci_write_config_word(pdev, PCI_COMMAND, new);
2926
2927                dr = find_pci_dr(pdev);
2928                if (dr && !dr->restore_intx) {
2929                        dr->restore_intx = 1;
2930                        dr->orig_intx = !enable;
2931                }
2932        }
2933}
2934
2935/**
2936 * pci_intx_mask_supported - probe for INTx masking support
2937 * @dev: the PCI device to operate on
2938 *
2939 * Check if the device dev supports INTx masking via the config space
2940 * command word.
2941 */
2942bool pci_intx_mask_supported(struct pci_dev *dev)
2943{
2944        bool mask_supported = false;
2945        u16 orig, new;
2946
2947        if (dev->broken_intx_masking)
2948                return false;
2949
2950        pci_cfg_access_lock(dev);
2951
2952        pci_read_config_word(dev, PCI_COMMAND, &orig);
2953        pci_write_config_word(dev, PCI_COMMAND,
2954                              orig ^ PCI_COMMAND_INTX_DISABLE);
2955        pci_read_config_word(dev, PCI_COMMAND, &new);
2956
2957        /*
2958         * There's no way to protect against hardware bugs or detect them
2959         * reliably, but as long as we know what the value should be, let's
2960         * go ahead and check it.
2961         */
2962        if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2963                dev_err(&dev->dev, "Command register changed from "
2964                        "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2965        } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2966                mask_supported = true;
2967                pci_write_config_word(dev, PCI_COMMAND, orig);
2968        }
2969
2970        pci_cfg_access_unlock(dev);
2971        return mask_supported;
2972}
2973EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2974
2975static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2976{
2977        struct pci_bus *bus = dev->bus;
2978        bool mask_updated = true;
2979        u32 cmd_status_dword;
2980        u16 origcmd, newcmd;
2981        unsigned long flags;
2982        bool irq_pending;
2983
2984        /*
2985         * We do a single dword read to retrieve both command and status.
2986         * Document assumptions that make this possible.
2987         */
2988        BUILD_BUG_ON(PCI_COMMAND % 4);
2989        BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2990
2991        raw_spin_lock_irqsave(&pci_lock, flags);
2992
2993        bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2994
2995        irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2996
2997        /*
2998         * Check interrupt status register to see whether our device
2999         * triggered the interrupt (when masking) or the next IRQ is
3000         * already pending (when unmasking).
3001         */
3002        if (mask != irq_pending) {
3003                mask_updated = false;
3004                goto done;
3005        }
3006
3007        origcmd = cmd_status_dword;
3008        newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3009        if (mask)
3010                newcmd |= PCI_COMMAND_INTX_DISABLE;
3011        if (newcmd != origcmd)
3012                bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3013
3014done:
3015        raw_spin_unlock_irqrestore(&pci_lock, flags);
3016
3017        return mask_updated;
3018}
3019
3020/**
3021 * pci_check_and_mask_intx - mask INTx on pending interrupt
3022 * @dev: the PCI device to operate on
3023 *
3024 * Check if the device dev has its INTx line asserted, mask it and
3025 * return true in that case. False is returned if no interrupt was
3026 * pending.
3027 */
3028bool pci_check_and_mask_intx(struct pci_dev *dev)
3029{
3030        return pci_check_and_set_intx_mask(dev, true);
3031}
3032EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3033
3034/**
3035 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
3036 * @dev: the PCI device to operate on
3037 *
3038 * Check if the device dev has its INTx line asserted, unmask it if not
3039 * and return true. False is returned and the mask remains active if
3040 * there was still an interrupt pending.
3041 */
3042bool pci_check_and_unmask_intx(struct pci_dev *dev)
3043{
3044        return pci_check_and_set_intx_mask(dev, false);
3045}
3046EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
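/*
 * Sketch (illustrative, not part of the original file): how a shared-INTx
 * consumer, in the style of device-assignment code, might use the pair
 * above.  The "foo" structure is hypothetical.
 *
 *	static irqreturn_t foo_intx_handler(int irq, void *data)
 *	{
 *		struct foo_device *foo = data;
 *
 *		if (!pci_check_and_mask_intx(foo->pdev))
 *			return IRQ_NONE;	// interrupt was not ours
 *
 *		// ... service the device, then call
 *		// pci_check_and_unmask_intx() to re-enable INTx ...
 *		return IRQ_HANDLED;
 *	}
 */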
3047
3048/**
3049 * pci_msi_off - disables any MSI or MSI-X capabilities
3050 * @dev: the PCI device to operate on
3051 *
3052 * If you want to use MSI, see pci_enable_msi() and friends.
3053 * This is a lower-level primitive that allows us to disable
3054 * MSI operation at the device level.
3055 */
3056void pci_msi_off(struct pci_dev *dev)
3057{
3058        int pos;
3059        u16 control;
3060
3061        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3062        if (pos) {
3063                pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3064                control &= ~PCI_MSI_FLAGS_ENABLE;
3065                pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3066        }
3067        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3068        if (pos) {
3069                pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3070                control &= ~PCI_MSIX_FLAGS_ENABLE;
3071                pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3072        }
3073}
3074EXPORT_SYMBOL_GPL(pci_msi_off);
3075
3076int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3077{
3078        return dma_set_max_seg_size(&dev->dev, size);
3079}
3080EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3081
3082int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3083{
3084        return dma_set_seg_boundary(&dev->dev, mask);
3085}
3086EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3087
3088static int pcie_flr(struct pci_dev *dev, int probe)
3089{
3090        int i;
3091        u32 cap;
3092        u16 status;
3093
3094        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3095        if (!(cap & PCI_EXP_DEVCAP_FLR))
3096                return -ENOTTY;
3097
3098        if (probe)
3099                return 0;
3100
3101        /* Wait for Transaction Pending bit to clear */
3102        for (i = 0; i < 4; i++) {
3103                if (i)
3104                        msleep((1 << (i - 1)) * 100);
3105
3106                pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
3107                if (!(status & PCI_EXP_DEVSTA_TRPND))
3108                        goto clear;
3109        }
3110
3111        dev_err(&dev->dev, "transaction is not cleared; "
3112                        "proceeding with reset anyway\n");
3113
3114clear:
3115        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3116
3117        msleep(100);
3118
3119        return 0;
3120}
3121
3122static int pci_af_flr(struct pci_dev *dev, int probe)
3123{
3124        int i;
3125        int pos;
3126        u8 cap;
3127        u8 status;
3128
3129        pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3130        if (!pos)
3131                return -ENOTTY;
3132
3133        pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3134        if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3135                return -ENOTTY;
3136
3137        if (probe)
3138                return 0;
3139
3140        /* Wait for Transaction Pending bit to clear */
3141        for (i = 0; i < 4; i++) {
3142                if (i)
3143                        msleep((1 << (i - 1)) * 100);
3144
3145                pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3146                if (!(status & PCI_AF_STATUS_TP))
3147                        goto clear;
3148        }
3149
3150        dev_err(&dev->dev, "transaction is not cleared; "
3151                        "proceeding with reset anyway\n");
3152
3153clear:
3154        pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3155        msleep(100);
3156
3157        return 0;
3158}
3159
3160/**
3161 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3162 * @dev: Device to reset.
3163 * @probe: If set, only check if the device can be reset this way.
3164 *
3165 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3166 * unset, it will be reinitialized internally when going from PCI_D3hot to
3167 * PCI_D0.  If that's the case and the device is not in a low-power state
3168 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3169 *
3170 * NOTE: This causes the caller to sleep for twice the device power transition
3171 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3172 * by default (i.e. unless the @dev's d3_delay field has a different value).
3173 * Moreover, only devices in D0 can be reset by this function.
3174 */
3175static int pci_pm_reset(struct pci_dev *dev, int probe)
3176{
3177        u16 csr;
3178
3179        if (!dev->pm_cap)
3180                return -ENOTTY;
3181
3182        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3183        if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3184                return -ENOTTY;
3185
3186        if (probe)
3187                return 0;
3188
3189        if (dev->current_state != PCI_D0)
3190                return -EINVAL;
3191
3192        csr &= ~PCI_PM_CTRL_STATE_MASK;
3193        csr |= PCI_D3hot;
3194        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3195        pci_dev_d3_sleep(dev);
3196
3197        csr &= ~PCI_PM_CTRL_STATE_MASK;
3198        csr |= PCI_D0;
3199        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3200        pci_dev_d3_sleep(dev);
3201
3202        return 0;
3203}
3204
3205static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3206{
3207        u16 ctrl;
3208        struct pci_dev *pdev;
3209
3210        if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3211                return -ENOTTY;
3212
3213        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3214                if (pdev != dev)
3215                        return -ENOTTY;
3216
3217        if (probe)
3218                return 0;
3219
3220        pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3221        ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3222        pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3223        msleep(100);
3224
3225        ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3226        pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3227        msleep(100);
3228
3229        return 0;
3230}
3231
3232static int __pci_dev_reset(struct pci_dev *dev, int probe)
3233{
3234        int rc;
3235
3236        might_sleep();
3237
3238        rc = pci_dev_specific_reset(dev, probe);
3239        if (rc != -ENOTTY)
3240                goto done;
3241
3242        rc = pcie_flr(dev, probe);
3243        if (rc != -ENOTTY)
3244                goto done;
3245
3246        rc = pci_af_flr(dev, probe);
3247        if (rc != -ENOTTY)
3248                goto done;
3249
3250        rc = pci_pm_reset(dev, probe);
3251        if (rc != -ENOTTY)
3252                goto done;
3253
3254        rc = pci_parent_bus_reset(dev, probe);
3255done:
3256        return rc;
3257}
3258
3259static int pci_dev_reset(struct pci_dev *dev, int probe)
3260{
3261        int rc;
3262
3263        if (!probe) {
3264                pci_cfg_access_lock(dev);
3265                /* block PM suspend, driver probe, etc. */
3266                device_lock(&dev->dev);
3267        }
3268
3269        rc = __pci_dev_reset(dev, probe);
3270
3271        if (!probe) {
3272                device_unlock(&dev->dev);
3273                pci_cfg_access_unlock(dev);
3274        }
3275        return rc;
3276}
3277/**
3278 * __pci_reset_function - reset a PCI device function
3279 * @dev: PCI device to reset
3280 *
3281 * Some devices allow an individual function to be reset without affecting
3282 * other functions in the same device.  The PCI device must be responsive
3283 * to PCI config space in order to use this function.
3284 *
3285 * The device function is presumed to be unused when this function is called.
3286 * Resetting the device will make the contents of PCI configuration space
3287 * random, so any caller of this must be prepared to reinitialise the
3288 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3289 * etc.
3290 *
3291 * Returns 0 if the device function was successfully reset or negative if the
3292 * device doesn't support resetting a single function.
3293 */
3294int __pci_reset_function(struct pci_dev *dev)
3295{
3296        return pci_dev_reset(dev, 0);
3297}
3298EXPORT_SYMBOL_GPL(__pci_reset_function);
3299
3300/**
3301 * __pci_reset_function_locked - reset a PCI device function while holding
3302 * the @dev mutex lock.
3303 * @dev: PCI device to reset
3304 *
3305 * Some devices allow an individual function to be reset without affecting
3306 * other functions in the same device.  The PCI device must be responsive
3307 * to PCI config space in order to use this function.
3308 *
3309 * The device function is presumed to be unused and the caller is holding
3310 * the device mutex lock when this function is called.
3311 * Resetting the device will make the contents of PCI configuration space
3312 * random, so any caller of this must be prepared to reinitialise the
3313 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3314 * etc.
3315 *
3316 * Returns 0 if the device function was successfully reset or negative if the
3317 * device doesn't support resetting a single function.
3318 */
3319int __pci_reset_function_locked(struct pci_dev *dev)
3320{
3321        return __pci_dev_reset(dev, 0);
3322}
3323EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3324
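/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): a caller that already holds the device lock, e.g. an error
 * recovery path, would pair the _locked variant with a save/restore
 * of config space, since the reset randomizes it.
 */
static int __maybe_unused example_locked_reset(struct pci_dev *pdev)
{
        int rc;

        pci_save_state(pdev);                   /* device lock is held */
        rc = __pci_reset_function_locked(pdev);
        pci_restore_state(pdev);                /* config space was lost */
        return rc;
}
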
3325/**
3326 * pci_probe_reset_function - check whether the device can be safely reset
3327 * @dev: PCI device to reset
3328 *
3329 * Some devices allow an individual function to be reset without affecting
3330 * other functions in the same device.  The PCI device must be responsive
3331 * to PCI config space in order to use this function.
3332 *
3333 * Returns 0 if the device function can be reset or negative if the
3334 * device doesn't support resetting a single function.
3335 */
3336int pci_probe_reset_function(struct pci_dev *dev)
3337{
3338        return pci_dev_reset(dev, 1);
3339}
3340
3341/**
3342 * pci_reset_function - quiesce and reset a PCI device function
3343 * @dev: PCI device to reset
3344 *
3345 * Some devices allow an individual function to be reset without affecting
3346 * other functions in the same device.  The PCI device must be responsive
3347 * to PCI config space in order to use this function.
3348 *
3349 * This function does not just reset the PCI portion of a device, but
3350 * clears all the state associated with the device.  This function differs
3351 * from __pci_reset_function in that it saves and restores device state
3352 * over the reset.
3353 *
3354 * Returns 0 if the device function was successfully reset or negative if the
3355 * device doesn't support resetting a single function.
3356 */
3357int pci_reset_function(struct pci_dev *dev)
3358{
3359        int rc;
3360
3361        rc = pci_dev_reset(dev, 1);
3362        if (rc)
3363                return rc;
3364
3365        pci_save_state(dev);
3366
3367        /*
3368         * Both INTx and MSI are disabled after the Interrupt Disable bit
3369         * is set and the Bus Master bit is cleared.
3370         */
3371        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3372
3373        rc = pci_dev_reset(dev, 0);
3374
3375        pci_restore_state(dev);
3376
3377        return rc;
3378}
3379EXPORT_SYMBOL_GPL(pci_reset_function);
3380
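/*
 * Illustrative sketch (editorial addition): probe first, then reset.
 * pci_reset_function() saves and restores config space itself, so the
 * caller only has to reinitialize device-internal state afterwards.
 */
static int __maybe_unused example_reset(struct pci_dev *pdev)
{
        if (pci_probe_reset_function(pdev))
                return -ENOTTY;         /* no usable reset method */

        return pci_reset_function(pdev);
}
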
3381/**
3382 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3383 * @dev: PCI device to query
3384 *
3385 * Returns mmrbc: maximum designed memory read count in bytes
3386 *    or appropriate error value.
3387 */
3388int pcix_get_max_mmrbc(struct pci_dev *dev)
3389{
3390        int cap;
3391        u32 stat;
3392
3393        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3394        if (!cap)
3395                return -EINVAL;
3396
3397        if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3398                return -EINVAL;
3399
3400        return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3401}
3402EXPORT_SYMBOL(pcix_get_max_mmrbc);
3403
3404/**
3405 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3406 * @dev: PCI device to query
3407 *
3408 * Returns mmrbc: maximum memory read count in bytes
3409 *    or appropriate error value.
3410 */
3411int pcix_get_mmrbc(struct pci_dev *dev)
3412{
3413        int cap;
3414        u16 cmd;
3415
3416        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3417        if (!cap)
3418                return -EINVAL;
3419
3420        if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3421                return -EINVAL;
3422
3423        return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3424}
3425EXPORT_SYMBOL(pcix_get_mmrbc);
3426
3427/**
3428 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3429 * @dev: PCI device to query
3430 * @mmrbc: maximum memory read count in bytes
3431 *    valid values are 512, 1024, 2048, 4096
3432 *
3433 * If possible, sets the maximum memory read byte count; some bridges
3434 * have errata that prevent this.
3435 */
3436int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3437{
3438        int cap;
3439        u32 stat, v, o;
3440        u16 cmd;
3441
3442        if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3443                return -EINVAL;
3444
3445        v = ffs(mmrbc) - 10;
3446
3447        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3448        if (!cap)
3449                return -EINVAL;
3450
3451        if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3452                return -EINVAL;
3453
3454        if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3455                return -E2BIG;
3456
3457        if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3458                return -EINVAL;
3459
3460        o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3461        if (o != v) {
3462                if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3463                        return -EIO;
3464
3465                cmd &= ~PCI_X_CMD_MAX_READ;
3466                cmd |= v << 2;
3467                if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3468                        return -EIO;
3469        }
3470        return 0;
3471}
3472EXPORT_SYMBOL(pcix_set_mmrbc);
3473
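/*
 * Illustrative sketch (editorial addition): clamp a desired MMRBC to
 * the device's designed maximum before programming it; 2048 is just an
 * example request size.
 */
static int __maybe_unused example_set_mmrbc(struct pci_dev *pdev)
{
        int max = pcix_get_max_mmrbc(pdev);

        if (max < 0)
                return max;             /* not a PCI-X device */

        return pcix_set_mmrbc(pdev, min(max, 2048));
}
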
3474/**
3475 * pcie_get_readrq - get PCI Express read request size
3476 * @dev: PCI device to query
3477 *
3478 * Returns maximum memory read request in bytes
3479 *    or appropriate error value.
3480 */
3481int pcie_get_readrq(struct pci_dev *dev)
3482{
3483        u16 ctl;
3484
3485        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3486
3487        return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3488}
3489EXPORT_SYMBOL(pcie_get_readrq);
3490
3491/**
3492 * pcie_set_readrq - set PCI Express maximum memory read request
3493 * @dev: PCI device to query
3494 * @rq: maximum memory read count in bytes
3495 *    valid values are 128, 256, 512, 1024, 2048, 4096
3496 *
3497 * If possible, sets the maximum memory read request size in bytes.
3498 */
3499int pcie_set_readrq(struct pci_dev *dev, int rq)
3500{
3501        u16 v;
3502
3503        if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3504                return -EINVAL;
3505
3506        /*
3507         * If using the "performance" PCIe config, we clamp the
3508         * read request size to the maximum payload size (MPS) to
3509         * prevent the host bridge from generating requests larger
3510         * than we can cope with.
3511         */
3512        if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3513                int mps = pcie_get_mps(dev);
3514
3515                if (mps < 0)
3516                        return mps;
3517                if (mps < rq)
3518                        rq = mps;
3519        }
3520
3521        v = (ffs(rq) - 8) << 12;
3522
3523        return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3524                                                  PCI_EXP_DEVCTL_READRQ, v);
3525}
3526EXPORT_SYMBOL(pcie_set_readrq);
3527
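/*
 * Illustrative sketch (editorial addition): a driver may simply request
 * the largest valid read request size; with the "performance" bus
 * config the helper above clamps it to the MPS automatically.
 */
static void __maybe_unused example_set_readrq(struct pci_dev *pdev)
{
        if (pcie_set_readrq(pdev, 4096))
                dev_info(&pdev->dev, "keeping default read request size\n");
}
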
3528/**
3529 * pcie_get_mps - get PCI Express maximum payload size
3530 * @dev: PCI device to query
3531 *
3532 * Returns maximum payload size in bytes
3533 *    or appropriate error value.
3534 */
3535int pcie_get_mps(struct pci_dev *dev)
3536{
3537        u16 ctl;
3538
3539        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3540
3541        return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3542}
3543
3544/**
3545 * pcie_set_mps - set PCI Express maximum payload size
3546 * @dev: PCI device to query
3547 * @mps: maximum payload size in bytes
3548 *    valid values are 128, 256, 512, 1024, 2048, 4096
3549 *
3550 * If possible, sets the maximum payload size.
3551 */
3552int pcie_set_mps(struct pci_dev *dev, int mps)
3553{
3554        u16 v;
3555
3556        if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3557                return -EINVAL;
3558
3559        v = ffs(mps) - 8;
3560        if (v > dev->pcie_mpss)
3561                return -EINVAL;
3562        v <<= 5;
3563
3564        return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3565                                                  PCI_EXP_DEVCTL_PAYLOAD, v);
3566}
3567
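/*
 * Illustrative sketch (editorial addition): dev->pcie_mpss encodes the
 * supported payload size as 128 << pcie_mpss, so the largest supported
 * value can be programmed as below.  Note that MPS normally has to be
 * kept consistent across the whole fabric; this is only a sketch.
 */
static int __maybe_unused example_set_mps(struct pci_dev *pdev)
{
        return pcie_set_mps(pdev, 128 << pdev->pcie_mpss);
}
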
3568/**
3569 * pci_select_bars - Make BAR mask from the type of resource
3570 * @dev: the PCI device for which BAR mask is made
3571 * @flags: resource type mask to be selected
3572 *
3573 * This helper routine makes a BAR mask from the given resource type.
3574 */
3575int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3576{
3577        int i, bars = 0;
3578        for (i = 0; i < PCI_NUM_RESOURCES; i++)
3579                if (pci_resource_flags(dev, i) & flags)
3580                        bars |= (1 << i);
3581        return bars;
3582}
3583
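/*
 * Illustrative sketch (editorial addition): the mask is typically fed
 * to pci_request_selected_regions(), e.g. to claim only the memory
 * BARs of a device ("example" is a hypothetical driver name).
 */
static int __maybe_unused example_request_mem_bars(struct pci_dev *pdev)
{
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);

        return pci_request_selected_regions(pdev, bars, "example");
}
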
3584/**
3585 * pci_resource_bar - get position of the BAR associated with a resource
3586 * @dev: the PCI device
3587 * @resno: the resource number
3588 * @type: the BAR type to be filled in
3589 *
3590 * Returns BAR position in config space, or 0 if the BAR is invalid.
3591 */
3592int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3593{
3594        int reg;
3595
3596        if (resno < PCI_ROM_RESOURCE) {
3597                *type = pci_bar_unknown;
3598                return PCI_BASE_ADDRESS_0 + 4 * resno;
3599        } else if (resno == PCI_ROM_RESOURCE) {
3600                *type = pci_bar_mem32;
3601                return dev->rom_base_reg;
3602        } else if (resno < PCI_BRIDGE_RESOURCES) {
3603                /* device specific resource */
3604                reg = pci_iov_resource_bar(dev, resno, type);
3605                if (reg)
3606                        return reg;
3607        }
3608
3609        dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3610        return 0;
3611}
3612
3613/* Some architectures require additional programming to enable VGA */
3614static arch_set_vga_state_t arch_set_vga_state;
3615
3616void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3617{
3618        arch_set_vga_state = func;      /* NULL disables */
3619}
3620
3621static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3622                      unsigned int command_bits, u32 flags)
3623{
3624        if (arch_set_vga_state)
3625                return arch_set_vga_state(dev, decode, command_bits,
3626                                                flags);
3627        return 0;
3628}
3629
3630/**
3631 * pci_set_vga_state - set VGA decode state on device and parents if requested
3632 * @dev: the PCI device
3633 * @decode: true = enable decoding, false = disable decoding
3634 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3636 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
3637 * the latter also traverses ancestor buses and updates their bridges
3637 */
3638int pci_set_vga_state(struct pci_dev *dev, bool decode,
3639                      unsigned int command_bits, u32 flags)
3640{
3641        struct pci_bus *bus;
3642        struct pci_dev *bridge;
3643        u16 cmd;
3644        int rc;
3645
3646        WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3647
3648        /* ARCH specific VGA enables */
3649        rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3650        if (rc)
3651                return rc;
3652
3653        if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3654                pci_read_config_word(dev, PCI_COMMAND, &cmd);
3655                if (decode)
3656                        cmd |= command_bits;
3657                else
3658                        cmd &= ~command_bits;
3659                pci_write_config_word(dev, PCI_COMMAND, cmd);
3660        }
3661
3662        if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3663                return 0;
3664
3665        bus = dev->bus;
3666        while (bus) {
3667                bridge = bus->self;
3668                if (bridge) {
3669                        pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3670                                             &cmd);
3671                        if (decode)
3672                                cmd |= PCI_BRIDGE_CTL_VGA;
3673                        else
3674                                cmd &= ~PCI_BRIDGE_CTL_VGA;
3675                        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3676                                              cmd);
3677                }
3678                bus = bus->parent;
3679        }
3680        return 0;
3681}
3682
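/*
 * Illustrative sketch (editorial addition): this is roughly how the VGA
 * arbiter routes legacy VGA ranges to one card, enabling its I/O and
 * memory decodes and the VGA bit in every bridge above it.
 */
static int __maybe_unused example_route_vga(struct pci_dev *pdev)
{
        return pci_set_vga_state(pdev, true,
                                 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
                                 PCI_VGA_STATE_CHANGE_DECODES |
                                 PCI_VGA_STATE_CHANGE_BRIDGE);
}
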
3683#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3684static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3685static DEFINE_SPINLOCK(resource_alignment_lock);
3686
3687/**
3688 * pci_specified_resource_alignment - get resource alignment specified by user.
3689 * @dev: the PCI device to check
3690 *
3691 * RETURNS: Resource alignment if it is specified.
3692 *          Zero if it is not specified.
3693 */
3694resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3695{
3696        int seg, bus, slot, func, align_order, count;
3697        resource_size_t align = 0;
3698        char *p;
3699
3700        spin_lock(&resource_alignment_lock);
3701        p = resource_alignment_param;
3702        while (*p) {
3703                count = 0;
3704                if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3705                                                        p[count] == '@') {
3706                        p += count + 1;
3707                } else {
3708                        align_order = -1;
3709                }
3710                if (sscanf(p, "%x:%x:%x.%x%n",
3711                        &seg, &bus, &slot, &func, &count) != 4) {
3712                        seg = 0;
3713                        if (sscanf(p, "%x:%x.%x%n",
3714                                        &bus, &slot, &func, &count) != 3) {
3715                                /* Invalid format */
3716                                printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3717                                        p);
3718                                break;
3719                        }
3720                }
3721                p += count;
3722                if (seg == pci_domain_nr(dev->bus) &&
3723                        bus == dev->bus->number &&
3724                        slot == PCI_SLOT(dev->devfn) &&
3725                        func == PCI_FUNC(dev->devfn)) {
3726                        if (align_order == -1) {
3727                                align = PAGE_SIZE;
3728                        } else {
3729                                align = 1 << align_order;
3730                        }
3731                        /* Found */
3732                        break;
3733                }
3734                if (*p != ';' && *p != ',') {
3735                        /* End of param or invalid format */
3736                        break;
3737                }
3738                p++;
3739        }
3740        spin_unlock(&resource_alignment_lock);
3741        return align;
3742}
3743
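/*
 * Example of the format parsed above (editorial addition): the boot
 * parameter
 *
 *      pci=resource_alignment=20@0000:00:1f.0;01:00.0
 *
 * requests 2^20-byte (1 MiB) alignment for device 0000:00:1f.0, and
 * PAGE_SIZE alignment (no "<order>@" prefix) for device 01:00.0.
 */
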
3744/**
3745 * pci_is_reassigndev - check if specified PCI device is a reassignment target
3746 * @dev: the PCI device to check
3747 *
3748 * RETURNS: non-zero if the PCI device is a target device to reassign,
3749 *          zero otherwise.
3750 */
3751int pci_is_reassigndev(struct pci_dev *dev)
3752{
3753        return (pci_specified_resource_alignment(dev) != 0);
3754}
3755
3756/*
3757 * This function disables memory decoding and releases memory resources
3758 * of the device specified by the kernel boot parameter 'pci=resource_alignment='.
3759 * It also rounds up the resource size to the specified alignment.
3760 * Later on, the kernel will assign page-aligned memory resources back
3761 * to the device.
3762 */
3763void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3764{
3765        int i;
3766        struct resource *r;
3767        resource_size_t align, size;
3768        u16 command;
3769
3770        if (!pci_is_reassigndev(dev))
3771                return;
3772
3773        if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3774            (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3775                dev_warn(&dev->dev,
3776                        "Can't reassign resources to host bridge.\n");
3777                return;
3778        }
3779
3780        dev_info(&dev->dev,
3781                "Disabling memory decoding and releasing memory resources.\n");
3782        pci_read_config_word(dev, PCI_COMMAND, &command);
3783        command &= ~PCI_COMMAND_MEMORY;
3784        pci_write_config_word(dev, PCI_COMMAND, command);
3785
3786        align = pci_specified_resource_alignment(dev);
3787        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3788                r = &dev->resource[i];
3789                if (!(r->flags & IORESOURCE_MEM))
3790                        continue;
3791                size = resource_size(r);
3792                if (size < align) {
3793                        size = align;
3794                        dev_info(&dev->dev,
3795                                "Rounding up size of resource #%d to %#llx.\n",
3796                                i, (unsigned long long)size);
3797                }
3798                r->end = size - 1;
3799                r->start = 0;
3800        }
3801        /*
3802         * Need to disable the bridge's resource windows so the
3803         * kernel can reassign new windows later on.
3804         */
3805        if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3806            (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3807                for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3808                        r = &dev->resource[i];
3809                        if (!(r->flags & IORESOURCE_MEM))
3810                                continue;
3811                        r->end = resource_size(r) - 1;
3812                        r->start = 0;
3813                }
3814                pci_disable_bridge_window(dev);
3815        }
3816}
3817
3818ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3819{
3820        if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3821                count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3822        spin_lock(&resource_alignment_lock);
3823        strncpy(resource_alignment_param, buf, count);
3824        resource_alignment_param[count] = '\0';
3825        spin_unlock(&resource_alignment_lock);
3826        return count;
3827}
3828
3829ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3830{
3831        size_t count;
3832        spin_lock(&resource_alignment_lock);
3833        count = snprintf(buf, size, "%s", resource_alignment_param);
3834        spin_unlock(&resource_alignment_lock);
3835        return count;
3836}
3837
3838static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3839{
3840        return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3841}
3842
3843static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3844                                        const char *buf, size_t count)
3845{
3846        return pci_set_resource_alignment_param(buf, count);
3847}
3848
3849BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3850                                        pci_resource_alignment_store);
3851
3852static int __init pci_resource_alignment_sysfs_init(void)
3853{
3854        return bus_create_file(&pci_bus_type,
3855                                        &bus_attr_resource_alignment);
3856}
3857
3858late_initcall(pci_resource_alignment_sysfs_init);
3859
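/*
 * Editorial note: the bus attribute created above shows up as
 * /sys/bus/pci/resource_alignment, so the same specification can also
 * be updated at run time, e.g.:
 *
 *      echo "20@0000:00:1f.0" > /sys/bus/pci/resource_alignment
 */
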
3860static void __devinit pci_no_domains(void)
3861{
3862#ifdef CONFIG_PCI_DOMAINS
3863        pci_domains_supported = 0;
3864#endif
3865}
3866
3867/**
3868 * pci_ext_cfg_avail - can we access extended PCI config space?
3869 * @dev: The PCI device of the root bridge.
3870 *
3871 * Returns 1 if we can access PCI extended config space (offsets
3872 * greater than 0xff). This is the default implementation. Architecture
3873 * implementations can override this.
3874 */
3875int __weak pci_ext_cfg_avail(struct pci_dev *dev)
3876{
3877        return 1;
3878}
3879
3880void __weak pci_fixup_cardbus(struct pci_bus *bus)
3881{
3882}
3883EXPORT_SYMBOL(pci_fixup_cardbus);
3884
3885static int __init pci_setup(char *str)
3886{
3887        while (str) {
3888                char *k = strchr(str, ',');
3889                if (k)
3890                        *k++ = 0;
3891                if (*str && (str = pcibios_setup(str)) && *str) {
3892                        if (!strcmp(str, "nomsi")) {
3893                                pci_no_msi();
3894                        } else if (!strcmp(str, "noaer")) {
3895                                pci_no_aer();
3896                        } else if (!strncmp(str, "realloc=", 8)) {
3897                                pci_realloc_get_opt(str + 8);
3898                        } else if (!strncmp(str, "realloc", 7)) {
3899                                pci_realloc_get_opt("on");
3900                        } else if (!strcmp(str, "nodomains")) {
3901                                pci_no_domains();
3902                        } else if (!strncmp(str, "noari", 5)) {
3903                                pcie_ari_disabled = true;
3904                        } else if (!strncmp(str, "cbiosize=", 9)) {
3905                                pci_cardbus_io_size = memparse(str + 9, &str);
3906                        } else if (!strncmp(str, "cbmemsize=", 10)) {
3907                                pci_cardbus_mem_size = memparse(str + 10, &str);
3908                        } else if (!strncmp(str, "resource_alignment=", 19)) {
3909                                pci_set_resource_alignment_param(str + 19,
3910                                                        strlen(str + 19));
3911                        } else if (!strncmp(str, "ecrc=", 5)) {
3912                                pcie_ecrc_get_policy(str + 5);
3913                        } else if (!strncmp(str, "hpiosize=", 9)) {
3914                                pci_hotplug_io_size = memparse(str + 9, &str);
3915                        } else if (!strncmp(str, "hpmemsize=", 10)) {
3916                                pci_hotplug_mem_size = memparse(str + 10, &str);
3917                        } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3918                                pcie_bus_config = PCIE_BUS_TUNE_OFF;
3919                        } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3920                                pcie_bus_config = PCIE_BUS_SAFE;
3921                        } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3922                                pcie_bus_config = PCIE_BUS_PERFORMANCE;
3923                        } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3924                                pcie_bus_config = PCIE_BUS_PEER2PEER;
3925                        } else if (!strncmp(str, "pcie_scan_all", 13)) {
3926                                pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
3927                        } else {
3928                                printk(KERN_ERR "PCI: Unknown option `%s'\n",
3929                                                str);
3930                        }
3931                }
3932                str = k;
3933        }
3934        return 0;
3935}
3936early_param("pci", pci_setup);
3937
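/*
 * Editorial example: the options parsed above combine on one kernel
 * command line, separated by commas, e.g.:
 *
 *      pci=nomsi,noaer,hpmemsize=8M,pcie_bus_safe
 */
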
3938EXPORT_SYMBOL(pci_reenable_device);
3939EXPORT_SYMBOL(pci_enable_device_io);
3940EXPORT_SYMBOL(pci_enable_device_mem);
3941EXPORT_SYMBOL(pci_enable_device);
3942EXPORT_SYMBOL(pcim_enable_device);
3943EXPORT_SYMBOL(pcim_pin_device);
3944EXPORT_SYMBOL(pci_disable_device);
3945EXPORT_SYMBOL(pci_find_capability);
3946EXPORT_SYMBOL(pci_bus_find_capability);
3947EXPORT_SYMBOL(pci_release_regions);
3948EXPORT_SYMBOL(pci_request_regions);
3949EXPORT_SYMBOL(pci_request_regions_exclusive);
3950EXPORT_SYMBOL(pci_release_region);
3951EXPORT_SYMBOL(pci_request_region);
3952EXPORT_SYMBOL(pci_request_region_exclusive);
3953EXPORT_SYMBOL(pci_release_selected_regions);
3954EXPORT_SYMBOL(pci_request_selected_regions);
3955EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3956EXPORT_SYMBOL(pci_set_master);
3957EXPORT_SYMBOL(pci_clear_master);
3958EXPORT_SYMBOL(pci_set_mwi);
3959EXPORT_SYMBOL(pci_try_set_mwi);
3960EXPORT_SYMBOL(pci_clear_mwi);
3961EXPORT_SYMBOL_GPL(pci_intx);
3962EXPORT_SYMBOL(pci_assign_resource);
3963EXPORT_SYMBOL(pci_find_parent_resource);
3964EXPORT_SYMBOL(pci_select_bars);
3965
3966EXPORT_SYMBOL(pci_set_power_state);
3967EXPORT_SYMBOL(pci_save_state);
3968EXPORT_SYMBOL(pci_restore_state);
3969EXPORT_SYMBOL(pci_pme_capable);
3970EXPORT_SYMBOL(pci_pme_active);
3971EXPORT_SYMBOL(pci_wake_from_d3);
3972EXPORT_SYMBOL(pci_target_state);
3973EXPORT_SYMBOL(pci_prepare_to_sleep);
3974EXPORT_SYMBOL(pci_back_from_sleep);
3975EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3976