linux/drivers/pci/pci.c
/*
 *      PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *      Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *      David Mosberger-Tang
 *
 *      Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
        "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
        struct list_head list;
        struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
        unsigned int delay = dev->d3_delay;

        if (delay < pci_pm_d3_delay)
                delay = pci_pm_d3_delay;

        msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE         (256)
#define DEFAULT_CARDBUS_MEM_SIZE        (64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE         (256)
#define DEFAULT_HOTPLUG_MEM_SIZE        (2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  The arch can override either
 * the default or the actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
        struct list_head *tmp;
        unsigned char max, n;

        max = bus->busn_res.end;
        list_for_each(tmp, &bus->children) {
                n = pci_bus_max_busnr(pci_bus_b(tmp));
                if (n > max)
                        max = n;
        }
        return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
        /*
         * Make sure the BAR is actually a memory resource, not an IO resource
         */
        if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
                WARN_ON(1);
                return NULL;
        }
        return ioremap_nocache(pci_resource_start(pdev, bar),
                                     pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

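/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a driver typically maps a memory BAR once during probe and checks
 * the result before touching the registers.  example_map_bar0() is a
 * made-up name.
 */
#if 0
static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
        /* BAR 0 must be a memory resource for pci_ioremap_bar() to work */
        void __iomem *regs = pci_ioremap_bar(pdev, 0);

        if (!regs)
                dev_err(&pdev->dev, "failed to map BAR 0\n");
        return regs;
}
#endif
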
#define PCI_FIND_CAP_TTL        48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
                                   u8 pos, int cap, int *ttl)
{
        u8 id;

        while ((*ttl)--) {
                pci_bus_read_config_byte(bus, devfn, pos, &pos);
                if (pos < 0x40)
                        break;
                pos &= ~3;
                pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
                                         &id);
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos += PCI_CAP_LIST_NEXT;
        }
        return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
                               u8 pos, int cap)
{
        int ttl = PCI_FIND_CAP_TTL;

        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
        return __pci_find_next_cap(dev->bus, dev->devfn,
                                   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
                                    unsigned int devfn, u8 hdr_type)
{
        u16 status;

        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        switch (hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
                return PCI_CAPABILITY_LIST;
        case PCI_HEADER_TYPE_CARDBUS:
                return PCI_CB_CAPABILITY_LIST;
        default:
                return 0;
        }

        return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
        int pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

        return pos;
}

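/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * locating the Power Management capability before reading one of its
 * registers; the returned position is the base of the capability
 * structure, so register offsets such as PCI_PM_CTRL are added to it.
 * example_read_pmcsr() is a made-up name.
 */
#if 0
static int example_read_pmcsr(struct pci_dev *pdev, u16 *pmcsr)
{
        int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);

        if (!pm)
                return -ENODEV; /* device has no PM capability */
        return pci_read_config_word(pdev, pm + PCI_PM_CTRL, pmcsr);
}
#endif
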
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
        int pos;
        u8 hdr_type;

        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
        if (pos)
                pos = __pci_find_next_cap(bus, devfn, pos, cap);

        return pos;
}

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
        u32 header;
        int ttl;
        int pos = PCI_CFG_SPACE_SIZE;

        /* minimum 8 bytes per capability */
        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
                return 0;

        if (start)
                pos = start;

        if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                return 0;

        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                        break;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

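/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * because some extended capabilities may appear more than once, e.g.
 * the vendor-specific one, a caller can iterate by feeding each return
 * value back in as @start until 0 is returned.  example_walk_vsec() is
 * a made-up name.
 */
#if 0
static void example_walk_vsec(struct pci_dev *pdev)
{
        int pos = 0;

        while ((pos = pci_find_next_ext_capability(pdev, pos,
                                                   PCI_EXT_CAP_ID_VNDR)))
                dev_info(&pdev->dev, "vendor-specific cap at %#x\n", pos);
}
#endif
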
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR         Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC          Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN         Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR         Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
        return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
        int rc, ttl = PCI_FIND_CAP_TTL;
        u8 cap, mask;

        if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
                mask = HT_3BIT_CAP_MASK;
        else
                mask = HT_5BIT_CAP_MASK;

        pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
                                      PCI_CAP_ID_HT, &ttl);
        while (pos) {
                rc = pci_read_config_byte(dev, pos + 3, &cap);
                if (rc != PCIBIOS_SUCCESSFUL)
                        return 0;

                if ((cap & mask) == ht_cap)
                        return pos;

                pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
                                              pos + PCI_CAP_LIST_NEXT,
                                              PCI_CAP_ID_HT, &ttl);
        }

        return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
        int pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

        return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing the resources to be searched
 * @res: child resource record for which a parent is sought
 *
 *  For the given resource region of the given device, return the resource
 *  region of the parent bus which contains the given region or from which
 *  the region should be allocated.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
        const struct pci_bus *bus = dev->bus;
        int i;
        struct resource *best = NULL, *r;

        pci_bus_for_each_resource(bus, r, i) {
                if (!r)
                        continue;
                if (res->start && !(res->start >= r->start && res->end <= r->end))
                        continue;       /* Not contained */
                if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
                        continue;       /* Wrong type */
                if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
                        return r;       /* Exact match */
                /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
                if (r->flags & IORESOURCE_PREFETCH)
                        continue;
                /* .. but we can put a prefetchable resource inside a non-prefetchable one */
                if (!best)
                        best = r;
        }
        return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
                pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
        if (!ops->is_manageable || !ops->set_state || !ops->choose_state
            || !ops->sleep_wake || !ops->can_wakeup)
                return -EINVAL;
        pci_platform_pm = ops;
        return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
                                                pci_power_t t)
{
        return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
        return pci_platform_pm ?
                        pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
        return pci_platform_pm ?
                        pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
        return pci_platform_pm ?
                        pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        u16 pmcsr;
        bool need_restore = false;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        if (!dev->pm_cap)
                return -EIO;

        if (state < PCI_D0 || state > PCI_D3hot)
                return -EINVAL;

        /* Validate current state:
         * Can enter D0 from any state, but we can only go deeper
         * to sleep if we're already in a low power state
         */
        if (state != PCI_D0 && dev->current_state <= PCI_D3cold
            && dev->current_state > state) {
                dev_err(&dev->dev, "invalid power transition "
                        "(from state %d to %d)\n", dev->current_state, state);
                return -EINVAL;
        }

        /* check if this device supports the desired state */
        if ((state == PCI_D1 && !dev->d1_support)
           || (state == PCI_D2 && !dev->d2_support))
                return -EIO;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

        /* If we're (effectively) in D3, force entire word to 0.
         * This doesn't affect PME_Status, disables PME_En, and
         * sets PowerState to 0.
         */
        switch (dev->current_state) {
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= state;
                break;
        case PCI_D3hot:
        case PCI_D3cold:
        case PCI_UNKNOWN: /* Boot-up */
                if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
                 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
                        need_restore = true;
                /* Fall-through: force to D0 */
        default:
                pmcsr = 0;
                break;
        }

        /* enter specified state */
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /* Mandatory power management transition delays */
        /* see PCI PM 1.1 5.6.1 table 18 */
        if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2 || dev->current_state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        if (dev->current_state != state && printk_ratelimit())
                dev_info(&dev->dev, "Refused to change power state, "
                        "currently in D%d\n", dev->current_state);

        /*
         * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
         * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
         * from D3hot to D0 _may_ perform an internal reset, thereby
         * going to "D0 Uninitialized" rather than "D0 Initialized".
         * For example, at least some versions of the 3c905B and the
         * 3c556B exhibit this behaviour.
         *
         * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
         * devices in a D3hot state at boot.  Consequently, we need to
         * restore at least the BARs so that the device will be
         * accessible to its driver.
         */
        if (need_restore)
                pci_restore_bars(dev);

        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self);

        return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
        if (dev->pm_cap) {
                u16 pmcsr;

                /*
                 * Configuration space is not accessible for a device in
                 * D3cold, so just keep or set D3cold for safety
                 */
                if (dev->current_state == PCI_D3cold)
                        return;
                if (state == PCI_D3cold) {
                        dev->current_state = PCI_D3cold;
                        return;
                }
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        } else {
                dev->current_state = state;
        }
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
        if (platform_pci_power_manageable(dev))
                platform_pci_set_power_state(dev, PCI_D0);

        pci_raw_set_power_state(dev, PCI_D0);
        pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int error;

        if (platform_pci_power_manageable(dev)) {
                error = platform_pci_set_power_state(dev, state);
                if (!error)
                        pci_update_current_state(dev, state);
                /* Fall back to PCI_D0 if native PM is not supported */
                if (!dev->pm_cap)
                        dev->current_state = PCI_D0;
        } else {
                error = -ENODEV;
                /* Fall back to PCI_D0 if native PM is not supported */
                if (!dev->pm_cap)
                        dev->current_state = PCI_D0;
        }

        return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
        if (state == PCI_D0) {
                pci_platform_power_transition(dev, PCI_D0);
                /*
                 * Mandatory power management transition delays, see
                 * PCI Express Base Specification Revision 2.0 Section
                 * 6.6.1: Conventional Reset.  Do not delay for
                 * devices powered on/off by the corresponding bridge,
                 * because we have already delayed for the bridge.
                 */
                if (dev->runtime_d3cold) {
                        msleep(dev->d3cold_delay);
                        /*
                         * When powering on a bridge from D3cold, the
                         * whole hierarchy may be powered on into
                         * D0uninitialized state; resume the devices to
                         * give them a chance to suspend again
                         */
                        pci_wakeup_bus(dev->subordinate);
                }
        }
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
        pci_power_t state = *(pci_power_t *)data;

        dev->current_state = state;
        return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
        if (bus)
                pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int ret;

        if (state <= PCI_D0)
                return -EINVAL;
        ret = pci_platform_power_transition(dev, state);
        /* Powering off a bridge may power off the whole hierarchy */
        if (!ret && state == PCI_D3cold)
                __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
        return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        int error;

        /* bound the state we're entering */
        if (state > PCI_D3cold)
                state = PCI_D3cold;
        else if (state < PCI_D0)
                state = PCI_D0;
        else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
                /*
                 * If the device or the parent bridge do not support PCI PM,
                 * ignore the request if we're doing anything other than putting
                 * it into D0 (which would only happen on boot).
                 */
                return 0;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        __pci_start_power_transition(dev, state);

        /* This device is quirked not to be put into D3, so
           don't put it in D3 */
        if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
                return 0;

        /*
         * To put the device in D3cold, we put the device into D3hot in the
         * native way, then put the device into D3cold with platform ops
         */
        error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
                                        PCI_D3hot : state);

        if (!__pci_complete_power_transition(dev, state))
                error = 0;
        /*
         * When aspm_policy is "powersave" this call ensures
         * that ASPM is configured.
         */
        if (!error && dev->bus->self)
                pcie_aspm_powersave_config_link(dev->bus->self);

        return error;
}

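/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * forcing a device into D3hot.  Note that pci_set_power_state() clamps
 * out-of-range states and silently ignores D1/D2 requests the device
 * cannot honour.  example_power_down() is a made-up name.
 */
#if 0
static void example_power_down(struct pci_dev *pdev)
{
        int err = pci_set_power_state(pdev, PCI_D3hot);

        if (err)
                dev_warn(&pdev->dev, "could not enter D3hot: %d\n", err);
}
#endif
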
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *      that is passed to the suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
        pci_power_t ret;

        if (!pci_find_capability(dev, PCI_CAP_ID_PM))
                return PCI_D0;

        ret = platform_pci_choose_state(dev);
        if (ret != PCI_POWER_ERROR)
                return ret;

        switch (state.event) {
        case PM_EVENT_ON:
                return PCI_D0;
        case PM_EVENT_FREEZE:
        case PM_EVENT_PRETHAW:
                /* REVISIT both freeze and pre-thaw "should" use D0 */
        case PM_EVENT_SUSPEND:
        case PM_EVENT_HIBERNATE:
                return PCI_D3hot;
        default:
                dev_info(&dev->dev, "unrecognized suspend event %d\n",
                         state.event);
                BUG();
        }
        return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

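/*
 * Illustrative sketch (hypothetical legacy .suspend hook, not part of
 * this file): letting the platform pick the target state for a system
 * sleep transition.  example_legacy_suspend() is a made-up name.
 */
#if 0
static int example_legacy_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
        pci_save_state(pdev);
        return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}
#endif
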
#define PCI_EXP_SAVE_REGS       7


static struct pci_cap_saved_state *pci_find_saved_cap(
        struct pci_dev *pci_dev, char cap)
{
        struct pci_cap_saved_state *tmp;
        struct hlist_node *pos;

        hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
                if (tmp->cap.cap_nr == cap)
                        return tmp;
        }
        return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state) {
                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

        return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
        int pos;
        struct pci_cap_saved_state *save_state;

        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (pos <= 0)
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        if (!save_state) {
                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        pci_read_config_word(dev, pos + PCI_X_CMD,
                             (u16 *)save_state->cap.data);

        return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
        int i = 0, pos;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!save_state || pos <= 0)
                return;
        cap = (u16 *)&save_state->cap.data[0];

        pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
        int i;
        /* XXX: 100% dword access ok here? */
        for (i = 0; i < 16; i++)
                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
        dev->state_saved = true;
        if ((i = pci_save_pcie_state(dev)) != 0)
                return i;
        if ((i = pci_save_pcix_state(dev)) != 0)
                return i;
        return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
                                     u32 saved_val, int retry)
{
        u32 val;

        pci_read_config_dword(pdev, offset, &val);
        if (val == saved_val)
                return;

        for (;;) {
                dev_dbg(&pdev->dev, "restoring config space at offset "
                        "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
                pci_write_config_dword(pdev, offset, saved_val);
                if (retry-- <= 0)
                        return;

                pci_read_config_dword(pdev, offset, &val);
                if (val == saved_val)
                        return;

                mdelay(1);
        }
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
                                           int start, int end, int retry)
{
        int index;

        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
                                         retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
                pci_restore_config_space_range(pdev, 10, 15, 0);
                /* Restore BARs before the command register. */
                pci_restore_config_space_range(pdev, 4, 9, 10);
                pci_restore_config_space_range(pdev, 0, 3, 0);
        } else {
                pci_restore_config_space_range(pdev, 0, 15, 0);
        }
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
        if (!dev->state_saved)
                return;

        /* PCI Express register must be restored first */
        pci_restore_pcie_state(dev);
        pci_restore_ats_state(dev);

        pci_restore_config_space(dev);

        pci_restore_pcix_state(dev);
        pci_restore_msi_state(dev);
        pci_restore_iov_state(dev);

        dev->state_saved = false;
}

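/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * pci_save_state() and pci_restore_state() are used as a pair around a
 * suspend/resume or reset cycle.  example_reset_with_state() is a
 * made-up name and the device-specific reset is elided.
 */
#if 0
static int example_reset_with_state(struct pci_dev *pdev)
{
        pci_save_state(pdev);
        /* ... device-specific reset would go here ... */
        pci_restore_state(pdev);
        return 0;
}
#endif
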
struct pci_saved_state {
        u32 config_space[16];
        struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *                         the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
        struct pci_saved_state *state;
        struct pci_cap_saved_state *tmp;
        struct pci_cap_saved_data *cap;
        struct hlist_node *pos;
        size_t size;

        if (!dev->state_saved)
                return NULL;

        size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

        hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
                size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

        state = kzalloc(size, GFP_KERNEL);
        if (!state)
                return NULL;

        memcpy(state->config_space, dev->saved_config_space,
               sizeof(state->config_space));

        cap = state->cap;
        hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
                size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
                memcpy(cap, &tmp->cap, len);
                cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
        }
        /* Empty cap_save terminates list */

        return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
        struct pci_cap_saved_data *cap;

        dev->state_saved = false;

        if (!state)
                return 0;

        memcpy(dev->saved_config_space, state->config_space,
               sizeof(state->config_space));

        cap = state->cap;
        while (cap->size) {
                struct pci_cap_saved_state *tmp;

                tmp = pci_find_saved_cap(dev, cap->cap_nr);
                if (!tmp || tmp->cap.size != cap->size)
                        return -EINVAL;

                memcpy(tmp->cap.data, cap->data, tmp->cap.size);
                cap = (struct pci_cap_saved_data *)((u8 *)cap +
                       sizeof(struct pci_cap_saved_data) + cap->size);
        }

        dev->state_saved = true;
        return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *                                 and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
                                  struct pci_saved_state **state)
{
        int ret = pci_load_saved_state(dev, *state);
        kfree(*state);
        *state = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

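/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * keeping an opaque copy of the saved state so it can be re-applied
 * later, e.g. after handing the device to another owner.  Assumes
 * pci_save_state() populates the per-device buffers first; the
 * example_* names are made up.
 */
#if 0
static struct pci_saved_state *example_snapshot(struct pci_dev *pdev)
{
        pci_save_state(pdev);
        return pci_store_saved_state(pdev);
}

static void example_rollback(struct pci_dev *pdev,
                             struct pci_saved_state **state)
{
        /* reload the snapshot into pdev, then write it to the hardware */
        if (!pci_load_and_free_saved_state(pdev, state))
                pci_restore_state(pdev);
}
#endif
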
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
        int err;

        err = pci_set_power_state(dev, PCI_D0);
        if (err < 0 && err != -EIO)
                return err;
        err = pcibios_enable_device(dev, bars);
        if (err < 0)
                return err;
        pci_fixup_device(pci_fixup_enable, dev);

        return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 *  Note this function is a backend of pci_default_resume() and is not supposed
 *  to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
        return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
                                     resource_size_t flags)
{
        int err;
        int i, bars = 0;

        /*
         * Power state could be unknown at this point, either due to a fresh
         * boot or a device removal call.  So get the current power state
         * so that things like MSI message writing will behave as expected
         * (e.g. if the device really is in D0 at enable time).
         */
        if (dev->pm_cap) {
                u16 pmcsr;
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        }

        if (atomic_add_return(1, &dev->enable_cnt) > 1)
                return 0;               /* already enabled */

        /* only skip sriov related */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
        for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);

        err = do_pci_enable_device(dev, bars);
        if (err < 0)
                atomic_dec(&dev->enable_cnt);
        return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
        return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
        return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
        return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

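/*
 * Illustrative sketch (hypothetical probe path, not part of this
 * file): the enable/disable calls are reference counted, so each
 * successful pci_enable_device*() must eventually be balanced by a
 * pci_disable_device().  example_probe() is a made-up name.
 */
#if 0
static int example_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        int err = pci_enable_device_mem(pdev);

        if (err)
                return err;
        /* ... on any later failure: pci_disable_device(pdev); ... */
        return 0;
}
#endif
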
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
        unsigned int enabled:1;
        unsigned int pinned:1;
        unsigned int orig_intx:1;
        unsigned int restore_intx:1;
        u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
        struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
        struct pci_devres *this = res;
        int i;

        if (dev->msi_enabled)
                pci_disable_msi(dev);
        if (dev->msix_enabled)
                pci_disable_msix(dev);

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
                if (this->region_mask & (1 << i))
                        pci_release_region(dev, i);

        if (this->restore_intx)
                pci_intx(dev, this->orig_intx);

        if (this->enabled && !this->pinned)
                pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
        struct pci_devres *dr, *new_dr;

        dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
        if (dr)
                return dr;

        new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
        if (!new_dr)
                return NULL;
        return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
        if (pci_is_managed(pdev))
                return devres_find(&pdev->dev, pcim_release, NULL, NULL);
        return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;
        int rc;

        dr = get_pci_dr(pdev);
        if (unlikely(!dr))
                return -ENOMEM;
        if (dr->enabled)
                return 0;

        rc = pci_enable_device(pdev);
        if (!rc) {
                pdev->is_managed = 1;
                dr->enabled = 1;
        }
        return rc;
}

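/*
 * Illustrative sketch (hypothetical probe path, not part of this
 * file): with the managed variant, no explicit pci_disable_device()
 * is needed in the error or remove paths; the devres release hook
 * handles it on driver detach.  example_managed_probe() is a made-up
 * name.
 */
#if 0
static int example_managed_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *id)
{
        int err = pcim_enable_device(pdev);

        if (err)
                return err;
        /* device is disabled automatically on driver detach */
        return 0;
}
#endif
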
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(pdev);
        WARN_ON(!dr || !dr->enabled);
        if (dr)
                dr->pinned = 1;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
        u16 pci_command;

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        if (pci_command & PCI_COMMAND_MASTER) {
                pci_command &= ~PCI_COMMAND_MASTER;
                pci_write_config_word(dev, PCI_COMMAND, pci_command);
        }

        pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(dev);
        if (dr)
                dr->enabled = 0;

        if (atomic_sub_return(1, &dev->enable_cnt) != 0)
                return;

        do_pci_disable_device(dev);

        dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
                                        enum pcie_reset_state state)
{
        return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
        return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
        int pmcsr_pos;
        u16 pmcsr;
        bool ret = false;

        if (!dev->pm_cap)
                return false;

        pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
        pci_read_config_word(dev, pmcsr_pos, &pmcsr);
        if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
                return false;

        /* Clear PME status. */
        pmcsr |= PCI_PM_CTRL_PME_STATUS;
        if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
                /* Disable PME to avoid interrupt flood. */
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
                ret = true;
        }

        pci_write_config_word(dev, pmcsr_pos, pmcsr);

        return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
        if (pme_poll_reset && dev->pme_poll)
                dev->pme_poll = false;

        if (pci_check_pme_status(dev)) {
                pci_wakeup_event(dev);
                pm_request_resume(&dev->dev);
        }
        return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
        pci_wakeup_event(pci_dev);
        pm_request_resume(&pci_dev->dev);
        return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
        if (!dev->pm_cap)
                return false;

        return !!(dev->pme_support & (1 << state));
}

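/*
 * Illustrative sketch (hypothetical suspend path, not part of this
 * file): a driver checks PME# capability for the target state before
 * arming it.  example_arm_pme() is a made-up name.
 */
#if 0
static void example_arm_pme(struct pci_dev *pdev, pci_power_t target)
{
        if (pci_pme_capable(pdev, target))
                pci_pme_active(pdev, true);
}
#endif
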
1527static void pci_pme_list_scan(struct work_struct *work)
1528{
1529        struct pci_pme_device *pme_dev, *n;
1530
1531        mutex_lock(&pci_pme_list_mutex);
1532        if (!list_empty(&pci_pme_list)) {
1533                list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1534                        if (pme_dev->dev->pme_poll) {
1535                                struct pci_dev *bridge;
1536
1537                                bridge = pme_dev->dev->bus->self;
1538                                /*
1539                                 * If bridge is in low power state, the
1540                                 * configuration space of subordinate devices
1541                                 * may be not accessible
1542                                 */
1543                                if (bridge && bridge->current_state != PCI_D0)
1544                                        continue;
1545                                pci_pme_wakeup(pme_dev->dev, NULL);
1546                        } else {
1547                                list_del(&pme_dev->list);
1548                                kfree(pme_dev);
1549                        }
1550                }
1551                if (!list_empty(&pci_pme_list))
1552                        schedule_delayed_work(&pci_pme_work,
1553                                              msecs_to_jiffies(PME_TIMEOUT));
1554        }
1555        mutex_unlock(&pci_pme_list_mutex);
1556}
1557
1558/**
1559 * pci_pme_active - enable or disable PCI device's PME# function
1560 * @dev: PCI device to handle.
1561 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1562 *
1563 * The caller must verify that the device is capable of generating PME# before
1564 * calling this function with @enable equal to 'true'.
1565 */
1566void pci_pme_active(struct pci_dev *dev, bool enable)
1567{
1568        u16 pmcsr;
1569
1570        if (!dev->pm_cap)
1571                return;
1572
1573        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1574        /* Clear PME_Status by writing 1 to it and enable PME# */
1575        pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1576        if (!enable)
1577                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1578
1579        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1580
1581        /* PCI (as opposed to PCIe) PME requires that the device have
1582           its PME# line hooked up correctly. Not all hardware vendors
1583           do this, so the PME never gets delivered and the device
1584           remains asleep. The easiest way around this is to
1585           periodically walk the list of suspended devices and check
1586           whether any have their PME flag set. The assumption is that
1587           we'll wake up often enough anyway that this won't be a huge
1588           hit, and the power savings from the devices will still be a
1589           win. */
1590
1591        if (dev->pme_poll) {
1592                struct pci_pme_device *pme_dev;
1593                if (enable) {
1594                        pme_dev = kmalloc(sizeof(struct pci_pme_device),
1595                                          GFP_KERNEL);
1596                        if (!pme_dev)
1597                                goto out;
1598                        pme_dev->dev = dev;
1599                        mutex_lock(&pci_pme_list_mutex);
1600                        list_add(&pme_dev->list, &pci_pme_list);
1601                        if (list_is_singular(&pci_pme_list))
1602                                schedule_delayed_work(&pci_pme_work,
1603                                                      msecs_to_jiffies(PME_TIMEOUT));
1604                        mutex_unlock(&pci_pme_list_mutex);
1605                } else {
1606                        mutex_lock(&pci_pme_list_mutex);
1607                        list_for_each_entry(pme_dev, &pci_pme_list, list) {
1608                                if (pme_dev->dev == dev) {
1609                                        list_del(&pme_dev->list);
1610                                        kfree(pme_dev);
1611                                        break;
1612                                }
1613                        }
1614                        mutex_unlock(&pci_pme_list_mutex);
1615                }
1616        }
1617
1618out:
1619        dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1620}
1621
1622/**
1623 * __pci_enable_wake - enable PCI device as wakeup event source
1624 * @dev: PCI device affected
1625 * @state: PCI state from which device will issue wakeup events
1626 * @runtime: True if the events are to be generated at run time
1627 * @enable: True to enable event generation; false to disable
1628 *
1629 * This enables the device as a wakeup event source, or disables it.
1630 * When such events involve platform-specific hooks, those hooks are
1631 * called automatically by this routine.
1632 *
1633 * Devices with legacy power management (no standard PCI PM capabilities)
1634 * always require such platform hooks.
1635 *
1636 * RETURN VALUE:
1637 * 0 is returned on success
1638 * -EINVAL is returned if device is not supposed to wake up the system
1639 * A platform-dependent error code is returned if both the platform and
1640 * the native mechanism fail to enable the generation of wake-up events.
1641 */
1642int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1643                      bool runtime, bool enable)
1644{
1645        int ret = 0;
1646
1647        if (enable && !runtime && !device_may_wakeup(&dev->dev))
1648                return -EINVAL;
1649
1650        /* Don't do the same thing twice in a row for one device. */
1651        if (!!enable == !!dev->wakeup_prepared)
1652                return 0;
1653
1654        /*
1655         * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1656         * Anderson we should be doing PME# wake enable followed by ACPI wake
1657         * enable.  To disable wake-up we call the platform first, for symmetry.
1658         */
1659
1660        if (enable) {
1661                int error;
1662
1663                if (pci_pme_capable(dev, state))
1664                        pci_pme_active(dev, true);
1665                else
1666                        ret = 1;
1667                error = runtime ? platform_pci_run_wake(dev, true) :
1668                                        platform_pci_sleep_wake(dev, true);
1669                if (ret)
1670                        ret = error;
1671                if (!ret)
1672                        dev->wakeup_prepared = true;
1673        } else {
1674                if (runtime)
1675                        platform_pci_run_wake(dev, false);
1676                else
1677                        platform_pci_sleep_wake(dev, false);
1678                pci_pme_active(dev, false);
1679                dev->wakeup_prepared = false;
1680        }
1681
1682        return ret;
1683}
1684EXPORT_SYMBOL(__pci_enable_wake);
1685
1686/**
1687 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1688 * @dev: PCI device to prepare
1689 * @enable: True to enable wake-up event generation; false to disable
1690 *
1691 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1692 * and this function allows them to set that up cleanly - pci_enable_wake()
1693 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1694 * ordering constraints.
1695 *
1696 * This function only returns an error code if the device is not capable of
1697 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1698 * enable wake-up power for it.
1699 */
1700int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1701{
1702        return pci_pme_capable(dev, PCI_D3cold) ?
1703                        pci_enable_wake(dev, PCI_D3cold, enable) :
1704                        pci_enable_wake(dev, PCI_D3hot, enable);
1705}
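
/*
 * Illustrative sketch: a legacy .suspend() hook arming system wake-up with
 * the helper above.  "foo_suspend" is hypothetical and error handling is
 * trimmed to the essentials.
 */
static int __maybe_unused foo_suspend(struct pci_dev *pdev,
                                      pm_message_t state)
{
        pci_save_state(pdev);
        pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
        return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}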
1706
1707/**
1708 * pci_target_state - find an appropriate low power state for a given PCI dev
1709 * @dev: PCI device
1710 *
1711 * Use underlying platform code to find a supported low power state for @dev.
1712 * If the platform can't manage @dev, return the deepest state from which it
1713 * can generate wake events, based on any available PME info.
1714 */
1715pci_power_t pci_target_state(struct pci_dev *dev)
1716{
1717        pci_power_t target_state = PCI_D3hot;
1718
1719        if (platform_pci_power_manageable(dev)) {
1720                /*
1721                 * Call the platform to choose the target state of the device
1722                 * and enable wake-up from this state if supported.
1723                 */
1724                pci_power_t state = platform_pci_choose_state(dev);
1725
1726                switch (state) {
1727                case PCI_POWER_ERROR:
1728                case PCI_UNKNOWN:
1729                        break;
1730                case PCI_D1:
1731                case PCI_D2:
1732                        if (pci_no_d1d2(dev))
1733                                break;
1734                default:
1735                        target_state = state;
1736                }
1737        } else if (!dev->pm_cap) {
1738                target_state = PCI_D0;
1739        } else if (device_may_wakeup(&dev->dev)) {
1740                /*
1741                 * Find the deepest state from which the device can generate
1742                 * wake-up events, make it the target state and enable device
1743                 * to generate PME#.
1744                 */
1745                if (dev->pme_support) {
1746                        while (target_state
1747                              && !(dev->pme_support & (1 << target_state)))
1748                                target_state--;
1749                }
1750        }
1751
1752        return target_state;
1753}
1754
1755/**
1756 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1757 * @dev: Device to handle.
1758 *
1759 * Choose the power state appropriate for the device depending on whether
1760 * it can wake up the system and/or is power manageable by the platform
1761 * (PCI_D3hot is the default) and put the device into that state.
1762 */
1763int pci_prepare_to_sleep(struct pci_dev *dev)
1764{
1765        pci_power_t target_state = pci_target_state(dev);
1766        int error;
1767
1768        if (target_state == PCI_POWER_ERROR)
1769                return -EIO;
1770
1771        /* D3cold during system suspend/hibernate is not supported */
1772        if (target_state > PCI_D3hot)
1773                target_state = PCI_D3hot;
1774
1775        pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1776
1777        error = pci_set_power_state(dev, target_state);
1778
1779        if (error)
1780                pci_enable_wake(dev, target_state, false);
1781
1782        return error;
1783}
1784
1785/**
1786 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1787 * @dev: Device to handle.
1788 *
1789 * Disable device's system wake-up capability and put it into D0.
1790 */
1791int pci_back_from_sleep(struct pci_dev *dev)
1792{
1793        pci_enable_wake(dev, PCI_D0, false);
1794        return pci_set_power_state(dev, PCI_D0);
1795}
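
/*
 * Illustrative sketch: pci_prepare_to_sleep() and pci_back_from_sleep()
 * pair naturally in a legacy suspend/resume implementation.  The "foo_"
 * names are hypothetical.
 */
static int __maybe_unused foo_legacy_suspend(struct pci_dev *pdev)
{
        pci_save_state(pdev);
        return pci_prepare_to_sleep(pdev);
}

static int __maybe_unused foo_legacy_resume(struct pci_dev *pdev)
{
        int error = pci_back_from_sleep(pdev);

        pci_restore_state(pdev);
        return error;
}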
1796
1797/**
1798 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1799 * @dev: PCI device being suspended.
1800 *
1801 * Prepare @dev to generate wake-up events at run time and put it into a low
1802 * power state.
1803 */
1804int pci_finish_runtime_suspend(struct pci_dev *dev)
1805{
1806        pci_power_t target_state = pci_target_state(dev);
1807        int error;
1808
1809        if (target_state == PCI_POWER_ERROR)
1810                return -EIO;
1811
1812        dev->runtime_d3cold = target_state == PCI_D3cold;
1813
1814        __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1815
1816        error = pci_set_power_state(dev, target_state);
1817
1818        if (error) {
1819                __pci_enable_wake(dev, target_state, true, false);
1820                dev->runtime_d3cold = false;
1821        }
1822
1823        return error;
1824}
1825
1826/**
1827 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1828 * @dev: Device to check.
1829 *
1830 * Return true if the device itself is capable of generating wake-up events
1831 * (through the platform or using the native PCIe PME) or if the device supports
1832 * PME and one of its upstream bridges can generate wake-up events.
1833 */
1834bool pci_dev_run_wake(struct pci_dev *dev)
1835{
1836        struct pci_bus *bus = dev->bus;
1837
1838        if (device_run_wake(&dev->dev))
1839                return true;
1840
1841        if (!dev->pme_support)
1842                return false;
1843
1844        while (bus->parent) {
1845                struct pci_dev *bridge = bus->self;
1846
1847                if (device_run_wake(&bridge->dev))
1848                        return true;
1849
1850                bus = bus->parent;
1851        }
1852
1853        /* We have reached the root bus. */
1854        if (bus->bridge)
1855                return device_run_wake(bus->bridge);
1856
1857        return false;
1858}
1859EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1860
1861void pci_config_pm_runtime_get(struct pci_dev *pdev)
1862{
1863        struct device *dev = &pdev->dev;
1864        struct device *parent = dev->parent;
1865
1866        if (parent)
1867                pm_runtime_get_sync(parent);
1868        pm_runtime_get_noresume(dev);
1869        /*
1870         * pdev->current_state is set to PCI_D3cold during suspending,
1871         * so wait until suspending completes
1872         */
1873        pm_runtime_barrier(dev);
1874        /*
1875         * Only need to resume devices in D3cold, because config
1876         * registers are still accessible for devices suspended but
1877         * not in D3cold.
1878         */
1879        if (pdev->current_state == PCI_D3cold)
1880                pm_runtime_resume(dev);
1881}
1882
1883void pci_config_pm_runtime_put(struct pci_dev *pdev)
1884{
1885        struct device *dev = &pdev->dev;
1886        struct device *parent = dev->parent;
1887
1888        pm_runtime_put(dev);
1889        if (parent)
1890                pm_runtime_put_sync(parent);
1891}
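
/*
 * Illustrative sketch: callers such as the sysfs config attributes bracket
 * raw config space accesses with the get/put pair above so that a device
 * sitting in D3cold is resumed first.  "foo_read_vendor" is hypothetical.
 */
static u16 __maybe_unused foo_read_vendor(struct pci_dev *pdev)
{
        u16 vendor;

        pci_config_pm_runtime_get(pdev);
        pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
        pci_config_pm_runtime_put(pdev);

        return vendor;
}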
1892
1893/**
1894 * pci_pm_init - Initialize PM functions of given PCI device
1895 * @dev: PCI device to handle.
1896 */
1897void pci_pm_init(struct pci_dev *dev)
1898{
1899        int pm;
1900        u16 pmc;
1901
1902        pm_runtime_forbid(&dev->dev);
1903        pm_runtime_set_active(&dev->dev);
1904        pm_runtime_enable(&dev->dev);
1905        device_enable_async_suspend(&dev->dev);
1906        dev->wakeup_prepared = false;
1907
1908        dev->pm_cap = 0;
1909
1910        /* find PCI PM capability in list */
1911        pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1912        if (!pm)
1913                return;
1914        /* Check device's ability to generate PME# */
1915        pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1916
1917        if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1918                dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1919                        pmc & PCI_PM_CAP_VER_MASK);
1920                return;
1921        }
1922
1923        dev->pm_cap = pm;
1924        dev->d3_delay = PCI_PM_D3_WAIT;
1925        dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
1926        dev->d3cold_allowed = true;
1927
1928        dev->d1_support = false;
1929        dev->d2_support = false;
1930        if (!pci_no_d1d2(dev)) {
1931                if (pmc & PCI_PM_CAP_D1)
1932                        dev->d1_support = true;
1933                if (pmc & PCI_PM_CAP_D2)
1934                        dev->d2_support = true;
1935
1936                if (dev->d1_support || dev->d2_support)
1937                        dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1938                                   dev->d1_support ? " D1" : "",
1939                                   dev->d2_support ? " D2" : "");
1940        }
1941
1942        pmc &= PCI_PM_CAP_PME_MASK;
1943        if (pmc) {
1944                dev_printk(KERN_DEBUG, &dev->dev,
1945                         "PME# supported from%s%s%s%s%s\n",
1946                         (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1947                         (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1948                         (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1949                         (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1950                         (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1951                dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1952                dev->pme_poll = true;
1953                /*
1954                 * Make device's PM flags reflect the wake-up capability, but
1955                 * let user space enable it to wake up the system as needed.
1956                 */
1957                device_set_wakeup_capable(&dev->dev, true);
1958                /* Disable the PME# generation functionality */
1959                pci_pme_active(dev, false);
1960        } else {
1961                dev->pme_support = 0;
1962        }
1963}
1964
1965/**
1966 * platform_pci_wakeup_init - init platform wakeup if present
1967 * @dev: PCI device
1968 *
1969 * Some devices don't have PCI PM caps but can still generate wakeup
1970 * events through platform methods (like ACPI events).  If @dev supports
1971 * platform wakeup events, set the device flag to indicate as much.  This
1972 * may be redundant if the device also supports PCI PM caps, but double
1973 * initialization should be safe in that case.
1974 */
1975void platform_pci_wakeup_init(struct pci_dev *dev)
1976{
1977        if (!platform_pci_can_wakeup(dev))
1978                return;
1979
1980        device_set_wakeup_capable(&dev->dev, true);
1981        platform_pci_sleep_wake(dev, false);
1982}
1983
1984static void pci_add_saved_cap(struct pci_dev *pci_dev,
1985        struct pci_cap_saved_state *new_cap)
1986{
1987        hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1988}
1989
1990/**
1991 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1992 * @dev: the PCI device
1993 * @cap: the capability to allocate the buffer for
1994 * @size: requested size of the buffer
1995 */
1996static int pci_add_cap_save_buffer(
1997        struct pci_dev *dev, char cap, unsigned int size)
1998{
1999        int pos;
2000        struct pci_cap_saved_state *save_state;
2001
2002        pos = pci_find_capability(dev, cap);
2003        if (pos <= 0)
2004                return 0;
2005
2006        save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2007        if (!save_state)
2008                return -ENOMEM;
2009
2010        save_state->cap.cap_nr = cap;
2011        save_state->cap.size = size;
2012        pci_add_saved_cap(dev, save_state);
2013
2014        return 0;
2015}
2016
2017/**
2018 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2019 * @dev: the PCI device
2020 */
2021void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2022{
2023        int error;
2024
2025        error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2026                                        PCI_EXP_SAVE_REGS * sizeof(u16));
2027        if (error)
2028                dev_err(&dev->dev,
2029                        "unable to preallocate PCI Express save buffer\n");
2030
2031        error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2032        if (error)
2033                dev_err(&dev->dev,
2034                        "unable to preallocate PCI-X save buffer\n");
2035}
2036
2037void pci_free_cap_save_buffers(struct pci_dev *dev)
2038{
2039        struct pci_cap_saved_state *tmp;
2040        struct hlist_node *pos, *n;
2041
2042        hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
2043                kfree(tmp);
2044}
2045
2046/**
2047 * pci_enable_ari - enable ARI forwarding if hardware supports it
2048 * @dev: the PCI device
2049 */
2050void pci_enable_ari(struct pci_dev *dev)
2051{
2052        u32 cap;
2053        struct pci_dev *bridge;
2054
2055        if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2056                return;
2057
2058        if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI))
2059                return;
2060
2061        bridge = dev->bus->self;
2062        if (!bridge)
2063                return;
2064
2065        pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2066        if (!(cap & PCI_EXP_DEVCAP2_ARI))
2067                return;
2068
2069        pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI);
2070        bridge->ari_enabled = 1;
2071}
2072
2073/**
2074 * pci_enable_ido - enable ID-based Ordering on a device
2075 * @dev: the PCI device
2076 * @type: which types of IDO to enable
2077 *
2078 * Enable ID-based ordering on @dev.  @type can contain the bits
2079 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2080 * which types of transactions are allowed to be re-ordered.
2081 */
2082void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2083{
2084        u16 ctrl = 0;
2085
2086        if (type & PCI_EXP_IDO_REQUEST)
2087                ctrl |= PCI_EXP_IDO_REQ_EN;
2088        if (type & PCI_EXP_IDO_COMPLETION)
2089                ctrl |= PCI_EXP_IDO_CMP_EN;
2090        if (ctrl)
2091                pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
2092}
2093EXPORT_SYMBOL(pci_enable_ido);
2094
2095/**
2096 * pci_disable_ido - disable ID-based ordering on a device
2097 * @dev: the PCI device
2098 * @type: which types of IDO to disable
2099 */
2100void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2101{
2102        u16 ctrl = 0;
2103
2104        if (type & PCI_EXP_IDO_REQUEST)
2105                ctrl |= PCI_EXP_IDO_REQ_EN;
2106        if (type & PCI_EXP_IDO_COMPLETION)
2107                ctrl |= PCI_EXP_IDO_CMP_EN;
2108        if (ctrl)
2109                pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
2110}
2111EXPORT_SYMBOL(pci_disable_ido);
2112
2113/**
2114 * pci_enable_obff - enable optimized buffer flush/fill
2115 * @dev: PCI device
2116 * @type: type of signaling to use
2117 *
2118 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
2119 * signaling if possible, falling back to message signaling only if
2120 * WAKE# isn't supported.  @type should indicate whether the PCIe link
2121 * should be brought out of L0s or L1 to send the message.  It should be
2122 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2123 *
2124 * If your device can benefit from receiving all messages, even at the
2125 * power cost of bringing the link back up from a low power state, use
2126 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2127 * preferred type).
2128 *
2129 * RETURNS:
2130 * Zero on success, appropriate error number on failure.
2131 */
2132int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2133{
2134        u32 cap;
2135        u16 ctrl;
2136        int ret;
2137
2138        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
2139        if (!(cap & PCI_EXP_OBFF_MASK))
2140                return -ENOTSUPP; /* no OBFF support at all */
2141
2142        /* Make sure the topology supports OBFF as well */
2143        if (dev->bus->self) {
2144                ret = pci_enable_obff(dev->bus->self, type);
2145                if (ret)
2146                        return ret;
2147        }
2148
2149        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
2150        if (cap & PCI_EXP_OBFF_WAKE) {
2151                ctrl |= PCI_EXP_OBFF_WAKE_EN;
2152        } else {
2153                switch (type) {
2154                case PCI_EXP_OBFF_SIGNAL_L0:
2155                        if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2156                                ctrl |= PCI_EXP_OBFF_MSGA_EN;
2157                        break;
2158                case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2159                        ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2160                        ctrl |= PCI_EXP_OBFF_MSGB_EN;
2161                        break;
2162                default:
2163                        WARN(1, "bad OBFF signal type\n");
2164                        return -ENOTSUPP;
2165                }
2166        }
2167        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl);
2168
2169        return 0;
2170}
2171EXPORT_SYMBOL(pci_enable_obff);
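
/*
 * Illustrative sketch: OBFF is an optimization, so a hypothetical probe
 * path ("foo_setup_obff") enables the preferred L0 signaling type and
 * carries on if the hierarchy does not support it.
 */
static void __maybe_unused foo_setup_obff(struct pci_dev *pdev)
{
        int ret = pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0);

        if (ret)
                dev_dbg(&pdev->dev, "OBFF not enabled: %d\n", ret);
}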
2172
2173/**
2174 * pci_disable_obff - disable optimized buffer flush/fill
2175 * @dev: PCI device
2176 *
2177 * Disable OBFF on @dev.
2178 */
2179void pci_disable_obff(struct pci_dev *dev)
2180{
2181        pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
2182}
2183EXPORT_SYMBOL(pci_disable_obff);
2184
2185/**
2186 * pci_ltr_supported - check whether a device supports LTR
2187 * @dev: PCI device
2188 *
2189 * RETURNS:
2190 * True if @dev supports latency tolerance reporting, false otherwise.
2191 */
2192static bool pci_ltr_supported(struct pci_dev *dev)
2193{
2194        u32 cap;
2195
2196        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
2197
2198        return cap & PCI_EXP_DEVCAP2_LTR;
2199}
2200
2201/**
2202 * pci_enable_ltr - enable latency tolerance reporting
2203 * @dev: PCI device
2204 *
2205 * Enable LTR on @dev if possible, which means enabling it first on
2206 * upstream ports.
2207 *
2208 * RETURNS:
2209 * Zero on success, errno on failure.
2210 */
2211int pci_enable_ltr(struct pci_dev *dev)
2212{
2213        int ret;
2214
2215        /* Only primary function can enable/disable LTR */
2216        if (PCI_FUNC(dev->devfn) != 0)
2217                return -EINVAL;
2218
2219        if (!pci_ltr_supported(dev))
2220                return -ENOTSUPP;
2221
2222        /* Enable upstream ports first */
2223        if (dev->bus->self) {
2224                ret = pci_enable_ltr(dev->bus->self);
2225                if (ret)
2226                        return ret;
2227        }
2228
2229        return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
2230}
2231EXPORT_SYMBOL(pci_enable_ltr);
2232
2233/**
2234 * pci_disable_ltr - disable latency tolerance reporting
2235 * @dev: PCI device
2236 */
2237void pci_disable_ltr(struct pci_dev *dev)
2238{
2239        /* Only primary function can enable/disable LTR */
2240        if (PCI_FUNC(dev->devfn) != 0)
2241                return;
2242
2243        if (!pci_ltr_supported(dev))
2244                return;
2245
2246        pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
2247}
2248EXPORT_SYMBOL(pci_disable_ltr);
2249
2250static int __pci_ltr_scale(int *val)
2251{
2252        int scale = 0;
2253
2254        while (*val > 1023) {
2255                *val = (*val + 31) / 32;
2256                scale++;
2257        }
2258        return scale;
2259}
2260
2261/**
2262 * pci_set_ltr - set LTR latency values
2263 * @dev: PCI device
2264 * @snoop_lat_ns: snoop latency in nanoseconds
2265 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2266 *
2267 * Figure out the scale and set the LTR values accordingly.
2268 */
2269int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2270{
2271        int pos, ret, snoop_scale, nosnoop_scale;
2272        u16 val;
2273
2274        if (!pci_ltr_supported(dev))
2275                return -ENOTSUPP;
2276
2277        snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2278        nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2279
2280        if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2281            nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2282                return -EINVAL;
2283
2284        if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2285            (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2286                return -EINVAL;
2287
2288        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2289        if (!pos)
2290                return -ENOTSUPP;
2291
2292        val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2293        ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2294        if (ret)
2295                return -EIO;
2296
2297        val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2298        ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2299        if (ret)
2300                return -EIO;
2301
2302        return 0;
2303}
2304EXPORT_SYMBOL(pci_set_ltr);
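
/*
 * Worked example of the encoding above: a request of 3000 ns exceeds the
 * 10-bit value field (1023), so __pci_ltr_scale() divides by 32 (rounding
 * up) once, giving value 94 at scale 1, i.e. an advertised tolerance of
 * 94 * 32 = 3008 ns.  The caller "foo_set_ltr" below is hypothetical.
 */
static int __maybe_unused foo_set_ltr(struct pci_dev *pdev)
{
        /* 3000 ns snoop and no-snoop; both round up to 3008 ns */
        return pci_set_ltr(pdev, 3000, 3000);
}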
2305
2306static int pci_acs_enable;
2307
2308/**
2309 * pci_request_acs - ask for ACS to be enabled if supported
2310 */
2311void pci_request_acs(void)
2312{
2313        pci_acs_enable = 1;
2314}
2315
2316/**
2317 * pci_enable_acs - enable ACS if hardware supports it
2318 * @dev: the PCI device
2319 */
2320void pci_enable_acs(struct pci_dev *dev)
2321{
2322        int pos;
2323        u16 cap;
2324        u16 ctrl;
2325
2326        if (!pci_acs_enable)
2327                return;
2328
2329        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2330        if (!pos)
2331                return;
2332
2333        pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2334        pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2335
2336        /* Source Validation */
2337        ctrl |= (cap & PCI_ACS_SV);
2338
2339        /* P2P Request Redirect */
2340        ctrl |= (cap & PCI_ACS_RR);
2341
2342        /* P2P Completion Redirect */
2343        ctrl |= (cap & PCI_ACS_CR);
2344
2345        /* Upstream Forwarding */
2346        ctrl |= (cap & PCI_ACS_UF);
2347
2348        pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2349}
2350
2351/**
2352 * pci_acs_enabled - test ACS against required flags for a given device
2353 * @pdev: device to test
2354 * @acs_flags: required PCI ACS flags
2355 *
2356 * Return true if the device supports the provided flags.  Automatically
2357 * filters out flags that are not implemented on multifunction devices.
2358 */
2359bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2360{
2361        int pos, ret;
2362        u16 ctrl;
2363
2364        ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2365        if (ret >= 0)
2366                return ret > 0;
2367
2368        if (!pci_is_pcie(pdev))
2369                return false;
2370
2371        /* Filter out flags not applicable to multifunction */
2372        if (pdev->multifunction)
2373                acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
2374                              PCI_ACS_EC | PCI_ACS_DT);
2375
2376        if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
2377            pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
2378            pdev->multifunction) {
2379                pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2380                if (!pos)
2381                        return false;
2382
2383                pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2384                if ((ctrl & acs_flags) != acs_flags)
2385                        return false;
2386        }
2387
2388        return true;
2389}
2390
2391/**
2392 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
2393 * @start: starting downstream device
2394 * @end: ending upstream device or NULL to search to the root bus
2395 * @acs_flags: required flags
2396 *
2397 * Walk up a device tree from start to end testing PCI ACS support.  If
2398 * any step along the way does not support the required flags, return false.
2399 */
2400bool pci_acs_path_enabled(struct pci_dev *start,
2401                          struct pci_dev *end, u16 acs_flags)
2402{
2403        struct pci_dev *pdev, *parent = start;
2404
2405        do {
2406                pdev = parent;
2407
2408                if (!pci_acs_enabled(pdev, acs_flags))
2409                        return false;
2410
2411                if (pci_is_root_bus(pdev->bus))
2412                        return (end == NULL);
2413
2414                parent = pdev->bus->self;
2415        } while (pdev != end);
2416
2417        return true;
2418}
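
/*
 * Illustrative sketch: an IOMMU-style caller asking whether peer-to-peer
 * traffic from @pdev is redirected upstream all the way to the root bus.
 * "foo_isolated" is hypothetical.
 */
static bool __maybe_unused foo_isolated(struct pci_dev *pdev)
{
        return pci_acs_path_enabled(pdev, NULL,
                                    PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}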
2419
2420/**
2421 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2422 * @dev: the PCI device
2423 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2424 *
2425 * Perform INTx swizzling for a device behind one level of bridge.  This is
2426 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2427 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2428 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2429 * the PCI Express Base Specification, Revision 2.1)
2430 */
2431u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2432{
2433        int slot;
2434
2435        if (pci_ari_enabled(dev->bus))
2436                slot = 0;
2437        else
2438                slot = PCI_SLOT(dev->devfn);
2439
2440        return (((pin - 1) + slot) % 4) + 1;
2441}
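
/*
 * Worked example of the swizzle above: a device in slot 3 using INTB
 * (pin == 2) appears on the bridge's upstream side as
 * (((2 - 1) + 3) % 4) + 1 == 1, i.e. INTA.
 */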
2442
2443int
2444pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2445{
2446        u8 pin;
2447
2448        pin = dev->pin;
2449        if (!pin)
2450                return -1;
2451
2452        while (!pci_is_root_bus(dev->bus)) {
2453                pin = pci_swizzle_interrupt_pin(dev, pin);
2454                dev = dev->bus->self;
2455        }
2456        *bridge = dev;
2457        return pin;
2458}
2459
2460/**
2461 * pci_common_swizzle - swizzle INTx all the way to root bridge
2462 * @dev: the PCI device
2463 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2464 *
2465 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2466 * bridges all the way up to a PCI root bus.
2467 */
2468u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2469{
2470        u8 pin = *pinp;
2471
2472        while (!pci_is_root_bus(dev->bus)) {
2473                pin = pci_swizzle_interrupt_pin(dev, pin);
2474                dev = dev->bus->self;
2475        }
2476        *pinp = pin;
2477        return PCI_SLOT(dev->devfn);
2478}
2479
2480/**
2481 *      pci_release_region - Release a PCI BAR
2482 *      @pdev: PCI device whose resources were previously reserved by pci_request_region
2483 *      @bar: BAR to release
2484 *
2485 *      Releases the PCI I/O and memory resources previously reserved by a
2486 *      successful call to pci_request_region.  Call this function only
2487 *      after all use of the PCI regions has ceased.
2488 */
2489void pci_release_region(struct pci_dev *pdev, int bar)
2490{
2491        struct pci_devres *dr;
2492
2493        if (pci_resource_len(pdev, bar) == 0)
2494                return;
2495        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2496                release_region(pci_resource_start(pdev, bar),
2497                                pci_resource_len(pdev, bar));
2498        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2499                release_mem_region(pci_resource_start(pdev, bar),
2500                                pci_resource_len(pdev, bar));
2501
2502        dr = find_pci_dr(pdev);
2503        if (dr)
2504                dr->region_mask &= ~(1 << bar);
2505}
2506
2507/**
2508 *      __pci_request_region - Reserve PCI I/O and memory resource
2509 *      @pdev: PCI device whose resources are to be reserved
2510 *      @bar: BAR to be reserved
2511 *      @res_name: Name to be associated with resource.
2512 *      @exclusive: whether the region access is exclusive or not
2513 *
2514 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
2515 *      being reserved by owner @res_name.  Do not access any
2516 *      address inside the PCI regions unless this call returns
2517 *      successfully.
2518 *
2519 *      If @exclusive is set, then the region is marked so that userspace
2520 *      is explicitly not allowed to map the resource via /dev/mem or
2521 *      sysfs MMIO access.
2522 *
2523 *      Returns 0 on success, or %EBUSY on error.  A warning
2524 *      message is also printed on failure.
2525 */
2526static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2527                                int exclusive)
2528{
2529        struct pci_devres *dr;
2530
2531        if (pci_resource_len(pdev, bar) == 0)
2532                return 0;
2533
2534        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2535                if (!request_region(pci_resource_start(pdev, bar),
2536                            pci_resource_len(pdev, bar), res_name))
2537                        goto err_out;
2538        } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2540                if (!__request_mem_region(pci_resource_start(pdev, bar),
2541                                        pci_resource_len(pdev, bar), res_name,
2542                                        exclusive))
2543                        goto err_out;
2544        }
2545
2546        dr = find_pci_dr(pdev);
2547        if (dr)
2548                dr->region_mask |= 1 << bar;
2549
2550        return 0;
2551
2552err_out:
2553        dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2554                 &pdev->resource[bar]);
2555        return -EBUSY;
2556}
2557
2558/**
2559 *      pci_request_region - Reserve PCI I/O and memory resource
2560 *      @pdev: PCI device whose resources are to be reserved
2561 *      @bar: BAR to be reserved
2562 *      @res_name: Name to be associated with resource
2563 *
2564 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
2565 *      being reserved by owner @res_name.  Do not access any
2566 *      address inside the PCI regions unless this call returns
2567 *      successfully.
2568 *
2569 *      Returns 0 on success, or %EBUSY on error.  A warning
2570 *      message is also printed on failure.
2571 */
2572int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2573{
2574        return __pci_request_region(pdev, bar, res_name, 0);
2575}
2576
2577/**
2578 *      pci_request_region_exclusive - Reserve PCI I/O and memory resource
2579 *      @pdev: PCI device whose resources are to be reserved
2580 *      @bar: BAR to be reserved
2581 *      @res_name: Name to be associated with resource.
2582 *
2583 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
2584 *      being reserved by owner @res_name.  Do not access any
2585 *      address inside the PCI regions unless this call returns
2586 *      successfully.
2587 *
2588 *      Returns 0 on success, or %EBUSY on error.  A warning
2589 *      message is also printed on failure.
2590 *
2591 *      The key difference _exclusive makes is that userspace is
2592 *      explicitly not allowed to map the resource via /dev/mem or
2593 *      sysfs.
2594 */
2595int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2596{
2597        return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2598}
2599/**
2600 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2601 * @pdev: PCI device whose resources were previously reserved
2602 * @bars: Bitmask of BARs to be released
2603 *
2604 * Release selected PCI I/O and memory resources previously reserved.
2605 * Call this function only after all use of the PCI regions has ceased.
2606 */
2607void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2608{
2609        int i;
2610
2611        for (i = 0; i < 6; i++)
2612                if (bars & (1 << i))
2613                        pci_release_region(pdev, i);
2614}
2615
2616int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2617                                 const char *res_name, int excl)
2618{
2619        int i;
2620
2621        for (i = 0; i < 6; i++)
2622                if (bars & (1 << i))
2623                        if (__pci_request_region(pdev, i, res_name, excl))
2624                                goto err_out;
2625        return 0;
2626
2627err_out:
2628        while (--i >= 0)
2629                if (bars & (1 << i))
2630                        pci_release_region(pdev, i);
2631
2632        return -EBUSY;
2633}
2634
2636/**
2637 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2638 * @pdev: PCI device whose resources are to be reserved
2639 * @bars: Bitmask of BARs to be requested
2640 * @res_name: Name to be associated with resource
2641 */
2642int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2643                                 const char *res_name)
2644{
2645        return __pci_request_selected_regions(pdev, bars, res_name, 0);
2646}
2647
2648int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2649                                 int bars, const char *res_name)
2650{
2651        return __pci_request_selected_regions(pdev, bars, res_name,
2652                        IORESOURCE_EXCLUSIVE);
2653}
2654
2655/**
2656 *      pci_release_regions - Release reserved PCI I/O and memory resources
2657 *      @pdev: PCI device whose resources were previously reserved by pci_request_regions
2658 *
2659 *      Releases all PCI I/O and memory resources previously reserved by a
2660 *      successful call to pci_request_regions.  Call this function only
2661 *      after all use of the PCI regions has ceased.
2662 */
2664void pci_release_regions(struct pci_dev *pdev)
2665{
2666        pci_release_selected_regions(pdev, (1 << 6) - 1);
2667}
2668
2669/**
2670 *      pci_request_regions - Reserve PCI I/O and memory resources
2671 *      @pdev: PCI device whose resources are to be reserved
2672 *      @res_name: Name to be associated with resource.
2673 *
2674 *      Mark all PCI regions associated with PCI device @pdev as
2675 *      being reserved by owner @res_name.  Do not access any
2676 *      address inside the PCI regions unless this call returns
2677 *      successfully.
2678 *
2679 *      Returns 0 on success, or %EBUSY on error.  A warning
2680 *      message is also printed on failure.
2681 */
2682int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2683{
2684        return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2685}
2686
2687/**
2688 *      pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2689 *      @pdev: PCI device whose resources are to be reserved
2690 *      @res_name: Name to be associated with resource.
2691 *
2692 *      Mark all PCI regions associated with PCI device @pdev as
2693 *      being reserved by owner @res_name.  Do not access any
2694 *      address inside the PCI regions unless this call returns
2695 *      successfully.
2696 *
2697 *      pci_request_regions_exclusive() will mark the region so that
2698 *      /dev/mem and the sysfs MMIO access will not be allowed.
2699 *
2700 *      Returns 0 on success, or %EBUSY on error.  A warning
2701 *      message is also printed on failure.
2702 */
2703int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2704{
2705        return pci_request_selected_regions_exclusive(pdev,
2706                                        ((1 << 6) - 1), res_name);
2707}
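
/*
 * Illustrative probe-path sketch using the region helpers above; the
 * "foo" owner string and function name are hypothetical.
 */
static int __maybe_unused foo_probe_regions(struct pci_dev *pdev)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret)
                return ret;

        ret = pci_request_regions(pdev, "foo");
        if (ret) {
                pci_disable_device(pdev);
                return ret;
        }

        /* ... map BARs and initialize the device here ... */

        return 0;
}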
2708
2709static void __pci_set_master(struct pci_dev *dev, bool enable)
2710{
2711        u16 old_cmd, cmd;
2712
2713        pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2714        if (enable)
2715                cmd = old_cmd | PCI_COMMAND_MASTER;
2716        else
2717                cmd = old_cmd & ~PCI_COMMAND_MASTER;
2718        if (cmd != old_cmd) {
2719                dev_dbg(&dev->dev, "%s bus mastering\n",
2720                        enable ? "enabling" : "disabling");
2721                pci_write_config_word(dev, PCI_COMMAND, cmd);
2722        }
2723        dev->is_busmaster = enable;
2724}
2725
2726/**
2727 * pcibios_setup - process "pci=" kernel boot arguments
2728 * @str: string used to pass in "pci=" kernel boot arguments
2729 *
2730 * Process kernel boot arguments.  This is the default implementation.
2731 * Architecture specific implementations can override this as necessary.
2732 */
2733char * __weak __init pcibios_setup(char *str)
2734{
2735        return str;
2736}
2737
2738/**
2739 * pcibios_set_master - enable PCI bus-mastering for device dev
2740 * @dev: the PCI device to enable
2741 *
2742 * Enables PCI bus-mastering for the device.  This is the default
2743 * implementation.  Architecture specific implementations can override
2744 * this if necessary.
2745 */
2746void __weak pcibios_set_master(struct pci_dev *dev)
2747{
2748        u8 lat;
2749
2750        /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2751        if (pci_is_pcie(dev))
2752                return;
2753
2754        pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2755        if (lat < 16)
2756                lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2757        else if (lat > pcibios_max_latency)
2758                lat = pcibios_max_latency;
2759        else
2760                return;
2761        dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2762        pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2763}
2764
2765/**
2766 * pci_set_master - enables bus-mastering for device dev
2767 * @dev: the PCI device to enable
2768 *
2769 * Enables bus-mastering on the device and calls pcibios_set_master()
2770 * to do the needed arch specific settings.
2771 */
2772void pci_set_master(struct pci_dev *dev)
2773{
2774        __pci_set_master(dev, true);
2775        pcibios_set_master(dev);
2776}
2777
2778/**
2779 * pci_clear_master - disables bus-mastering for device dev
2780 * @dev: the PCI device to disable
2781 */
2782void pci_clear_master(struct pci_dev *dev)
2783{
2784        __pci_set_master(dev, false);
2785}
2786
2787/**
2788 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2789 * @dev: the PCI device for which MWI is to be enabled
2790 *
2791 * Helper function for pci_set_mwi.
2792 * Originally copied from drivers/net/acenic.c.
2793 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2794 *
2795 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2796 */
2797int pci_set_cacheline_size(struct pci_dev *dev)
2798{
2799        u8 cacheline_size;
2800
2801        if (!pci_cache_line_size)
2802                return -EINVAL;
2803
2804        /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2805           equal to or a multiple of the right value. */
2806        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2807        if (cacheline_size >= pci_cache_line_size &&
2808            (cacheline_size % pci_cache_line_size) == 0)
2809                return 0;
2810
2811        /* Write the correct value. */
2812        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2813        /* Read it back. */
2814        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2815        if (cacheline_size == pci_cache_line_size)
2816                return 0;
2817
2818        dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2819                   "supported\n", pci_cache_line_size << 2);
2820
2821        return -EINVAL;
2822}
2823EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2824
2825#ifdef PCI_DISABLE_MWI
2826int pci_set_mwi(struct pci_dev *dev)
2827{
2828        return 0;
2829}
2830
2831int pci_try_set_mwi(struct pci_dev *dev)
2832{
2833        return 0;
2834}
2835
2836void pci_clear_mwi(struct pci_dev *dev)
2837{
2838}
2839
2840#else
2841
2842/**
2843 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2844 * @dev: the PCI device for which MWI is enabled
2845 *
2846 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2847 *
2848 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2849 */
2850int
2851pci_set_mwi(struct pci_dev *dev)
2852{
2853        int rc;
2854        u16 cmd;
2855
2856        rc = pci_set_cacheline_size(dev);
2857        if (rc)
2858                return rc;
2859
2860        pci_read_config_word(dev, PCI_COMMAND, &cmd);
2861        if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2862                dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2863                cmd |= PCI_COMMAND_INVALIDATE;
2864                pci_write_config_word(dev, PCI_COMMAND, cmd);
2865        }
2866
2867        return 0;
2868}
2869
2870/**
2871 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2872 * @dev: the PCI device for which MWI is enabled
2873 *
2874 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2875 * Callers are not required to check the return value.
2876 *
2877 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2878 */
2879int pci_try_set_mwi(struct pci_dev *dev)
2880{
2881        return pci_set_mwi(dev);
2883}
2884
2885/**
2886 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2887 * @dev: the PCI device to disable
2888 *
2889 * Disables PCI Memory-Write-Invalidate transaction on the device
2890 */
2891void
2892pci_clear_mwi(struct pci_dev *dev)
2893{
2894        u16 cmd;
2895
2896        pci_read_config_word(dev, PCI_COMMAND, &cmd);
2897        if (cmd & PCI_COMMAND_INVALIDATE) {
2898                cmd &= ~PCI_COMMAND_INVALIDATE;
2899                pci_write_config_word(dev, PCI_COMMAND, cmd);
2900        }
2901}
2902#endif /* ! PCI_DISABLE_MWI */
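
/*
 * Illustrative sketch: MWI is a best-effort optimization, so a typical
 * init path uses the "try" variant and ignores the result.  The function
 * name is hypothetical.
 */
static void __maybe_unused foo_enable_master_and_mwi(struct pci_dev *pdev)
{
        pci_set_master(pdev);   /* required before the device may DMA */
        pci_try_set_mwi(pdev);  /* best effort; failure is harmless */
}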
2903
2904/**
2905 * pci_intx - enables/disables PCI INTx for device dev
2906 * @pdev: the PCI device to operate on
2907 * @enable: boolean: whether to enable or disable PCI INTx
2908 *
2909 * Enables/disables PCI INTx for device dev
2910 */
2911void
2912pci_intx(struct pci_dev *pdev, int enable)
2913{
2914        u16 pci_command, new;
2915
2916        pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2917
2918        if (enable) {
2919                new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2920        } else {
2921                new = pci_command | PCI_COMMAND_INTX_DISABLE;
2922        }
2923
2924        if (new != pci_command) {
2925                struct pci_devres *dr;
2926
2927                pci_write_config_word(pdev, PCI_COMMAND, new);
2928
2929                dr = find_pci_dr(pdev);
2930                if (dr && !dr->restore_intx) {
2931                        dr->restore_intx = 1;
2932                        dr->orig_intx = !enable;
2933                }
2934        }
2935}
2936
2937/**
2938 * pci_intx_mask_supported - probe for INTx masking support
2939 * @dev: the PCI device to operate on
2940 *
2941 * Check if the device dev supports INTx masking via the config space
2942 * command word.
2943 */
2944bool pci_intx_mask_supported(struct pci_dev *dev)
2945{
2946        bool mask_supported = false;
2947        u16 orig, new;
2948
2949        if (dev->broken_intx_masking)
2950                return false;
2951
2952        pci_cfg_access_lock(dev);
2953
2954        pci_read_config_word(dev, PCI_COMMAND, &orig);
2955        pci_write_config_word(dev, PCI_COMMAND,
2956                              orig ^ PCI_COMMAND_INTX_DISABLE);
2957        pci_read_config_word(dev, PCI_COMMAND, &new);
2958
2959        /*
2960         * There's no way to protect against hardware bugs or detect them
2961         * reliably, but as long as we know what the value should be, let's
2962         * go ahead and check it.
2963         */
2964        if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2965                dev_err(&dev->dev, "Command register changed from "
2966                        "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2967        } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2968                mask_supported = true;
2969                pci_write_config_word(dev, PCI_COMMAND, orig);
2970        }
2971
2972        pci_cfg_access_unlock(dev);
2973        return mask_supported;
2974}
2975EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2976
2977static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2978{
2979        struct pci_bus *bus = dev->bus;
2980        bool mask_updated = true;
2981        u32 cmd_status_dword;
2982        u16 origcmd, newcmd;
2983        unsigned long flags;
2984        bool irq_pending;
2985
2986        /*
2987         * We do a single dword read to retrieve both command and status.
2988         * Document assumptions that make this possible.
2989         */
2990        BUILD_BUG_ON(PCI_COMMAND % 4);
2991        BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2992
2993        raw_spin_lock_irqsave(&pci_lock, flags);
2994
2995        bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2996
2997        irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2998
2999        /*
3000         * Check interrupt status register to see whether our device
3001         * triggered the interrupt (when masking) or the next IRQ is
3002         * already pending (when unmasking).
3003         */
3004        if (mask != irq_pending) {
3005                mask_updated = false;
3006                goto done;
3007        }
3008
3009        origcmd = cmd_status_dword;
3010        newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3011        if (mask)
3012                newcmd |= PCI_COMMAND_INTX_DISABLE;
3013        if (newcmd != origcmd)
3014                bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3015
3016done:
3017        raw_spin_unlock_irqrestore(&pci_lock, flags);
3018
3019        return mask_updated;
3020}
3021
3022/**
3023 * pci_check_and_mask_intx - mask INTx on pending interrupt
3024 * @dev: the PCI device to operate on
3025 *
3026 * Check if the device dev has its INTx line asserted, mask it and
3027 * return true in that case. False is returned if no interrupt was
3028 * pending.
3029 */
3030bool pci_check_and_mask_intx(struct pci_dev *dev)
3031{
3032        return pci_check_and_set_intx_mask(dev, true);
3033}
3034EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3035
3036/**
3037 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
3038 * @dev: the PCI device to operate on
3039 *
3040 * Check if the device dev has its INTx line asserted, unmask it if not
3041 * and return true. False is returned and the mask remains active if
3042 * there was still an interrupt pending.
3043 */
3044bool pci_check_and_unmask_intx(struct pci_dev *dev)
3045{
3046        return pci_check_and_set_intx_mask(dev, false);
3047}
3048EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
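
/*
 * Illustrative sketch: a handler for a shared INTx line in the style of
 * uio/vfio-type drivers.  The masked line would later be re-enabled with
 * pci_check_and_unmask_intx().  "foo_irq" is hypothetical.
 */
static irqreturn_t __maybe_unused foo_irq(int irq, void *data)
{
        struct pci_dev *pdev = data;

        if (!pci_check_and_mask_intx(pdev))
                return IRQ_NONE;        /* not ours; line stays unmasked */

        /* device-specific acknowledgment would go here */
        return IRQ_HANDLED;
}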
3049
3050/**
3051 * pci_msi_off - disables any MSI or MSI-X interrupts on the device
3052 * @dev: the PCI device to operate on
3053 *
3054 * If you want to use MSI, see pci_enable_msi() and friends.
3055 * This is a lower-level primitive that allows us to disable
3056 * MSI operation at the device level.
3057 */
3058void pci_msi_off(struct pci_dev *dev)
3059{
3060        int pos;
3061        u16 control;
3062
3063        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3064        if (pos) {
3065                pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3066                control &= ~PCI_MSI_FLAGS_ENABLE;
3067                pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3068        }
3069        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3070        if (pos) {
3071                pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3072                control &= ~PCI_MSIX_FLAGS_ENABLE;
3073                pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3074        }
3075}
3076EXPORT_SYMBOL_GPL(pci_msi_off);
3077
3078int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3079{
3080        return dma_set_max_seg_size(&dev->dev, size);
3081}
3082EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3083
3084int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3085{
3086        return dma_set_seg_boundary(&dev->dev, mask);
3087}
3088EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3089
3090static int pcie_flr(struct pci_dev *dev, int probe)
3091{
3092        int i;
3093        u32 cap;
3094        u16 status;
3095
3096        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3097        if (!(cap & PCI_EXP_DEVCAP_FLR))
3098                return -ENOTTY;
3099
3100        if (probe)
3101                return 0;
3102
3103        /* Wait for the Transaction Pending bit to clear */
3104        for (i = 0; i < 4; i++) {
3105                if (i)
3106                        msleep((1 << (i - 1)) * 100);
3107
3108                pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
3109                if (!(status & PCI_EXP_DEVSTA_TRPND))
3110                        goto clear;
3111        }
3112
3113        dev_err(&dev->dev, "transaction is not cleared; "
3114                        "proceeding with reset anyway\n");
3115
3116clear:
3117        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3118
3119        msleep(100);
3120
3121        return 0;
3122}
3123
3124static int pci_af_flr(struct pci_dev *dev, int probe)
3125{
3126        int i;
3127        int pos;
3128        u8 cap;
3129        u8 status;
3130
3131        pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3132        if (!pos)
3133                return -ENOTTY;
3134
3135        pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3136        if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3137                return -ENOTTY;
3138
3139        if (probe)
3140                return 0;
3141
3142        /* Wait for the Transaction Pending bit to clear */
3143        for (i = 0; i < 4; i++) {
3144                if (i)
3145                        msleep((1 << (i - 1)) * 100);
3146
3147                pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3148                if (!(status & PCI_AF_STATUS_TP))
3149                        goto clear;
3150        }
3151
3152        dev_err(&dev->dev, "transaction is not cleared; "
3153                        "proceeding with reset anyway\n");
3154
3155clear:
3156        pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3157        msleep(100);
3158
3159        return 0;
3160}
3161
3162/**
3163 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3164 * @dev: Device to reset.
3165 * @probe: If set, only check if the device can be reset this way.
3166 *
3167 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3168 * unset, it will be reinitialized internally when going from PCI_D3hot to
3169 * PCI_D0.  If that's the case and the device is not in a low-power state
3170 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3171 *
3172 * NOTE: This causes the caller to sleep for twice the device power transition
3173 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3174 * by default (i.e. unless @dev's d3_delay field has a different value).
3175 * Moreover, only devices in D0 can be reset by this function.
3176 */
3177static int pci_pm_reset(struct pci_dev *dev, int probe)
3178{
3179        u16 csr;
3180
3181        if (!dev->pm_cap)
3182                return -ENOTTY;
3183
3184        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3185        if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3186                return -ENOTTY;
3187
3188        if (probe)
3189                return 0;
3190
3191        if (dev->current_state != PCI_D0)
3192                return -EINVAL;
3193
3194        csr &= ~PCI_PM_CTRL_STATE_MASK;
3195        csr |= PCI_D3hot;
3196        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3197        pci_dev_d3_sleep(dev);
3198
3199        csr &= ~PCI_PM_CTRL_STATE_MASK;
3200        csr |= PCI_D0;
3201        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3202        pci_dev_d3_sleep(dev);
3203
3204        return 0;
3205}
3206
3207static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3208{
3209        u16 ctrl;
3210        struct pci_dev *pdev;
3211
3212        if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3213                return -ENOTTY;
3214
3215        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3216                if (pdev != dev)
3217                        return -ENOTTY;
3218
3219        if (probe)
3220                return 0;
3221
3222        pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3223        ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3224        pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3225        msleep(100);
3226
3227        ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3228        pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3229        msleep(100);
3230
3231        return 0;
3232}
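
/*
 * Illustrative note (not part of pci.c): pci_parent_bus_reset() only
 * succeeds when @dev is the sole device on its bus (hence the
 * list_for_each_entry() check above), because asserting the bridge's
 * Secondary Bus Reset bit resets every device behind that bridge.
 */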
3233
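/*
 * Try the available reset methods in order of preference: a
 * device-specific quirk, PCIe FLR, AF FLR, a PM D3hot/D0 cycle, and
 * finally a secondary bus reset via the parent bridge.  Each method
 * returns -ENOTTY when it does not apply, so we fall through to the
 * next one.
 */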
3234static int __pci_dev_reset(struct pci_dev *dev, int probe)
3235{
3236        int rc;
3237
3238        might_sleep();
3239
3240        rc = pci_dev_specific_reset(dev, probe);
3241        if (rc != -ENOTTY)
3242                goto done;
3243
3244        rc = pcie_flr(dev, probe);
3245        if (rc != -ENOTTY)
3246                goto done;
3247
3248        rc = pci_af_flr(dev, probe);
3249        if (rc != -ENOTTY)
3250                goto done;
3251
3252        rc = pci_pm_reset(dev, probe);
3253        if (rc != -ENOTTY)
3254                goto done;
3255
3256        rc = pci_parent_bus_reset(dev, probe);
3257done:
3258        return rc;
3259}
3260
3261static int pci_dev_reset(struct pci_dev *dev, int probe)
3262{
3263        int rc;
3264
3265        if (!probe) {
3266                pci_cfg_access_lock(dev);
3267                /* block PM suspend, driver probe, etc. */
3268                device_lock(&dev->dev);
3269        }
3270
3271        rc = __pci_dev_reset(dev, probe);
3272
3273        if (!probe) {
3274                device_unlock(&dev->dev);
3275                pci_cfg_access_unlock(dev);
3276        }
3277        return rc;
3278}
3279/**
3280 * __pci_reset_function - reset a PCI device function
3281 * @dev: PCI device to reset
3282 *
3283 * Some devices allow an individual function to be reset without affecting
3284 * other functions in the same device.  The PCI device must be responsive
3285 * to PCI config space in order to use this function.
3286 *
3287 * The device function is presumed to be unused when this function is called.
3288 * Resetting the device will make the contents of PCI configuration space
3289 * random, so any caller of this must be prepared to reinitialise the
3290 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3291 * etc.
3292 *
3293 * Returns 0 if the device function was successfully reset or negative if the
3294 * device doesn't support resetting a single function.
3295 */
3296int __pci_reset_function(struct pci_dev *dev)
3297{
3298        return pci_dev_reset(dev, 0);
3299}
3300EXPORT_SYMBOL_GPL(__pci_reset_function);
3301
3302/**
3303 * __pci_reset_function_locked - reset a PCI device function while holding
3304 * the @dev mutex lock.
3305 * @dev: PCI device to reset
3306 *
3307 * Some devices allow an individual function to be reset without affecting
3308 * other functions in the same device.  The PCI device must be responsive
3309 * to PCI config space in order to use this function.
3310 *
3311 * The device function is presumed to be unused and the caller is holding
3312 * the device mutex lock when this function is called.
3313 * Resetting the device will make the contents of PCI configuration space
3314 * random, so any caller of this must be prepared to reinitialise the
3315 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3316 * etc.
3317 *
3318 * Returns 0 if the device function was successfully reset or negative if the
3319 * device doesn't support resetting a single function.
3320 */
3321int __pci_reset_function_locked(struct pci_dev *dev)
3322{
3323        return __pci_dev_reset(dev, 0);
3324}
3325EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3326
3327/**
3328 * pci_probe_reset_function - check whether the device can be safely reset
3329 * @dev: PCI device to reset
3330 *
3331 * Some devices allow an individual function to be reset without affecting
3332 * other functions in the same device.  The PCI device must be responsive
3333 * to PCI config space in order to use this function.
3334 *
3335 * Returns 0 if the device function can be reset or negative if the
3336 * device doesn't support resetting a single function.
3337 */
3338int pci_probe_reset_function(struct pci_dev *dev)
3339{
3340        return pci_dev_reset(dev, 1);
3341}
3342
3343/**
3344 * pci_reset_function - quiesce and reset a PCI device function
3345 * @dev: PCI device to reset
3346 *
3347 * Some devices allow an individual function to be reset without affecting
3348 * other functions in the same device.  The PCI device must be responsive
3349 * to PCI config space in order to use this function.
3350 *
3351 * This function does not just reset the PCI portion of a device, but
3352 * clears all the state associated with the device.  This function differs
3353 * from __pci_reset_function in that it saves and restores device state
3354 * over the reset.
3355 *
3356 * Returns 0 if the device function was successfully reset or negative if the
3357 * device doesn't support resetting a single function.
3358 */
3359int pci_reset_function(struct pci_dev *dev)
3360{
3361        int rc;
3362
3363        rc = pci_dev_reset(dev, 1);
3364        if (rc)
3365                return rc;
3366
3367        pci_save_state(dev);
3368
3369        /*
3370         * both INTx and MSI are disabled after the Interrupt Disable bit
3371         * is set and the Bus Master bit is cleared.
3372         */
3373        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3374
3375        rc = pci_dev_reset(dev, 0);
3376
3377        pci_restore_state(dev);
3378
3379        return rc;
3380}
3381EXPORT_SYMBOL_GPL(pci_reset_function);
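
/*
 * Illustrative sketch (not part of pci.c): how a driver might use the
 * exported pci_reset_function() from an error handler.  "mydrv_recover"
 * and the re-programming step are hypothetical.
 */
#if 0
static int mydrv_recover(struct pci_dev *pdev)
{
        int rc;

        rc = pci_reset_function(pdev);  /* quiesces, resets, restores state */
        if (rc)
                return rc;              /* e.g. -ENOTTY: no reset method */

        /* Re-program device-specific registers lost over the reset. */
        return 0;
}
#endif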
3382
3383/**
3384 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3385 * @dev: PCI device to query
3386 *
3387 * Returns mmrbc: maximum designed memory read count in bytes
3388 *    or appropriate error value.
3389 */
3390int pcix_get_max_mmrbc(struct pci_dev *dev)
3391{
3392        int cap;
3393        u32 stat;
3394
3395        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3396        if (!cap)
3397                return -EINVAL;
3398
3399        if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3400                return -EINVAL;
3401
3402        return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3403}
3404EXPORT_SYMBOL(pcix_get_max_mmrbc);
3405
3406/**
3407 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3408 * @dev: PCI device to query
3409 *
3410 * Returns mmrbc: maximum memory read count in bytes
3411 *    or appropriate error value.
3412 */
3413int pcix_get_mmrbc(struct pci_dev *dev)
3414{
3415        int cap;
3416        u16 cmd;
3417
3418        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3419        if (!cap)
3420                return -EINVAL;
3421
3422        if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3423                return -EINVAL;
3424
3425        return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3426}
3427EXPORT_SYMBOL(pcix_get_mmrbc);
3428
3429/**
3430 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3431 * @dev: PCI device to query
3432 * @mmrbc: maximum memory read count in bytes
3433 *    valid values are 512, 1024, 2048, 4096
3434 *
3435 * If possible, sets the maximum memory read byte count; some bridges
3436 * have errata that prevent this.
3437 */
3438int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3439{
3440        int cap;
3441        u32 stat, v, o;
3442        u16 cmd;
3443
3444        if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3445                return -EINVAL;
3446
3447        v = ffs(mmrbc) - 10;
3448
3449        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3450        if (!cap)
3451                return -EINVAL;
3452
3453        if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3454                return -EINVAL;
3455
3456        if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3457                return -E2BIG;
3458
3459        if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3460                return -EINVAL;
3461
3462        o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3463        if (o != v) {
3464                if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3465                        return -EIO;
3466
3467                cmd &= ~PCI_X_CMD_MAX_READ;
3468                cmd |= v << 2;
3469                if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3470                        return -EIO;
3471        }
3472        return 0;
3473}
3474EXPORT_SYMBOL(pcix_set_mmrbc);
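
/*
 * Illustrative sketch (not part of pci.c): clamping a requested MMRBC
 * to the design maximum before applying it.  "mydrv_tune_mmrbc" is
 * hypothetical; "want" must already be a power of two in [512, 4096],
 * and since pcix_get_max_mmrbc() also returns a power of two, the
 * clamp preserves that invariant.
 */
#if 0
static int mydrv_tune_mmrbc(struct pci_dev *pdev, int want)
{
        int max = pcix_get_max_mmrbc(pdev);

        if (max < 0)
                return max;     /* not PCI-X, or the config read failed */
        if (want > max)
                want = max;
        return pcix_set_mmrbc(pdev, want);
}
#endif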
3475
3476/**
3477 * pcie_get_readrq - get PCI Express read request size
3478 * @dev: PCI device to query
3479 *
3480 * Returns maximum memory read request in bytes
3481 *    or appropriate error value.
3482 */
3483int pcie_get_readrq(struct pci_dev *dev)
3484{
3485        u16 ctl;
3486
3487        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3488
3489        return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3490}
3491EXPORT_SYMBOL(pcie_get_readrq);
3492
3493/**
3494 * pcie_set_readrq - set PCI Express maximum memory read request
3495 * @dev: PCI device to query
3496 * @rq: maximum memory read count in bytes
3497 *    valid values are 128, 256, 512, 1024, 2048, 4096
3498 *
3499 * If possible, sets the maximum memory read request in bytes.
3500 */
3501int pcie_set_readrq(struct pci_dev *dev, int rq)
3502{
3503        u16 v;
3504
3505        if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3506                return -EINVAL;
3507
3508        /*
3509         * If using the "performance" PCIe config, we clamp the
3510         * read rq size to the max packet size to prevent the
3511         * host bridge from generating requests larger than we can
3512         * cope with.
3513         */
3514        if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3515                int mps = pcie_get_mps(dev);
3516
3517                if (mps < 0)
3518                        return mps;
3519                if (mps < rq)
3520                        rq = mps;
3521        }
3522
3523        v = (ffs(rq) - 8) << 12;
3524
3525        return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3526                                                  PCI_EXP_DEVCTL_READRQ, v);
3527}
3528EXPORT_SYMBOL(pcie_set_readrq);
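
/*
 * Illustrative sketch (not part of pci.c): the READRQ encoding used
 * above maps rq = 128 << n to DEVCTL[14:12] = n; e.g. rq = 512 gives
 * (ffs(512) - 8) << 12 = 2 << 12, which decodes back as 128 << 2 = 512.
 * "mydrv_bump_readrq" is hypothetical.
 */
#if 0
static int mydrv_bump_readrq(struct pci_dev *pdev)
{
        if (pcie_get_readrq(pdev) < 512)
                return pcie_set_readrq(pdev, 512);
        return 0;
}
#endif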
3529
3530/**
3531 * pcie_get_mps - get PCI Express maximum payload size
3532 * @dev: PCI device to query
3533 *
3534 * Returns maximum payload size in bytes
3535 *    or appropriate error value.
3536 */
3537int pcie_get_mps(struct pci_dev *dev)
3538{
3539        u16 ctl;
3540
3541        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3542
3543        return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3544}
3545
3546/**
3547 * pcie_set_mps - set PCI Express maximum payload size
3548 * @dev: PCI device to query
3549 * @mps: maximum payload size in bytes
3550 *    valid values are 128, 256, 512, 1024, 2048, 4096
3551 *
3552 * If possible, sets the maximum payload size.
3553 */
3554int pcie_set_mps(struct pci_dev *dev, int mps)
3555{
3556        u16 v;
3557
3558        if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3559                return -EINVAL;
3560
3561        v = ffs(mps) - 8;
3562        if (v > dev->pcie_mpss)
3563                return -EINVAL;
3564        v <<= 5;
3565
3566        return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3567                                                  PCI_EXP_DEVCTL_PAYLOAD, v);
3568}
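
/*
 * Illustrative sketch (not part of pci.c): programming a function's MPS
 * to match its upstream bridge.  The encoding mirrors READRQ but lands
 * in DEVCTL[7:5]: mps = 256 gives ffs(256) - 8 = 1, decoding back as
 * 128 << 1 = 256.  "mydrv_match_parent_mps" is hypothetical;
 * pcie_set_mps() rejects values above the device's MPSS capability.
 */
#if 0
static int mydrv_match_parent_mps(struct pci_dev *pdev)
{
        struct pci_dev *parent = pdev->bus->self;

        if (!parent)
                return -ENODEV;
        return pcie_set_mps(pdev, pcie_get_mps(parent));
}
#endif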
3569
3570/**
3571 * pci_select_bars - Make BAR mask from the type of resource
3572 * @dev: the PCI device for which BAR mask is made
3573 * @flags: resource type mask to be selected
3574 *
3575 * This helper routine makes a BAR mask from the given resource type.
3576 */
3577int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3578{
3579        int i, bars = 0;
3580        for (i = 0; i < PCI_NUM_RESOURCES; i++)
3581                if (pci_resource_flags(dev, i) & flags)
3582                        bars |= (1 << i);
3583        return bars;
3584}
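
/*
 * Illustrative sketch (not part of pci.c): pairing pci_select_bars()
 * with the exported pci_request_selected_regions() so a driver claims
 * only its memory BARs.  "mydrv" is a hypothetical driver name.
 */
#if 0
static int mydrv_claim_mem_bars(struct pci_dev *pdev)
{
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);

        return pci_request_selected_regions(pdev, bars, "mydrv");
}
#endif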
3585
3586/**
3587 * pci_resource_bar - get position of the BAR associated with a resource
3588 * @dev: the PCI device
3589 * @resno: the resource number
3590 * @type: the BAR type to be filled in
3591 *
3592 * Returns BAR position in config space, or 0 if the BAR is invalid.
3593 */
3594int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3595{
3596        int reg;
3597
3598        if (resno < PCI_ROM_RESOURCE) {
3599                *type = pci_bar_unknown;
3600                return PCI_BASE_ADDRESS_0 + 4 * resno;
3601        } else if (resno == PCI_ROM_RESOURCE) {
3602                *type = pci_bar_mem32;
3603                return dev->rom_base_reg;
3604        } else if (resno < PCI_BRIDGE_RESOURCES) {
3605                /* device specific resource */
3606                reg = pci_iov_resource_bar(dev, resno, type);
3607                if (reg)
3608                        return reg;
3609        }
3610
3611        dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3612        return 0;
3613}
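
/*
 * Illustrative note (not part of pci.c): for a standard BAR the config
 * space offset is linear in the resource number, e.g. resno 1 maps to
 * PCI_BASE_ADDRESS_0 + 4 * 1 = 0x14.
 */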
3614
3615/* Some architectures require additional programming to enable VGA */
3616static arch_set_vga_state_t arch_set_vga_state;
3617
3618void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3619{
3620        arch_set_vga_state = func;      /* NULL disables */
3621}
3622
3623static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3624                      unsigned int command_bits, u32 flags)
3625{
3626        if (arch_set_vga_state)
3627                return arch_set_vga_state(dev, decode, command_bits,
3628                                                flags);
3629        return 0;
3630}
3631
3632/**
3633 * pci_set_vga_state - set VGA decode state on device and parents if requested
3634 * @dev: the PCI device
3635 * @decode: true = enable decoding, false = disable decoding
3636 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3637 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
3638 * the latter also traverses ancestor buses and updates their bridges
3639 */
3640int pci_set_vga_state(struct pci_dev *dev, bool decode,
3641                      unsigned int command_bits, u32 flags)
3642{
3643        struct pci_bus *bus;
3644        struct pci_dev *bridge;
3645        u16 cmd;
3646        int rc;
3647
3648        WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3649
3650        /* ARCH specific VGA enables */
3651        rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3652        if (rc)
3653                return rc;
3654
3655        if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3656                pci_read_config_word(dev, PCI_COMMAND, &cmd);
3657                if (decode)
3658                        cmd |= command_bits;
3659                else
3660                        cmd &= ~command_bits;
3661                pci_write_config_word(dev, PCI_COMMAND, cmd);
3662        }
3663
3664        if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3665                return 0;
3666
3667        bus = dev->bus;
3668        while (bus) {
3669                bridge = bus->self;
3670                if (bridge) {
3671                        pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3672                                             &cmd);
3673                        if (decode)
3674                                cmd |= PCI_BRIDGE_CTL_VGA;
3675                        else
3676                                cmd &= ~PCI_BRIDGE_CTL_VGA;
3677                        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3678                                              cmd);
3679                }
3680                bus = bus->parent;
3681        }
3682        return 0;
3683}
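
/*
 * Illustrative sketch (not part of pci.c): how a VGA arbiter might
 * route the legacy VGA ranges to one device, updating both the
 * device's COMMAND bits and every upstream bridge's VGA Enable bit.
 * "example_route_vga" is hypothetical.
 */
#if 0
static int example_route_vga(struct pci_dev *pdev, bool enable)
{
        return pci_set_vga_state(pdev, enable,
                                 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
                                 PCI_VGA_STATE_CHANGE_DECODES |
                                 PCI_VGA_STATE_CHANGE_BRIDGE);
}
#endif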
3684
3685#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3686static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3687static DEFINE_SPINLOCK(resource_alignment_lock);
3688
3689/**
3690 * pci_specified_resource_alignment - get resource alignment specified by user.
3691 * @dev: the PCI device to query
3692 *
3693 * RETURNS: Resource alignment if it is specified.
3694 *          Zero if it is not specified.
3695 */
3696resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3697{
3698        int seg, bus, slot, func, align_order, count;
3699        resource_size_t align = 0;
3700        char *p;
3701
3702        spin_lock(&resource_alignment_lock);
3703        p = resource_alignment_param;
3704        while (*p) {
3705                count = 0;
3706                if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3707                                                        p[count] == '@') {
3708                        p += count + 1;
3709                } else {
3710                        align_order = -1;
3711                }
3712                if (sscanf(p, "%x:%x:%x.%x%n",
3713                        &seg, &bus, &slot, &func, &count) != 4) {
3714                        seg = 0;
3715                        if (sscanf(p, "%x:%x.%x%n",
3716                                        &bus, &slot, &func, &count) != 3) {
3717                                /* Invalid format */
3718                                printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3719                                        p);
3720                                break;
3721                        }
3722                }
3723                p += count;
3724                if (seg == pci_domain_nr(dev->bus) &&
3725                        bus == dev->bus->number &&
3726                        slot == PCI_SLOT(dev->devfn) &&
3727                        func == PCI_FUNC(dev->devfn)) {
3728                        if (align_order == -1) {
3729                                align = PAGE_SIZE;
3730                        } else {
3731                                align = 1 << align_order;
3732                        }
3733                        /* Found */
3734                        break;
3735                }
3736                if (*p != ';' && *p != ',') {
3737                        /* End of param or invalid format */
3738                        break;
3739                }
3740                p++;
3741        }
3742        spin_unlock(&resource_alignment_lock);
3743        return align;
3744}
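
/*
 * Illustrative sketch (not part of pci.c): the grammar parsed above is
 * "[<order>@][<domain>:]<bus>:<slot>.<func>", with entries separated by
 * ';' or ','.  For example, "20@0000:00:02.0" requests 2^20 (1 MiB)
 * alignment for device 0000:00:02.0; omitting "<order>@" falls back to
 * PAGE_SIZE.  "example_request_alignment" is hypothetical.
 */
#if 0
static void __init example_request_alignment(void)
{
        static const char arg[] = "20@0000:00:02.0";

        pci_set_resource_alignment_param(arg, sizeof(arg) - 1);
}
#endif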
3745
3746/**
3747 * pci_is_reassigndev - check whether the specified PCI device is a reassignment target
3748 * @dev: the PCI device to check
3749 *
3750 * RETURNS: non-zero if the PCI device is a target device to reassign,
3751 *          zero if it is not.
3752 */
3753int pci_is_reassigndev(struct pci_dev *dev)
3754{
3755        return (pci_specified_resource_alignment(dev) != 0);
3756}
3757
3758/*
3759 * This function disables memory decoding and releases memory resources
3760 * of the device specified by the kernel's boot parameter
3761 * 'pci=resource_alignment='.  It also rounds the resource size up to the
3762 * specified alignment.  Later on, the kernel will assign page-aligned
3763 * memory resources back to the device.
3764 */
3765void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3766{
3767        int i;
3768        struct resource *r;
3769        resource_size_t align, size;
3770        u16 command;
3771
3772        if (!pci_is_reassigndev(dev))
3773                return;
3774
3775        if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3776            (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3777                dev_warn(&dev->dev,
3778                        "Can't reassign resources to host bridge.\n");
3779                return;
3780        }
3781
3782        dev_info(&dev->dev,
3783                "Disabling memory decoding and releasing memory resources.\n");
3784        pci_read_config_word(dev, PCI_COMMAND, &command);
3785        command &= ~PCI_COMMAND_MEMORY;
3786        pci_write_config_word(dev, PCI_COMMAND, command);
3787
3788        align = pci_specified_resource_alignment(dev);
3789        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3790                r = &dev->resource[i];
3791                if (!(r->flags & IORESOURCE_MEM))
3792                        continue;
3793                size = resource_size(r);
3794                if (size < align) {
3795                        size = align;
3796                        dev_info(&dev->dev,
3797                                "Rounding up size of resource #%d to %#llx.\n",
3798                                i, (unsigned long long)size);
3799                }
3800                r->end = size - 1;
3801                r->start = 0;
3802        }
3803        /*
3804         * Need to disable the bridge's resource window so that the
3805         * kernel can reassign a new resource window later on.
3806         */
3807        if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3808            (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3809                for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3810                        r = &dev->resource[i];
3811                        if (!(r->flags & IORESOURCE_MEM))
3812                                continue;
3813                        r->end = resource_size(r) - 1;
3814                        r->start = 0;
3815                }
3816                pci_disable_bridge_window(dev);
3817        }
3818}
3819
3820ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3821{
3822        if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3823                count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3824        spin_lock(&resource_alignment_lock);
3825        strncpy(resource_alignment_param, buf, count);
3826        resource_alignment_param[count] = '\0';
3827        spin_unlock(&resource_alignment_lock);
3828        return count;
3829}
3830
3831ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3832{
3833        size_t count;
3834        spin_lock(&resource_alignment_lock);
3835        count = snprintf(buf, size, "%s", resource_alignment_param);
3836        spin_unlock(&resource_alignment_lock);
3837        return count;
3838}
3839
3840static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3841{
3842        return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3843}
3844
3845static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3846                                        const char *buf, size_t count)
3847{
3848        return pci_set_resource_alignment_param(buf, count);
3849}
3850
3851BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3852                                        pci_resource_alignment_store);
3853
3854static int __init pci_resource_alignment_sysfs_init(void)
3855{
3856        return bus_create_file(&pci_bus_type,
3857                                        &bus_attr_resource_alignment);
3858}
3859
3860late_initcall(pci_resource_alignment_sysfs_init);
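
/*
 * Illustrative note (not part of pci.c): the BUS_ATTR() above exposes
 * the same parameter at runtime as /sys/bus/pci/resource_alignment;
 * writing to that file goes through pci_set_resource_alignment_param()
 * and reading it back goes through pci_get_resource_alignment_param().
 */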
3861
3862static void __devinit pci_no_domains(void)
3863{
3864#ifdef CONFIG_PCI_DOMAINS
3865        pci_domains_supported = 0;
3866#endif
3867}
3868
3869/**
3870 * pci_ext_cfg_avail - can we access extended PCI config space?
3871 * @dev: The PCI device of the root bridge.
3872 *
3873 * Returns 1 if we can access PCI extended config space (offsets
3874 * greater than 0xff). This is the default implementation. Architecture
3875 * implementations can override this.
3876 */
3877int __weak pci_ext_cfg_avail(struct pci_dev *dev)
3878{
3879        return 1;
3880}
3881
3882void __weak pci_fixup_cardbus(struct pci_bus *bus)
3883{
3884}
3885EXPORT_SYMBOL(pci_fixup_cardbus);
3886
3887static int __init pci_setup(char *str)
3888{
3889        while (str) {
3890                char *k = strchr(str, ',');
3891                if (k)
3892                        *k++ = 0;
3893                if (*str && (str = pcibios_setup(str)) && *str) {
3894                        if (!strcmp(str, "nomsi")) {
3895                                pci_no_msi();
3896                        } else if (!strcmp(str, "noaer")) {
3897                                pci_no_aer();
3898                        } else if (!strncmp(str, "realloc=", 8)) {
3899                                pci_realloc_get_opt(str + 8);
3900                        } else if (!strncmp(str, "realloc", 7)) {
3901                                pci_realloc_get_opt("on");
3902                        } else if (!strcmp(str, "nodomains")) {
3903                                pci_no_domains();
3904                        } else if (!strncmp(str, "noari", 5)) {
3905                                pcie_ari_disabled = true;
3906                        } else if (!strncmp(str, "cbiosize=", 9)) {
3907                                pci_cardbus_io_size = memparse(str + 9, &str);
3908                        } else if (!strncmp(str, "cbmemsize=", 10)) {
3909                                pci_cardbus_mem_size = memparse(str + 10, &str);
3910                        } else if (!strncmp(str, "resource_alignment=", 19)) {
3911                                pci_set_resource_alignment_param(str + 19,
3912                                                        strlen(str + 19));
3913                        } else if (!strncmp(str, "ecrc=", 5)) {
3914                                pcie_ecrc_get_policy(str + 5);
3915                        } else if (!strncmp(str, "hpiosize=", 9)) {
3916                                pci_hotplug_io_size = memparse(str + 9, &str);
3917                        } else if (!strncmp(str, "hpmemsize=", 10)) {
3918                                pci_hotplug_mem_size = memparse(str + 10, &str);
3919                        } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3920                                pcie_bus_config = PCIE_BUS_TUNE_OFF;
3921                        } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3922                                pcie_bus_config = PCIE_BUS_SAFE;
3923                        } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3924                                pcie_bus_config = PCIE_BUS_PERFORMANCE;
3925                        } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3926                                pcie_bus_config = PCIE_BUS_PEER2PEER;
3927                        } else if (!strncmp(str, "pcie_scan_all", 13)) {
3928                                pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
3929                        } else {
3930                                printk(KERN_ERR "PCI: Unknown option `%s'\n",
3931                                                str);
3932                        }
3933                }
3934                str = k;
3935        }
3936        return 0;
3937}
3938early_param("pci", pci_setup);
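
/*
 * Illustrative note (not part of pci.c): the options handled above are
 * comma-separated on the kernel command line, e.g.
 *   pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=20@0000:00:02.0
 * Unrecognized options are reported with "PCI: Unknown option".
 */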
3939
3940EXPORT_SYMBOL(pci_reenable_device);
3941EXPORT_SYMBOL(pci_enable_device_io);
3942EXPORT_SYMBOL(pci_enable_device_mem);
3943EXPORT_SYMBOL(pci_enable_device);
3944EXPORT_SYMBOL(pcim_enable_device);
3945EXPORT_SYMBOL(pcim_pin_device);
3946EXPORT_SYMBOL(pci_disable_device);
3947EXPORT_SYMBOL(pci_find_capability);
3948EXPORT_SYMBOL(pci_bus_find_capability);
3949EXPORT_SYMBOL(pci_release_regions);
3950EXPORT_SYMBOL(pci_request_regions);
3951EXPORT_SYMBOL(pci_request_regions_exclusive);
3952EXPORT_SYMBOL(pci_release_region);
3953EXPORT_SYMBOL(pci_request_region);
3954EXPORT_SYMBOL(pci_request_region_exclusive);
3955EXPORT_SYMBOL(pci_release_selected_regions);
3956EXPORT_SYMBOL(pci_request_selected_regions);
3957EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3958EXPORT_SYMBOL(pci_set_master);
3959EXPORT_SYMBOL(pci_clear_master);
3960EXPORT_SYMBOL(pci_set_mwi);
3961EXPORT_SYMBOL(pci_try_set_mwi);
3962EXPORT_SYMBOL(pci_clear_mwi);
3963EXPORT_SYMBOL_GPL(pci_intx);
3964EXPORT_SYMBOL(pci_assign_resource);
3965EXPORT_SYMBOL(pci_find_parent_resource);
3966EXPORT_SYMBOL(pci_select_bars);
3967
3968EXPORT_SYMBOL(pci_set_power_state);
3969EXPORT_SYMBOL(pci_save_state);
3970EXPORT_SYMBOL(pci_restore_state);
3971EXPORT_SYMBOL(pci_pme_capable);
3972EXPORT_SYMBOL(pci_pme_active);
3973EXPORT_SYMBOL(pci_wake_from_d3);
3974EXPORT_SYMBOL(pci_target_state);
3975EXPORT_SYMBOL(pci_prepare_to_sleep);
3976EXPORT_SYMBOL(pci_back_from_sleep);
3977EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3978