linux/drivers/pci/vpd.c
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI VPD support
 *
 * Copyright (C) 2010 Broadcom Corporation.
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include "pci.h"

/* VPD access through PCI 2.2+ VPD capability */

struct pci_vpd_ops {
        ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
        ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
};

struct pci_vpd {
        const struct pci_vpd_ops *ops;
        struct mutex    lock;
        unsigned int    len;
        u16             flag;
        u8              cap;
        unsigned int    busy:1;
        unsigned int    valid:1;
};

static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
{
        return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev:        pci device struct
 * @pos:        offset in vpd space
 * @count:      number of bytes to read
 * @buf:        pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
        if (!dev->vpd || !dev->vpd->ops)
                return -ENODEV;
        return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev:        pci device struct
 * @pos:        offset in vpd space
 * @count:      number of bytes to write
 * @buf:        buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
        if (!dev->vpd || !dev->vpd->ops)
                return -ENODEV;
        return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);

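/*
 * Illustrative sketch only (not part of the original file, not compiled):
 * how a driver holding a struct pci_dev might use the exported accessor
 * above to dump the start of its VPD.  The helper name and the 64-byte
 * window are arbitrary choices for this example.
 */
#if 0
static int example_dump_vpd(struct pci_dev *pdev)
{
        u8 buf[64];
        ssize_t ret;

        /* Read up to 64 bytes of VPD starting at offset 0 */
        ret = pci_read_vpd(pdev, 0, sizeof(buf), buf);
        if (ret < 0)
                return ret;

        print_hex_dump_bytes("vpd: ", DUMP_PREFIX_OFFSET, buf, ret);
        return 0;
}
#endif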
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev:        pci device struct
 * @old_size:   current assumed size, also maximum allowed size
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
        size_t off = 0;
        unsigned char header[1+2];      /* 1 byte tag, 2 bytes length */

        while (off < old_size && pci_read_vpd(dev, off, 1, header) == 1) {
                unsigned char tag;

                if (!header[0] && !off) {
                        pci_info(dev, "Invalid VPD tag 00, assume missing optional VPD EPROM\n");
                        return 0;
                }

                if (header[0] & PCI_VPD_LRDT) {
                        /* Large Resource Data Type Tag */
                        tag = pci_vpd_lrdt_tag(header);
                        /* Only read length from known tag items */
                        if ((tag == PCI_VPD_LTIN_ID_STRING) ||
                            (tag == PCI_VPD_LTIN_RO_DATA) ||
                            (tag == PCI_VPD_LTIN_RW_DATA)) {
                                if (pci_read_vpd(dev, off+1, 2,
                                                 &header[1]) != 2) {
                                        pci_warn(dev, "invalid large VPD tag %02x size at offset %zu\n",
                                                 tag, off + 1);
                                        return 0;
                                }
                                off += PCI_VPD_LRDT_TAG_SIZE +
                                        pci_vpd_lrdt_size(header);
                        }
                } else {
                        /* Short Resource Data Type Tag */
                        off += PCI_VPD_SRDT_TAG_SIZE +
                                pci_vpd_srdt_size(header);
                        tag = pci_vpd_srdt_tag(header);
                }

                if (tag == PCI_VPD_STIN_END)    /* End tag descriptor */
                        return off;

                if ((tag != PCI_VPD_LTIN_ID_STRING) &&
                    (tag != PCI_VPD_LTIN_RO_DATA) &&
                    (tag != PCI_VPD_LTIN_RW_DATA)) {
                        pci_warn(dev, "invalid %s VPD tag %02x at offset %zu\n",
                                 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
                                 tag, off);
                        return 0;
                }
        }
        return 0;
}

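/*
 * Illustrative sketch only (not part of the original file, not compiled):
 * a hypothetical, well-formed VPD image of the kind pci_vpd_size() walks
 * above.  The contents are made up; what matters is the resource structure:
 * an Identifier String large resource, a Read-Only Data large resource and
 * a small End tag.  For this image pci_vpd_size() would report 27 bytes.
 */
#if 0
static const u8 example_vpd_image[] = {
        /* Large resource: Identifier String, little-endian length 0x0008 */
        PCI_VPD_LRDT_ID_STRING, 0x08, 0x00,
        'E', 'x', 'a', 'm', 'p', 'l', 'e', '0',
        /* Large resource: Read-Only Data, little-endian length 0x000c */
        PCI_VPD_LRDT_RO_DATA, 0x0c, 0x00,
        'S', 'N', 0x05, '1', '2', '3', '4', '5',   /* "SN" keyword, 5 data bytes */
        'R', 'V', 0x01, 0x00,                      /* "RV" field, placeholder checksum */
        /* Small resource: End tag */
        PCI_VPD_SRDT_END,
};
#endif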
/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev)
{
        struct pci_vpd *vpd = dev->vpd;
        unsigned long timeout = jiffies + msecs_to_jiffies(125);
        unsigned long max_sleep = 16;
        u16 status;
        int ret;

        if (!vpd->busy)
                return 0;

        do {
                ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                &status);
                if (ret < 0)
                        return ret;

                if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
                        vpd->busy = 0;
                        return 0;
                }

                if (fatal_signal_pending(current))
                        return -EINTR;

                if (time_after(jiffies, timeout))
                        break;

                /* Exponential backoff, capped at roughly 1 ms per poll */
                usleep_range(10, max_sleep);
                if (max_sleep < 1024)
                        max_sleep *= 2;
        } while (true);

        pci_warn(dev, "VPD access failed.  This is likely a firmware bug on this device.  Contact the card vendor for a firmware update\n");
        return -ETIMEDOUT;
}

static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
                            void *arg)
{
        struct pci_vpd *vpd = dev->vpd;
        int ret;
        loff_t end = pos + count;
        u8 *buf = arg;

        if (pos < 0)
                return -EINVAL;

        if (!vpd->valid) {
                vpd->valid = 1;
                vpd->len = pci_vpd_size(dev, vpd->len);
        }

        if (vpd->len == 0)
                return -EIO;

        if (pos > vpd->len)
                return 0;

        if (end > vpd->len) {
                end = vpd->len;
                count = end - pos;
        }

        if (mutex_lock_killable(&vpd->lock))
                return -EINTR;

        ret = pci_vpd_wait(dev);
        if (ret < 0)
                goto out;

        while (pos < end) {
                u32 val;
                unsigned int i, skip;

                /* The hardware transfers whole dwords at dword-aligned addresses */
                ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                 pos & ~3);
                if (ret < 0)
                        break;
                vpd->busy = 1;
                vpd->flag = PCI_VPD_ADDR_F;     /* F is set once read data is ready */
                ret = pci_vpd_wait(dev);
                if (ret < 0)
                        break;

                ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
                if (ret < 0)
                        break;

                /* Extract the requested bytes, skipping any before an unaligned pos */
                skip = pos & 3;
                for (i = 0; i < sizeof(u32); i++) {
                        if (i >= skip) {
                                *buf++ = val;
                                if (++pos == end)
                                        break;
                        }
                        val >>= 8;
                }
        }
out:
        mutex_unlock(&vpd->lock);
        return ret ? ret : count;
}

static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
                             const void *arg)
{
        struct pci_vpd *vpd = dev->vpd;
        const u8 *buf = arg;
        loff_t end = pos + count;
        int ret = 0;

        if (pos < 0 || (pos & 3) || (count & 3))
                return -EINVAL;

        if (!vpd->valid) {
                vpd->valid = 1;
                vpd->len = pci_vpd_size(dev, vpd->len);
        }

        if (vpd->len == 0)
                return -EIO;

        if (end > vpd->len)
                return -EINVAL;

        if (mutex_lock_killable(&vpd->lock))
                return -EINTR;

        ret = pci_vpd_wait(dev);
        if (ret < 0)
                goto out;

        while (pos < end) {
                u32 val;

                /* Assemble a little-endian dword from the caller's buffer */
                val = *buf++;
                val |= *buf++ << 8;
                val |= *buf++ << 16;
                val |= *buf++ << 24;

                ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
                if (ret < 0)
                        break;
                ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                 pos | PCI_VPD_ADDR_F);
                if (ret < 0)
                        break;

                vpd->busy = 1;
                vpd->flag = 0;  /* F clears once the write has completed */
                ret = pci_vpd_wait(dev);
                if (ret < 0)
                        break;

                pos += sizeof(u32);
        }
out:
        mutex_unlock(&vpd->lock);
        return ret ? ret : count;
}

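/*
 * Illustrative sketch only (not part of the original file, not compiled):
 * a VPD write through pci_write_vpd().  As enforced above, both the offset
 * and the length must be multiples of four.  The 0x100 offset into a
 * Read-Write section is hypothetical and only takes effect if it lies
 * within the detected VPD size.
 */
#if 0
static int example_update_vpd_rw(struct pci_dev *pdev)
{
        static const u8 data[8] = {
                'A', 'S', 'S', 'E', 'T', '-', '4', '2'
        };
        ssize_t ret;

        /* Dword-aligned offset, dword-multiple length */
        ret = pci_write_vpd(pdev, 0x100, sizeof(data), data);
        return ret < 0 ? ret : 0;
}
#endif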
static const struct pci_vpd_ops pci_vpd_ops = {
        .read = pci_vpd_read,
        .write = pci_vpd_write,
};

static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
                               void *arg)
{
        struct pci_dev *tdev = pci_get_func0_dev(dev);
        ssize_t ret;

        if (!tdev)
                return -ENODEV;

        ret = pci_read_vpd(tdev, pos, count, arg);
        pci_dev_put(tdev);
        return ret;
}

static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
                                const void *arg)
{
        struct pci_dev *tdev = pci_get_func0_dev(dev);
        ssize_t ret;

        if (!tdev)
                return -ENODEV;

        ret = pci_write_vpd(tdev, pos, count, arg);
        pci_dev_put(tdev);
        return ret;
}

static const struct pci_vpd_ops pci_vpd_f0_ops = {
        .read = pci_vpd_f0_read,
        .write = pci_vpd_f0_write,
};

void pci_vpd_init(struct pci_dev *dev)
{
        struct pci_vpd *vpd;
        u8 cap;

        cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
        if (!cap)
                return;

        vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
        if (!vpd)
                return;

        vpd->len = PCI_VPD_MAX_SIZE;
        if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
                vpd->ops = &pci_vpd_f0_ops;
        else
                vpd->ops = &pci_vpd_ops;
        mutex_init(&vpd->lock);
        vpd->cap = cap;
        vpd->busy = 0;
        vpd->valid = 0;
        dev->vpd = vpd;
}

void pci_vpd_release(struct pci_dev *dev)
{
        kfree(dev->vpd);
}

static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
                        struct bin_attribute *bin_attr, char *buf, loff_t off,
                        size_t count)
{
        struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));

        return pci_read_vpd(dev, off, count, buf);
}

static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
                         struct bin_attribute *bin_attr, char *buf, loff_t off,
                         size_t count)
{
        struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));

        return pci_write_vpd(dev, off, count, buf);
}
static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);

static struct bin_attribute *vpd_attrs[] = {
        &bin_attr_vpd,
        NULL,
};

static umode_t vpd_attr_is_visible(struct kobject *kobj,
                                   struct bin_attribute *a, int n)
{
        struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

        if (!pdev->vpd)
                return 0;

        return a->attr.mode;
}

const struct attribute_group pci_dev_vpd_attr_group = {
        .bin_attrs = vpd_attrs,
        .is_bin_visible = vpd_attr_is_visible,
};

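/*
 * The attribute group above backs the per-device sysfs file
 * /sys/bus/pci/devices/<domain:bus:dev.fn>/vpd (mode 0600, so root only).
 * A minimal user-space sketch of consuming it (plain C, not kernel code;
 * the device address below is hypothetical):
 *
 *      int fd = open("/sys/bus/pci/devices/0000:01:00.0/vpd", O_RDONLY);
 *      unsigned char buf[256];
 *      ssize_t n = read(fd, buf, sizeof(buf));  // dispatched to vpd_read() above
 *      close(fd);
 */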
int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt)
{
        int i = 0;

        /* look for LRDT tags only, end tag is the only SRDT tag */
        while (i + PCI_VPD_LRDT_TAG_SIZE <= len && buf[i] & PCI_VPD_LRDT) {
                if (buf[i] == rdt)
                        return i;

                i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(buf + i);
        }

        return -ENOENT;
}
EXPORT_SYMBOL_GPL(pci_vpd_find_tag);

int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
                              unsigned int len, const char *kw)
{
        int i;

        for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) {
                if (buf[i + 0] == kw[0] &&
                    buf[i + 1] == kw[1])
                        return i;

                i += PCI_VPD_INFO_FLD_HDR_SIZE +
                     pci_vpd_info_field_size(&buf[i]);
        }

        return -ENOENT;
}
EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);

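/*
 * Illustrative sketch only (not part of the original file, not compiled):
 * the usual driver pattern that combines pci_read_vpd() with the two helpers
 * above to pull the serial number ("SN") keyword out of the Read-Only Data
 * resource.  The 256-byte buffer is arbitrary, and production code should
 * also bound-check the field against the amount of VPD actually read.
 */
#if 0
static int example_read_serial(struct pci_dev *pdev, char *sn, size_t sn_len)
{
        u8 vpd[256];
        unsigned int ro_len, fld_len;
        int ro_start, kw;
        ssize_t len;

        len = pci_read_vpd(pdev, 0, sizeof(vpd), vpd);
        if (len < 0)
                return len;

        /* Locate the Read-Only Data large resource */
        ro_start = pci_vpd_find_tag(vpd, len, PCI_VPD_LRDT_RO_DATA);
        if (ro_start < 0)
                return -ENODATA;
        ro_len = pci_vpd_lrdt_size(&vpd[ro_start]);

        /* Locate the "SN" information field within it */
        kw = pci_vpd_find_info_keyword(vpd, ro_start + PCI_VPD_LRDT_TAG_SIZE,
                                       ro_len, PCI_VPD_RO_KEYWORD_SERIALNO);
        if (kw < 0)
                return -ENODATA;

        fld_len = pci_vpd_info_field_size(&vpd[kw]);
        if (fld_len >= sn_len)
                fld_len = sn_len - 1;
        memcpy(sn, &vpd[kw + PCI_VPD_INFO_FLD_HDR_SIZE], fld_len);
        sn[fld_len] = '\0';
        return 0;
}
#endif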
#ifdef CONFIG_PCI_QUIRKS
/*
 * Quirk non-zero PCI functions to route VPD access through function 0 for
 * devices that share VPD resources between functions.  The functions are
 * expected to be identical devices.
 */
static void quirk_f0_vpd_link(struct pci_dev *dev)
{
        struct pci_dev *f0;

        if (!PCI_FUNC(dev->devfn))
                return;

        f0 = pci_get_func0_dev(dev);
        if (!f0)
                return;

        if (f0->vpd && dev->class == f0->class &&
            dev->vendor == f0->vendor && dev->device == f0->device)
                dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;

        pci_dev_put(f0);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
                              PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);

/*
 * If a device follows the VPD format spec, the PCI core will not read or
 * write past the VPD End Tag.  But some vendors do not follow the VPD
 * format spec, so we can't tell how much data is safe to access.  Devices
 * may behave unpredictably if we access too much.  Blacklist these devices
 * so we don't touch VPD at all.
 */
static void quirk_blacklist_vpd(struct pci_dev *dev)
{
        if (dev->vpd) {
                dev->vpd->len = 0;
                pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
                quirk_blacklist_vpd);
/*
 * The Amazon Annapurna Labs 0x0031 device ID is reused for other non-Root Port
 * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
 */
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
                              PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);

static void pci_vpd_set_size(struct pci_dev *dev, size_t len)
{
        struct pci_vpd *vpd = dev->vpd;

        if (!vpd || len == 0 || len > PCI_VPD_MAX_SIZE)
                return;

        vpd->valid = 1;
        vpd->len = len;
}

static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
        int chip = (dev->device & 0xf000) >> 12;
        int func = (dev->device & 0x0f00) >>  8;
        int prod = (dev->device & 0x00ff) >>  0;

        /*
         * If this is a T3-based adapter, there's a 1KB VPD area at offset
         * 0xc00 which contains the preferred VPD values.  If this is a T4 or
         * later based adapter, the special VPD is at offset 0x400 for the
         * Physical Functions (the SR-IOV Virtual Functions have no VPD
         * Capabilities).  The PCI VPD Access core routines will normally
         * compute the size of the VPD by parsing the VPD Data Structure at
         * offset 0x000.  This will result in silent failures when attempting
         * to access these other VPD areas, which are beyond those computed
         * limits.
         */
        if (chip == 0x0 && prod >= 0x20)
                pci_vpd_set_size(dev, 8192);
        else if (chip >= 0x4 && func < 0x8)
                pci_vpd_set_size(dev, 2048);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
                        quirk_chelsio_extend_vpd);

#endif