linux/drivers/dma/ioat_dca.c
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2007 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/dca.h>

/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
#undef cpu_physical_id
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif

#include "ioatdma.h"
#include "ioatdma_registers.h"

/*
 * Bit 7 of a tag map entry is the "valid" bit; if it is set, bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag.  If the
 * valid bit is not set, the value must be 0 or 1 and defines the bit in
 * the tag.
 */
#define DCA_TAG_MAP_VALID 0x80

#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1

#define DCA_TAG_MAP_MASK 0xDF
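
/*
 * Worked example (illustrative): a map entry of 0x82 has the valid bit
 * (DCA_TAG_MAP_VALID) set, so the corresponding tag bit is taken from bit 2
 * of the CPU's APIC ID; an entry of 0x01 or 0x00 has the valid bit clear, so
 * the tag bit is simply that literal value.
 */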

/* expected tag map bytes for I/OAT ver.2 */
#define DCA2_TAG_MAP_BYTE0 0x80
#define DCA2_TAG_MAP_BYTE1 0x0
#define DCA2_TAG_MAP_BYTE2 0x81
#define DCA2_TAG_MAP_BYTE3 0x82
#define DCA2_TAG_MAP_BYTE4 0x82

/* verify if tag map matches expected values */
static inline int dca2_tag_map_valid(u8 *tag_map)
{
	return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
		(tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
		(tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
		(tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
		(tag_map[4] == DCA2_TAG_MAP_BYTE4));
}

/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device.  Software needs direct support for their tag mappings.
 */

#define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN	8

static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
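
/*
 * Worked example (illustrative) for the BNB map above: tag bit 0 is always 1,
 * bits 1-3 come from APIC ID bits 1, 2 and 2, and the remaining entries are 0.
 * For a CPU with APIC ID 0x06 (bits 1 and 2 set), ioat_dca_get_tag() below
 * therefore returns 0x0f.
 */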

/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
	return (pci->bus->number << 8) | pci->devfn;
}
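
/*
 * For example (illustrative), bus 0x05, device 0x03, function 0 has
 * devfn = (0x03 << 3) | 0 = 0x18, so the requester id packs to 0x0518.
 */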

static int dca_enabled_in_bios(struct pci_dev *pdev)
{
	/* CPUID level 9 returns DCA configuration */
	/* Bit 0 indicates DCA enabled by the BIOS */
	unsigned long cpuid_level_9;
	int res;

	cpuid_level_9 = cpuid_eax(9);
	res = test_bit(0, &cpuid_level_9);
	if (!res)
		dev_err(&pdev->dev, "DCA is disabled in BIOS\n");

	return res;
}

static int system_has_dca_enabled(struct pci_dev *pdev)
{
	if (boot_cpu_has(X86_FEATURE_DCA))
		return dca_enabled_in_bios(pdev);

	dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
	return 0;
}

struct ioat_dca_slot {
	struct pci_dev *pdev;	/* requester device */
	u16 rid;		/* requester id, as used by IOAT */
};

#define IOAT_DCA_MAX_REQ 6
#define IOAT3_DCA_MAX_REQ 2

struct ioat_dca_priv {
	void __iomem		*iobase;
	void __iomem		*dca_base;
	int			 max_requesters;
	int			 requester_count;
	u8			 tag_map[IOAT_TAG_MAP_LEN];
	struct ioat_dca_slot	 req_slots[0];
};
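
/*
 * Note: req_slots is a variable-length trailer; the init routines below size
 * it at allocation time by passing sizeof(*ioatdca) plus max_requesters slot
 * entries to alloc_dca_provider().
 */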

/* 5000 series chipset DCA Port Requester ID Table Entry Format
 * [15:8]	PCI-Express Bus Number
 * [7:3]	PCI-Express Device Number
 * [2:0]	PCI-Express Function Number
 *
 * 5000 series chipset DCA control register format
 * [7:1]	Reserved (0)
 * [0]		Ignore Function Number
 */
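
/*
 * Each requester table slot below occupies four bytes: the 16-bit requester
 * id is written at offset (slot * 4) and the control byte (with the "ignore
 * function" bit cleared) at offset (slot * 4) + 2.
 */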

static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			writew(id, ioatdca->dca_base + (i * 4));
			/* make sure the ignore function bit is off */
			writeb(0, ioatdca->dca_base + (i * 4) + 2);
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat_dca_remove_requester(struct dca_provider *dca,
				     struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			writew(0, ioatdca->dca_base + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
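
/*
 * Build the DCA tag for @cpu by walking the 8-entry tag map: entries with
 * DCA_TAG_MAP_VALID set select an APIC ID bit, all other entries contribute
 * their literal 0/1 value.  The @dev argument is unused here but required by
 * the dca_ops->get_tag signature.
 */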
static u8 ioat_dca_get_tag(struct dca_provider *dca,
			   struct device *dev,
			   int cpu)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry, tag;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA_TAG_MAP_VALID) {
			bit = entry & ~DCA_TAG_MAP_VALID;
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else {
			value = entry ? 1 : 0;
		}
		tag |= (value << i);
	}
	return tag;
}

static int ioat_dca_dev_managed(struct dca_provider *dca,
				struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev)
			return 1;
	}
	return 0;
}

static struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
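
/*
 * Rough usage sketch (not part of this file): once one of the init routines
 * below has called register_dca_provider(), a candidate requester driver is
 * expected to go through the generic DCA core (drivers/dca) rather than these
 * ops directly, roughly:
 *
 *	if (dca_add_requester(&pdev->dev) == 0)	     // reaches *_add_requester
 *		tag = dca3_get_tag(&pdev->dev, cpu); // reaches *_get_tag
 *
 * The exact client API lives in <linux/dca.h>; the calls above are an
 * illustrative assumption, not something this driver itself performs.
 */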

struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	u8 *tag_map = NULL;
	int i;
	int err;
	u8 version;
	u8 max_requesters;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* I/OAT v1 systems must have a known tag_map to support DCA */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IOAT:
			tag_map = ioat_tag_map_BNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
			tag_map = ioat_tag_map_CNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
			tag_map = ioat_tag_map_SCNB;
			break;
		}
		break;
	case PCI_VENDOR_ID_UNISYS:
		switch (pdev->device) {
		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
			tag_map = ioat_tag_map_UNISYS;
			break;
		}
		break;
	}
	if (tag_map == NULL)
		return NULL;

	version = readb(iobase + IOAT_VER_OFFSET);
	if (version == IOAT_VER_3_0)
		max_requesters = IOAT3_DCA_MAX_REQ;
	else
		max_requesters = IOAT_DCA_MAX_REQ;

	dca = alloc_dca_provider(&ioat_dca_ops,
			sizeof(*ioatdca) +
			(sizeof(struct ioat_dca_slot) * max_requesters));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->max_requesters = max_requesters;
	ioatdca->dca_base = iobase + 0x54;

	/* copy over the APIC ID to DCA tag mapping */
	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
		ioatdca->tag_map[i] = tag_map[i];

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}

static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat2_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

static u8 ioat2_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	tag = ioat_dca_get_tag(dca, dev, cpu);
	tag = (~tag) & 0x1F;
	return tag;
}
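
/*
 * Worked example (illustrative): with the expected DCA2 tag map
 * {0x80, 0x00, 0x81, 0x82, 0x82} and APIC ID 0x05, ioat_dca_get_tag()
 * yields 0x19; inverting and masking to five bits gives the final
 * I/OAT v2 tag 0x06.
 */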

static struct dca_ops ioat2_dca_ops = {
	.add_requester		= ioat2_dca_add_requester,
	.remove_requester	= ioat2_dca_remove_requester,
	.get_tag		= ioat2_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
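
/*
 * Count the slots in the global requester table by walking its entries until
 * one with IOAT_DCA_GREQID_LASTID set is found; a table offset of zero means
 * no table is present.
 */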
static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;
	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}

struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u32 tag_map;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat2_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
	for (i = 0; i < 5; i++) {
		bit = (tag_map >> (4 * i)) & 0x0f;
		if (bit < 8)
			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
		else
			ioatdca->tag_map[i] = 0;
	}

	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
		dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, "
			"disabling DCA\n");
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}

static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
			      readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat3_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
			      readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
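
/*
 * I/OAT v3 tag map entries come in three forms (illustrative examples):
 * 0x45 (BIT_TO_SEL | 5) selects APIC ID bit 5, 0x83 (BIT_TO_INV | 3) selects
 * the inverse of APIC ID bit 3, and an entry with neither bit set contributes
 * its literal-value bit (DCA3_TAG_MAP_LITERAL_VAL) directly.
 */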
static u8 ioat3_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
			bit = entry &
				~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
			value = (apic_id & (1 << bit)) ? 0 : 1;
		} else {
			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
		}
		tag |= (value << i);
	}

	return tag;
}

static struct dca_ops ioat3_dca_ops = {
	.add_requester		= ioat3_dca_add_requester,
	.remove_requester	= ioat3_dca_remove_requester,
	.get_tag		= ioat3_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};

static int ioat3_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}

struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat3_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}