linux/drivers/pci/intr_remapping.c
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;

static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_intremap(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strncmp(str, "on", 2))
                disable_intremap = 0;
        else if (!strncmp(str, "off", 3))
                disable_intremap = 1;
        else if (!strncmp(str, "nosid", 5))
                disable_sourceid_checking = 1;

        return 0;
}
early_param("intremap", setup_intremap);

static DEFINE_SPINLOCK(irq_2_ir_lock);

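/*
 * Look up the per-IRQ remapping bookkeeping.  It lives inside the irq_cfg
 * stored as the IRQ's chip data, so this returns NULL for an IRQ that has
 * no chip data yet.
 */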
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_cfg *cfg = irq_get_chip_data(irq);
        return cfg ? &cfg->irq_2_iommu : NULL;
}

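/*
 * Copy the IRTE currently backing @irq into @entry.  Returns 0 on success,
 * -1 if no remapping state is attached to the IRQ.
 */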
int get_irte(int irq, struct irte *entry)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!entry || !irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

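/*
 * Allocate @count IRTEs for @irq.  Requests for more than one entry are
 * rounded up to a power of two and must fit within the IOMMU's maximum
 * invalidation handle mask.  Returns the index of the first entry, or -1
 * if the table is full or the request is invalid.
 */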
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count || !irq_iommu)
                return -1;

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

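/*
 * Invalidate the interrupt entry cache for the given index/mask through a
 * synchronously submitted queued-invalidation descriptor.
 */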
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

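/*
 * Return the base IRTE index for @irq and report its sub-handle through
 * @sub_handle; returns -1 if no remapping state is attached.
 */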
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

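/*
 * Attach @irq to an already allocated IRTE block: record the IOMMU, the
 * base index and the sub-handle so later modify/free operations find the
 * right entry.
 */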
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

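/*
 * Overwrite the IRTE backing @irq with @irte_modified, flush the entry out
 * of the IOMMU cache and invalidate the interrupt entry cache for it.
 */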
int modify_irte(int irq, struct irte *irte_modified)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct intel_iommu *iommu;
        unsigned long flags;
        struct irte *irte;
        int rc, index;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit(&irte->low, irte_modified->low);
        set_64bit(&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

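/*
 * The map_*_to_ir() helpers translate an HPET block, an IO-APIC or a PCI
 * device to the IOMMU that remaps its interrupts: HPET and IO-APIC use the
 * scope information collected from the DMAR tables, PCI devices use the
 * matching DRHD unit.
 */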
struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id)
                        return ir_hpet[i].iommu;
        return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

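/*
 * Clear every IRTE in the block owned by @irq_iommu and invalidate the
 * interrupt entry cache for it.  Sub-handle users (sub_handle != 0) leave
 * the shared block alone.
 */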
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit(&entry->low, 0);
                set_64bit(&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int rc;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY           0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ       0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS          0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16       0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1  0x1  /* verify most significant 13 bits, ignore
                              * the third least significant bit
                              */
#define SQ_13_IGNORE_2  0x2  /* verify most significant 13 bits, ignore
                              * the second and third least significant bits
                              */
#define SQ_13_IGNORE_3  0x3  /* verify most significant 13 bits, ignore
                              * the three least significant bits
                              */

/*
 * Set the SVT, SQ and SID fields of the IRTE to verify the
 * source-id of interrupt requests.
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        if (disable_sourceid_checking)
                svt = SVT_NO_VERIFY;
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

        return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16. Some platforms are broken.
         * While we figure out the right quirks for these broken platforms, use
         * SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}

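/*
 * Program the source-id for an MSI/MSI-X capable device: native PCIe and
 * Root Complex integrated devices are verified against their own
 * requester-id, devices behind a PCIe-to-PCI/PCI-X bridge are verified by
 * bus number, and devices behind a legacy PCI bridge use the bridge's
 * requester-id.
 */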
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct pci_dev *bridge;

        if (!irte || !dev)
                return -1;

        /* PCIe device or Root Complex integrated PCI device */
        if (pci_is_pcie(dev) || !dev->bus->parent) {
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             (dev->bus->number << 8) | dev->devfn);
                return 0;
        }

        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
                if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
                        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                                (bridge->bus->number << 8) | bridge->devfn);
        }

        return 0;
}

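/*
 * Point the IOMMU at the remap table via DMAR_IRTA_REG, globally invalidate
 * the interrupt entry cache, then set DMA_GCMD_IRE to switch interrupt
 * remapping on, waiting for the corresponding status bits at each step.
 */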
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

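/*
 * Allocate the ir_table descriptor and the zeroed page block that holds the
 * remap entries on the IOMMU's home node, then enable remapping in the
 * requested (xAPIC or x2APIC) mode.
 */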
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        if (!dmar_ir_support())
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

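/*
 * Bring up interrupt remapping on every DRHD: tear down any remapping and
 * queued-invalidation state left enabled prior to OS handover, check EIM
 * support when x2APIC mode is requested, enable queued invalidation and
 * finally install a remap table on each capable IOMMU.
 */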
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        if (parse_ioapics_under_ir() != 1) {
                printk(KERN_INFO "Not enabling interrupt remapping\n");
                return -1;
        }

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If queued invalidation is already initialized,
                 * we shouldn't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

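/*
 * Record the bus/devfn behind which an HPET block sits, as described by a
 * DMAR device scope entry, walking the PCI path with direct config-space
 * reads since the PCI subsystem is not initialized yet.
 */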
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }
        ir_hpet[ir_hpet_num].bus   = bus;
        ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_hpet[ir_hpet_num].iommu = iommu;
        ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
        ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_ioapic[ir_ioapic_num].bus   = bus;
        ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_ioapic[ir_ioapic_num].iommu = iommu;
        ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
        ir_ioapic_num++;
}

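/*
 * Walk the device scope entries of one DRHD and register every IO-APIC and
 * HPET block found there with the owning IOMMU.
 */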
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx IOMMU %d\n", scope->enumeration_id,
                               drhd->address, iommu->seq_id);

                        ir_parse_one_ioapic_scope(scope, iommu);
                } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
                        if (ir_hpet_num == MAX_HPET_TBS) {
                                printk(KERN_WARNING "Exceeded Max HPET blocks\n");
                                return -1;
                        }

                        printk(KERN_INFO "HPET id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Find the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

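/*
 * disable_intr_remapping() switches remapping off on every capable IOMMU;
 * reenable_intr_remapping() re-enables queued invalidation where it was
 * already initialized and re-programs/re-enables remapping on each capable
 * IOMMU.
 */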
void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for this iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}