linux/drivers/pci/intr_remapping.c
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

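/*
 * Per-IRQ bookkeeping: the IOMMU that remaps this IRQ and the location
 * (base index, sub-handle, allocation mask) of its entries in that
 * IOMMU's interrupt remapping table.
 */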
struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
};

#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
{
        struct irq_2_iommu *iommu;
        int node;

        node = cpu_to_node(cpu);

        iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
        printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);

        return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        if (WARN_ON_ONCE(!desc))
                return NULL;

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        /*
         * alloc irq desc if not allocated already.
         */
        desc = irq_to_desc_alloc_cpu(irq, cpu);
        if (!desc) {
                printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                return NULL;
        }

        irq_iommu = desc->irq_2_iommu;

        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

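/*
 * Return the irq_2_iommu mapping for @irq, or NULL if the IRQ has no
 * mapping or is not currently bound to an IOMMU.
 */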
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

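/* Copy the IRTE currently programmed for @irq into @entry. */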
int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        if (!entry)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

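/*
 * Allocate @count contiguous IRTEs for @irq in @iommu's interrupt
 * remapping table and record the binding. @count is rounded up to a
 * power of two, and the resulting mask must not exceed the IOMMU's
 * maximum invalidation handle mask. Returns the base index on success,
 * -1 on failure.
 */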
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

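/*
 * Queue a selective Interrupt Entry Cache invalidation for the IRTEs
 * described by @index/@mask and wait for it to complete.
 */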
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

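/*
 * Return the IRTE base index for @irq and store its sub-handle in
 * @sub_handle. Returns -1 if the IRQ is not remapped.
 */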
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

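/*
 * Bind @irq to the IRTE at @index + @subhandle in @iommu's remapping
 * table without allocating any new table entries.
 */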
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu = irq_2_iommu_alloc(irq);

        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

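/*
 * Drop the binding between @irq and its IRTE. The table entries
 * themselves are left allocated.
 */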
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

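/*
 * Rewrite the low 64 bits of the IRTE programmed for @irq from
 * @irte_modified and flush the interrupt entry cache for that entry.
 */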
int modify_irte(int irq, struct irte *irte_modified)
{
        int rc;
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)irte, irte_modified->low);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

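/* Flush the interrupt entry cache for all IRTEs allocated to @irq. */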
int flush_irte(int irq)
{
        int rc;
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

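/* Return the IOMMU that remaps interrupts for the IO-APIC with id @apic. */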
struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

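/* Return the IOMMU of the DRHD unit that covers @dev, or NULL if none. */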
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

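/*
 * Release the IRTEs allocated to @irq. For the base handle (sub_handle 0)
 * the table entries are cleared and the interrupt entry cache is flushed;
 * in all cases the irq_2_iommu binding is dropped.
 */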
int free_irte(int irq)
{
        int rc = 0;
        int index, i;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        if (!irq_iommu->sub_handle) {
                for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                        set_64bit((unsigned long *)(irte + i), 0);
                rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

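/*
 * Program @iommu with the interrupt remapping table and enable remapping:
 * write the table address/size (and x2apic mode) to DMAR_IRTA_REG, enable
 * compatibility format interrupts when not in x2apic mode, globally
 * invalidate the interrupt entry cache, then set the IRE bit.
 */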
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 cmd, sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        cmd = iommu->gcmd | DMA_GCMD_SIRTP;
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        if (mode == 0) {
                spin_lock_irqsave(&iommu->register_lock, flags);

                /* enable compatibility format interrupt pass-through */
                cmd = iommu->gcmd | DMA_GCMD_CFI;
                iommu->gcmd |= DMA_GCMD_CFI;
                writel(cmd, iommu->reg + DMAR_GCMD_REG);

                IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                              readl, (sts & DMA_GSTS_CFIS), sts);

                spin_unlock_irqrestore(&iommu->register_lock, flags);
        }

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        cmd = iommu->gcmd | DMA_GCMD_IRE;
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

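/*
 * Allocate the interrupt remapping table for @iommu and hand it to the
 * hardware via iommu_set_intr_remapping().
 */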
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

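/*
 * Enable interrupt remapping system-wide: quiesce any remapping/QI state
 * left over from before OS handover, verify EIM support when x2apic mode
 * (@eim) is requested, enable queued invalidation on every DRHD, then set
 * up remapping on each IR-capable IOMMU. Returns 0 on success, -1 on
 * failure.
 */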
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If queued invalidation is already initialized,
                 * don't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * Check for Interrupt-remapping support.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHDs.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Set up Interrupt-remapping for all the DRHDs now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

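/*
 * Walk the device scope entries of a DRHD and record, in ir_ioapic[],
 * which IO-APICs are covered by @iommu.
 */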
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Find the association between IO-APICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHDs now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

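/*
 * Re-enable interrupt remapping: restart queued invalidation where it was
 * already initialized and reprogram every IR-capable IOMMU via
 * iommu_set_intr_remapping().
 */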
int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Set up Interrupt-remapping for all the DRHDs now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for this iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}