linux/drivers/iommu/amd_iommu.c
   1/*
   2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
   3 * Author: Joerg Roedel <joerg.roedel@amd.com>
   4 *         Leo Duran <leo.duran@amd.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  18 */
  19
  20#include <linux/ratelimit.h>
  21#include <linux/pci.h>
  22#include <linux/pci-ats.h>
  23#include <linux/bitmap.h>
  24#include <linux/slab.h>
  25#include <linux/debugfs.h>
  26#include <linux/scatterlist.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/iommu-helper.h>
  29#include <linux/iommu.h>
  30#include <linux/delay.h>
  31#include <linux/amd-iommu.h>
  32#include <linux/notifier.h>
  33#include <linux/export.h>
  34#include <linux/irq.h>
  35#include <linux/msi.h>
  36#include <asm/irq_remapping.h>
  37#include <asm/io_apic.h>
  38#include <asm/apic.h>
  39#include <asm/hw_irq.h>
  40#include <asm/msidef.h>
  41#include <asm/proto.h>
  42#include <asm/iommu.h>
  43#include <asm/gart.h>
  44#include <asm/dma.h>
  45
  46#include "amd_iommu_proto.h"
  47#include "amd_iommu_types.h"
  48#include "irq_remapping.h"
  49
  50#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
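    /*
     * The opcode occupies bits 28-31 of data[1] (bits 60-63 of the
     * 128-bit command); the build_* helpers below fill in the
     * type-specific fields first and stamp the opcode in last.
     */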
  51
  52#define LOOP_TIMEOUT    100000
  53
  54/*
  55 * This bitmap is used to advertise the page sizes our hardware supports
  56 * to the IOMMU core, which will then use this information to split
  57 * physically contiguous memory regions it is mapping into page sizes
  58 * that we support.
  59 *
  60 * 512 GB pages are not supported due to a hardware bug.
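     *
     * Bit n set in this bitmap advertises support for 2^n byte pages:
     * ~0xFFFUL sets every bit from 12 upwards (4 KiB and every larger
     * power-of-two size) and ~(2ULL << 38) clears bit 39 again to drop
     * the broken 512 GB size.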
  61 */
  62#define AMD_IOMMU_PGSIZES       ((~0xFFFUL) & ~(2ULL << 38))
  63
  64static DEFINE_RWLOCK(amd_iommu_devtable_lock);
  65
  66/* A list of preallocated protection domains */
  67static LIST_HEAD(iommu_pd_list);
  68static DEFINE_SPINLOCK(iommu_pd_list_lock);
  69
  70/* List of all available dev_data structures */
  71static LIST_HEAD(dev_data_list);
  72static DEFINE_SPINLOCK(dev_data_list_lock);
  73
  74LIST_HEAD(ioapic_map);
  75LIST_HEAD(hpet_map);
  76
  77/*
  78 * Domain for untranslated devices - only allocated
  79 * if iommu=pt passed on kernel cmd line.
  80 */
  81static struct protection_domain *pt_domain;
  82
  83static struct iommu_ops amd_iommu_ops;
  84
  85static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
  86int amd_iommu_max_glx_val = -1;
  87
  88static struct dma_map_ops amd_iommu_dma_ops;
  89
  90/*
  91 * general struct to manage commands sent to an IOMMU
  92 */
  93struct iommu_cmd {
  94        u32 data[4];
  95};
  96
  97struct kmem_cache *amd_iommu_irq_cache;
  98
  99static void update_domain(struct protection_domain *domain);
 100static int __init alloc_passthrough_domain(void);
 101
 102/****************************************************************************
 103 *
 104 * Helper functions
 105 *
 106 ****************************************************************************/
 107
 108static struct iommu_dev_data *alloc_dev_data(u16 devid)
 109{
 110        struct iommu_dev_data *dev_data;
 111        unsigned long flags;
 112
 113        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
 114        if (!dev_data)
 115                return NULL;
 116
 117        dev_data->devid = devid;
 118        atomic_set(&dev_data->bind, 0);
 119
 120        spin_lock_irqsave(&dev_data_list_lock, flags);
 121        list_add_tail(&dev_data->dev_data_list, &dev_data_list);
 122        spin_unlock_irqrestore(&dev_data_list_lock, flags);
 123
 124        return dev_data;
 125}
 126
 127static void free_dev_data(struct iommu_dev_data *dev_data)
 128{
 129        unsigned long flags;
 130
 131        spin_lock_irqsave(&dev_data_list_lock, flags);
 132        list_del(&dev_data->dev_data_list);
 133        spin_unlock_irqrestore(&dev_data_list_lock, flags);
 134
 135        if (dev_data->group)
 136                iommu_group_put(dev_data->group);
 137
 138        kfree(dev_data);
 139}
 140
 141static struct iommu_dev_data *search_dev_data(u16 devid)
 142{
 143        struct iommu_dev_data *dev_data;
 144        unsigned long flags;
 145
 146        spin_lock_irqsave(&dev_data_list_lock, flags);
 147        list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
 148                if (dev_data->devid == devid)
 149                        goto out_unlock;
 150        }
 151
 152        dev_data = NULL;
 153
 154out_unlock:
 155        spin_unlock_irqrestore(&dev_data_list_lock, flags);
 156
 157        return dev_data;
 158}
 159
 160static struct iommu_dev_data *find_dev_data(u16 devid)
 161{
 162        struct iommu_dev_data *dev_data;
 163
 164        dev_data = search_dev_data(devid);
 165
 166        if (dev_data == NULL)
 167                dev_data = alloc_dev_data(devid);
 168
 169        return dev_data;
 170}
 171
 172static inline u16 get_device_id(struct device *dev)
 173{
 174        struct pci_dev *pdev = to_pci_dev(dev);
 175
 176        return calc_devid(pdev->bus->number, pdev->devfn);
 177}
 178
 179static struct iommu_dev_data *get_dev_data(struct device *dev)
 180{
 181        return dev->archdata.iommu;
 182}
 183
 184static bool pci_iommuv2_capable(struct pci_dev *pdev)
 185{
 186        static const int caps[] = {
 187                PCI_EXT_CAP_ID_ATS,
 188                PCI_EXT_CAP_ID_PRI,
 189                PCI_EXT_CAP_ID_PASID,
 190        };
 191        int i, pos;
 192
 193        for (i = 0; i < ARRAY_SIZE(caps); ++i) {
 194                pos = pci_find_ext_capability(pdev, caps[i]);
 195                if (pos == 0)
 196                        return false;
 197        }
 198
 199        return true;
 200}
 201
 202static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
 203{
 204        struct iommu_dev_data *dev_data;
 205
 206        dev_data = get_dev_data(&pdev->dev);
 207
 208        return dev_data->errata & (1 << erratum) ? true : false;
 209}
 210
 211/*
 212 * In this function the list of preallocated protection domains is traversed to
 213 * find the domain for a specific device
 214 */
 215static struct dma_ops_domain *find_protection_domain(u16 devid)
 216{
 217        struct dma_ops_domain *entry, *ret = NULL;
 218        unsigned long flags;
 219        u16 alias = amd_iommu_alias_table[devid];
 220
 221        if (list_empty(&iommu_pd_list))
 222                return NULL;
 223
 224        spin_lock_irqsave(&iommu_pd_list_lock, flags);
 225
 226        list_for_each_entry(entry, &iommu_pd_list, list) {
 227                if (entry->target_dev == devid ||
 228                    entry->target_dev == alias) {
 229                        ret = entry;
 230                        break;
 231                }
 232        }
 233
 234        spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
 235
 236        return ret;
 237}
 238
 239/*
 240 * This function checks if the driver got a valid device from the caller to
 241 * avoid dereferencing invalid pointers.
 242 */
 243static bool check_device(struct device *dev)
 244{
 245        u16 devid;
 246
 247        if (!dev || !dev->dma_mask)
 248                return false;
 249
 250        /* No device or no PCI device */
 251        if (dev->bus != &pci_bus_type)
 252                return false;
 253
 254        devid = get_device_id(dev);
 255
 256        /* Out of our scope? */
 257        if (devid > amd_iommu_last_bdf)
 258                return false;
 259
 260        if (amd_iommu_rlookup_table[devid] == NULL)
 261                return false;
 262
 263        return true;
 264}
 265
 266static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 267{
 268        pci_dev_put(*from);
 269        *from = to;
 270}
 271
 272static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
 273{
 274        while (!bus->self) {
 275                if (!pci_is_root_bus(bus))
 276                        bus = bus->parent;
 277                else
 278                        return ERR_PTR(-ENODEV);
 279        }
 280
 281        return bus;
 282}
 283
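    /*
     * ACS features a device must have enabled before it is considered
     * isolated: Source Validation, Request Redirect, Completion
     * Redirect and Upstream Forwarding. Without them peer-to-peer DMA
     * can bypass the IOMMU, so such devices share an IOMMU group with
     * their neighbours.
     */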
 284#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
 285
 286static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
 287{
 288        struct pci_dev *dma_pdev = pdev;
 289
 290        /* Account for quirked devices */
 291        swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
 292
 293        /*
 294         * If it's a multifunction device that does not support our
 295         * required ACS flags, add to the same group as function 0.
 296         */
 297        if (dma_pdev->multifunction &&
 298            !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
 299                swap_pci_ref(&dma_pdev,
 300                             pci_get_slot(dma_pdev->bus,
 301                                          PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
 302                                          0)));
 303
 304        /*
 305         * Devices on the root bus go through the iommu.  If that's not us,
 306         * find the next upstream device and test ACS up to the root bus.
 307         * Finding the next device may require skipping virtual buses.
 308         */
 309        while (!pci_is_root_bus(dma_pdev->bus)) {
 310                struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
 311                if (IS_ERR(bus))
 312                        break;
 313
 314                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
 315                        break;
 316
 317                swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
 318        }
 319
 320        return dma_pdev;
 321}
 322
 323static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
 324{
 325        struct iommu_group *group = iommu_group_get(&pdev->dev);
 326        int ret;
 327
 328        if (!group) {
 329                group = iommu_group_alloc();
 330                if (IS_ERR(group))
 331                        return PTR_ERR(group);
 332
 333                WARN_ON(&pdev->dev != dev);
 334        }
 335
 336        ret = iommu_group_add_device(group, dev);
 337        iommu_group_put(group);
 338        return ret;
 339}
 340
 341static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
 342                                    struct device *dev)
 343{
 344        if (!dev_data->group) {
 345                struct iommu_group *group = iommu_group_alloc();
 346                if (IS_ERR(group))
 347                        return PTR_ERR(group);
 348
 349                dev_data->group = group;
 350        }
 351
 352        return iommu_group_add_device(dev_data->group, dev);
 353}
 354
 355static int init_iommu_group(struct device *dev)
 356{
 357        struct iommu_dev_data *dev_data;
 358        struct iommu_group *group;
 359        struct pci_dev *dma_pdev;
 360        int ret;
 361
 362        group = iommu_group_get(dev);
 363        if (group) {
 364                iommu_group_put(group);
 365                return 0;
 366        }
 367
 368        dev_data = find_dev_data(get_device_id(dev));
 369        if (!dev_data)
 370                return -ENOMEM;
 371
 372        if (dev_data->alias_data) {
 373                u16 alias;
 374                struct pci_bus *bus;
 375
 376                if (dev_data->alias_data->group)
 377                        goto use_group;
 378
 379                /*
 380                 * If the alias device exists, it's effectively just a first
 381                 * level quirk for finding the DMA source.
 382                 */
 383                alias = amd_iommu_alias_table[dev_data->devid];
 384                dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
 385                if (dma_pdev) {
 386                        dma_pdev = get_isolation_root(dma_pdev);
 387                        goto use_pdev;
 388                }
 389
 390                /*
 391                 * If the alias is virtual, try to find a parent device
 392                 * and test whether the IOMMU group is actually rooted above
 393                 * the alias.  Be careful to also test the parent device if
 394                 * we think the alias is the root of the group.
 395                 */
 396                bus = pci_find_bus(0, alias >> 8);
 397                if (!bus)
 398                        goto use_group;
 399
 400                bus = find_hosted_bus(bus);
 401                if (IS_ERR(bus) || !bus->self)
 402                        goto use_group;
 403
 404                dma_pdev = get_isolation_root(pci_dev_get(bus->self));
 405                if (dma_pdev != bus->self || (dma_pdev->multifunction &&
 406                    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
 407                        goto use_pdev;
 408
 409                pci_dev_put(dma_pdev);
 410                goto use_group;
 411        }
 412
 413        dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
 414use_pdev:
 415        ret = use_pdev_iommu_group(dma_pdev, dev);
 416        pci_dev_put(dma_pdev);
 417        return ret;
 418use_group:
 419        return use_dev_data_iommu_group(dev_data->alias_data, dev);
 420}
 421
 422static int iommu_init_device(struct device *dev)
 423{
 424        struct pci_dev *pdev = to_pci_dev(dev);
 425        struct iommu_dev_data *dev_data;
 426        u16 alias;
 427        int ret;
 428
 429        if (dev->archdata.iommu)
 430                return 0;
 431
 432        dev_data = find_dev_data(get_device_id(dev));
 433        if (!dev_data)
 434                return -ENOMEM;
 435
 436        alias = amd_iommu_alias_table[dev_data->devid];
 437        if (alias != dev_data->devid) {
 438                struct iommu_dev_data *alias_data;
 439
 440                alias_data = find_dev_data(alias);
 441                if (alias_data == NULL) {
 442                        pr_err("AMD-Vi: Warning: Unhandled device %s\n",
 443                                        dev_name(dev));
 444                        free_dev_data(dev_data);
 445                        return -ENOTSUPP;
 446                }
 447                dev_data->alias_data = alias_data;
 448        }
 449
 450        ret = init_iommu_group(dev);
 451        if (ret)
 452                return ret;
 453
 454        if (pci_iommuv2_capable(pdev)) {
 455                struct amd_iommu *iommu;
 456
 457                iommu              = amd_iommu_rlookup_table[dev_data->devid];
 458                dev_data->iommu_v2 = iommu->is_iommu_v2;
 459        }
 460
 461        dev->archdata.iommu = dev_data;
 462
 463        return 0;
 464}
 465
 466static void iommu_ignore_device(struct device *dev)
 467{
 468        u16 devid, alias;
 469
 470        devid = get_device_id(dev);
 471        alias = amd_iommu_alias_table[devid];
 472
 473        memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 474        memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
 475
 476        amd_iommu_rlookup_table[devid] = NULL;
 477        amd_iommu_rlookup_table[alias] = NULL;
 478}
 479
 480static void iommu_uninit_device(struct device *dev)
 481{
 482        iommu_group_remove_device(dev);
 483
 484        /*
 485         * Nothing to do here - we keep dev_data around for unplugged devices
 486         * and reuse it when the device is re-plugged - not doing so would
 487         * introduce a ton of races.
 488         */
 489}
 490
 491void __init amd_iommu_uninit_devices(void)
 492{
 493        struct iommu_dev_data *dev_data, *n;
 494        struct pci_dev *pdev = NULL;
 495
 496        for_each_pci_dev(pdev) {
 497
 498                if (!check_device(&pdev->dev))
 499                        continue;
 500
 501                iommu_uninit_device(&pdev->dev);
 502        }
 503
 504        /* Free all of our dev_data structures */
 505        list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
 506                free_dev_data(dev_data);
 507}
 508
 509int __init amd_iommu_init_devices(void)
 510{
 511        struct pci_dev *pdev = NULL;
 512        int ret = 0;
 513
 514        for_each_pci_dev(pdev) {
 515
 516                if (!check_device(&pdev->dev))
 517                        continue;
 518
 519                ret = iommu_init_device(&pdev->dev);
 520                if (ret == -ENOTSUPP)
 521                        iommu_ignore_device(&pdev->dev);
 522                else if (ret)
 523                        goto out_free;
 524        }
 525
 526        return 0;
 527
 528out_free:
 529
 530        amd_iommu_uninit_devices();
 531
 532        return ret;
 533}
 534#ifdef CONFIG_AMD_IOMMU_STATS
 535
 536/*
 537 * Initialization code for statistics collection
 538 */
 539
 540DECLARE_STATS_COUNTER(compl_wait);
 541DECLARE_STATS_COUNTER(cnt_map_single);
 542DECLARE_STATS_COUNTER(cnt_unmap_single);
 543DECLARE_STATS_COUNTER(cnt_map_sg);
 544DECLARE_STATS_COUNTER(cnt_unmap_sg);
 545DECLARE_STATS_COUNTER(cnt_alloc_coherent);
 546DECLARE_STATS_COUNTER(cnt_free_coherent);
 547DECLARE_STATS_COUNTER(cross_page);
 548DECLARE_STATS_COUNTER(domain_flush_single);
 549DECLARE_STATS_COUNTER(domain_flush_all);
 550DECLARE_STATS_COUNTER(alloced_io_mem);
 551DECLARE_STATS_COUNTER(total_map_requests);
 552DECLARE_STATS_COUNTER(complete_ppr);
 553DECLARE_STATS_COUNTER(invalidate_iotlb);
 554DECLARE_STATS_COUNTER(invalidate_iotlb_all);
 555DECLARE_STATS_COUNTER(pri_requests);
 556
 557static struct dentry *stats_dir;
 558static struct dentry *de_fflush;
 559
 560static void amd_iommu_stats_add(struct __iommu_counter *cnt)
 561{
 562        if (stats_dir == NULL)
 563                return;
 564
 565        cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
 566                                       &cnt->value);
 567}
 568
 569static void amd_iommu_stats_init(void)
 570{
 571        stats_dir = debugfs_create_dir("amd-iommu", NULL);
 572        if (stats_dir == NULL)
 573                return;
 574
 575        de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
 576                                         &amd_iommu_unmap_flush);
 577
 578        amd_iommu_stats_add(&compl_wait);
 579        amd_iommu_stats_add(&cnt_map_single);
 580        amd_iommu_stats_add(&cnt_unmap_single);
 581        amd_iommu_stats_add(&cnt_map_sg);
 582        amd_iommu_stats_add(&cnt_unmap_sg);
 583        amd_iommu_stats_add(&cnt_alloc_coherent);
 584        amd_iommu_stats_add(&cnt_free_coherent);
 585        amd_iommu_stats_add(&cross_page);
 586        amd_iommu_stats_add(&domain_flush_single);
 587        amd_iommu_stats_add(&domain_flush_all);
 588        amd_iommu_stats_add(&alloced_io_mem);
 589        amd_iommu_stats_add(&total_map_requests);
 590        amd_iommu_stats_add(&complete_ppr);
 591        amd_iommu_stats_add(&invalidate_iotlb);
 592        amd_iommu_stats_add(&invalidate_iotlb_all);
 593        amd_iommu_stats_add(&pri_requests);
 594}
 595
 596#endif
 597
 598/****************************************************************************
 599 *
 600 * Interrupt handling functions
 601 *
 602 ****************************************************************************/
 603
 604static void dump_dte_entry(u16 devid)
 605{
 606        int i;
 607
 608        for (i = 0; i < 4; ++i)
 609                pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
 610                        amd_iommu_dev_table[devid].data[i]);
 611}
 612
 613static void dump_command(unsigned long phys_addr)
 614{
 615        struct iommu_cmd *cmd = phys_to_virt(phys_addr);
 616        int i;
 617
 618        for (i = 0; i < 4; ++i)
 619                pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
 620}
 621
 622static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 623{
 624        int type, devid, domid, flags;
 625        volatile u32 *event = __evt;
 626        int count = 0;
 627        u64 address;
 628
 629retry:
 630        type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
 631        devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 632        domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
 633        flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 634        address = (u64)(((u64)event[3]) << 32) | event[2];
 635
 636        if (type == 0) {
 637                /* Did we hit the erratum? */
 638                if (++count == LOOP_TIMEOUT) {
 639                        pr_err("AMD-Vi: No event written to event log\n");
 640                        return;
 641                }
 642                udelay(1);
 643                goto retry;
 644        }
 645
 646        printk(KERN_ERR "AMD-Vi: Event logged [");
 647
 648        switch (type) {
 649        case EVENT_TYPE_ILL_DEV:
 650                printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
 651                       "address=0x%016llx flags=0x%04x]\n",
 652                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 653                       address, flags);
 654                dump_dte_entry(devid);
 655                break;
 656        case EVENT_TYPE_IO_FAULT:
 657                printk("IO_PAGE_FAULT device=%02x:%02x.%x "
 658                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 659                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 660                       domid, address, flags);
 661                break;
 662        case EVENT_TYPE_DEV_TAB_ERR:
 663                printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 664                       "address=0x%016llx flags=0x%04x]\n",
 665                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 666                       address, flags);
 667                break;
 668        case EVENT_TYPE_PAGE_TAB_ERR:
 669                printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 670                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 671                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 672                       domid, address, flags);
 673                break;
 674        case EVENT_TYPE_ILL_CMD:
 675                printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
 676                dump_command(address);
 677                break;
 678        case EVENT_TYPE_CMD_HARD_ERR:
 679                printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
 680                       "flags=0x%04x]\n", address, flags);
 681                break;
 682        case EVENT_TYPE_IOTLB_INV_TO:
 683                printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
 684                       "address=0x%016llx]\n",
 685                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 686                       address);
 687                break;
 688        case EVENT_TYPE_INV_DEV_REQ:
 689                printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
 690                       "address=0x%016llx flags=0x%04x]\n",
 691                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 692                       address, flags);
 693                break;
 694        default:
 695                printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
 696        }
 697
 698        memset(__evt, 0, 4 * sizeof(u32));
 699}
 700
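    /*
     * Drain the event log ring buffer: valid entries sit between head
     * and tail, and writing the final head index back to the MMIO
     * register tells the hardware the entries have been consumed.
     */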
 701static void iommu_poll_events(struct amd_iommu *iommu)
 702{
 703        u32 head, tail;
 704        unsigned long flags;
 705
 706        spin_lock_irqsave(&iommu->lock, flags);
 707
 708        head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 709        tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 710
 711        while (head != tail) {
 712                iommu_print_event(iommu, iommu->evt_buf + head);
 713                head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
 714        }
 715
 716        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 717
 718        spin_unlock_irqrestore(&iommu->lock, flags);
 719}
 720
 721static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 722{
 723        struct amd_iommu_fault fault;
 724
 725        INC_STATS_COUNTER(pri_requests);
 726
 727        if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 728                pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 729                return;
 730        }
 731
 732        fault.address   = raw[1];
 733        fault.pasid     = PPR_PASID(raw[0]);
 734        fault.device_id = PPR_DEVID(raw[0]);
 735        fault.tag       = PPR_TAG(raw[0]);
 736        fault.flags     = PPR_FLAGS(raw[0]);
 737
 738        atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 739}
 740
 741static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 742{
 743        unsigned long flags;
 744        u32 head, tail;
 745
 746        if (iommu->ppr_log == NULL)
 747                return;
 748
 749        /* enable ppr interrupts again */
 750        writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
 751
 752        spin_lock_irqsave(&iommu->lock, flags);
 753
 754        head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 755        tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 756
 757        while (head != tail) {
 758                volatile u64 *raw;
 759                u64 entry[2];
 760                int i;
 761
 762                raw = (u64 *)(iommu->ppr_log + head);
 763
 764                /*
 765                 * Hardware bug: Interrupt may arrive before the entry is
 766                 * written to memory. If this happens we need to wait for the
 767                 * entry to arrive.
 768                 */
 769                for (i = 0; i < LOOP_TIMEOUT; ++i) {
 770                        if (PPR_REQ_TYPE(raw[0]) != 0)
 771                                break;
 772                        udelay(1);
 773                }
 774
 775                /* Avoid memcpy function-call overhead */
 776                entry[0] = raw[0];
 777                entry[1] = raw[1];
 778
 779                /*
 780                 * To detect the hardware bug we need to clear the entry
 781                 * back to zero.
 782                 */
 783                raw[0] = raw[1] = 0UL;
 784
 785                /* Update head pointer of hardware ring-buffer */
 786                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 787                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 788
 789                /*
 790                 * Release iommu->lock because ppr-handling might need to
 791                 * re-acquire it
 792                 */
 793                spin_unlock_irqrestore(&iommu->lock, flags);
 794
 795                /* Handle PPR entry */
 796                iommu_handle_ppr_entry(iommu, entry);
 797
 798                spin_lock_irqsave(&iommu->lock, flags);
 799
 800                /* Refresh ring-buffer information */
 801                head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 802                tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 803        }
 804
 805        spin_unlock_irqrestore(&iommu->lock, flags);
 806}
 807
 808irqreturn_t amd_iommu_int_thread(int irq, void *data)
 809{
 810        struct amd_iommu *iommu;
 811
 812        for_each_iommu(iommu) {
 813                iommu_poll_events(iommu);
 814                iommu_poll_ppr_log(iommu);
 815        }
 816
 817        return IRQ_HANDLED;
 818}
 819
 820irqreturn_t amd_iommu_int_handler(int irq, void *data)
 821{
 822        return IRQ_WAKE_THREAD;
 823}
 824
 825/****************************************************************************
 826 *
 827 * IOMMU command queuing functions
 828 *
 829 ****************************************************************************/
 830
 831static int wait_on_sem(volatile u64 *sem)
 832{
 833        int i = 0;
 834
 835        while (*sem == 0 && i < LOOP_TIMEOUT) {
 836                udelay(1);
 837                i += 1;
 838        }
 839
 840        if (i == LOOP_TIMEOUT) {
 841                pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
 842                return -EIO;
 843        }
 844
 845        return 0;
 846}
 847
 848static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 849                               struct iommu_cmd *cmd,
 850                               u32 tail)
 851{
 852        u8 *target;
 853
 854        target = iommu->cmd_buf + tail;
 855        tail   = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
 856
 857        /* Copy command to buffer */
 858        memcpy(target, cmd, sizeof(*cmd));
 859
 860        /* Tell the IOMMU about it */
 861        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 862}
 863
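    /*
     * With CMD_COMPL_WAIT_STORE_MASK set the IOMMU writes the value in
     * data[2] (here 1) to the physical address in data[0-1] once all
     * earlier commands have completed - exactly the store that
     * wait_on_sem() polls for.
     */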
 864static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 865{
 866        WARN_ON(address & 0x7ULL);
 867
 868        memset(cmd, 0, sizeof(*cmd));
 869        cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
 870        cmd->data[1] = upper_32_bits(__pa(address));
 871        cmd->data[2] = 1;
 872        CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 873}
 874
 875static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
 876{
 877        memset(cmd, 0, sizeof(*cmd));
 878        cmd->data[0] = devid;
 879        CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
 880}
 881
 882static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 883                                  size_t size, u16 domid, int pde)
 884{
 885        u64 pages;
 886        int s;
 887
 888        pages = iommu_num_pages(address, size, PAGE_SIZE);
 889        s     = 0;
 890
 891        if (pages > 1) {
 892                /*
 893                 * If we have to flush more than one page, flush all
 894                 * TLB entries for this domain
 895                 */
 896                address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 897                s = 1;
 898        }
 899
 900        address &= PAGE_MASK;
 901
 902        memset(cmd, 0, sizeof(*cmd));
 903        cmd->data[1] |= domid;
 904        cmd->data[2]  = lower_32_bits(address);
 905        cmd->data[3]  = upper_32_bits(address);
 906        CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 907        if (s) /* size bit - we flush more than one 4kb page */
 908                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 909        if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
 910                cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 911}
 912
 913static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 914                                  u64 address, size_t size)
 915{
 916        u64 pages;
 917        int s;
 918
 919        pages = iommu_num_pages(address, size, PAGE_SIZE);
 920        s     = 0;
 921
 922        if (pages > 1) {
 923                /*
 924                 * If we have to flush more than one page, flush all
 925                 * TLB entries for this domain
 926                 */
 927                address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 928                s = 1;
 929        }
 930
 931        address &= PAGE_MASK;
 932
 933        memset(cmd, 0, sizeof(*cmd));
 934        cmd->data[0]  = devid;
 935        cmd->data[0] |= (qdep & 0xff) << 24;
 936        cmd->data[1]  = devid;
 937        cmd->data[2]  = lower_32_bits(address);
 938        cmd->data[3]  = upper_32_bits(address);
 939        CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 940        if (s)
 941                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 942}
 943
 944static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
 945                                  u64 address, bool size)
 946{
 947        memset(cmd, 0, sizeof(*cmd));
 948
 949        address &= ~(0xfffULL);
 950
 951        cmd->data[0]  = pasid & PASID_MASK;
 952        cmd->data[1]  = domid;
 953        cmd->data[2]  = lower_32_bits(address);
 954        cmd->data[3]  = upper_32_bits(address);
 955        cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 956        cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 957        if (size)
 958                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 959        CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 960}
 961
 962static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
 963                                  int qdep, u64 address, bool size)
 964{
 965        memset(cmd, 0, sizeof(*cmd));
 966
 967        address &= ~(0xfffULL);
 968
 969        cmd->data[0]  = devid;
 970        cmd->data[0] |= (pasid & 0xff) << 16;
 971        cmd->data[0] |= (qdep  & 0xff) << 24;
 972        cmd->data[1]  = devid;
 973        cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
 974        cmd->data[2]  = lower_32_bits(address);
 975        cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 976        cmd->data[3]  = upper_32_bits(address);
 977        if (size)
 978                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 979        CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 980}
 981
 982static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
 983                               int status, int tag, bool gn)
 984{
 985        memset(cmd, 0, sizeof(*cmd));
 986
 987        cmd->data[0]  = devid;
 988        if (gn) {
 989                cmd->data[1]  = pasid & PASID_MASK;
 990                cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
 991        }
 992        cmd->data[3]  = tag & 0x1ff;
 993        cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
 994
 995        CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
 996}
 997
 998static void build_inv_all(struct iommu_cmd *cmd)
 999{
1000        memset(cmd, 0, sizeof(*cmd));
1001        CMD_SET_TYPE(cmd, CMD_INV_ALL);
1002}
1003
1004static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1005{
1006        memset(cmd, 0, sizeof(*cmd));
1007        cmd->data[0] = devid;
1008        CMD_SET_TYPE(cmd, CMD_INV_IRT);
1009}
1010
1011/*
1012 * Writes the command to the IOMMU's command buffer and informs the
1013 * hardware about the new command.
1014 */
1015static int iommu_queue_command_sync(struct amd_iommu *iommu,
1016                                    struct iommu_cmd *cmd,
1017                                    bool sync)
1018{
1019        u32 left, tail, head, next_tail;
1020        unsigned long flags;
1021
1022        WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
1023
1024again:
1025        spin_lock_irqsave(&iommu->lock, flags);
1026
1027        head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
1028        tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
1029        next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
1030        left      = (head - next_tail) % iommu->cmd_buf_size;
1031
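            /*
             * The unsigned subtraction wraps modulo the ring size, so
             * 'left' is the space that would remain after queuing this
             * command. If the ring is (almost) full, queue only a
             * COMPLETION_WAIT, wait for the hardware to drain the
             * buffer and try again.
             */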
1032        if (left <= 2) {
1033                struct iommu_cmd sync_cmd;
1034                volatile u64 sem = 0;
1035                int ret;
1036
1037                build_completion_wait(&sync_cmd, (u64)&sem);
1038                copy_cmd_to_buffer(iommu, &sync_cmd, tail);
1039
1040                spin_unlock_irqrestore(&iommu->lock, flags);
1041
1042                if ((ret = wait_on_sem(&sem)) != 0)
1043                        return ret;
1044
1045                goto again;
1046        }
1047
1048        copy_cmd_to_buffer(iommu, cmd, tail);
1049
1050        /* We need to sync now to make sure all commands are processed */
1051        iommu->need_sync = sync;
1052
1053        spin_unlock_irqrestore(&iommu->lock, flags);
1054
1055        return 0;
1056}
1057
1058static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1059{
1060        return iommu_queue_command_sync(iommu, cmd, true);
1061}
1062
1063/*
1064 * This function queues a completion wait command into the command
1065 * buffer of an IOMMU
1066 */
1067static int iommu_completion_wait(struct amd_iommu *iommu)
1068{
1069        struct iommu_cmd cmd;
1070        volatile u64 sem = 0;
1071        int ret;
1072
1073        if (!iommu->need_sync)
1074                return 0;
1075
1076        build_completion_wait(&cmd, (u64)&sem);
1077
1078        ret = iommu_queue_command_sync(iommu, &cmd, false);
1079        if (ret)
1080                return ret;
1081
1082        return wait_on_sem(&sem);
1083}
1084
1085static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1086{
1087        struct iommu_cmd cmd;
1088
1089        build_inv_dte(&cmd, devid);
1090
1091        return iommu_queue_command(iommu, &cmd);
1092}
1093
1094static void iommu_flush_dte_all(struct amd_iommu *iommu)
1095{
1096        u32 devid;
1097
1098        for (devid = 0; devid <= 0xffff; ++devid)
1099                iommu_flush_dte(iommu, devid);
1100
1101        iommu_completion_wait(iommu);
1102}
1103
1104/*
1105 * This function uses heavy locking and may disable irqs for some time. But
1106 * this is no issue because it is only called during resume.
1107 */
1108static void iommu_flush_tlb_all(struct amd_iommu *iommu)
1109{
1110        u32 dom_id;
1111
1112        for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
1113                struct iommu_cmd cmd;
1114                build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1115                                      dom_id, 1);
1116                iommu_queue_command(iommu, &cmd);
1117        }
1118
1119        iommu_completion_wait(iommu);
1120}
1121
1122static void iommu_flush_all(struct amd_iommu *iommu)
1123{
1124        struct iommu_cmd cmd;
1125
1126        build_inv_all(&cmd);
1127
1128        iommu_queue_command(iommu, &cmd);
1129        iommu_completion_wait(iommu);
1130}
1131
1132static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1133{
1134        struct iommu_cmd cmd;
1135
1136        build_inv_irt(&cmd, devid);
1137
1138        iommu_queue_command(iommu, &cmd);
1139}
1140
1141static void iommu_flush_irt_all(struct amd_iommu *iommu)
1142{
1143        u32 devid;
1144
1145        for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
1146                iommu_flush_irt(iommu, devid);
1147
1148        iommu_completion_wait(iommu);
1149}
1150
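    /*
     * Flush everything on one IOMMU. If the hardware implements
     * FEATURE_IA (the INVALIDATE_IOMMU_ALL command) a single command
     * suffices; otherwise DTEs, interrupt remapping tables and domain
     * TLBs are flushed one by one.
     */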
1151void iommu_flush_all_caches(struct amd_iommu *iommu)
1152{
1153        if (iommu_feature(iommu, FEATURE_IA)) {
1154                iommu_flush_all(iommu);
1155        } else {
1156                iommu_flush_dte_all(iommu);
1157                iommu_flush_irt_all(iommu);
1158                iommu_flush_tlb_all(iommu);
1159        }
1160}
1161
1162/*
1163 * Command send function for flushing on-device TLB
1164 */
1165static int device_flush_iotlb(struct iommu_dev_data *dev_data,
1166                              u64 address, size_t size)
1167{
1168        struct amd_iommu *iommu;
1169        struct iommu_cmd cmd;
1170        int qdep;
1171
1172        qdep     = dev_data->ats.qdep;
1173        iommu    = amd_iommu_rlookup_table[dev_data->devid];
1174
1175        build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1176
1177        return iommu_queue_command(iommu, &cmd);
1178}
1179
1180/*
1181 * Command send function for invalidating a device table entry
1182 */
1183static int device_flush_dte(struct iommu_dev_data *dev_data)
1184{
1185        struct amd_iommu *iommu;
1186        int ret;
1187
1188        iommu = amd_iommu_rlookup_table[dev_data->devid];
1189
1190        ret = iommu_flush_dte(iommu, dev_data->devid);
1191        if (ret)
1192                return ret;
1193
1194        if (dev_data->ats.enabled)
1195                ret = device_flush_iotlb(dev_data, 0, ~0UL);
1196
1197        return ret;
1198}
1199
1200/*
1201 * TLB invalidation function which is called from the mapping functions.
1202 * It invalidates a single PTE if the range to flush is within a single
1203 * page. Otherwise it flushes the whole TLB of the IOMMU.
1204 */
1205static void __domain_flush_pages(struct protection_domain *domain,
1206                                 u64 address, size_t size, int pde)
1207{
1208        struct iommu_dev_data *dev_data;
1209        struct iommu_cmd cmd;
1210        int ret = 0, i;
1211
1212        build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
1213
1214        for (i = 0; i < amd_iommus_present; ++i) {
1215                if (!domain->dev_iommu[i])
1216                        continue;
1217
1218                /*
1219                 * Devices of this domain are behind this IOMMU
1220                 * We need a TLB flush
1221                 */
1222                ret |= iommu_queue_command(amd_iommus[i], &cmd);
1223        }
1224
1225        list_for_each_entry(dev_data, &domain->dev_list, list) {
1226
1227                if (!dev_data->ats.enabled)
1228                        continue;
1229
1230                ret |= device_flush_iotlb(dev_data, address, size);
1231        }
1232
1233        WARN_ON(ret);
1234}
1235
1236static void domain_flush_pages(struct protection_domain *domain,
1237                               u64 address, size_t size)
1238{
1239        __domain_flush_pages(domain, address, size, 0);
1240}
1241
1242/* Flush the whole IO/TLB for a given protection domain */
1243static void domain_flush_tlb(struct protection_domain *domain)
1244{
1245        __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
1246}
1247
1248/* Flush the whole IO/TLB for a given protection domain - including PDE */
1249static void domain_flush_tlb_pde(struct protection_domain *domain)
1250{
1251        __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
1252}
1253
1254static void domain_flush_complete(struct protection_domain *domain)
1255{
1256        int i;
1257
1258        for (i = 0; i < amd_iommus_present; ++i) {
1259                if (!domain->dev_iommu[i])
1260                        continue;
1261
1262                /*
1263                 * Devices of this domain are behind this IOMMU
1264                 * We need to wait for completion of all commands.
1265                 */
1266                iommu_completion_wait(amd_iommus[i]);
1267        }
1268}
1269
1270
1271/*
1272 * This function flushes the DTEs for all devices in the domain
1273 */
1274static void domain_flush_devices(struct protection_domain *domain)
1275{
1276        struct iommu_dev_data *dev_data;
1277
1278        list_for_each_entry(dev_data, &domain->dev_list, list)
1279                device_flush_dte(dev_data);
1280}
1281
1282/****************************************************************************
1283 *
1284 * The functions below are used to create the page table mappings for
1285 * unity mapped regions.
1286 *
1287 ****************************************************************************/
1288
1289/*
1290 * This function is used to add another level to an IO page table. Adding
1291 * another level increases the size of the address space by 9 bits, up to a
1292 * maximum of 64 bits.
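     * For example, a 3-level page table covers 12 + 3 * 9 = 39 address
     * bits (512 GB); each added level contributes another 9 bits.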
1293 */
1294static bool increase_address_space(struct protection_domain *domain,
1295                                   gfp_t gfp)
1296{
1297        u64 *pte;
1298
1299        if (domain->mode == PAGE_MODE_6_LEVEL)
1300                /* address space is already 64 bits wide */
1301                return false;
1302
1303        pte = (void *)get_zeroed_page(gfp);
1304        if (!pte)
1305                return false;
1306
1307        *pte             = PM_LEVEL_PDE(domain->mode,
1308                                        virt_to_phys(domain->pt_root));
1309        domain->pt_root  = pte;
1310        domain->mode    += 1;
1311        domain->updated  = true;
1312
1313        return true;
1314}
1315
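    /*
     * Walk the page table down to the level that backs @page_size,
     * allocating missing intermediate page-table pages along the way,
     * and return a pointer to the (first) PTE mapping @address.
     */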
1316static u64 *alloc_pte(struct protection_domain *domain,
1317                      unsigned long address,
1318                      unsigned long page_size,
1319                      u64 **pte_page,
1320                      gfp_t gfp)
1321{
1322        int level, end_lvl;
1323        u64 *pte, *page;
1324
1325        BUG_ON(!is_power_of_2(page_size));
1326
1327        while (address > PM_LEVEL_SIZE(domain->mode))
1328                increase_address_space(domain, gfp);
1329
1330        level   = domain->mode - 1;
1331        pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1332        address = PAGE_SIZE_ALIGN(address, page_size);
1333        end_lvl = PAGE_SIZE_LEVEL(page_size);
1334
1335        while (level > end_lvl) {
1336                if (!IOMMU_PTE_PRESENT(*pte)) {
1337                        page = (u64 *)get_zeroed_page(gfp);
1338                        if (!page)
1339                                return NULL;
1340                        *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
1341                }
1342
1343                /* No level skipping support yet */
1344                if (PM_PTE_LEVEL(*pte) != level)
1345                        return NULL;
1346
1347                level -= 1;
1348
1349                pte = IOMMU_PTE_PAGE(*pte);
1350
1351                if (pte_page && level == end_lvl)
1352                        *pte_page = pte;
1353
1354                pte = &pte[PM_LEVEL_INDEX(level, address)];
1355        }
1356
1357        return pte;
1358}
1359
1360/*
1361 * This function checks if there is a PTE for a given dma address. If
1362 * there is one, it returns the pointer to it.
1363 */
1364static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
1365{
1366        int level;
1367        u64 *pte;
1368
1369        if (address > PM_LEVEL_SIZE(domain->mode))
1370                return NULL;
1371
1372        level   =  domain->mode - 1;
1373        pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1374
1375        while (level > 0) {
1376
1377                /* Not Present */
1378                if (!IOMMU_PTE_PRESENT(*pte))
1379                        return NULL;
1380
1381                /* Large PTE */
1382                if (PM_PTE_LEVEL(*pte) == 0x07) {
1383                        unsigned long pte_mask, __pte;
1384
1385                        /*
1386                         * If we have a series of large PTEs, make
1387                         * sure to return a pointer to the first one.
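                             * The mapped size comes from PTE_PAGE_SIZE() and
                             * spans PAGE_SIZE_PTE_COUNT() 8-byte PTE slots
                             * (e.g. 16 slots for a 64 KiB mapping), so masking
                             * with that byte span rounds the pointer down to
                             * the first PTE of the series.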
1388                         */
1389                        pte_mask = PTE_PAGE_SIZE(*pte);
1390                        pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
1391                        __pte    = ((unsigned long)pte) & pte_mask;
1392
1393                        return (u64 *)__pte;
1394                }
1395
1396                /* No level skipping support yet */
1397                if (PM_PTE_LEVEL(*pte) != level)
1398                        return NULL;
1399
1400                level -= 1;
1401
1402                /* Walk to the next level */
1403                pte = IOMMU_PTE_PAGE(*pte);
1404                pte = &pte[PM_LEVEL_INDEX(level, address)];
1405        }
1406
1407        return pte;
1408}
1409
1410/*
1411 * Generic mapping function. It maps a physical address into a DMA
1412 * address space. It allocates the page table pages if necessary.
1413 * In the future it can be extended to a generic mapping function
1414 * supporting all features of AMD IOMMU page tables like level skipping
1415 * and full 64 bit address spaces.
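     * Mappings larger than 4 KiB use the level-7 "arbitrary size" PTE
     * encoding: PM_LEVEL_ENC(7) marks the entry and PAGE_SIZE_PTE()
     * folds the page size into the low bits of the address.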
1416 */
1417static int iommu_map_page(struct protection_domain *dom,
1418                          unsigned long bus_addr,
1419                          unsigned long phys_addr,
1420                          int prot,
1421                          unsigned long page_size)
1422{
1423        u64 __pte, *pte;
1424        int i, count;
1425
1426        if (!(prot & IOMMU_PROT_MASK))
1427                return -EINVAL;
1428
1429        bus_addr  = PAGE_ALIGN(bus_addr);
1430        phys_addr = PAGE_ALIGN(phys_addr);
1431        count     = PAGE_SIZE_PTE_COUNT(page_size);
 432        pte       = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
            if (!pte)
                    return -ENOMEM;
 433
1434        for (i = 0; i < count; ++i)
1435                if (IOMMU_PTE_PRESENT(pte[i]))
1436                        return -EBUSY;
1437
1438        if (page_size > PAGE_SIZE) {
1439                __pte = PAGE_SIZE_PTE(phys_addr, page_size);
1440                __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
1441        } else
1442                __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
1443
1444        if (prot & IOMMU_PROT_IR)
1445                __pte |= IOMMU_PTE_IR;
1446        if (prot & IOMMU_PROT_IW)
1447                __pte |= IOMMU_PTE_IW;
1448
1449        for (i = 0; i < count; ++i)
1450                pte[i] = __pte;
1451
1452        update_domain(dom);
1453
1454        return 0;
1455}
1456
1457static unsigned long iommu_unmap_page(struct protection_domain *dom,
1458                                      unsigned long bus_addr,
1459                                      unsigned long page_size)
1460{
1461        unsigned long long unmap_size, unmapped;
1462        u64 *pte;
1463
1464        BUG_ON(!is_power_of_2(page_size));
1465
1466        unmapped = 0;
1467
1468        while (unmapped < page_size) {
1469
1470                pte = fetch_pte(dom, bus_addr);
1471
1472                if (!pte) {
1473                        /*
1474                         * No PTE for this address
1475                         * move forward in 4kb steps
1476                         */
1477                        unmap_size = PAGE_SIZE;
1478                } else if (PM_PTE_LEVEL(*pte) == 0) {
1479                        /* 4kb PTE found for this address */
1480                        unmap_size = PAGE_SIZE;
1481                        *pte       = 0ULL;
1482                } else {
1483                        int count, i;
1484
1485                        /* Large PTE found which maps this address */
1486                        unmap_size = PTE_PAGE_SIZE(*pte);
1487                        count      = PAGE_SIZE_PTE_COUNT(unmap_size);
1488                        for (i = 0; i < count; i++)
1489                                pte[i] = 0ULL;
1490                }
1491
1492                bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
1493                unmapped += unmap_size;
1494        }
1495
1496        BUG_ON(!is_power_of_2(unmapped));
1497
1498        return unmapped;
1499}
1500
1501/*
1502 * This function checks if a specific unity mapping entry is needed for
1503 * this specific IOMMU.
1504 */
1505static int iommu_for_unity_map(struct amd_iommu *iommu,
1506                               struct unity_map_entry *entry)
1507{
1508        u16 bdf, i;
1509
1510        for (i = entry->devid_start; i <= entry->devid_end; ++i) {
1511                bdf = amd_iommu_alias_table[i];
1512                if (amd_iommu_rlookup_table[bdf] == iommu)
1513                        return 1;
1514        }
1515
1516        return 0;
1517}
1518
1519/*
1520 * This function actually applies the mapping to the page table of the
1521 * dma_ops domain.
1522 */
1523static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
1524                             struct unity_map_entry *e)
1525{
1526        u64 addr;
1527        int ret;
1528
1529        for (addr = e->address_start; addr < e->address_end;
1530             addr += PAGE_SIZE) {
1531                ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
1532                                     PAGE_SIZE);
1533                if (ret)
1534                        return ret;
1535                /*
1536                 * if unity mapping is in aperture range mark the page
1537                 * as allocated in the aperture
1538                 */
1539                if (addr < dma_dom->aperture_size)
1540                        __set_bit(addr >> PAGE_SHIFT,
1541                                  dma_dom->aperture[0]->bitmap);
1542        }
1543
1544        return 0;
1545}
1546
1547/*
1548 * Init the unity mappings for a specific IOMMU in the system
1549 *
1550 * Basically iterates over all unity mapping entries and applies them to
1551 * the default DMA domain of that IOMMU if necessary.
1552 */
1553static int iommu_init_unity_mappings(struct amd_iommu *iommu)
1554{
1555        struct unity_map_entry *entry;
1556        int ret;
1557
1558        list_for_each_entry(entry, &amd_iommu_unity_map, list) {
1559                if (!iommu_for_unity_map(iommu, entry))
1560                        continue;
1561                ret = dma_ops_unity_map(iommu->default_dom, entry);
1562                if (ret)
1563                        return ret;
1564        }
1565
1566        return 0;
1567}
1568
1569/*
1570 * Inits the unity mappings required for a specific device
1571 */
1572static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
1573                                          u16 devid)
1574{
1575        struct unity_map_entry *e;
1576        int ret;
1577
1578        list_for_each_entry(e, &amd_iommu_unity_map, list) {
1579                if (!(devid >= e->devid_start && devid <= e->devid_end))
1580                        continue;
1581                ret = dma_ops_unity_map(dma_dom, e);
1582                if (ret)
1583                        return ret;
1584        }
1585
1586        return 0;
1587}
1588
1589/****************************************************************************
1590 *
1591 * The next functions belong to the address allocator for the dma_ops
1592 * interface functions. They work like the allocators in the other IOMMU
1593 * drivers. It's basically a bitmap which marks the allocated pages in
1594 * the aperture. Maybe it could be enhanced in the future to a more
1595 * efficient allocator.
1596 *
1597 ****************************************************************************/
1598
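    /*
     * The aperture grows in APERTURE_RANGE_SIZE (128 MB) chunks, up to
     * APERTURE_MAX_RANGES of them; each chunk carries its own bitmap
     * of APERTURE_RANGE_PAGES allocation bits.
     */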
1599/*
1600 * The address allocator core functions.
1601 *
1602 * called with domain->lock held
1603 */
1604
1605/*
1606 * Used to reserve address ranges in the aperture (e.g. for exclusion
1607 * ranges).
1608 */
1609static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
1610                                      unsigned long start_page,
1611                                      unsigned int pages)
1612{
1613        unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
1614
1615        if (start_page + pages > last_page)
1616                pages = last_page - start_page;
1617
1618        for (i = start_page; i < start_page + pages; ++i) {
1619                int index = i / APERTURE_RANGE_PAGES;
1620                int page  = i % APERTURE_RANGE_PAGES;
1621                __set_bit(page, dom->aperture[index]->bitmap);
1622        }
1623}
1624
1625/*
1626 * This function is used to add a new aperture range to an existing
1627 * aperture in case of dma_ops domain allocation or address allocation
1628 * failure.
1629 */
1630static int alloc_new_range(struct dma_ops_domain *dma_dom,
1631                           bool populate, gfp_t gfp)
1632{
1633        int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
1634        struct amd_iommu *iommu;
1635        unsigned long i, old_size;
1636
1637#ifdef CONFIG_IOMMU_STRESS
1638        populate = false;
1639#endif
1640
1641        if (index >= APERTURE_MAX_RANGES)
1642                return -ENOMEM;
1643
1644        dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
1645        if (!dma_dom->aperture[index])
1646                return -ENOMEM;
1647
1648        dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
1649        if (!dma_dom->aperture[index]->bitmap)
1650                goto out_free;
1651
1652        dma_dom->aperture[index]->offset = dma_dom->aperture_size;
1653
1654        if (populate) {
1655                unsigned long address = dma_dom->aperture_size;
1656                int i, num_ptes = APERTURE_RANGE_PAGES / 512;
1657                u64 *pte, *pte_page;
1658
1659                for (i = 0; i < num_ptes; ++i) {
1660                        pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
1661                                        &pte_page, gfp);
1662                        if (!pte)
1663                                goto out_free;
1664
1665                        dma_dom->aperture[index]->pte_pages[i] = pte_page;
1666
1667                        address += APERTURE_RANGE_SIZE / 64;
1668                }
1669        }
1670
1671        old_size                = dma_dom->aperture_size;
1672        dma_dom->aperture_size += APERTURE_RANGE_SIZE;
1673
1674        /* Reserve address range used for MSI messages */
1675        if (old_size < MSI_ADDR_BASE_LO &&
1676            dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
1677                unsigned long spage;
1678                int pages;
1679
1680                pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
1681                spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
1682
1683                dma_ops_reserve_addresses(dma_dom, spage, pages);
1684        }
1685
1686        /* Initialize the exclusion range if necessary */
1687        for_each_iommu(iommu) {
1688                if (iommu->exclusion_start &&
1689                    iommu->exclusion_start >= dma_dom->aperture[index]->offset
1690                    && iommu->exclusion_start < dma_dom->aperture_size) {
1691                        unsigned long startpage;
1692                        int pages = iommu_num_pages(iommu->exclusion_start,
1693                                                    iommu->exclusion_length,
1694                                                    PAGE_SIZE);
1695                        startpage = iommu->exclusion_start >> PAGE_SHIFT;
1696                        dma_ops_reserve_addresses(dma_dom, startpage, pages);
1697                }
1698        }
1699
1700        /*
1701         * Check for areas already mapped as present in the new aperture
1702         * range and mark those pages as reserved in the allocator. Such
1703         * mappings may already exist as a result of requested unity
1704         * mappings for devices.
1705         */
1706        for (i = dma_dom->aperture[index]->offset;
1707             i < dma_dom->aperture_size;
1708             i += PAGE_SIZE) {
1709                u64 *pte = fetch_pte(&dma_dom->domain, i);
1710                if (!pte || !IOMMU_PTE_PRESENT(*pte))
1711                        continue;
1712
1713                dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
1714        }
1715
1716        update_domain(&dma_dom->domain);
1717
1718        return 0;
1719
1720out_free:
1721        update_domain(&dma_dom->domain);
1722
1723        free_page((unsigned long)dma_dom->aperture[index]->bitmap);
1724
1725        kfree(dma_dom->aperture[index]);
1726        dma_dom->aperture[index] = NULL;
1727
1728        return -ENOMEM;
1729}
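
/*
 * The sizing math above, spelled out (a sketch assuming 4 KiB pages, as
 * everywhere else in this file): one PTE page holds PAGE_SIZE/sizeof(u64)
 * == 512 entries, so each of the num_ptes == APERTURE_RANGE_PAGES / 512
 * == 64 PTE pages covers APERTURE_RANGE_SIZE / 64 == 2 MB of DMA address
 * space. The MSI window reservation works out to
 *
 *	pages = iommu_num_pages(0xfee00000, 0x10000, 4096);	= 16
 *	spage = 0xfee00000 >> PAGE_SHIFT;			= 0xfee00
 *
 * i.e. 16 pages starting at page 0xfee00 are marked allocated so the
 * allocator can never hand out DMA addresses that alias the MSI range.
 */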
1730
1731static unsigned long dma_ops_area_alloc(struct device *dev,
1732                                        struct dma_ops_domain *dom,
1733                                        unsigned int pages,
1734                                        unsigned long align_mask,
1735                                        u64 dma_mask,
1736                                        unsigned long start)
1737{
1738        unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
1739        int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
1740        int i = start >> APERTURE_RANGE_SHIFT;
1741        unsigned long boundary_size;
1742        unsigned long address = -1;
1743        unsigned long limit;
1744
1745        next_bit >>= PAGE_SHIFT;
1746
1747        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
1748                        PAGE_SIZE) >> PAGE_SHIFT;
1749
1750        for (; i < max_index; ++i) {
1751                unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
1752
1753                if (dom->aperture[i]->offset >= dma_mask)
1754                        break;
1755
1756                limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
1757                                               dma_mask >> PAGE_SHIFT);
1758
1759                address = iommu_area_alloc(dom->aperture[i]->bitmap,
1760                                           limit, next_bit, pages, 0,
1761                                           boundary_size, align_mask);
1762                if (address != -1) {
1763                        address = dom->aperture[i]->offset +
1764                                  (address << PAGE_SHIFT);
1765                        dom->next_address = address + (pages << PAGE_SHIFT);
1766                        break;
1767                }
1768
1769                next_bit = 0;
1770        }
1771
1772        return address;
1773}
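
/*
 * Example of the boundary computation above: for a device with the
 * default DMA segment boundary mask of 0xffffffff,
 *
 *	boundary_size = ALIGN(0xffffffffUL + 1, PAGE_SIZE) >> PAGE_SHIFT;
 *	 = 0x100000 pages (4 GB)
 *
 * so iommu_area_alloc() never returns a range crossing a 4 GB boundary
 * for such a device.
 */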
1774
1775static unsigned long dma_ops_alloc_addresses(struct device *dev,
1776                                             struct dma_ops_domain *dom,
1777                                             unsigned int pages,
1778                                             unsigned long align_mask,
1779                                             u64 dma_mask)
1780{
1781        unsigned long address;
1782
1783#ifdef CONFIG_IOMMU_STRESS
1784        dom->next_address = 0;
1785        dom->need_flush = true;
1786#endif
1787
1788        address = dma_ops_area_alloc(dev, dom, pages, align_mask,
1789                                     dma_mask, dom->next_address);
1790
1791        if (address == -1) {
1792                dom->next_address = 0;
1793                address = dma_ops_area_alloc(dev, dom, pages, align_mask,
1794                                             dma_mask, 0);
1795                dom->need_flush = true;
1796        }
1797
1798        if (unlikely(address == -1))
1799                address = DMA_ERROR_CODE;
1800
1801        WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
1802
1803        return address;
1804}
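
/*
 * Note on need_flush above: once the search wraps around to address 0,
 * the allocator may hand out DMA addresses which were freed earlier and
 * may still be cached in the IOTLB, so the domain has to be flushed
 * before a device uses them again.
 */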
1805
1806/*
1807 * The address free function.
1808 *
1809 * called with domain->lock held
1810 */
1811static void dma_ops_free_addresses(struct dma_ops_domain *dom,
1812                                   unsigned long address,
1813                                   unsigned int pages)
1814{
1815        unsigned i = address >> APERTURE_RANGE_SHIFT;
1816        struct aperture_range *range = dom->aperture[i];
1817
1818        BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
1819
1820#ifdef CONFIG_IOMMU_STRESS
1821        if (i < 4)
1822                return;
1823#endif
1824
1825        if (address >= dom->next_address)
1826                dom->need_flush = true;
1827
1828        address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
1829
1830        bitmap_clear(range->bitmap, address, pages);
1832}
1833
1834/****************************************************************************
1835 *
1836 * The next functions belong to the domain allocation. A domain is
1837 * allocated for every IOMMU as the default domain. If device isolation
1838 * is enabled, every device gets its own domain. The most important thing
1839 * about domains is the page table mapping the DMA address space they
1840 * contain.
1841 *
1842 ****************************************************************************/
1843
1844/*
1845 * This function adds a protection domain to the global protection domain list
1846 */
1847static void add_domain_to_list(struct protection_domain *domain)
1848{
1849        unsigned long flags;
1850
1851        spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1852        list_add(&domain->list, &amd_iommu_pd_list);
1853        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1854}
1855
1856/*
1857 * This function removes a protection domain from the global
1858 * protection domain list
1859 */
1860static void del_domain_from_list(struct protection_domain *domain)
1861{
1862        unsigned long flags;
1863
1864        spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1865        list_del(&domain->list);
1866        spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1867}
1868
1869static u16 domain_id_alloc(void)
1870{
1871        unsigned long flags;
1872        int id;
1873
1874        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1875        id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1876        BUG_ON(id == 0);
1877        if (id > 0 && id < MAX_DOMAIN_ID)
1878                __set_bit(id, amd_iommu_pd_alloc_bitmap);
1879        else
1880                id = 0;
1881        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1882
1883        return id;
1884}
1885
1886static void domain_id_free(int id)
1887{
1888        unsigned long flags;
1889
1890        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1891        if (id > 0 && id < MAX_DOMAIN_ID)
1892                __clear_bit(id, amd_iommu_pd_alloc_bitmap);
1893        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1894}
1895
1896static void free_pagetable(struct protection_domain *domain)
1897{
1898        int i, j;
1899        u64 *p1, *p2, *p3;
1900
1901        p1 = domain->pt_root;
1902
1903        if (!p1)
1904                return;
1905
1906        for (i = 0; i < 512; ++i) {
1907                if (!IOMMU_PTE_PRESENT(p1[i]))
1908                        continue;
1909
1910                p2 = IOMMU_PTE_PAGE(p1[i]);
1911                for (j = 0; j < 512; ++j) {
1912                        if (!IOMMU_PTE_PRESENT(p2[j]))
1913                                continue;
1914                        p3 = IOMMU_PTE_PAGE(p2[j]);
1915                        free_page((unsigned long)p3);
1916                }
1917
1918                free_page((unsigned long)p2);
1919        }
1920
1921        free_page((unsigned long)p1);
1922
1923        domain->pt_root = NULL;
1924}
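
/*
 * For reference: a page-table page is one 4 KiB page of u64 entries, so
 * each level holds PAGE_SIZE / sizeof(u64) == 512 slots, which is why
 * both loops above walk exactly 512 entries. Three levels of 9 bits
 * each plus the 12-bit page offset cover 9 + 9 + 9 + 12 == 39 bits,
 * i.e. 512 GB of DMA address space for a 3-level page table.
 */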
1925
1926static void free_gcr3_tbl_level1(u64 *tbl)
1927{
1928        u64 *ptr;
1929        int i;
1930
1931        for (i = 0; i < 512; ++i) {
1932                if (!(tbl[i] & GCR3_VALID))
1933                        continue;
1934
1935                ptr = __va(tbl[i] & PAGE_MASK);
1936
1937                free_page((unsigned long)ptr);
1938        }
1939}
1940
1941static void free_gcr3_tbl_level2(u64 *tbl)
1942{
1943        u64 *ptr;
1944        int i;
1945
1946        for (i = 0; i < 512; ++i) {
1947                if (!(tbl[i] & GCR3_VALID))
1948                        continue;
1949
1950                ptr = __va(tbl[i] & PAGE_MASK);
1951
1952                free_gcr3_tbl_level1(ptr);
1953        }
1954}
1955
1956static void free_gcr3_table(struct protection_domain *domain)
1957{
1958        if (domain->glx == 2)
1959                free_gcr3_tbl_level2(domain->gcr3_tbl);
1960        else if (domain->glx == 1)
1961                free_gcr3_tbl_level1(domain->gcr3_tbl);
1962        else if (domain->glx != 0)
1963                BUG();
1964
1965        free_page((unsigned long)domain->gcr3_tbl);
1966}
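
/*
 * Like the DMA page tables, each GCR3 table level is a single 4 KiB
 * page of 512 u64 entries, so glx == 0 means one page of GCR3 entries
 * and every additional level adds another 512-way indirection.
 */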
1967
1968/*
1969 * Free a domain, only used if something went wrong in the
1970 * allocation path and we need to free an already allocated page table
1971 */
1972static void dma_ops_domain_free(struct dma_ops_domain *dom)
1973{
1974        int i;
1975
1976        if (!dom)
1977                return;
1978
1979        del_domain_from_list(&dom->domain);
1980
1981        free_pagetable(&dom->domain);
1982
1983        for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1984                if (!dom->aperture[i])
1985                        continue;
1986                free_page((unsigned long)dom->aperture[i]->bitmap);
1987                kfree(dom->aperture[i]);
1988        }
1989
1990        kfree(dom);
1991}
1992
1993/*
1994 * Allocates a new protection domain usable for the dma_ops functions.
1995 * It also initializes the page table and the address allocator data
1996 * structures required for the dma_ops interface
1997 */
1998static struct dma_ops_domain *dma_ops_domain_alloc(void)
1999{
2000        struct dma_ops_domain *dma_dom;
2001
2002        dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
2003        if (!dma_dom)
2004                return NULL;
2005
2006        spin_lock_init(&dma_dom->domain.lock);
2007
2008        dma_dom->domain.id = domain_id_alloc();
2009        if (dma_dom->domain.id == 0)
2010                goto free_dma_dom;
2011        INIT_LIST_HEAD(&dma_dom->domain.dev_list);
2012        dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
2013        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2014        dma_dom->domain.flags = PD_DMA_OPS_MASK;
2015        dma_dom->domain.priv = dma_dom;
2016        if (!dma_dom->domain.pt_root)
2017                goto free_dma_dom;
2018
2019        dma_dom->need_flush = false;
2020        dma_dom->target_dev = 0xffff;
2021
2022        add_domain_to_list(&dma_dom->domain);
2023
2024        if (alloc_new_range(dma_dom, true, GFP_KERNEL))
2025                goto free_dma_dom;
2026
2027        /*
2028         * Mark the first page as allocated so we never return 0 as
2029         * a valid dma-address. This allows us to use 0 as the error value.
2030         */
2031        dma_dom->aperture[0]->bitmap[0] = 1;
2032        dma_dom->next_address = 0;
2033
2035        return dma_dom;
2036
2037free_dma_dom:
2038        dma_ops_domain_free(dma_dom);
2039
2040        return NULL;
2041}
2042
2043/*
2044 * Little helper function to check whether a given protection domain is a
2045 * dma_ops domain.
2046 */
2047static bool dma_ops_domain(struct protection_domain *domain)
2048{
2049        return domain->flags & PD_DMA_OPS_MASK;
2050}
2051
2052static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
2053{
2054        u64 pte_root = 0;
2055        u64 flags = 0;
2056
2057        if (domain->mode != PAGE_MODE_NONE)
2058                pte_root = virt_to_phys(domain->pt_root);
2059
2060        pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
2061                    << DEV_ENTRY_MODE_SHIFT;
2062        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
2063
2064        flags = amd_iommu_dev_table[devid].data[1];
2065
2066        if (ats)
2067                flags |= DTE_FLAG_IOTLB;
2068
2069        if (domain->flags & PD_IOMMUV2_MASK) {
2070                u64 gcr3 = __pa(domain->gcr3_tbl);
2071                u64 glx  = domain->glx;
2072                u64 tmp;
2073
2074                pte_root |= DTE_FLAG_GV;
2075                pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
2076
2077                /* First mask out possible old values for GCR3 table */
2078                tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
2079                flags    &= ~tmp;
2080
2081                tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
2082                flags    &= ~tmp;
2083
2084                /* Encode GCR3 table into DTE */
2085                tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
2086                pte_root |= tmp;
2087
2088                tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
2089                flags    |= tmp;
2090
2091                tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
2092                flags    |= tmp;
2093        }
2094
2095        flags &= ~(0xffffUL);
2096        flags |= domain->id;
2097
2098        amd_iommu_dev_table[devid].data[1]  = flags;
2099        amd_iommu_dev_table[devid].data[0]  = pte_root;
2100}
2101
2102static void clear_dte_entry(u16 devid)
2103{
2104        /* remove entry from the device table seen by the hardware */
2105        amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
2106        amd_iommu_dev_table[devid].data[1] = 0;
2107
2108        amd_iommu_apply_erratum_63(devid);
2109}
2110
2111static void do_attach(struct iommu_dev_data *dev_data,
2112                      struct protection_domain *domain)
2113{
2114        struct amd_iommu *iommu;
2115        bool ats;
2116
2117        iommu = amd_iommu_rlookup_table[dev_data->devid];
2118        ats   = dev_data->ats.enabled;
2119
2120        /* Update data structures */
2121        dev_data->domain = domain;
2122        list_add(&dev_data->list, &domain->dev_list);
2123        set_dte_entry(dev_data->devid, domain, ats);
2124
2125        /* Do reference counting */
2126        domain->dev_iommu[iommu->index] += 1;
2127        domain->dev_cnt                 += 1;
2128
2129        /* Flush the DTE entry */
2130        device_flush_dte(dev_data);
2131}
2132
2133static void do_detach(struct iommu_dev_data *dev_data)
2134{
2135        struct amd_iommu *iommu;
2136
2137        iommu = amd_iommu_rlookup_table[dev_data->devid];
2138
2139        /* decrease reference counters */
2140        dev_data->domain->dev_iommu[iommu->index] -= 1;
2141        dev_data->domain->dev_cnt                 -= 1;
2142
2143        /* Update data structures */
2144        dev_data->domain = NULL;
2145        list_del(&dev_data->list);
2146        clear_dte_entry(dev_data->devid);
2147
2148        /* Flush the DTE entry */
2149        device_flush_dte(dev_data);
2150}
2151
2152/*
2153 * If a device is not yet associated with a domain, this function
2154 * assigns it to the domain and makes it visible to the hardware
2155 */
2156static int __attach_device(struct iommu_dev_data *dev_data,
2157                           struct protection_domain *domain)
2158{
2159        int ret;
2160
2161        /* lock domain */
2162        spin_lock(&domain->lock);
2163
2164        if (dev_data->alias_data != NULL) {
2165                struct iommu_dev_data *alias_data = dev_data->alias_data;
2166
2167                /* Some sanity checks */
2168                ret = -EBUSY;
2169                if (alias_data->domain != NULL &&
2170                                alias_data->domain != domain)
2171                        goto out_unlock;
2172
2173                if (dev_data->domain != NULL &&
2174                                dev_data->domain != domain)
2175                        goto out_unlock;
2176
2177                /* Do real assignment */
2178                if (alias_data->domain == NULL)
2179                        do_attach(alias_data, domain);
2180
2181                atomic_inc(&alias_data->bind);
2182        }
2183
2184        if (dev_data->domain == NULL)
2185                do_attach(dev_data, domain);
2186
2187        atomic_inc(&dev_data->bind);
2188
2189        ret = 0;
2190
2191out_unlock:
2192
2193        /* ready */
2194        spin_unlock(&domain->lock);
2195
2196        return ret;
2197}
2198
2200static void pdev_iommuv2_disable(struct pci_dev *pdev)
2201{
2202        pci_disable_ats(pdev);
2203        pci_disable_pri(pdev);
2204        pci_disable_pasid(pdev);
2205}
2206
2207/* FIXME: Change generic reset-function to do the same */
2208static int pri_reset_while_enabled(struct pci_dev *pdev)
2209{
2210        u16 control;
2211        int pos;
2212
2213        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2214        if (!pos)
2215                return -EINVAL;
2216
2217        pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
2218        control |= PCI_PRI_CTRL_RESET;
2219        pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
2220
2221        return 0;
2222}
2223
2224static int pdev_iommuv2_enable(struct pci_dev *pdev)
2225{
2226        bool reset_enable;
2227        int reqs, ret;
2228
2229        /* FIXME: Hardcode number of outstanding requests for now */
2230        reqs = 32;
2231        if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
2232                reqs = 1;
2233        reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
2234
2235        /* Only allow access to user-accessible pages */
2236        ret = pci_enable_pasid(pdev, 0);
2237        if (ret)
2238                goto out_err;
2239
2240        /* First reset the PRI state of the device */
2241        ret = pci_reset_pri(pdev);
2242        if (ret)
2243                goto out_err;
2244
2245        /* Enable PRI */
2246        ret = pci_enable_pri(pdev, reqs);
2247        if (ret)
2248                goto out_err;
2249
2250        if (reset_enable) {
2251                ret = pri_reset_while_enabled(pdev);
2252                if (ret)
2253                        goto out_err;
2254        }
2255
2256        ret = pci_enable_ats(pdev, PAGE_SHIFT);
2257        if (ret)
2258                goto out_err;
2259
2260        return 0;
2261
2262out_err:
2263        pci_disable_pri(pdev);
2264        pci_disable_pasid(pdev);
2265
2266        return ret;
2267}
2268
2269/* FIXME: Move this to PCI code */
2270#define PCI_PRI_TLP_OFF         (1 << 15)
2271
2272static bool pci_pri_tlp_required(struct pci_dev *pdev)
2273{
2274        u16 status;
2275        int pos;
2276
2277        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2278        if (!pos)
2279                return false;
2280
2281        pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
2282
2283        return (status & PCI_PRI_TLP_OFF) ? true : false;
2284}
2285
2286/*
2287 * If a device is not yet associated with a domain, this function
2288 * assigns it to the domain and makes it visible to the hardware
2289 */
2290static int attach_device(struct device *dev,
2291                         struct protection_domain *domain)
2292{
2293        struct pci_dev *pdev = to_pci_dev(dev);
2294        struct iommu_dev_data *dev_data;
2295        unsigned long flags;
2296        int ret;
2297
2298        dev_data = get_dev_data(dev);
2299
2300        if (domain->flags & PD_IOMMUV2_MASK) {
2301                if (!dev_data->iommu_v2 || !dev_data->passthrough)
2302                        return -EINVAL;
2303
2304                if (pdev_iommuv2_enable(pdev) != 0)
2305                        return -EINVAL;
2306
2307                dev_data->ats.enabled = true;
2308                dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2309                dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
2310        } else if (amd_iommu_iotlb_sup &&
2311                   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
2312                dev_data->ats.enabled = true;
2313                dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2314        }
2315
2316        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2317        ret = __attach_device(dev_data, domain);
2318        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2319
2320        /*
2321         * We might boot into a crash kernel here: the crashed kernel
2322         * may have left the IOMMU caches dirty, so we have to flush
2323         * here to evict all stale entries.
2324         */
2325        domain_flush_tlb_pde(domain);
2326
2327        return ret;
2328}
2329
2330/*
2331 * Removes a device from a protection domain (unlocked)
2332 */
2333static void __detach_device(struct iommu_dev_data *dev_data)
2334{
2335        struct protection_domain *domain;
2336        unsigned long flags;
2337
2338        BUG_ON(!dev_data->domain);
2339
2340        domain = dev_data->domain;
2341
2342        spin_lock_irqsave(&domain->lock, flags);
2343
2344        if (dev_data->alias_data != NULL) {
2345                struct iommu_dev_data *alias_data = dev_data->alias_data;
2346
2347                if (atomic_dec_and_test(&alias_data->bind))
2348                        do_detach(alias_data);
2349        }
2350
2351        if (atomic_dec_and_test(&dev_data->bind))
2352                do_detach(dev_data);
2353
2354        spin_unlock_irqrestore(&domain->lock, flags);
2355
2356        /*
2357         * If we run in passthrough mode, the device must be assigned to the
2358         * passthrough domain whenever it is detached from any other domain.
2359         * Make sure we can still deassign from the pt_domain itself.
2360         */
2361        if (dev_data->passthrough &&
2362            (dev_data->domain == NULL && domain != pt_domain))
2363                __attach_device(dev_data, pt_domain);
2364}
2365
2366/*
2367 * Removes a device from a protection domain (with devtable_lock held)
2368 */
2369static void detach_device(struct device *dev)
2370{
2371        struct protection_domain *domain;
2372        struct iommu_dev_data *dev_data;
2373        unsigned long flags;
2374
2375        dev_data = get_dev_data(dev);
2376        domain   = dev_data->domain;
2377
2378        /* lock device table */
2379        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2380        __detach_device(dev_data);
2381        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2382
2383        if (domain->flags & PD_IOMMUV2_MASK)
2384                pdev_iommuv2_disable(to_pci_dev(dev));
2385        else if (dev_data->ats.enabled)
2386                pci_disable_ats(to_pci_dev(dev));
2387
2388        dev_data->ats.enabled = false;
2389}
2390
2391/*
2392 * Find out the protection domain structure for a given PCI device. This
2393 * will give us the pointer to the page table root for example.
2394 */
2395static struct protection_domain *domain_for_device(struct device *dev)
2396{
2397        struct iommu_dev_data *dev_data;
2398        struct protection_domain *dom = NULL;
2399        unsigned long flags;
2400
2401        dev_data   = get_dev_data(dev);
2402
2403        if (dev_data->domain)
2404                return dev_data->domain;
2405
2406        if (dev_data->alias_data != NULL) {
2407                struct iommu_dev_data *alias_data = dev_data->alias_data;
2408
2409                read_lock_irqsave(&amd_iommu_devtable_lock, flags);
2410                if (alias_data->domain != NULL) {
2411                        __attach_device(dev_data, alias_data->domain);
2412                        dom = alias_data->domain;
2413                }
2414                read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2415        }
2416
2417        return dom;
2418}
2419
2420static int device_change_notifier(struct notifier_block *nb,
2421                                  unsigned long action, void *data)
2422{
2423        struct dma_ops_domain *dma_domain;
2424        struct protection_domain *domain;
2425        struct iommu_dev_data *dev_data;
2426        struct device *dev = data;
2427        struct amd_iommu *iommu;
2428        unsigned long flags;
2429        u16 devid;
2430
2431        if (!check_device(dev))
2432                return 0;
2433
2434        devid    = get_device_id(dev);
2435        iommu    = amd_iommu_rlookup_table[devid];
2436        dev_data = get_dev_data(dev);
2437
2438        switch (action) {
2439        case BUS_NOTIFY_UNBOUND_DRIVER:
2440
2441                domain = domain_for_device(dev);
2442
2443                if (!domain)
2444                        goto out;
2445                if (dev_data->passthrough)
2446                        break;
2447                detach_device(dev);
2448                break;
2449        case BUS_NOTIFY_ADD_DEVICE:
2450
2451                iommu_init_device(dev);
2452
2453                /*
2454                 * The dev_data fetched above was still NULL; it has just
2455                 * been initialized by iommu_init_device, so fetch it again
2456                 */
2457                dev_data = get_dev_data(dev);
2458
2459                if (iommu_pass_through || dev_data->iommu_v2) {
2460                        dev_data->passthrough = true;
2461                        attach_device(dev, pt_domain);
2462                        break;
2463                }
2464
2465                domain = domain_for_device(dev);
2466
2467                /* allocate a protection domain if a device is added */
2468                dma_domain = find_protection_domain(devid);
2469                if (dma_domain)
2470                        goto out;
2471                dma_domain = dma_ops_domain_alloc();
2472                if (!dma_domain)
2473                        goto out;
2474                dma_domain->target_dev = devid;
2475
2476                spin_lock_irqsave(&iommu_pd_list_lock, flags);
2477                list_add_tail(&dma_domain->list, &iommu_pd_list);
2478                spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
2479
2480                dev_data = get_dev_data(dev);
2481
2482                dev->archdata.dma_ops = &amd_iommu_dma_ops;
2483
2484                break;
2485        case BUS_NOTIFY_DEL_DEVICE:
2486
2487                iommu_uninit_device(dev);
2488
2489        default:
2490                goto out;
2491        }
2492
2493        iommu_completion_wait(iommu);
2494
2495out:
2496        return 0;
2497}
2498
2499static struct notifier_block device_nb = {
2500        .notifier_call = device_change_notifier,
2501};
2502
2503void amd_iommu_init_notifier(void)
2504{
2505        bus_register_notifier(&pci_bus_type, &device_nb);
2506}
2507
2508/*****************************************************************************
2509 *
2510 * The next functions belong to the dma_ops mapping/unmapping code.
2511 *
2512 *****************************************************************************/
2513
2514/*
2515 * In the dma_ops path we only have the struct device. This function
2516 * finds the corresponding IOMMU, the protection domain and the
2517 * requestor id for a given device.
2518 * If the device is not yet associated with a domain, this function
2519 * takes care of binding it as well.
2520 */
2521static struct protection_domain *get_domain(struct device *dev)
2522{
2523        struct protection_domain *domain;
2524        struct dma_ops_domain *dma_dom;
2525        u16 devid = get_device_id(dev);
2526
2527        if (!check_device(dev))
2528                return ERR_PTR(-EINVAL);
2529
2530        domain = domain_for_device(dev);
2531        if (domain != NULL && !dma_ops_domain(domain))
2532                return ERR_PTR(-EBUSY);
2533
2534        if (domain != NULL)
2535                return domain;
2536
2537        /* Device not bound yet - bind it */
2538        dma_dom = find_protection_domain(devid);
2539        if (!dma_dom)
2540                dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
2541        attach_device(dev, &dma_dom->domain);
2542        DUMP_printk("Using protection domain %d for device %s\n",
2543                    dma_dom->domain.id, dev_name(dev));
2544
2545        return &dma_dom->domain;
2546}
2547
2548static void update_device_table(struct protection_domain *domain)
2549{
2550        struct iommu_dev_data *dev_data;
2551
2552        list_for_each_entry(dev_data, &domain->dev_list, list)
2553                set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
2554}
2555
2556static void update_domain(struct protection_domain *domain)
2557{
2558        if (!domain->updated)
2559                return;
2560
2561        update_device_table(domain);
2562
2563        domain_flush_devices(domain);
2564        domain_flush_tlb_pde(domain);
2565
2566        domain->updated = false;
2567}
2568
2569/*
2570 * This function fetches the PTE for a given address in the aperture
2571 */
2572static u64 *dma_ops_get_pte(struct dma_ops_domain *dom,
2573                            unsigned long address)
2574{
2575        struct aperture_range *aperture;
2576        u64 *pte, *pte_page;
2577
2578        aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2579        if (!aperture)
2580                return NULL;
2581
2582        pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2583        if (!pte) {
2584                pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
2585                                GFP_ATOMIC);
2586                aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
2587        } else
2588                pte += PM_LEVEL_INDEX(0, address);
2589
2590        update_domain(&dom->domain);
2591
2592        return pte;
2593}
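
/*
 * Sketch of the lookup above for a concrete address, assuming the
 * APERTURE_* macros select the 128 MB range and the 2 MB PTE page set
 * up in alloc_new_range(). For address == 0x08203000:
 *
 *	range index: address >> 27		= 1
 *	pte page:    (address >> 21) & 0x3f	= 1
 *	pte slot:    (address >> 12) & 0x1ff	= 3
 *
 * so the PTE lives in slot 3 of pte_pages[1] of aperture range 1.
 */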
2594
2595/*
2596 * This is the generic map function. It maps one 4 KiB page at paddr to
2597 * the given address in the DMA address space for the domain.
2598 */
2599static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
2600                                     unsigned long address,
2601                                     phys_addr_t paddr,
2602                                     int direction)
2603{
2604        u64 *pte, __pte;
2605
2606        WARN_ON(address > dom->aperture_size);
2607
2608        paddr &= PAGE_MASK;
2609
2610        pte  = dma_ops_get_pte(dom, address);
2611        if (!pte)
2612                return DMA_ERROR_CODE;
2613
2614        __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
2615
2616        if (direction == DMA_TO_DEVICE)
2617                __pte |= IOMMU_PTE_IR;
2618        else if (direction == DMA_FROM_DEVICE)
2619                __pte |= IOMMU_PTE_IW;
2620        else if (direction == DMA_BIDIRECTIONAL)
2621                __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
2622
2623        WARN_ON(*pte);
2624
2625        *pte = __pte;
2626
2627        return (dma_addr_t)address;
2628}
2629
2630/*
2631 * The generic unmapping function for one page in the DMA address space.
2632 */
2633static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
2634                                 unsigned long address)
2635{
2636        struct aperture_range *aperture;
2637        u64 *pte;
2638
2639        if (address >= dom->aperture_size)
2640                return;
2641
2642        aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2643        if (!aperture)
2644                return;
2645
2646        pte  = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2647        if (!pte)
2648                return;
2649
2650        pte += PM_LEVEL_INDEX(0, address);
2651
2652        WARN_ON(!*pte);
2653
2654        *pte = 0ULL;
2655}
2656
2657/*
2658 * This function contains common code for mapping a physically
2659 * contiguous memory region into DMA address space. It is used by all
2660 * mapping functions provided with this IOMMU driver.
2661 * Must be called with the domain lock held.
2662 */
2663static dma_addr_t __map_single(struct device *dev,
2664                               struct dma_ops_domain *dma_dom,
2665                               phys_addr_t paddr,
2666                               size_t size,
2667                               int dir,
2668                               bool align,
2669                               u64 dma_mask)
2670{
2671        dma_addr_t offset = paddr & ~PAGE_MASK;
2672        dma_addr_t address, start, ret;
2673        unsigned int pages;
2674        unsigned long align_mask = 0;
2675        int i;
2676
2677        pages = iommu_num_pages(paddr, size, PAGE_SIZE);
2678        paddr &= PAGE_MASK;
2679
2680        INC_STATS_COUNTER(total_map_requests);
2681
2682        if (pages > 1)
2683                INC_STATS_COUNTER(cross_page);
2684
2685        if (align)
2686                align_mask = (1UL << get_order(size)) - 1;
2687
2688retry:
2689        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
2690                                          dma_mask);
2691        if (unlikely(address == DMA_ERROR_CODE)) {
2692                /*
2693                 * Setting next_address here will let the address
2694                 * allocator only scan the newly allocated range in the
2695                 * first run. This is a small optimization.
2696                 */
2697                dma_dom->next_address = dma_dom->aperture_size;
2698
2699                if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
2700                        goto out;
2701
2702                /*
2703                 * aperture was successfully enlarged by 128 MB, try
2704                 * allocation again
2705                 */
2706                goto retry;
2707        }
2708
2709        start = address;
2710        for (i = 0; i < pages; ++i) {
2711                ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
2712                if (ret == DMA_ERROR_CODE)
2713                        goto out_unmap;
2714
2715                paddr += PAGE_SIZE;
2716                start += PAGE_SIZE;
2717        }
2718        address += offset;
2719
2720        ADD_STATS_COUNTER(alloced_io_mem, size);
2721
2722        if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
2723                domain_flush_tlb(&dma_dom->domain);
2724                dma_dom->need_flush = false;
2725        } else if (unlikely(amd_iommu_np_cache))
2726                domain_flush_pages(&dma_dom->domain, address, size);
2727
2728out:
2729        return address;
2730
2731out_unmap:
2732
2733        for (--i; i >= 0; --i) {
2734                start -= PAGE_SIZE;
2735                dma_ops_domain_unmap(dma_dom, start);
2736        }
2737
2738        dma_ops_free_addresses(dma_dom, address, pages);
2739
2740        return DMA_ERROR_CODE;
2741}
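
/*
 * Worked example for the offset/pages handling above: mapping
 * paddr == 0x12345678 with size == 0x1000 gives
 *
 *	offset = paddr & ~PAGE_MASK;			= 0x678
 *	pages  = iommu_num_pages(paddr, size, 4096);	= 2
 *
 * because the region straddles a page boundary. The returned DMA
 * address is the start of the first IOMMU page plus the 0x678 offset.
 */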
2742
2743/*
2744 * Does the reverse of the __map_single function. Must be called with
2745 * the domain lock held, too.
2746 */
2747static void __unmap_single(struct dma_ops_domain *dma_dom,
2748                           dma_addr_t dma_addr,
2749                           size_t size,
2750                           int dir)
2751{
2752        dma_addr_t flush_addr;
2753        dma_addr_t i, start;
2754        unsigned int pages;
2755
2756        if ((dma_addr == DMA_ERROR_CODE) ||
2757            (dma_addr + size > dma_dom->aperture_size))
2758                return;
2759
2760        flush_addr = dma_addr;
2761        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
2762        dma_addr &= PAGE_MASK;
2763        start = dma_addr;
2764
2765        for (i = 0; i < pages; ++i) {
2766                dma_ops_domain_unmap(dma_dom, start);
2767                start += PAGE_SIZE;
2768        }
2769
2770        SUB_STATS_COUNTER(alloced_io_mem, size);
2771
2772        dma_ops_free_addresses(dma_dom, dma_addr, pages);
2773
2774        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
2775                domain_flush_pages(&dma_dom->domain, flush_addr, size);
2776                dma_dom->need_flush = false;
2777        }
2778}
2779
2780/*
2781 * The exported map_single function for dma_ops.
2782 */
2783static dma_addr_t map_page(struct device *dev, struct page *page,
2784                           unsigned long offset, size_t size,
2785                           enum dma_data_direction dir,
2786                           struct dma_attrs *attrs)
2787{
2788        unsigned long flags;
2789        struct protection_domain *domain;
2790        dma_addr_t addr;
2791        u64 dma_mask;
2792        phys_addr_t paddr = page_to_phys(page) + offset;
2793
2794        INC_STATS_COUNTER(cnt_map_single);
2795
2796        domain = get_domain(dev);
2797        if (PTR_ERR(domain) == -EINVAL)
2798                return (dma_addr_t)paddr;
2799        else if (IS_ERR(domain))
2800                return DMA_ERROR_CODE;
2801
2802        dma_mask = *dev->dma_mask;
2803
2804        spin_lock_irqsave(&domain->lock, flags);
2805
2806        addr = __map_single(dev, domain->priv, paddr, size, dir, false,
2807                            dma_mask);
2808        if (addr == DMA_ERROR_CODE)
2809                goto out;
2810
2811        domain_flush_complete(domain);
2812
2813out:
2814        spin_unlock_irqrestore(&domain->lock, flags);
2815
2816        return addr;
2817}
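
/*
 * A driver never calls map_page() directly; it arrives here through the
 * generic DMA API. A minimal (illustrative) caller looks like:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	 ... let the hardware DMA from 'handle' ...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 */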
2818
2819/*
2820 * The exported unmap_single function for dma_ops.
2821 */
2822static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
2823                       enum dma_data_direction dir, struct dma_attrs *attrs)
2824{
2825        unsigned long flags;
2826        struct protection_domain *domain;
2827
2828        INC_STATS_COUNTER(cnt_unmap_single);
2829
2830        domain = get_domain(dev);
2831        if (IS_ERR(domain))
2832                return;
2833
2834        spin_lock_irqsave(&domain->lock, flags);
2835
2836        __unmap_single(domain->priv, dma_addr, size, dir);
2837
2838        domain_flush_complete(domain);
2839
2840        spin_unlock_irqrestore(&domain->lock, flags);
2841}
2842
2843/*
2844 * This is a special map_sg function which is used to map a device
2845 * that is not handled by an AMD IOMMU in the system.
2846 */
2847static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
2848                           int nelems, int dir)
2849{
2850        struct scatterlist *s;
2851        int i;
2852
2853        for_each_sg(sglist, s, nelems, i) {
2854                s->dma_address = (dma_addr_t)sg_phys(s);
2855                s->dma_length  = s->length;
2856        }
2857
2858        return nelems;
2859}
2860
2861/*
2862 * The exported map_sg function for dma_ops (handles scatter-gather
2863 * lists).
2864 */
2865static int map_sg(struct device *dev, struct scatterlist *sglist,
2866                  int nelems, enum dma_data_direction dir,
2867                  struct dma_attrs *attrs)
2868{
2869        unsigned long flags;
2870        struct protection_domain *domain;
2871        int i;
2872        struct scatterlist *s;
2873        phys_addr_t paddr;
2874        int mapped_elems = 0;
2875        u64 dma_mask;
2876
2877        INC_STATS_COUNTER(cnt_map_sg);
2878
2879        domain = get_domain(dev);
2880        if (PTR_ERR(domain) == -EINVAL)
2881                return map_sg_no_iommu(dev, sglist, nelems, dir);
2882        else if (IS_ERR(domain))
2883                return 0;
2884
2885        dma_mask = *dev->dma_mask;
2886
2887        spin_lock_irqsave(&domain->lock, flags);
2888
2889        for_each_sg(sglist, s, nelems, i) {
2890                paddr = sg_phys(s);
2891
2892                s->dma_address = __map_single(dev, domain->priv,
2893                                              paddr, s->length, dir, false,
2894                                              dma_mask);
2895
2896                if (s->dma_address) {
2897                        s->dma_length = s->length;
2898                        mapped_elems++;
2899                } else
2900                        goto unmap;
2901        }
2902
2903        domain_flush_complete(domain);
2904
2905out:
2906        spin_unlock_irqrestore(&domain->lock, flags);
2907
2908        return mapped_elems;
2909unmap:
2910        for_each_sg(sglist, s, mapped_elems, i) {
2911                if (s->dma_address)
2912                        __unmap_single(domain->priv, s->dma_address,
2913                                       s->dma_length, dir);
2914                s->dma_address = s->dma_length = 0;
2915        }
2916
2917        mapped_elems = 0;
2918
2919        goto out;
2920}
2921
2922/*
2923 * The exported unmap_sg function for dma_ops (handles scatter-gather
2924 * lists).
2925 */
2926static void unmap_sg(struct device *dev, struct scatterlist *sglist,
2927                     int nelems, enum dma_data_direction dir,
2928                     struct dma_attrs *attrs)
2929{
2930        unsigned long flags;
2931        struct protection_domain *domain;
2932        struct scatterlist *s;
2933        int i;
2934
2935        INC_STATS_COUNTER(cnt_unmap_sg);
2936
2937        domain = get_domain(dev);
2938        if (IS_ERR(domain))
2939                return;
2940
2941        spin_lock_irqsave(&domain->lock, flags);
2942
2943        for_each_sg(sglist, s, nelems, i) {
2944                __unmap_single(domain->priv, s->dma_address,
2945                               s->dma_length, dir);
2946                s->dma_address = s->dma_length = 0;
2947        }
2948
2949        domain_flush_complete(domain);
2950
2951        spin_unlock_irqrestore(&domain->lock, flags);
2952}
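
/*
 * Driver-side view of the two sg functions above (illustrative sketch;
 * program_hw() is a hypothetical stand-in for whatever the device does
 * with the handles):
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */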
2953
2954/*
2955 * The exported alloc_coherent function for dma_ops.
2956 */
2957static void *alloc_coherent(struct device *dev, size_t size,
2958                            dma_addr_t *dma_addr, gfp_t flag,
2959                            struct dma_attrs *attrs)
2960{
2961        unsigned long flags;
2962        void *virt_addr;
2963        struct protection_domain *domain;
2964        phys_addr_t paddr;
2965        u64 dma_mask = dev->coherent_dma_mask;
2966
2967        INC_STATS_COUNTER(cnt_alloc_coherent);
2968
2969        domain = get_domain(dev);
2970        if (PTR_ERR(domain) == -EINVAL) {
2971                virt_addr = (void *)__get_free_pages(flag, get_order(size));
2972                *dma_addr = __pa(virt_addr);
2973                return virt_addr;
2974        } else if (IS_ERR(domain))
2975                return NULL;
2976
2977        dma_mask  = dev->coherent_dma_mask;
2978        flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2979        flag     |= __GFP_ZERO;
2980
2981        virt_addr = (void *)__get_free_pages(flag, get_order(size));
2982        if (!virt_addr)
2983                return NULL;
2984
2985        paddr = virt_to_phys(virt_addr);
2986
2987        if (!dma_mask)
2988                dma_mask = *dev->dma_mask;
2989
2990        spin_lock_irqsave(&domain->lock, flags);
2991
2992        *dma_addr = __map_single(dev, domain->priv, paddr,
2993                                 size, DMA_BIDIRECTIONAL, true, dma_mask);
2994
2995        if (*dma_addr == DMA_ERROR_CODE) {
2996                spin_unlock_irqrestore(&domain->lock, flags);
2997                goto out_free;
2998        }
2999
3000        domain_flush_complete(domain);
3001
3002        spin_unlock_irqrestore(&domain->lock, flags);
3003
3004        return virt_addr;
3005
3006out_free:
3007
3008        free_pages((unsigned long)virt_addr, get_order(size));
3009
3010        return NULL;
3011}
3012
3013/*
3014 * The exported free_coherent function for dma_ops.
3015 */
3016static void free_coherent(struct device *dev, size_t size,
3017                          void *virt_addr, dma_addr_t dma_addr,
3018                          struct dma_attrs *attrs)
3019{
3020        unsigned long flags;
3021        struct protection_domain *domain;
3022
3023        INC_STATS_COUNTER(cnt_free_coherent);
3024
3025        domain = get_domain(dev);
3026        if (IS_ERR(domain))
3027                goto free_mem;
3028
3029        spin_lock_irqsave(&domain->lock, flags);
3030
3031        __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
3032
3033        domain_flush_complete(domain);
3034
3035        spin_unlock_irqrestore(&domain->lock, flags);
3036
3037free_mem:
3038        free_pages((unsigned long)virt_addr, get_order(size));
3039}
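
/*
 * Illustrative usage through the DMA API (error handling trimmed):
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	 ...
 *	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
 */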
3040
3041/*
3042 * This function is called by the DMA layer to find out if we can handle a
3043 * particular device. It is part of the dma_ops.
3044 */
3045static int amd_iommu_dma_supported(struct device *dev, u64 mask)
3046{
3047        return check_device(dev);
3048}
3049
3050/*
3051 * The function for pre-allocating protection domains.
3052 *
3053 * Once the driver core informs the DMA layer when a driver grabs a
3054 * device, we won't need to preallocate the protection domains anymore;
3055 * for now we have to.
3056 */
3057static void __init prealloc_protection_domains(void)
3058{
3059        struct iommu_dev_data *dev_data;
3060        struct dma_ops_domain *dma_dom;
3061        struct pci_dev *dev = NULL;
3062        u16 devid;
3063
3064        for_each_pci_dev(dev) {
3065
3066                /* Do we handle this device? */
3067                if (!check_device(&dev->dev))
3068                        continue;
3069
3070                dev_data = get_dev_data(&dev->dev);
3071                if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
3072                        /* Make sure passthrough domain is allocated */
3073                        alloc_passthrough_domain();
3074                        dev_data->passthrough = true;
3075                        attach_device(&dev->dev, pt_domain);
3076                        pr_info("AMD-Vi: Using passthrough domain for device %s\n",
3077                                dev_name(&dev->dev));
3078                }
3079
3080                /* Is there already any domain for it? */
3081                if (domain_for_device(&dev->dev))
3082                        continue;
3083
3084                devid = get_device_id(&dev->dev);
3085
3086                dma_dom = dma_ops_domain_alloc();
3087                if (!dma_dom)
3088                        continue;
3089                init_unity_mappings_for_device(dma_dom, devid);
3090                dma_dom->target_dev = devid;
3091
3092                attach_device(&dev->dev, &dma_dom->domain);
3093
3094                list_add_tail(&dma_dom->list, &iommu_pd_list);
3095        }
3096}
3097
3098static struct dma_map_ops amd_iommu_dma_ops = {
3099        .alloc = alloc_coherent,
3100        .free = free_coherent,
3101        .map_page = map_page,
3102        .unmap_page = unmap_page,
3103        .map_sg = map_sg,
3104        .unmap_sg = unmap_sg,
3105        .dma_supported = amd_iommu_dma_supported,
3106};
3107
3108static unsigned device_dma_ops_init(void)
3109{
3110        struct iommu_dev_data *dev_data;
3111        struct pci_dev *pdev = NULL;
3112        unsigned unhandled = 0;
3113
3114        for_each_pci_dev(pdev) {
3115                if (!check_device(&pdev->dev)) {
3116
3117                        iommu_ignore_device(&pdev->dev);
3118
3119                        unhandled += 1;
3120                        continue;
3121                }
3122
3123                dev_data = get_dev_data(&pdev->dev);
3124
3125                if (!dev_data->passthrough)
3126                        pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
3127                else
3128                        pdev->dev.archdata.dma_ops = &nommu_dma_ops;
3129        }
3130
3131        return unhandled;
3132}
3133
3134/*
3135 * The function which glues the AMD IOMMU driver into the dma_ops interface.
3136 */
3137
3138void __init amd_iommu_init_api(void)
3139{
3140        bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
3141}
3142
3143int __init amd_iommu_init_dma_ops(void)
3144{
3145        struct amd_iommu *iommu;
3146        int ret, unhandled;
3147
3148        /*
3149         * First allocate a default protection domain for every IOMMU we
3150         * found in the system. Devices not assigned to any other
3151         * protection domain will be assigned to the default one.
3152         */
3153        for_each_iommu(iommu) {
3154                iommu->default_dom = dma_ops_domain_alloc();
3155                if (iommu->default_dom == NULL)
3156                        return -ENOMEM;
3157                iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
3158                ret = iommu_init_unity_mappings(iommu);
3159                if (ret)
3160                        goto free_domains;
3161        }
3162
3163        /*
3164         * Pre-allocate the protection domains for each device.
3165         */
3166        prealloc_protection_domains();
3167
3168        iommu_detected = 1;
3169        swiotlb = 0;
3170
3171        /* Make our dma_ops finally visible to the drivers */
3172        unhandled = device_dma_ops_init();
3173        if (unhandled && max_pfn > MAX_DMA32_PFN) {
3174                /* There are unhandled devices - initialize swiotlb for them */
3175                swiotlb = 1;
3176        }
3177
3178        amd_iommu_stats_init();
3179
3180        if (amd_iommu_unmap_flush)
3181                pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
3182        else
3183                pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
3184
3185        return 0;
3186
3187free_domains:
3188
3189        for_each_iommu(iommu) {
3190                if (iommu->default_dom)
3191                        dma_ops_domain_free(iommu->default_dom);
3192        }
3193
3194        return ret;
3195}
3196
3197/*****************************************************************************
3198 *
3199 * The following functions belong to the exported interface of AMD IOMMU
3200 *
3201 * This interface allows access to lower-level functions of the IOMMU
3202 * like protection domain handling and assignment of devices to domains,
3203 * which is not possible with the dma_ops interface.
3204 *
3205 *****************************************************************************/
3206
3207static void cleanup_domain(struct protection_domain *domain)
3208{
3209        struct iommu_dev_data *dev_data, *next;
3210        unsigned long flags;
3211
3212        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
3213
3214        list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
3215                __detach_device(dev_data);
3216                atomic_set(&dev_data->bind, 0);
3217        }
3218
3219        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
3220}
3221
3222static void protection_domain_free(struct protection_domain *domain)
3223{
3224        if (!domain)
3225                return;
3226
3227        del_domain_from_list(domain);
3228
3229        if (domain->id)
3230                domain_id_free(domain->id);
3231
3232        kfree(domain);
3233}
3234
3235static struct protection_domain *protection_domain_alloc(void)
3236{
3237        struct protection_domain *domain;
3238
3239        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
3240        if (!domain)
3241                return NULL;
3242
3243        spin_lock_init(&domain->lock);
3244        mutex_init(&domain->api_lock);
3245        domain->id = domain_id_alloc();
3246        if (!domain->id)
3247                goto out_err;
3248        INIT_LIST_HEAD(&domain->dev_list);
3249
3250        add_domain_to_list(domain);
3251
3252        return domain;
3253
3254out_err:
3255        kfree(domain);
3256
3257        return NULL;
3258}
3259
3260static int __init alloc_passthrough_domain(void)
3261{
3262        if (pt_domain != NULL)
3263                return 0;
3264
3265        /* allocate passthrough domain */
3266        pt_domain = protection_domain_alloc();
3267        if (!pt_domain)
3268                return -ENOMEM;
3269
3270        pt_domain->mode = PAGE_MODE_NONE;
3271
3272        return 0;
3273}

3274static int amd_iommu_domain_init(struct iommu_domain *dom)
3275{
3276        struct protection_domain *domain;
3277
3278        domain = protection_domain_alloc();
3279        if (!domain)
3280                goto out_free;
3281
3282        domain->mode    = PAGE_MODE_3_LEVEL;
3283        domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
3284        if (!domain->pt_root)
3285                goto out_free;
3286
3287        domain->iommu_domain = dom;
3288
3289        dom->priv = domain;
3290
3291        dom->geometry.aperture_start = 0;
3292        dom->geometry.aperture_end   = ~0ULL;
3293        dom->geometry.force_aperture = true;
3294
3295        return 0;
3296
3297out_free:
3298        protection_domain_free(domain);
3299
3300        return -ENOMEM;
3301}
3302
3303static void amd_iommu_domain_destroy(struct iommu_domain *dom)
3304{
3305        struct protection_domain *domain = dom->priv;
3306
3307        if (!domain)
3308                return;
3309
3310        if (domain->dev_cnt > 0)
3311                cleanup_domain(domain);
3312
3313        BUG_ON(domain->dev_cnt != 0);
3314
3315        if (domain->mode != PAGE_MODE_NONE)
3316                free_pagetable(domain);
3317
3318        if (domain->flags & PD_IOMMUV2_MASK)
3319                free_gcr3_table(domain);
3320
3321        protection_domain_free(domain);
3322
3323        dom->priv = NULL;
3324}
3325
3326static void amd_iommu_detach_device(struct iommu_domain *dom,
3327                                    struct device *dev)
3328{
3329        struct iommu_dev_data *dev_data = dev->archdata.iommu;
3330        struct amd_iommu *iommu;
3331        u16 devid;
3332
3333        if (!check_device(dev))
3334                return;
3335
3336        devid = get_device_id(dev);
3337
3338        if (dev_data->domain != NULL)
3339                detach_device(dev);
3340
3341        iommu = amd_iommu_rlookup_table[devid];
3342        if (!iommu)
3343                return;
3344
3345        iommu_completion_wait(iommu);
3346}
3347
3348static int amd_iommu_attach_device(struct iommu_domain *dom,
3349                                   struct device *dev)
3350{
3351        struct protection_domain *domain = dom->priv;
3352        struct iommu_dev_data *dev_data;
3353        struct amd_iommu *iommu;
3354        int ret;
3355
3356        if (!check_device(dev))
3357                return -EINVAL;
3358
3359        dev_data = dev->archdata.iommu;
3360
3361        iommu = amd_iommu_rlookup_table[dev_data->devid];
3362        if (!iommu)
3363                return -EINVAL;
3364
3365        if (dev_data->domain)
3366                detach_device(dev);
3367
3368        ret = attach_device(dev, domain);
3369
3370        iommu_completion_wait(iommu);
3371
3372        return ret;
3373}
3374
3375static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
3376                         phys_addr_t paddr, size_t page_size, int iommu_prot)
3377{
3378        struct protection_domain *domain = dom->priv;
3379        int prot = 0;
3380        int ret;
3381
3382        if (domain->mode == PAGE_MODE_NONE)
3383                return -EINVAL;
3384
3385        if (iommu_prot & IOMMU_READ)
3386                prot |= IOMMU_PROT_IR;
3387        if (iommu_prot & IOMMU_WRITE)
3388                prot |= IOMMU_PROT_IW;
3389
3390        mutex_lock(&domain->api_lock);
3391        ret = iommu_map_page(domain, iova, paddr, prot, page_size);
3392        mutex_unlock(&domain->api_lock);
3393
3394        return ret;
3395}
3396
3397static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
3398                           size_t page_size)
3399{
3400        struct protection_domain *domain = dom->priv;
3401        size_t unmap_size;
3402
3403        if (domain->mode == PAGE_MODE_NONE)
3404                return -EINVAL;
3405
3406        mutex_lock(&domain->api_lock);
3407        unmap_size = iommu_unmap_page(domain, iova, page_size);
3408        mutex_unlock(&domain->api_lock);
3409
3410        domain_flush_tlb_pde(domain);
3411
3412        return unmap_size;
3413}
3414
3415static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
3416                                          unsigned long iova)
3417{
3418        struct protection_domain *domain = dom->priv;
3419        unsigned long offset_mask;
3420        phys_addr_t paddr;
3421        u64 *pte, __pte;
3422
3423        if (domain->mode == PAGE_MODE_NONE)
3424                return iova;
3425
3426        pte = fetch_pte(domain, iova);
3427
3428        if (!pte || !IOMMU_PTE_PRESENT(*pte))
3429                return 0;
3430
3431        if (PM_PTE_LEVEL(*pte) == 0)
3432                offset_mask = PAGE_SIZE - 1;
3433        else
3434                offset_mask = PTE_PAGE_SIZE(*pte) - 1;
3435
3436        __pte = *pte & PM_ADDR_MASK;
3437        paddr = (__pte & ~offset_mask) | (iova & offset_mask);
3438
3439        return paddr;
3440}
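
/*
 * Worked example (editor's note, illustrative values): for a 2 MiB
 * large page, PM_PTE_LEVEL(*pte) != 0 and PTE_PAGE_SIZE(*pte) is
 * 0x200000, so offset_mask = 0x1FFFFF.  If the PTE points at
 * physical 0x80000000 and iova == 0x1234567:
 *
 *	paddr = (0x80000000 & ~0x1FFFFFUL)	//   0x80000000
 *	      | (0x1234567  &  0x1FFFFFUL);	// | 0x00034567
 *	// paddr == 0x80034567
 */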
3441
3442static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
3443                                    unsigned long cap)
3444{
3445        switch (cap) {
3446        case IOMMU_CAP_CACHE_COHERENCY:
3447                return 1;
3448        case IOMMU_CAP_INTR_REMAP:
3449                return irq_remapping_enabled;
3450        }
3451
3452        return 0;
3453}
3454
3455static struct iommu_ops amd_iommu_ops = {
3456        .domain_init = amd_iommu_domain_init,
3457        .domain_destroy = amd_iommu_domain_destroy,
3458        .attach_dev = amd_iommu_attach_device,
3459        .detach_dev = amd_iommu_detach_device,
3460        .map = amd_iommu_map,
3461        .unmap = amd_iommu_unmap,
3462        .iova_to_phys = amd_iommu_iova_to_phys,
3463        .domain_has_cap = amd_iommu_domain_has_cap,
3464        .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
3465};
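
/*
 * Usage sketch (editor's note): these callbacks are reached through
 * the generic IOMMU API once the ops are registered with the core,
 * never called directly.  A hypothetical consumer mapping a single
 * 4K page could look like:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, &pdev->dev)) {
 *		iommu_map(dom, iova, paddr, PAGE_SIZE,
 *			  IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap(dom, iova, PAGE_SIZE);
 *		iommu_detach_device(dom, &pdev->dev);
 *	}
 *	iommu_domain_free(dom);
 */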
3466
3467/*****************************************************************************
3468 *
3469 * The next functions do a basic initialization of IOMMU for pass through
3470 * mode
3471 *
3472 * In passthrough mode the IOMMU is initialized and enabled but not used for
3473 * DMA-API translation.
3474 *
3475 *****************************************************************************/
3476
3477int __init amd_iommu_init_passthrough(void)
3478{
3479        struct iommu_dev_data *dev_data;
3480        struct pci_dev *dev = NULL;
3481        struct amd_iommu *iommu;
3482        u16 devid;
3483        int ret;
3484
3485        ret = alloc_passthrough_domain();
3486        if (ret)
3487                return ret;
3488
3489        for_each_pci_dev(dev) {
3490                if (!check_device(&dev->dev))
3491                        continue;
3492
3493                dev_data = get_dev_data(&dev->dev);
3494                dev_data->passthrough = true;
3495
3496                devid = get_device_id(&dev->dev);
3497
3498                iommu = amd_iommu_rlookup_table[devid];
3499                if (!iommu)
3500                        continue;
3501
3502                attach_device(&dev->dev, pt_domain);
3503        }
3504
3505        amd_iommu_stats_init();
3506
3507        pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
3508
3509        return 0;
3510}
3511
3512/* IOMMUv2 specific functions */
3513int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3514{
3515        return atomic_notifier_chain_register(&ppr_notifier, nb);
3516}
3517EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
3518
3519int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
3520{
3521        return atomic_notifier_chain_unregister(&ppr_notifier, nb);
3522}
3523EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
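
/*
 * Usage sketch (editor's note): a consumer such as the amd_iommu_v2
 * driver registers a notifier_block on this chain to be called for
 * each Peripheral Page Request (PPR) log entry:
 *
 *	static int my_ppr_cb(struct notifier_block *nb,
 *			     unsigned long e, void *data)
 *	{
 *		return NOTIFY_OK;	// handle the fault described by *data
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_ppr_cb,
 *	};
 *
 *	amd_iommu_register_ppr_notifier(&my_nb);
 */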
3524
3525void amd_iommu_domain_direct_map(struct iommu_domain *dom)
3526{
3527        struct protection_domain *domain = dom->priv;
3528        unsigned long flags;
3529
3530        spin_lock_irqsave(&domain->lock, flags);
3531
3532        /* Update data structure */
3533        domain->mode    = PAGE_MODE_NONE;
3534        domain->updated = true;
3535
3536        /* Make changes visible to IOMMUs */
3537        update_domain(domain);
3538
3539        /* Page-table is not visible to IOMMU anymore, so free it */
3540        free_pagetable(domain);
3541
3542        spin_unlock_irqrestore(&domain->lock, flags);
3543}
3544EXPORT_SYMBOL(amd_iommu_domain_direct_map);
3545
3546int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
3547{
3548        struct protection_domain *domain = dom->priv;
3549        unsigned long flags;
3550        int levels, ret;
3551
3552        if (pasids <= 0 || pasids > (PASID_MASK + 1))
3553                return -EINVAL;
3554
3555        /* Number of GCR3 table levels required */
3556        for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
3557                levels += 1;
3558
3559        if (levels > amd_iommu_max_glx_val)
3560                return -EINVAL;
3561
3562        spin_lock_irqsave(&domain->lock, flags);
3563
3564        /*
3565         * Spare ourselves sanity-checking whether every device already
3566         * in the domain supports IOMMUv2: simply require that the domain
3567         * has no devices attached when it is switched into IOMMUv2 mode.
3568         */
3569        ret = -EBUSY;
3570        if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
3571                goto out;
3572
3573        ret = -ENOMEM;
3574        domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
3575        if (domain->gcr3_tbl == NULL)
3576                goto out;
3577
3578        domain->glx      = levels;
3579        domain->flags   |= PD_IOMMUV2_MASK;
3580        domain->updated  = true;
3581
3582        update_domain(domain);
3583
3584        ret = 0;
3585
3586out:
3587        spin_unlock_irqrestore(&domain->lock, flags);
3588
3589        return ret;
3590}
3591EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
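
/*
 * Worked example (editor's note): each GCR3 table level resolves 9
 * PASID bits, so the loop above counts the levels needed beyond the
 * leaf.  For pasids == 65536 (16-bit PASIDs):
 *
 *	(65536 - 1) & ~0x1ff == 0xFE00 != 0  ->  levels = 1, pasids >>= 9
 *	(  128 - 1) & ~0x1ff == 0            ->  loop ends
 *
 * so levels == 1, i.e. a two-level GCR3 table.
 */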
3592
3593static int __flush_pasid(struct protection_domain *domain, int pasid,
3594                         u64 address, bool size)
3595{
3596        struct iommu_dev_data *dev_data;
3597        struct iommu_cmd cmd;
3598        int i, ret;
3599
3600        if (!(domain->flags & PD_IOMMUV2_MASK))
3601                return -EINVAL;
3602
3603        build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
3604
3605        /*
3606         * IOMMU TLB needs to be flushed before Device TLB to
3607         * prevent device TLB refill from IOMMU TLB
3608         */
3609        for (i = 0; i < amd_iommus_present; ++i) {
3610                if (domain->dev_iommu[i] == 0)
3611                        continue;
3612
3613                ret = iommu_queue_command(amd_iommus[i], &cmd);
3614                if (ret != 0)
3615                        goto out;
3616        }
3617
3618        /* Wait until IOMMU TLB flushes are complete */
3619        domain_flush_complete(domain);
3620
3621        /* Now flush device TLBs */
3622        list_for_each_entry(dev_data, &domain->dev_list, list) {
3623                struct amd_iommu *iommu;
3624                int qdep;
3625
3626                BUG_ON(!dev_data->ats.enabled);
3627
3628                qdep  = dev_data->ats.qdep;
3629                iommu = amd_iommu_rlookup_table[dev_data->devid];
3630
3631                build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
3632                                      qdep, address, size);
3633
3634                ret = iommu_queue_command(iommu, &cmd);
3635                if (ret != 0)
3636                        goto out;
3637        }
3638
3639        /* Wait until all device TLBs are flushed */
3640        domain_flush_complete(domain);
3641
3642        ret = 0;
3643
3644out:
3645
3646        return ret;
3647}
3648
3649static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
3650                                  u64 address)
3651{
3652        INC_STATS_COUNTER(invalidate_iotlb);
3653
3654        return __flush_pasid(domain, pasid, address, false);
3655}
3656
3657int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
3658                         u64 address)
3659{
3660        struct protection_domain *domain = dom->priv;
3661        unsigned long flags;
3662        int ret;
3663
3664        spin_lock_irqsave(&domain->lock, flags);
3665        ret = __amd_iommu_flush_page(domain, pasid, address);
3666        spin_unlock_irqrestore(&domain->lock, flags);
3667
3668        return ret;
3669}
3670EXPORT_SYMBOL(amd_iommu_flush_page);
3671
3672static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
3673{
3674        INC_STATS_COUNTER(invalidate_iotlb_all);
3675
3676        return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
3677                             true);
3678}
3679
3680int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
3681{
3682        struct protection_domain *domain = dom->priv;
3683        unsigned long flags;
3684        int ret;
3685
3686        spin_lock_irqsave(&domain->lock, flags);
3687        ret = __amd_iommu_flush_tlb(domain, pasid);
3688        spin_unlock_irqrestore(&domain->lock, flags);
3689
3690        return ret;
3691}
3692EXPORT_SYMBOL(amd_iommu_flush_tlb);
3693
3694static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
3695{
3696        int index;
3697        u64 *pte;
3698
3699        while (true) {
3700
3701                index = (pasid >> (9 * level)) & 0x1ff;
3702                pte   = &root[index];
3703
3704                if (level == 0)
3705                        break;
3706
3707                if (!(*pte & GCR3_VALID)) {
3708                        if (!alloc)
3709                                return NULL;
3710
3711                        root = (void *)get_zeroed_page(GFP_ATOMIC);
3712                        if (root == NULL)
3713                                return NULL;
3714
3715                        *pte = __pa(root) | GCR3_VALID;
3716                }
3717
3718                root = __va(*pte & PAGE_MASK);
3719
3720                level -= 1;
3721        }
3722
3723        return pte;
3724}
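
/*
 * Worked example (editor's note): the walk above consumes 9 PASID
 * bits per level, top down.  With glx == 1 and pasid == 0x12345:
 *
 *	level 1: index = (0x12345 >> 9) & 0x1ff = 0x091	(next-level table)
 *	level 0: index =  0x12345       & 0x1ff = 0x145	(leaf GCR3 entry)
 */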
3725
3726static int __set_gcr3(struct protection_domain *domain, int pasid,
3727                      unsigned long cr3)
3728{
3729        u64 *pte;
3730
3731        if (domain->mode != PAGE_MODE_NONE)
3732                return -EINVAL;
3733
3734        pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
3735        if (pte == NULL)
3736                return -ENOMEM;
3737
3738        *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
3739
3740        return __amd_iommu_flush_tlb(domain, pasid);
3741}
3742
3743static int __clear_gcr3(struct protection_domain *domain, int pasid)
3744{
3745        u64 *pte;
3746
3747        if (domain->mode != PAGE_MODE_NONE)
3748                return -EINVAL;
3749
3750        pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
3751        if (pte == NULL)
3752                return 0;
3753
3754        *pte = 0;
3755
3756        return __amd_iommu_flush_tlb(domain, pasid);
3757}
3758
3759int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
3760                              unsigned long cr3)
3761{
3762        struct protection_domain *domain = dom->priv;
3763        unsigned long flags;
3764        int ret;
3765
3766        spin_lock_irqsave(&domain->lock, flags);
3767        ret = __set_gcr3(domain, pasid, cr3);
3768        spin_unlock_irqrestore(&domain->lock, flags);
3769
3770        return ret;
3771}
3772EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
3773
3774int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
3775{
3776        struct protection_domain *domain = dom->priv;
3777        unsigned long flags;
3778        int ret;
3779
3780        spin_lock_irqsave(&domain->lock, flags);
3781        ret = __clear_gcr3(domain, pasid);
3782        spin_unlock_irqrestore(&domain->lock, flags);
3783
3784        return ret;
3785}
3786EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
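
/*
 * Usage sketch (editor's note, simplified): binding a process address
 * space to a PASID means pointing the GCR3 entry at that process'
 * top-level page table, roughly what the amd_iommu_v2 driver does:
 *
 *	amd_iommu_domain_set_gcr3(dom, pasid, __pa(mm->pgd));
 *	...
 *	amd_iommu_domain_clear_gcr3(dom, pasid);
 */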
3787
3788int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
3789                           int status, int tag)
3790{
3791        struct iommu_dev_data *dev_data;
3792        struct amd_iommu *iommu;
3793        struct iommu_cmd cmd;
3794
3795        INC_STATS_COUNTER(complete_ppr);
3796
3797        dev_data = get_dev_data(&pdev->dev);
3798        iommu    = amd_iommu_rlookup_table[dev_data->devid];
3799
3800        build_complete_ppr(&cmd, dev_data->devid, pasid, status,
3801                           tag, dev_data->pri_tlp);
3802
3803        return iommu_queue_command(iommu, &cmd);
3804}
3805EXPORT_SYMBOL(amd_iommu_complete_ppr);
3806
3807struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
3808{
3809        struct protection_domain *domain;
3810
3811        domain = get_domain(&pdev->dev);
3812        if (IS_ERR(domain))
3813                return NULL;
3814
3815        /* Only return IOMMUv2 domains */
3816        if (!(domain->flags & PD_IOMMUV2_MASK))
3817                return NULL;
3818
3819        return domain->iommu_domain;
3820}
3821EXPORT_SYMBOL(amd_iommu_get_v2_domain);
3822
3823void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
3824{
3825        struct iommu_dev_data *dev_data;
3826
3827        if (!amd_iommu_v2_supported())
3828                return;
3829
3830        dev_data = get_dev_data(&pdev->dev);
3831        dev_data->errata |= (1 << erratum);
3832}
3833EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
3834
3835int amd_iommu_device_info(struct pci_dev *pdev,
3836                          struct amd_iommu_device_info *info)
3837{
3838        int max_pasids;
3839        int pos;
3840
3841        if (pdev == NULL || info == NULL)
3842                return -EINVAL;
3843
3844        if (!amd_iommu_v2_supported())
3845                return -EINVAL;
3846
3847        memset(info, 0, sizeof(*info));
3848
3849        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
3850        if (pos)
3851                info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
3852
3853        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
3854        if (pos)
3855                info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
3856
3857        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
3858        if (pos) {
3859                int features;
3860
3861                max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
3862                max_pasids = min(max_pasids, (1 << 20));
3863
3864                info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
3865                info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
3866
3867                features = pci_pasid_features(pdev);
3868                if (features & PCI_PASID_CAP_EXEC)
3869                        info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
3870                if (features & PCI_PASID_CAP_PRIV)
3871                        info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
3872        }
3873
3874        return 0;
3875}
3876EXPORT_SYMBOL(amd_iommu_device_info);
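
/*
 * Usage sketch (editor's note): callers typically probe a device's
 * IOMMUv2-related capabilities before trying to enable them:
 *
 *	struct amd_iommu_device_info info;
 *
 *	if (!amd_iommu_device_info(pdev, &info) &&
 *	    (info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP))
 *		pr_info("device supports %d PASIDs\n", info.max_pasids);
 */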
3877
3878#ifdef CONFIG_IRQ_REMAP
3879
3880/*****************************************************************************
3881 *
3882 * Interrupt Remapping Implementation
3883 *
3884 *****************************************************************************/
3885
3886union irte {
3887        u32 val;
3888        struct {
3889                u32 valid       : 1,
3890                    no_fault    : 1,
3891                    int_type    : 3,
3892                    rq_eoi      : 1,
3893                    dm          : 1,
3894                    rsvd_1      : 1,
3895                    destination : 8,
3896                    vector      : 8,
3897                    rsvd_2      : 8;
3898        } fields;
3899};
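
/*
 * Worked example (editor's note): with GCC's LSB-first bitfield layout
 * on x86, an IRTE with valid=1, fixed delivery (int_type=0), physical
 * destination APIC ID 0x01 and vector 0x40 packs as:
 *
 *	irte.val == (0x01 <<  0)	// valid
 *		  | (0x01 <<  8)	// destination
 *		  | (0x40 << 16);	// vector
 *	// irte.val == 0x00400101
 */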
3900
3901#define DTE_IRQ_PHYS_ADDR_MASK  (((1ULL << 45)-1) << 6)
3902#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
3903#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
3904#define DTE_IRQ_REMAP_ENABLE    1ULL
3905
3906static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
3907{
3908        u64 dte;
3909
3910        dte     = amd_iommu_dev_table[devid].data[2];
3911        dte     &= ~DTE_IRQ_PHYS_ADDR_MASK;
3912        dte     |= virt_to_phys(table->table);
3913        dte     |= DTE_IRQ_REMAP_INTCTL;
3914        dte     |= DTE_IRQ_TABLE_LEN;
3915        dte     |= DTE_IRQ_REMAP_ENABLE;
3916
3917        amd_iommu_dev_table[devid].data[2] = dte;
3918}
3919
3920#define IRTE_ALLOCATED (~1U)
3921
3922static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3923{
3924        struct irq_remap_table *table = NULL;
3925        struct amd_iommu *iommu;
3926        unsigned long flags;
3927        u16 alias;
3928
3929        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
3930
3931        iommu = amd_iommu_rlookup_table[devid];
3932        if (!iommu)
3933                goto out_unlock;
3934
3935        table = irq_lookup_table[devid];
3936        if (table)
3937                goto out;
3938
3939        alias = amd_iommu_alias_table[devid];
3940        table = irq_lookup_table[alias];
3941        if (table) {
3942                irq_lookup_table[devid] = table;
3943                set_dte_irq_entry(devid, table);
3944                iommu_flush_dte(iommu, devid);
3945                goto out;
3946        }
3947
3948        /* Nothing there yet, allocate new irq remapping table */
3949        table = kzalloc(sizeof(*table), GFP_ATOMIC);
3950        if (!table)
3951                goto out;
3952
3953        if (ioapic)
3954                /* Keep the first 32 indexes free for IOAPIC interrupts */
3955                table->min_index = 32;
3956
3957        table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
3958        if (!table->table) {
3959                kfree(table);
3960                table = NULL;
3961                goto out;
3962        }
3963
3964        memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
3965
3966        if (ioapic) {
3967                int i;
3968
3969                for (i = 0; i < 32; ++i)
3970                        table->table[i] = IRTE_ALLOCATED;
3971        }
3972
3973        irq_lookup_table[devid] = table;
3974        set_dte_irq_entry(devid, table);
3975        iommu_flush_dte(iommu, devid);
3976        if (devid != alias) {
3977                irq_lookup_table[alias] = table;
3978                set_dte_irq_entry(alias, table);
3979                iommu_flush_dte(iommu, alias);
3980        }
3981
3982out:
3983        iommu_completion_wait(iommu);
3984
3985out_unlock:
3986        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
3987
3988        return table;
3989}
3990
3991static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
3992{
3993        struct irq_remap_table *table;
3994        unsigned long flags;
3995        int index, c;
3996
3997        table = get_irq_table(devid, false);
3998        if (!table)
3999                return -ENODEV;
4000
4001        spin_lock_irqsave(&table->lock, flags);
4002
4003        /* Scan table for free entries */
4004        for (c = 0, index = table->min_index;
4005             index < MAX_IRQS_PER_TABLE;
4006             ++index) {
4007                if (table->table[index] == 0)
4008                        c += 1;
4009                else
4010                        c = 0;
4011
4012                if (c == count) {
4013                        struct irq_2_iommu *irte_info;
4014
4015                        for (; c != 0; --c)
4016                                table->table[index - c + 1] = IRTE_ALLOCATED;
4017
4018                        index -= count - 1;
4019
4020                        irte_info             = &cfg->irq_2_iommu;
4021                        irte_info->sub_handle = devid;
4022                        irte_info->irte_index = index;
4023                        irte_info->iommu      = (void *)cfg; /* non-NULL marks irq as remapped */
4024
4025                        goto out;
4026                }
4027        }
4028
4029        index = -ENOSPC;
4030
4031out:
4032        spin_unlock_irqrestore(&table->lock, flags);
4033
4034        return index;
4035}
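
/*
 * Example (editor's note): a request for count == 2 against an empty
 * table with min_index == 0 finds the free run at indexes 0 and 1,
 * marks both IRTE_ALLOCATED and returns 0; the caller then programs
 * the entries via modify_irte() at "index" and "index + 1".
 */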
4036
4037static int get_irte(u16 devid, int index, union irte *irte)
4038{
4039        struct irq_remap_table *table;
4040        unsigned long flags;
4041
4042        table = get_irq_table(devid, false);
4043        if (!table)
4044                return -ENOMEM;
4045
4046        spin_lock_irqsave(&table->lock, flags);
4047        irte->val = table->table[index];
4048        spin_unlock_irqrestore(&table->lock, flags);
4049
4050        return 0;
4051}
4052
4053static int modify_irte(u16 devid, int index, union irte irte)
4054{
4055        struct irq_remap_table *table;
4056        struct amd_iommu *iommu;
4057        unsigned long flags;
4058
4059        iommu = amd_iommu_rlookup_table[devid];
4060        if (iommu == NULL)
4061                return -EINVAL;
4062
4063        table = get_irq_table(devid, false);
4064        if (!table)
4065                return -ENOMEM;
4066
4067        spin_lock_irqsave(&table->lock, flags);
4068        table->table[index] = irte.val;
4069        spin_unlock_irqrestore(&table->lock, flags);
4070
4071        iommu_flush_irt(iommu, devid);
4072        iommu_completion_wait(iommu);
4073
4074        return 0;
4075}
4076
4077static void free_irte(u16 devid, int index)
4078{
4079        struct irq_remap_table *table;
4080        struct amd_iommu *iommu;
4081        unsigned long flags;
4082
4083        iommu = amd_iommu_rlookup_table[devid];
4084        if (iommu == NULL)
4085                return;
4086
4087        table = get_irq_table(devid, false);
4088        if (!table)
4089                return;
4090
4091        spin_lock_irqsave(&table->lock, flags);
4092        table->table[index] = 0;
4093        spin_unlock_irqrestore(&table->lock, flags);
4094
4095        iommu_flush_irt(iommu, devid);
4096        iommu_completion_wait(iommu);
4097}
4098
4099static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
4100                              unsigned int destination, int vector,
4101                              struct io_apic_irq_attr *attr)
4102{
4103        struct irq_remap_table *table;
4104        struct irq_2_iommu *irte_info;
4105        struct irq_cfg *cfg;
4106        union irte irte;
4107        int ioapic_id;
4108        int index;
4109        int devid;
4110        int ret;
4111
4112        cfg = irq_get_chip_data(irq);
4113        if (!cfg)
4114                return -EINVAL;
4115
4116        irte_info = &cfg->irq_2_iommu;
4117        ioapic_id = mpc_ioapic_id(attr->ioapic);
4118        devid     = get_ioapic_devid(ioapic_id);
4119
4120        if (devid < 0)
4121                return devid;
4122
4123        table = get_irq_table(devid, true);
4124        if (table == NULL)
4125                return -ENOMEM;
4126
4127        index = attr->ioapic_pin;
4128
4129        /* Setup IRQ remapping info */
4130        irte_info->sub_handle = devid;
4131        irte_info->irte_index = index;
4132        irte_info->iommu      = (void *)cfg;
4133
4134        /* Setup IRTE for IOMMU */
4135        irte.val                = 0;
4136        irte.fields.vector      = vector;
4137        irte.fields.int_type    = apic->irq_delivery_mode;
4138        irte.fields.destination = destination;
4139        irte.fields.dm          = apic->irq_dest_mode;
4140        irte.fields.valid       = 1;
4141
4142        ret = modify_irte(devid, index, irte);
4143        if (ret)
4144                return ret;
4145
4146        /* Setup IOAPIC entry */
4147        memset(entry, 0, sizeof(*entry));
4148
4149        entry->vector        = index; /* IRTE index, not a CPU vector */
4150        entry->mask          = 0;
4151        entry->trigger       = attr->trigger;
4152        entry->polarity      = attr->polarity;
4153
4154        /*
4155         * Mask level triggered irqs.
4156         */
4157        if (attr->trigger)
4158                entry->mask = 1;
4159
4160        return 0;
4161}
4162
4163static int set_affinity(struct irq_data *data, const struct cpumask *mask,
4164                        bool force)
4165{
4166        struct irq_2_iommu *irte_info;
4167        unsigned int dest, irq;
4168        struct irq_cfg *cfg;
4169        union irte irte;
4170        int err;
4171
4172        if (!config_enabled(CONFIG_SMP))
4173                return -1;
4174
4175        cfg       = data->chip_data;
4176        irq       = data->irq;
4177        irte_info = &cfg->irq_2_iommu;
4178
4179        if (!cpumask_intersects(mask, cpu_online_mask))
4180                return -EINVAL;
4181
4182        if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
4183                return -EBUSY;
4184
4185        if (assign_irq_vector(irq, cfg, mask))
4186                return -EBUSY;
4187
4188        err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
4189        if (err) {
4190                if (assign_irq_vector(irq, cfg, data->affinity))
4191                        pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
4192                return err;
4193        }
4194
4195        irte.fields.vector      = cfg->vector;
4196        irte.fields.destination = dest;
4197
4198        modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
4199
4200        if (cfg->move_in_progress)
4201                send_cleanup_vector(cfg);
4202
4203        cpumask_copy(data->affinity, mask);
4204
4205        return 0;
4206}
4207
4208static int free_irq(int irq)
4209{
4210        struct irq_2_iommu *irte_info;
4211        struct irq_cfg *cfg;
4212
4213        cfg = irq_get_chip_data(irq);
4214        if (!cfg)
4215                return -EINVAL;
4216
4217        irte_info = &cfg->irq_2_iommu;
4218
4219        free_irte(irte_info->sub_handle, irte_info->irte_index);
4220
4221        return 0;
4222}
4223
4224static void compose_msi_msg(struct pci_dev *pdev,
4225                            unsigned int irq, unsigned int dest,
4226                            struct msi_msg *msg, u8 hpet_id)
4227{
4228        struct irq_2_iommu *irte_info;
4229        struct irq_cfg *cfg;
4230        union irte irte;
4231
4232        cfg = irq_get_chip_data(irq);
4233        if (!cfg)
4234                return;
4235
4236        irte_info = &cfg->irq_2_iommu;
4237
4238        irte.val                = 0;
4239        irte.fields.vector      = cfg->vector;
4240        irte.fields.int_type    = apic->irq_delivery_mode;
4241        irte.fields.destination = dest;
4242        irte.fields.dm          = apic->irq_dest_mode;
4243        irte.fields.valid       = 1;
4244
4245        modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
4246
4247        msg->address_hi = MSI_ADDR_BASE_HI;
4248        msg->address_lo = MSI_ADDR_BASE_LO;
4249        msg->data       = irte_info->irte_index;
4250}
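
/*
 * Editor's note: with remapping enabled the MSI message no longer
 * encodes vector and destination directly; address_hi/address_lo
 * select the interrupt address range and msg->data carries the IRTE
 * index, from which the IOMMU looks up the real vector/destination
 * programmed via modify_irte() above.
 */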
4251
4252static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
4253{
4254        struct irq_cfg *cfg;
4255        int index;
4256        u16 devid;
4257
4258        if (!pdev)
4259                return -EINVAL;
4260
4261        cfg = irq_get_chip_data(irq);
4262        if (!cfg)
4263                return -EINVAL;
4264
4265        devid = get_device_id(&pdev->dev);
4266        index = alloc_irq_index(cfg, devid, nvec);
4267
4268        return index < 0 ? MAX_IRQS_PER_TABLE : index; /* out-of-range sentinel, see msi_setup_irq() */
4269}
4270
4271static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
4272                         int index, int offset)
4273{
4274        struct irq_2_iommu *irte_info;
4275        struct irq_cfg *cfg;
4276        u16 devid;
4277
4278        if (!pdev)
4279                return -EINVAL;
4280
4281        cfg = irq_get_chip_data(irq);
4282        if (!cfg)
4283                return -EINVAL;
4284
4285        if (index >= MAX_IRQS_PER_TABLE)
4286                return 0;
4287
4288        devid           = get_device_id(&pdev->dev);
4289        irte_info       = &cfg->irq_2_iommu;
4290
4291        irte_info->sub_handle = devid;
4292        irte_info->irte_index = index + offset;
4293        irte_info->iommu      = (void *)cfg;
4294
4295        return 0;
4296}
4297
4298static int setup_hpet_msi(unsigned int irq, unsigned int id)
4299{
4300        struct irq_2_iommu *irte_info;
4301        struct irq_cfg *cfg;
4302        int index, devid;
4303
4304        cfg = irq_get_chip_data(irq);
4305        if (!cfg)
4306                return -EINVAL;
4307
4308        irte_info = &cfg->irq_2_iommu;
4309        devid     = get_hpet_devid(id);
4310        if (devid < 0)
4311                return devid;
4312
4313        index = alloc_irq_index(cfg, devid, 1);
4314        if (index < 0)
4315                return index;
4316
4317        irte_info->sub_handle = devid;
4318        irte_info->irte_index = index;
4319        irte_info->iommu      = (void *)cfg;
4320
4321        return 0;
4322}
4323
4324struct irq_remap_ops amd_iommu_irq_ops = {
4325        .supported              = amd_iommu_supported,
4326        .prepare                = amd_iommu_prepare,
4327        .enable                 = amd_iommu_enable,
4328        .disable                = amd_iommu_disable,
4329        .reenable               = amd_iommu_reenable,
4330        .enable_faulting        = amd_iommu_enable_faulting,
4331        .setup_ioapic_entry     = setup_ioapic_entry,
4332        .set_affinity           = set_affinity,
4333        .free_irq               = free_irq,
4334        .compose_msi_msg        = compose_msi_msg,
4335        .msi_alloc_irq          = msi_alloc_irq,
4336        .msi_setup_irq          = msi_setup_irq,
4337        .setup_hpet_msi         = setup_hpet_msi,
4338};
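
/*
 * Editor's note: this ops structure is the hook into the generic x86
 * interrupt-remapping layer (drivers/iommu/irq_remapping.c), which
 * selects it when an AMD IOMMU with remapping support is present and
 * then routes IO-APIC, MSI and HPET setup through the callbacks above.
 */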
4339#endif
4340