linux/virt/kvm/iommu.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);

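/*
 * kvm_iommu_map_pages - pin a range of guest frames and map them into the
 * VM's IOMMU domain at their guest-physical addresses, read/write.  Frames
 * that already have a translation are skipped; on failure the pages mapped
 * so far are torn down again via kvm_iommu_put_pages().
 */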
int kvm_iommu_map_pages(struct kvm *kvm,
                        gfn_t base_gfn, unsigned long npages)
{
        gfn_t gfn = base_gfn;
        pfn_t pfn;
        int i, r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        for (i = 0; i < npages; i++) {
                /* check if already mapped */
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
                        continue;

                pfn = gfn_to_pfn(kvm, gfn);
                r = iommu_map_range(domain,
                                    gfn_to_gpa(gfn),
                                    pfn_to_hpa(pfn),
                                    PAGE_SIZE,
                                    IOMMU_READ | IOMMU_WRITE);
                if (r) {
                        printk(KERN_ERR "%s: iommu failed to map pfn=%lx\n",
                               __func__, pfn);
                        goto unmap_pages;
                }
                gfn++;
        }
        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, base_gfn, i);
        return r;
}

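/*
 * kvm_iommu_map_memslots - walk every memory slot and map its pages into
 * the IOMMU domain, stopping at the first failure.
 */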
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int i, r = 0;

        for (i = 0; i < kvm->nmemslots; i++) {
                r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn,
                                        kvm->memslots[i].npages);
                if (r)
                        break;
        }

        return r;
}

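/*
 * kvm_assign_device - attach an assigned PCI device to the VM's IOMMU
 * domain so that its DMA is translated through the guest-physical
 * mappings established above.
 */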
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct pci_dev *pdev = NULL;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int r;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        r = iommu_attach_device(domain, &pdev->dev);
        if (r) {
                printk(KERN_ERR "assign device %x:%x.%x failed\n",
                        pdev->bus->number,
                        PCI_SLOT(pdev->devfn),
                        PCI_FUNC(pdev->devfn));
                return r;
        }

        printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
}

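/*
 * kvm_deassign_device - detach an assigned PCI device from the VM's IOMMU
 * domain, removing it from the guest's translations.
 */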
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        struct pci_dev *pdev = NULL;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        iommu_detach_device(domain, &pdev->dev);

        printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n",
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
}

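/*
 * kvm_iommu_map_guest - set up IOMMU translation for a VM: check that a
 * hardware IOMMU is present, allocate a domain and map all current memory
 * slots into it.  On failure the partial mappings are undone.
 */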
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!iommu_found()) {
                printk(KERN_ERR "%s: iommu not found\n", __func__);
                return -ENODEV;
        }

        kvm->arch.iommu_domain = iommu_domain_alloc();
        if (!kvm->arch.iommu_domain)
                return -ENOMEM;

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                goto out_unmap;

        return 0;

out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}

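/*
 * kvm_iommu_put_pages - release the pinned pages backing an IOVA range and
 * remove the corresponding translations from the IOMMU domain.
 */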
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        gfn_t gfn = base_gfn;
        pfn_t pfn;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        unsigned long i;
        u64 phys;

        /* check if iommu exists and in use */
        if (!domain)
                return;

        for (i = 0; i < npages; i++) {
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
                pfn = phys >> PAGE_SHIFT;
                kvm_release_pfn_clean(pfn);
                gfn++;
        }

        iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
}

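/*
 * kvm_iommu_unmap_memslots - undo the IOMMU mappings for every memory slot.
 */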
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; i++) {
                kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
                                    kvm->memslots[i].npages);
        }

        return 0;
}

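/*
 * kvm_iommu_unmap_guest - tear down all memory slot mappings and free the
 * VM's IOMMU domain.
 */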
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        kvm_iommu_unmap_memslots(kvm);
        iommu_domain_free(domain);
        return 0;
}