linux/drivers/xen/swiotlb-xen.c
/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run on a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual ones and vice versa, and also
 * a mechanism to provide contiguous pages for device driver operations
 * (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static u64 start_dma_addr;

static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
        return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
        return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
        return xen_phys_to_bus(virt_to_phys(address));
}

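/*
 * Illustrative sketch, not part of the driver: these helpers just chain
 * the Xen PFN<->MFN lookup with the usual virt/phys conversion.  With a
 * hypothetical mapping pfn_to_mfn(0x1000) == 0x8000 (and 4 KiB pages), a
 * buffer at pseudo-physical address 0x1000000 ends up at machine (bus)
 * address 0x8000000:
 *
 *        xen_virt_to_bus(buf)
 *            == xen_phys_to_bus(virt_to_phys(buf))
 *            == phys_to_machine(XPADDR(0x1000000)).maddr == 0x8000000
 *
 * Only the composition matters; the numbers above are made up.
 */
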
static int check_pages_physically_contiguous(unsigned long pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_mfn;
        int i;
        int nr_pages;

        next_mfn = pfn_to_mfn(pfn);
        nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_mfn(++pfn) != ++next_mfn)
                        return 0;
        }
        return 1;
}

static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long pfn = PFN_DOWN(p);
        unsigned int offset = p & ~PAGE_MASK;

        if (offset + size <= PAGE_SIZE)
                return 0;
        if (check_pages_physically_contiguous(pfn, offset, size))
                return 0;
        return 1;
}

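/*
 * Example (hypothetical MFNs): a 512-byte buffer starting at offset 0xf00
 * of a 4 KiB page spills 256 bytes into the following page.  If
 * pfn_to_mfn(pfn) and pfn_to_mfn(pfn + 1) are not consecutive, that buffer
 * is contiguous in pseudo-physical space but not in machine space, so
 * range_straddles_page_boundary() returns 1 and the buffer must be bounced
 * (or exchanged for a machine-contiguous region).
 */
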
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
        unsigned long mfn = PFN_DOWN(dma_addr);
        unsigned long pfn = mfn_to_local_pfn(mfn);
        phys_addr_t paddr;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check address within our domain.
         */
        if (pfn_valid(pfn)) {
                paddr = PFN_PHYS(pfn);
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                (unsigned long)buf + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}
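
/*
 * Note on xen_swiotlb_fixup() above: the buffer is exchanged with Xen one
 * IO_TLB_SEGSIZE-slab segment at a time.  With the usual values from
 * lib/swiotlb (IO_TLB_SEGSIZE == 128, IO_TLB_SHIFT == 11) a segment is
 * 128 << 11 == 256 KiB, so dma_bits starts at 6 + PAGE_SHIFT == 18 on
 * 4 KiB pages.  It is not reset between segments, so once Xen refuses an
 * address width, later segments start probing from the already widened
 * value rather than from the bottom again.
 */
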
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}
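
/*
 * Worked example: with no override from swiotlb_nr_tbl() the default is
 * 64 MB of IO TLB, i.e. (64 << 20) >> IO_TLB_SHIFT == 32768 slabs of
 * 2 KiB each (already a multiple of IO_TLB_SEGSIZE == 128), and
 * xen_set_nslabs() returns 32768 << 11 == 64 MB as the byte size to
 * allocate.
 */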

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"
                       "You either: don't have the permissions, do not have"
                       " enough free memory under 4GB, or the hypervisor memory"
                       " is too fragmented!";
        default:
                break;
        }
        return "";
}

int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
        if (early)
                xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                                (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
        if (early) {
                swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                pr_info("Xen-SWIOTLB: Lowering to %luMB\n",
                        (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}
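
/*
 * Usage sketch (the real hook points live in the x86/Xen setup code, not
 * here): the early path runs before the page allocator is up, uses bootmem
 * and panics on failure; the late path may fail and return an errno:
 *
 *        xen_swiotlb_init(1, true);              - early boot
 *        rc = xen_swiotlb_init(1, false);        - late (e.g. module) init
 *
 * On allocation failure the buffer size is halved (with a 2 MB floor) and
 * retried up to three times before giving up.
 */
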
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        unsigned long vstart;
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
                return ret;

        vstart = __get_free_pages(flags, order);
        ret = (void *)vstart;

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = dma_alloc_coherent_mask(hwdev, flags);

        phys = virt_to_phys(ret);
        dev_addr = xen_phys_to_bus(phys);
        if (dev_addr + size - 1 <= dma_mask &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(vstart, order,
                                                 fls64(dma_mask)) != 0) {
                        free_pages(vstart, order);
                        return NULL;
                }
                *dma_handle = virt_to_machine(ret).maddr;
        }
        memset(ret, 0, size);
        return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
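
/*
 * Usage sketch (hypothetical device and size): a driver wired to these DMA
 * ops would allocate a descriptor ring roughly like
 *
 *        dma_addr_t bus;
 *        void *ring = xen_swiotlb_alloc_coherent(dev, PAGE_SIZE, &bus,
 *                                                GFP_KERNEL, NULL);
 *
 * and program "bus" into the hardware.  If the backing pages were not
 * machine-contiguous or sat above the device's coherent mask, they have
 * already been exchanged with Xen by the time this returns.
 */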

void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, struct dma_attrs *attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);

        if (dma_release_from_coherent(hwdev, order, vaddr))
                return;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        phys = virt_to_phys(vaddr);

        if (dev_addr + size - 1 > dma_mask ||
            range_straddles_page_boundary(phys, size))
                xen_destroy_contiguous_region((unsigned long)vaddr, order);

        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or one of the xen_swiotlb_sync_single_*
 * variants is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) && !swiotlb_force)
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;

        dev_addr = xen_phys_to_bus(map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (!dma_capable(dev, dev_addr, size)) {
                swiotlb_tbl_unmap_single(dev, map, size, dir);
                dev_addr = 0;
        }
        return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
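
/*
 * Note: when the bounce buffer itself turns out not to be reachable by the
 * device, xen_swiotlb_map_page() above hands back 0, which is what
 * xen_swiotlb_dma_mapping_error() below reports as a failed mapping
 * (DMA_ERROR_CODE is 0 on x86).
 */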

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with highmem pages, but we could
         * call dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is a no-op on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the
 * buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        enum dma_sync_target target)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);

                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
                                                                 start_dma_addr,
                                                                 paddr,
                                                                 sg->length,
                                                                 dir);
                        if (map == SWIOTLB_MAP_ERROR) {
                                /*
                                 * Don't panic here, we expect map_sg users
                                 * to do proper error handling.
                                 */
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                           attrs);
                                sgl[0].dma_length = 0;
                                return DMA_ERROR_CODE;
                        }
                        sg->dma_address = xen_phys_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
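
/*
 * Caller sketch (program_hw_desc is a made-up helper): after a successful
 * map, the device-visible address/length pairs are read back per entry:
 *
 *        int i, n = xen_swiotlb_map_sg_attrs(dev, sgl, nents,
 *                                            DMA_TO_DEVICE, NULL);
 *        struct scatterlist *sg;
 *
 *        for_each_sg(sgl, sg, n, i)
 *                program_hw_desc(sg_dma_address(sg), sg_dma_len(sg));
 *
 * A return of 0 means the mapping failed and sgl must not be handed to
 * the device.
 */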

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                    int nelems, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_sync_single(hwdev, sg->dma_address,
                                        sg->dma_length, dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
 592