linux/arch/powerpc/kernel/dma-iommu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/dma-direct.h>
#include <linux/pci.h>
#include <asm/iommu.h>

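/*
 * Helpers behind CONFIG_ARCH_HAS_DMA_MAP_DIRECT: when a device has a 1:1
 * window that does not cover all of memory, dev->bus_dma_limit is non-zero
 * and dma-direct asks, mapping by mapping, whether the IOMMU can be skipped.
 * can_map_direct() checks that the translated bus address still fits under
 * the device's bus_dma_limit; is_direct_handle() recognises handles produced
 * by the direct path (at or above archdata.dma_offset) so they are torn
 * down the same way they were set up.
 */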
#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
#define can_map_direct(dev, addr) \
        ((dev)->bus_dma_limit >= phys_to_dma((dev), (addr)))

bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr)
{
        if (likely(!dev->bus_dma_limit))
                return false;

        return can_map_direct(dev, addr);
}

#define is_direct_handle(dev, h) ((h) >= (dev)->archdata.dma_offset)

bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle)
{
        if (likely(!dev->bus_dma_limit))
                return false;

        return is_direct_handle(dev, dma_handle);
}

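/*
 * The scatterlist variants apply the same tests to every element: the whole
 * list goes direct only if each element's end address can be reached through
 * the direct window (map) or came from it (unmap).
 */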
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
                            int nents)
{
        struct scatterlist *s;
        int i;

        if (likely(!dev->bus_dma_limit))
                return false;

        for_each_sg(sg, s, nents, i) {
                if (!can_map_direct(dev, sg_phys(s) + s->offset + s->length))
                        return false;
        }

        return true;
}

bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
                              int nents)
{
        struct scatterlist *s;
        int i;

        if (likely(!dev->bus_dma_limit))
                return false;

        for_each_sg(sg, s, nents, i) {
                if (!is_direct_handle(dev, s->dma_address + s->length))
                        return false;
        }

        return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_MAP_DIRECT */

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
                                      unsigned long attrs)
{
        return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                                    dma_handle, dev->coherent_dma_mask, flag,
                                    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
                                    unsigned long attrs)
{
        iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     unsigned long attrs)
{
        return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
                              size, dma_get_mask(dev), direction, attrs);
}


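/* Tears down the TCEs created by dma_iommu_map_page(). */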
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                 size_t size, enum dma_data_direction direction,
                                 unsigned long attrs)
{
        iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
                         attrs);
}


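/*
 * Scatterlist mapping goes through ppc_iommu_map_sg(), which allocates TCEs
 * for each element and coalesces adjacent entries where the window allows.
 */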
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            unsigned long attrs)
{
        return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
                                dma_get_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction,
                unsigned long attrs)
{
        ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
                           direction, attrs);
}

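/*
 * Ask the PCI host bridge whether this device can bypass the IOMMU entirely
 * through a 1:1 mapping of all memory, unless the fixed mapping has been
 * forced weakly ordered (e.g. iommu_fixed=weak on platforms that support it).
 */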
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_controller *phb = pci_bus_to_host(pdev->bus);

        if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
                return false;
        return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);

        if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
                /*
                 * dma_iommu_bypass_supported() sets dev->bus_dma_limit when
                 * a 1:1 mapping exists but does not cover all of memory;
                 * ibm,pmemory is one example.
                 */
                dev->dma_ops_bypass = dev->bus_dma_limit == 0;
                if (!dev->dma_ops_bypass)
                        dev_warn(dev,
                                 "iommu: 64-bit OK but direct DMA is limited by %llx\n",
                                 dev->bus_dma_limit);
                else
                        dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
                return 1;
        }

        if (!tbl) {
                dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
                return 0;
        }

        if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
                dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
                dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
                                mask, tbl->it_offset << tbl->it_page_shift);
                return 0;
        }

        dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
        dev->dma_ops_bypass = false;
        return 1;
}

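/*
 * The returned mask must cover every bus address the translation window can
 * hand out, i.e. up to (it_offset + it_size) << it_page_shift.  The
 * computation below produces an all-ones mask that is
 * fls_long(it_offset + it_size) + it_page_shift bits wide: for example,
 * it_offset + it_size == 0x10000 with it_page_shift == 12 gives 0x1fffffff.
 */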
u64 dma_iommu_get_required_mask(struct device *dev)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);
        u64 mask;

        if (!tbl)
                return 0;

        mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
                        tbl->it_page_shift - 1);
        mask += mask - 1;

        return mask;
}

const struct dma_map_ops dma_iommu_ops = {
        .alloc                  = dma_iommu_alloc_coherent,
        .free                   = dma_iommu_free_coherent,
        .map_sg                 = dma_iommu_map_sg,
        .unmap_sg               = dma_iommu_unmap_sg,
        .dma_supported          = dma_iommu_dma_supported,
        .map_page               = dma_iommu_map_page,
        .unmap_page             = dma_iommu_unmap_page,
        .get_required_mask      = dma_iommu_get_required_mask,
        .mmap                   = dma_common_mmap,
        .get_sgtable            = dma_common_get_sgtable,
        .alloc_pages            = dma_common_alloc_pages,
        .free_pages             = dma_common_free_pages,
};

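/*
 * Rough usage sketch, not taken from this file: platform code that owns an
 * iommu_table typically attaches it to the device and then installs these
 * ops, roughly along the lines of
 *
 *      set_iommu_table_base(&pdev->dev, tbl);
 *      set_dma_ops(&pdev->dev, &dma_iommu_ops);
 *
 * after which dma_map_page()/dma_map_sg() on that device reach the
 * callbacks above.  The exact hookup differs per platform.
 */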