linux/drivers/iommu/omap-iovmm.c
/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

/*
 * IOVMF_FLAGS: attributes for an iommu virtual memory area (iovma)
 *
 * The lower 16 bits are used for h/w and the upper 16 bits for s/w.
 */
#define IOVMF_SW_SHIFT          16

/*
 * iovma: h/w flags derived from cam and ram attribute
 */
#define IOVMF_CAM_MASK          (~((1 << 10) - 1))
#define IOVMF_RAM_MASK          (~IOVMF_CAM_MASK)

#define IOVMF_PGSZ_MASK         (3 << 0)
#define IOVMF_PGSZ_1M           MMU_CAM_PGSZ_1M
#define IOVMF_PGSZ_64K          MMU_CAM_PGSZ_64K
#define IOVMF_PGSZ_4K           MMU_CAM_PGSZ_4K
#define IOVMF_PGSZ_16M          MMU_CAM_PGSZ_16M

#define IOVMF_ENDIAN_MASK       (1 << 9)
#define IOVMF_ENDIAN_BIG        MMU_RAM_ENDIAN_BIG

#define IOVMF_ELSZ_MASK         (3 << 7)
#define IOVMF_ELSZ_16           MMU_RAM_ELSZ_16
#define IOVMF_ELSZ_32           MMU_RAM_ELSZ_32
#define IOVMF_ELSZ_NONE         MMU_RAM_ELSZ_NONE

#define IOVMF_MIXED_MASK        (1 << 6)
#define IOVMF_MIXED             MMU_RAM_MIXED

/*
 * iovma: s/w flags, used internally for mapping and unmapping.
 */
#define IOVMF_MMIO              (1 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC             (2 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC_MASK        (3 << IOVMF_SW_SHIFT)

/* "superpages" are supported only with physically linear pages */
#define IOVMF_DISCONT           (1 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR            (2 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR_MASK       (3 << (2 + IOVMF_SW_SHIFT))

#define IOVMF_DA_FIXED          (1 << (4 + IOVMF_SW_SHIFT))

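/*
 * Illustrative only: a client might combine h/w and s/w attributes like
 * this when allocating a mapping (values here are hypothetical):
 *
 *	u32 flags = IOVMF_ENDIAN_BIG | IOVMF_ELSZ_32 | IOVMF_DA_FIXED;
 *	da = omap_iommu_vmalloc(domain, dev, da, SZ_1M, flags);
 *
 * IOVMF_DISCONT and IOVMF_ALLOC (or IOVMF_MMIO) are ORed in internally
 * by omap_iommu_vmalloc()/omap_iommu_vmap(), so callers need not set them.
 */
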
static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in a sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
        if (!sgt || !sgt->nents)
                return 0;

        return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg->length + sg->offset;

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%zu %u)\n",
                               __func__, i, bytes, sg->offset);
                        return 0;
                }

                if (i && sg->offset) {
                        pr_err("%s: sg[%d] offset not allowed in internal entries\n",
                               __func__, i);
                        return 0;
                }

                total += bytes;
        }

        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
        int i;
        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
                ;
        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
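
/*
 * Example (illustrative): max_alignment(0x00120000) returns SZ_64K, since
 * that address is 64KiB- but not 1MiB-aligned; max_alignment(0) returns
 * SZ_16M, the largest supported iommu page size.
 */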

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
        unsigned nr_entries = 0, ent_sz;

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08zx\n", __func__, bytes);
                return 0;
        }

        while (bytes) {
                ent_sz = max_alignment(da | pa);
                ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
                nr_entries++;

                da += ent_sz;
                pa += ent_sz;
                bytes -= ent_sz;
        }

        return nr_entries;
}
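
/*
 * Illustrative only: for a physically linear buffer of SZ_1M + SZ_4K bytes
 * with da == pa == 0, sgtable_nents() returns 2: one 1MiB superpage entry
 * followed by one 4KiB entry.
 */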

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
                                                        u32 da, u32 pa)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if (flags & IOVMF_LINEAR) {
                nr_entries = sgtable_nents(bytes, da, pa);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else {
                nr_entries = bytes / PAGE_SIZE;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%u entries)\n", __func__, sgt, nr_entries);

        return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        pr_debug("%s: sgt:%p\n", __func__, sgt);

        sg_free_table(sgt);
        kfree(sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg) - sg->offset;
                bytes = sg->length + sg->offset;

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                                (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
                                                        const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}

/**
 * omap_find_iovm_area  -  find iovma which includes @da
 * @dev:        client device
 * @da:         iommu device virtual address
 *
 * Find the existing iovma which includes @da
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (~flags & IOVMF_DA_FIXED) {
                /* Don't map address 0 */
                start = obj->da_start ? obj->da_start : alignment;

                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        } else if (start < obj->da_start || start > obj->da_end ||
                                        obj->da_end - start < bytes) {
                return ERR_PTR(-EINVAL);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if (prev_end > start)
                        break;

                if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
                        goto found;

                if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (obj->da_end - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /* keep the list of iovmas in ascending da order */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}
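
/*
 * Illustrative only: assume obj->da_start is 0x10000000 and two iovmas
 * already cover 0x10000000-0x10100000 and 0x10300000-0x10400000. A
 * non-IOVMF_DA_FIXED request for SZ_1M is placed first-fit at 0x10101000:
 * the candidate start is bumped to roundup(da_end + 1, alignment) past
 * each busy area until a hole of at least SZ_1M is found.
 */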

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @dev:        client device
 * @da:         iommu device virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given
 * iommu device virtual addr
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * This is not strictly necessary; it exists only for code
         * readability and symmetry with sgtable_fill_vmalloc().
         */
        BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
                        const struct sg_table *sgt, u32 flags)
{
        int err = -EINVAL;      /* in case the first entry has a bad page size */
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;

        if (!domain || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                size_t bytes;

                pa = sg_phys(sg) - sg->offset;
                bytes = sg->length + sg->offset;

                flags &= ~IOVMF_PGSZ_MASK;

                if (bytes_to_iopgsz(bytes) < 0)
                        goto err_out;

                pr_debug("%s: [%d] %08x %08x(%zx)\n", __func__,
                         i, da, pa, bytes);

                err = iommu_map(domain, da, pa, bytes, flags);
                if (err)
                        goto err_out;

                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = sg->length + sg->offset;

                /* ignore failures.. we're already handling one */
                iommu_unmap(domain, da, bytes);

                da += bytes;
        }
        return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
                                                struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;
        const struct sg_table *sgt = area->sgt;
        struct scatterlist *sg;
        unsigned int i;
        size_t unmapped;

        BUG_ON(!sgtable_ok(sgt));
        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg->length + sg->offset;

                unmapped = iommu_unmap(domain, start, bytes);
                if (unmapped < bytes)
                        break;

                dev_dbg(obj->dev, "%s: unmap %08x(%zx) %08x\n",
                                __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
                                      struct omap_iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(domain, obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
                                u32 da, const struct sg_table *sgt, void *va,
                                size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(domain, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%zx) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
                                u32 da, const struct sg_table *sgt,
                                void *va, size_t bytes, u32 flags)
{
        return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
 * @domain:     iommu domain
 * @dev:        client device
 * @da:         contiguous iommu virtual memory
 * @sgt:        address of scatter gather table
 * @flags:      iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
                const struct sg_table *sgt, u32 flags)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                /* don't add the sg offset to an error code */
                vunmap_sg(va);
                return da;
        }

        return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
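
/*
 * Illustrative usage sketch (my_build_sgt() is a hypothetical helper,
 * error handling abbreviated):
 *
 *	struct sg_table *sgt = my_build_sgt(dev);	// caller-provided
 *	u32 da = omap_iommu_vmap(domain, dev, 0, sgt, IOVMF_ENDIAN_BIG);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	sgt = omap_iommu_vunmap(domain, dev, da);	// returns the same sgt
 */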

/**
 * omap_iommu_vunmap  -  release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain:     iommu domain
 * @dev:        client device
 * @da:         iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
         * Just returns 'sgt' to the caller to free
         */
        da &= PAGE_MASK;
        sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
                                        IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @domain:     iommu domain
 * @dev:        client device
 * @da:         contiguous iommu virtual memory
 * @bytes:      allocation size
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping, and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
                                                size_t bytes, u32 flags)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;

        sgt = sgtable_alloc(bytes, flags, da, 0);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
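
/*
 * Illustrative usage sketch (hypothetical size, error handling abbreviated):
 *
 *	u32 da = omap_iommu_vmalloc(domain, dev, 0, SZ_256K, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	void *va = omap_da_to_va(dev, da);	// mpu-side view of the buffer
 *	...
 *	omap_iommu_vfree(domain, dev, da);	// also frees the backing pages
 */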

/**
 * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
 * @domain:     iommu domain
 * @dev:        client device
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
                                                                const u32 da)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        struct sg_table *sgt;

        sgt = unmap_vm_area(domain, obj, da, vfree,
                                                IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");