/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <mach/sysmmu.h>

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

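/*
 * The layout above essentially mirrors the ARMv7 short-descriptor format:
 * bits [1:0] of each entry encode the entry type and the upper bits hold a
 * physical address. A worked example of the decomposition (illustrative
 * values): mapping a 1MB section at physical address 0x40100000 produces
 * the lv1 entry 0x40100002 (mk_lv1ent_sect), and an iova of 0x00123456
 * splits into lv1ent_offset(iova) == 0x1 and section_offs(iova) == 0x23456,
 * so the translated address is 0x40123456.
 */
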
#define CTRL_ENABLE     0x5
#define CTRL_BLOCK      0x7
#define CTRL_DISABLE    0x0

#define REG_MMU_CTRL            0x000
#define REG_MMU_CFG             0x004
#define REG_MMU_STATUS          0x008
#define REG_MMU_FLUSH           0x00C
#define REG_MMU_FLUSH_ENTRY     0x010
#define REG_PT_BASE_ADDR        0x014
#define REG_INT_STATUS          0x018
#define REG_INT_CLEAR           0x01C

#define REG_PAGE_FAULT_ADDR     0x024
#define REG_AW_FAULT_ADDR       0x028
#define REG_AR_FAULT_ADDR       0x02C
#define REG_DEFAULT_SLAVE_ADDR  0x030

#define REG_MMU_VERSION         0x034

#define REG_PB0_SADDR           0x04C
#define REG_PB0_EADDR           0x050
#define REG_PB1_SADDR           0x054
#define REG_PB1_EADDR           0x058

/* Return the lv1 entry in @pgtable that covers @iova */
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
        return pgtable + lv1ent_offset(iova);
}

/* Return the lv2 entry for @iova in the lv2 table that lv1 entry @sent refers to */
static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
        return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
        SYSMMU_PAGEFAULT,
        SYSMMU_AR_MULTIHIT,
        SYSMMU_AW_MULTIHIT,
        SYSMMU_BUSERROR,
        SYSMMU_AR_SECURITY,
        SYSMMU_AR_ACCESS,
        SYSMMU_AW_SECURITY,
        SYSMMU_AW_PROTECTION, /* 7 */
        SYSMMU_FAULT_UNKNOWN,
        SYSMMU_FAULTS_NUM
};

/*
 * @itype: type of fault.
 * @pgtable_base: the physical address of the page table base. This is 0 if
 *                @itype is SYSMMU_BUSERROR.
 * @fault_addr: the device (virtual) address that the System MMU tried to
 *              translate. This is 0 if @itype is SYSMMU_BUSERROR.
 */
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
                        unsigned long pgtable_base, unsigned long fault_addr);

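/*
 * A minimal sketch of a driver-specific fault handler (hypothetical client
 * code, not part of this file). Returning 0 tells exynos_sysmmu_irq() that
 * the fault has been resolved, so the interrupt is cleared and the System
 * MMU unblocked; any other return value leaves the fault unhandled:
 *
 *	static int my_fault_handler(enum exynos_sysmmu_inttype itype,
 *			unsigned long pgtable_base, unsigned long fault_addr)
 *	{
 *		pr_err("sysmmu fault %d at %#lx (pgtable %#lx)\n",
 *				itype, fault_addr, pgtable_base);
 *		return 0;
 *	}
 *
 *	exynos_sysmmu_set_fault_handler(dev, my_fault_handler);
 */
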
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
        REG_PAGE_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AW_FAULT_ADDR,
        REG_DEFAULT_SLAVE_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AW_FAULT_ADDR,
        REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "PAGE FAULT",
        "AR MULTI-HIT FAULT",
        "AW MULTI-HIT FAULT",
        "BUS ERROR",
        "AR SECURITY PROTECTION FAULT",
        "AR ACCESS PROTECTION FAULT",
        "AW SECURITY PROTECTION FAULT",
        "AW ACCESS PROTECTION FAULT",
        "UNKNOWN FAULT"
};

struct exynos_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.node */
        unsigned long *pgtable; /* lv1 page table, 16KB */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
        struct list_head node; /* entry of exynos_iommu_domain.clients */
        struct device *sysmmu;  /* System MMU's device descriptor */
        struct device *dev;     /* Owner of system MMU */
        char *dbgname;
        int nsfrs;
        void __iomem **sfrbases;
        struct clk *clk[2];
        int activations;
        rwlock_t lock;
        struct iommu_domain *domain;
        sysmmu_fault_handler_t fault_handler;
        unsigned long pgtable;
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU was not active previously
           and it needs to be initialized */
        return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
        __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

/*
 * Blocking stalls the System MMU's translations while keeping it enabled;
 * TLB maintenance is only performed in the blocked state. Returns false if
 * the MMU did not reach the blocked state within the polling budget.
 */
static bool sysmmu_block(void __iomem *sfrbase)
{
        int i = 120;

        __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
        while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
                --i;

        if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
                sysmmu_unblock(sfrbase);
                return false;
        }

        return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
        __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
                                                unsigned long iova)
{
        __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
                                       unsigned long pgd)
{
        __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
        __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

        __sysmmu_tlb_invalidate(sfrbase);
}

static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
                                                unsigned long size, int idx)
{
        __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
        __raw_writel(size - 1 + base,  sfrbase + REG_PB0_EADDR + idx * 8);
}

static void __set_fault_handler(struct sysmmu_drvdata *data,
                                        sysmmu_fault_handler_t handler)
{
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);
        data->fault_handler = handler;
        write_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_set_fault_handler(struct device *dev,
                                        sysmmu_fault_handler_t handler)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        __set_fault_handler(data, handler);
}

static int default_fault_handler(enum exynos_sysmmu_inttype itype,
                     unsigned long pgtable_base, unsigned long fault_addr)
{
        unsigned long *ent;

        if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
                itype = SYSMMU_FAULT_UNKNOWN;

        pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
                        sysmmu_fault_name[itype], fault_addr, pgtable_base);

        ent = section_entry(__va(pgtable_base), fault_addr);
        pr_err("\tLv1 entry: 0x%lx\n", *ent);

        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
                pr_err("\tLv2 entry: 0x%lx\n", *ent);
        }

        pr_err("Generating Kernel OOPS since the fault is unrecoverable.\n");

        BUG();

        return 0;
}

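/*
 * Fault handling below proceeds in three steps: the faulting channel is
 * located by matching the IRQ number against the device's IRQ resources,
 * the fault type and address are read from that channel's registers, and
 * the fault is reported first through report_iommu_fault() and then, if
 * that returns -ENOSYS, through the driver-local fault handler. Only a
 * resolved fault is acknowledged via REG_INT_CLEAR before the System MMU
 * is unblocked.
 */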
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
        /* The System MMU enters the blocked state when a fault interrupt occurs. */
        struct sysmmu_drvdata *data = dev_id;
        struct resource *irqres;
        struct platform_device *pdev;
        enum exynos_sysmmu_inttype itype;
        unsigned long addr = -1;

        int i, ret = -ENOSYS;

        read_lock(&data->lock);

        WARN_ON(!is_sysmmu_active(data));

        pdev = to_platform_device(data->sysmmu);
        for (i = 0; i < (pdev->num_resources / 2); i++) {
                irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (irqres && ((int)irqres->start == irq))
                        break;
        }

        if (i == (pdev->num_resources / 2)) {
                itype = SYSMMU_FAULT_UNKNOWN;
        } else {
                itype = (enum exynos_sysmmu_inttype)
                        __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
                if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
                        itype = SYSMMU_FAULT_UNKNOWN;
                else
                        addr = __raw_readl(
                                data->sfrbases[i] + fault_reg_offset[itype]);
        }

        if (data->domain)
                ret = report_iommu_fault(data->domain, data->dev,
                                addr, itype);

        if ((ret == -ENOSYS) && data->fault_handler) {
                unsigned long base = data->pgtable;
                if (itype != SYSMMU_FAULT_UNKNOWN)
                        base = __raw_readl(
                                        data->sfrbases[i] + REG_PT_BASE_ADDR);
                ret = data->fault_handler(itype, base, addr);
        }

        if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
                __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
        else
                dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
                                data->dbgname, sysmmu_fault_name[itype]);

        if (itype != SYSMMU_FAULT_UNKNOWN)
                sysmmu_unblock(data->sfrbases[i]);

        read_unlock(&data->lock);

        return IRQ_HANDLED;
}

static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
        unsigned long flags;
        bool disabled = false;
        int i;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_inactive(data))
                goto finish;

        for (i = 0; i < data->nsfrs; i++)
                __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);

        if (data->clk[1])
                clk_disable(data->clk[1]);
        if (data->clk[0])
                clk_disable(data->clk[0]);

        disabled = true;
        data->pgtable = 0;
        data->domain = NULL;
finish:
        write_unlock_irqrestore(&data->lock, flags);

        if (disabled)
                dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
        else
                dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
                                        data->dbgname, data->activations);

        return disabled;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * Returns a negative error code if an error occurred and the System MMU was
 * not enabled, 0 if the System MMU has just been enabled, and 1 if it was
 * already enabled.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
                        unsigned long pgtable, struct iommu_domain *domain)
{
        int i, ret = 0;
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_active(data)) {
                if (WARN_ON(pgtable != data->pgtable)) {
                        ret = -EBUSY;
                        set_sysmmu_inactive(data);
                } else {
                        ret = 1;
                }

                dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
                goto finish;
        }

        if (data->clk[0])
                clk_enable(data->clk[0]);
        if (data->clk[1])
                clk_enable(data->clk[1]);

        data->pgtable = pgtable;

        for (i = 0; i < data->nsfrs; i++) {
                __sysmmu_set_ptbase(data->sfrbases[i], pgtable);

                if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
                        /* System MMU version is 3.x */
                        __raw_writel((1 << 12) | (2 << 28),
                                        data->sfrbases[i] + REG_MMU_CFG);
                        __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
                        __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
                }

                __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
        }

        data->domain = domain;

        dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
finish:
        write_unlock_irqrestore(&data->lock, flags);

        return ret;
}

int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        int ret;

        BUG_ON(!memblock_is_memory(pgtable));

        ret = pm_runtime_get_sync(data->sysmmu);
        if (ret < 0) {
                dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
                return ret;
        }

        ret = __exynos_sysmmu_enable(data, pgtable, NULL);
        if (WARN_ON(ret < 0)) {
                pm_runtime_put(data->sysmmu);
                dev_err(data->sysmmu,
                        "(%s) Already enabled with page table %#lx\n",
                        data->dbgname, data->pgtable);
        } else {
                data->dev = dev;
        }

        return ret;
}

static bool exynos_sysmmu_disable(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        bool disabled;

        disabled = __exynos_sysmmu_disable(data);
        pm_runtime_put(data->sysmmu);

        return disabled;
}

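/*
 * The activation count behaves as a refcount, so enable and disable calls
 * must be balanced. A sketch of the resulting semantics (hypothetical
 * values, derived from __exynos_sysmmu_enable() above):
 *
 *	exynos_sysmmu_enable(dev, pgd);   - returns 0, hardware programmed
 *	exynos_sysmmu_enable(dev, pgd);   - returns 1, activations == 2
 *	exynos_sysmmu_enable(dev, other); - returns -EBUSY, pgtable differs
 */
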
static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
{
        unsigned long flags;
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        if (is_sysmmu_active(data)) {
                int i;
                for (i = 0; i < data->nsfrs; i++) {
                        if (sysmmu_block(data->sfrbases[i])) {
                                __sysmmu_tlb_invalidate_entry(
                                                data->sfrbases[i], iova);
                                sysmmu_unblock(data->sfrbases[i]);
                        }
                }
        } else {
                dev_dbg(data->sysmmu,
                        "(%s) Disabled. Skipping TLB invalidation.\n",
                        data->dbgname);
        }

        read_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
        unsigned long flags;
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        if (is_sysmmu_active(data)) {
                int i;
                for (i = 0; i < data->nsfrs; i++) {
                        if (sysmmu_block(data->sfrbases[i])) {
                                __sysmmu_tlb_invalidate(data->sfrbases[i]);
                                sysmmu_unblock(data->sfrbases[i]);
                        }
                }
        } else {
                dev_dbg(data->sysmmu,
                        "(%s) Disabled. Skipping TLB invalidation.\n",
                        data->dbgname);
        }

        read_unlock_irqrestore(&data->lock, flags);
}

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
        int i, ret;
        struct device *dev;
        struct sysmmu_drvdata *data;

        dev = &pdev->dev;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = dev_set_drvdata(dev, data);
        if (ret) {
                dev_dbg(dev, "Unable to initialize driver data\n");
                goto err_init;
        }

        data->nsfrs = pdev->num_resources / 2;
        /* zeroed so that the error path below can skip unmapped entries */
        data->sfrbases = kzalloc(sizeof(*data->sfrbases) * data->nsfrs,
                                                                GFP_KERNEL);
        if (data->sfrbases == NULL) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_init;
        }

        for (i = 0; i < data->nsfrs; i++) {
                struct resource *res;
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res) {
                        dev_dbg(dev, "Unable to find IOMEM region\n");
                        ret = -ENOENT;
                        goto err_res;
                }

                data->sfrbases[i] = ioremap(res->start, resource_size(res));
                if (!data->sfrbases[i]) {
                        dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
                                                        res->start);
                        ret = -ENOENT;
                        goto err_res;
                }
        }

        for (i = 0; i < data->nsfrs; i++) {
                ret = platform_get_irq(pdev, i);
                if (ret <= 0) {
                        dev_dbg(dev, "Unable to find IRQ resource\n");
                        goto err_irq;
                }

                ret = request_irq(ret, exynos_sysmmu_irq, 0,
                                        dev_name(dev), data);
                if (ret) {
                        dev_dbg(dev, "Unable to register interrupt handler\n");
                        goto err_irq;
                }
        }

        if (dev_get_platdata(dev)) {
                char *deli, *beg;
                struct sysmmu_platform_data *platdata = dev_get_platdata(dev);

                beg = platdata->clockname;

                for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
                        /* NOTHING */;

                if (*deli == '\0')
                        deli = NULL;
                else
                        *deli = '\0';

                data->clk[0] = clk_get(dev, beg);
                if (IS_ERR(data->clk[0])) {
                        data->clk[0] = NULL;
                        dev_dbg(dev, "No clock descriptor registered\n");
                }

                if (data->clk[0] && deli) {
                        *deli = ',';
                        data->clk[1] = clk_get(dev, deli + 1);
                        if (IS_ERR(data->clk[1]))
                                data->clk[1] = NULL;
                }

                data->dbgname = platdata->dbgname;
        }

        data->sysmmu = dev;
        rwlock_init(&data->lock);
        INIT_LIST_HEAD(&data->node);

        __set_fault_handler(data, &default_fault_handler);

        if (dev->parent)
                pm_runtime_enable(dev);

        dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
        return 0;
err_irq:
        while (i-- > 0) {
                int irq;

                irq = platform_get_irq(pdev, i);
                free_irq(irq, data);
        }
err_res:
        while (data->nsfrs-- > 0) {
                if (data->sfrbases[data->nsfrs])
                        iounmap(data->sfrbases[data->nsfrs]);
        }
        kfree(data->sfrbases);
err_init:
        kfree(data);
err_alloc:
        dev_err(dev, "Failed to initialize\n");
        return ret;
}

static struct platform_driver exynos_sysmmu_driver = {
        .probe          = exynos_sysmmu_probe,
        .driver         = {
                .owner          = THIS_MODULE,
                .name           = "exynos-sysmmu",
        }
};

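/*
 * The CPU updates the page tables through the cached linear mapping while
 * the System MMU fetches them directly from memory, so every table update
 * is followed by a flush of the affected lines through both cache levels:
 * dmac_flush_range() for the inner (CPU) cache and outer_flush_range() for
 * the outer cache.
 */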
static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart),
                                virt_to_phys(vaend));
}

static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
        struct exynos_iommu_domain *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->pgtable = (unsigned long *)__get_free_pages(
                                                GFP_KERNEL | __GFP_ZERO, 2);
        if (!priv->pgtable)
                goto err_pgtable;

        priv->lv2entcnt = (short *)__get_free_pages(
                                                GFP_KERNEL | __GFP_ZERO, 1);
        if (!priv->lv2entcnt)
                goto err_counter;

        pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->pgtablelock);
        INIT_LIST_HEAD(&priv->clients);

        domain->geometry.aperture_start = 0;
        domain->geometry.aperture_end   = ~0UL;
        domain->geometry.force_aperture = true;

        domain->priv = priv;
        return 0;

err_counter:
        free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
        kfree(priv);
        return -ENOMEM;
}

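/*
 * A size check on the allocations above (illustrative arithmetic): the lv1
 * table holds NUM_LV1ENTRIES * sizeof(long) == 16KB, hence the order-2 page
 * allocation, which also naturally aligns it on a 16KB boundary for
 * REG_PT_BASE_ADDR; the per-section counters take
 * NUM_LV1ENTRIES * sizeof(short) == 8KB, hence order 1.
 */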
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct exynos_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(data, &priv->clients, node) {
                while (!exynos_sysmmu_disable(data->dev))
                        ; /* until System MMU is actually disabled */
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kfree(__va(lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 2);
        free_pages((unsigned long)priv->lv2entcnt, 1);
        kfree(domain->priv);
        domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long flags;
        int ret;

        ret = pm_runtime_get_sync(data->sysmmu);
        if (ret < 0)
                return ret;

        ret = 0;

        spin_lock_irqsave(&priv->lock, flags);

        ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);

        if (ret == 0) {
                /* 'data->node' must not already be in priv->clients */
                BUG_ON(!list_empty(&data->node));
                data->dev = dev;
                list_add_tail(&data->node, &priv->clients);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret < 0) {
                dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
                                __func__, __pa(priv->pgtable));
                pm_runtime_put(data->sysmmu);
        } else if (ret > 0) {
                dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
                                        __func__, __pa(priv->pgtable));
        } else {
                dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
                                        __func__, __pa(priv->pgtable));
        }

        return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct exynos_iommu_domain *priv = domain->priv;
        struct list_head *pos;
        unsigned long flags;
        bool found = false;

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each(pos, &priv->clients) {
                if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
                        found = true;
                        break;
                }
        }

        if (!found)
                goto finish;

        if (__exynos_sysmmu_disable(data)) {
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
                                        __func__, __pa(priv->pgtable));
                list_del_init(&data->node);

        } else {
                dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed\n",
                                        __func__, __pa(priv->pgtable));
        }

finish:
        spin_unlock_irqrestore(&priv->lock, flags);

        if (found)
                pm_runtime_put(data->sysmmu);
}

static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
                                        short *pgcounter)
{
        if (lv1ent_fault(sent)) {
                unsigned long *pent;

                pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
                if (!pent)
                        return NULL;
                /* lv2 tables must be naturally aligned to LV2TABLE_SIZE */
                BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));

                *sent = mk_lv1ent_page(__pa(pent));
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);
        }

        return page_entry(sent, iova);
}

static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
{
        if (lv1ent_section(sent))
                return -EADDRINUSE;

        if (lv1ent_page(sent)) {
                if (*pgcnt != NUM_LV2ENTRIES)
                        return -EADDRINUSE;

                kfree(page_entry(sent, 0));

                *pgcnt = 0;
        }

        *sent = mk_lv1ent_sect(paddr);

        pgtable_flush(sent, sent + 1);

        return 0;
}

static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
                                                                short *pgcnt)
{
        if (size == SPAGE_SIZE) {
                if (!lv2ent_fault(pent))
                        return -EADDRINUSE;

                *pent = mk_lv2ent_spage(paddr);
                pgtable_flush(pent, pent + 1);
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;
                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
                        if (!lv2ent_fault(pent)) {
                                /* roll back the entries written so far */
                                memset(pent - i, 0, sizeof(*pent) * i);
                                return -EADDRINUSE;
                        }

                        *pent = mk_lv2ent_lpage(paddr);
                }
                pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
                *pgcnt -= SPAGES_PER_LPAGE;
        }

        return 0;
}

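/*
 * The map/unmap callbacks below are invoked through the generic IOMMU API.
 * A hypothetical client sketch (names invented for illustration), relying
 * on the IOMMU core to split requests into the 4KB/64KB/1MB sizes
 * advertised in pgsize_bitmap further down:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	ret = iommu_attach_device(dom, my_dev);
 *	if (!ret)
 *		ret = iommu_map(dom, my_iova, my_phys, SZ_1M, 0);
 */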
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
{
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        int ret = -ENOMEM;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        if (size == SECT_SIZE) {
                ret = lv1set_section(entry, paddr,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
        } else {
                unsigned long *pent;

                pent = alloc_lv2entry(entry, iova,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);

                if (!pent)
                        ret = -ENOMEM;
                else
                        ret = lv2set_page(pent, paddr, size,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
        }

        if (ret) {
                pr_debug("%s: Failed to map iova %#lx/%#zx bytes\n",
                                                        __func__, iova, size);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return ret;
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
                                               unsigned long iova, size_t size)
{
        struct exynos_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        unsigned long *ent;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        ent = section_entry(priv->pgtable, iova);

        if (lv1ent_section(ent)) {
                BUG_ON(size < SECT_SIZE);

                *ent = 0;
                pgtable_flush(ent, ent + 1);
                size = SECT_SIZE;
                goto done;
        }

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SECT_SIZE)
                        size = SECT_SIZE;
                goto done;
        }

        /* lv1ent_page(ent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        if (lv2ent_small(ent)) {
                *ent = 0;
                pgtable_flush(ent, ent + 1);
                size = SPAGE_SIZE;
                priv->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }

        /* lv2ent_large(ent) == true here */
        BUG_ON(size < LPAGE_SIZE);

        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
        pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

        size = LPAGE_SIZE;
        priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        spin_lock_irqsave(&priv->lock, flags);
        list_for_each_entry(data, &priv->clients, node)
                sysmmu_tlb_invalidate_entry(data->dev, iova);
        spin_unlock_irqrestore(&priv->lock, flags);

        return size;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
{
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        if (lv1ent_section(entry)) {
                phys = section_phys(entry) + section_offs(iova);
        } else if (lv1ent_page(entry)) {
                entry = page_entry(entry, iova);

                if (lv2ent_large(entry))
                        phys = lpage_phys(entry) + lpage_offs(iova);
                else if (lv2ent_small(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return phys;
}

static struct iommu_ops exynos_iommu_ops = {
        .domain_init = &exynos_iommu_domain_init,
        .domain_destroy = &exynos_iommu_domain_destroy,
        .attach_dev = &exynos_iommu_attach_device,
        .detach_dev = &exynos_iommu_detach_device,
        .map = &exynos_iommu_map,
        .unmap = &exynos_iommu_unmap,
        .iova_to_phys = &exynos_iommu_iova_to_phys,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

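/*
 * pgsize_bitmap advertises the three supported granularities (4KB, 64KB and
 * 1MB); the IOMMU core only ever calls .map/.unmap with one of these sizes
 * and splits larger requests itself. For example, an 80KB mapping at a
 * 64KB-aligned iova arrives as one 64KB call followed by four 4KB calls
 * (illustrative, based on the generic iommu_map() splitting rule).
 */
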
static int __init exynos_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&exynos_sysmmu_driver);

        if (ret == 0)
                bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);

        return ret;
}
subsys_initcall(exynos_iommu_init);
