linux/arch/sparc/mm/iommu.c
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>      /* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE      IOMMU_RNGE_256MB
#define IOMMU_START     0xF0000000
#define IOMMU_WINSIZE   (256*1024*1024U)
#define IOMMU_NPTES     (IOMMU_WINSIZE/PAGE_SIZE)       /* 64K PTEs, 256KB */
#define IOMMU_ORDER     6                               /* 4096 * (1<<6) */
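
/*
 * Sizing sketch: a 256MB window of 4KB pages is 64K IOPTEs, and with
 * the 4-byte iopte_t used here the table is 64K * 4 = 256KB, i.e.
 * 64 pages, hence order 6.  IOMMU_START + IOMMU_WINSIZE also ends
 * exactly at the 4GB boundary, which is why iommu->end is set to
 * 0xffffffff in iommu_init() below.
 */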

/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;         /* Consistent mapping iopte flags */
static pgprot_t dvma_prot;              /* Consistent mapping pte flags */

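/*
 * IOPTE encoding note: IOPTE_PAGE holds the page's physical address
 * shifted right by 4 (the same >>4 convention used for the table base
 * register in iommu_init()), so with 4KB pages (PAGE_SHIFT == 12) a
 * pfn goes in as pfn << (12 - 4), the "<< 8" in MKIOPTE.  The
 * IOPTE_WAZ ("write as zeros") bits are cleared last.
 */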
#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

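/*
 * Per-SBus boot-time setup: map the IOMMU registers using the PROM
 * "reg" property, enable the 256MB DVMA range, allocate and zero the
 * page table, point the hardware at it, and initialize the allocation
 * bitmap (with cache coloring on HyperSparc).  Any failure here is
 * fatal, hence prom_halt().
 */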
void __init
iommu_init(int iommund, struct sbus_bus *sbus)
{
        unsigned int impl, vers;
        unsigned long tmp;
        struct iommu_struct *iommu;
        struct linux_prom_registers iommu_promregs[PROMREG_MAX];
        struct resource r;
        unsigned long *bitmap;

        iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
        if (!iommu) {
                prom_printf("Unable to allocate iommu structure\n");
                prom_halt();
        }
        iommu->regs = NULL;
        if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
                         sizeof(iommu_promregs)) != -1) {
                memset(&r, 0, sizeof(r));
                r.flags = iommu_promregs[0].which_io;
                r.start = iommu_promregs[0].phys_addr;
                iommu->regs = (struct iommu_regs *)
                        sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
        }
        if (!iommu->regs) {
                prom_printf("Cannot map IOMMU registers\n");
                prom_halt();
        }
        impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
        vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
        tmp = iommu->regs->control;
        tmp &= ~(IOMMU_CTRL_RNGE);
        tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
        iommu->regs->control = tmp;
        iommu_invalidate(iommu->regs);
        iommu->start = IOMMU_START;
        iommu->end = 0xffffffff;

        /* Allocate IOMMU page table */
        /*
         * Stupid alignment constraints give me a headache.
         * We need a 256K, 512K, 1M or 2M area aligned to its size,
         * and current gfp will fortunately give it to us.
         */
        tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
        if (!tmp) {
                prom_printf("Unable to allocate iommu table [0x%08x]\n",
                            IOMMU_NPTES*sizeof(iopte_t));
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tmp;

        /* Initialize new table. */
        memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
        flush_cache_all();
        flush_tlb_all();
        iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
        iommu_invalidate(iommu->regs);

        bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
        if (!bitmap) {
                prom_printf("Unable to allocate iommu bitmap [%d]\n",
                            (int)(IOMMU_NPTES>>3));
                prom_halt();
        }
        bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
        /* To be coherent on HyperSparc, the page color of DVMA
         * and physical addresses must match.
         */
        if (srmmu_modtype == HyperSparc)
                iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
        else
                iommu->usemap.num_colors = 1;

        printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
            impl, vers, iommu->page_table,
            (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

        sbus->ofdev.dev.archdata.iommu = iommu;
}

/*
 * Flush the iotlb entries to RAM.  This begs to be btfixup-ed by
 * srmmu; it could also be better if we didn't have to flush whole
 * pages.
 */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)iopte;
        end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
        start &= PAGE_MASK;
        if (viking_mxcc_present) {
                while (start < end) {
                        viking_mxcc_flush_page(start);
                        start += PAGE_SIZE;
                }
        } else if (viking_flush) {
                while (start < end) {
                        viking_flush_page(start);
                        start += PAGE_SIZE;
                }
        } else {
                while (start < end) {
                        __flush_page_to_ram(start);
                        start += PAGE_SIZE;
                }
        }
}

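/*
 * Map npages physically contiguous pages starting at "page" into the
 * DVMA window: grab a run of IOPTEs whose color matches the pfn, write
 * valid entries, invalidate the per-page IOTLB entries, and flush the
 * modified IOPTEs to RAM.  Returns the bus address of the first page.
 */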
static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus)
{
        struct iommu_struct *iommu = sbus->ofdev.dev.archdata.iommu;
        int ioptex;
        iopte_t *iopte, *iopte0;
        unsigned int busa, busa0;
        int i;

        /* page color = pfn of page */
        ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
        if (ioptex < 0)
                panic("iommu out");
        busa0 = iommu->start + (ioptex << PAGE_SHIFT);
        iopte0 = &iommu->page_table[ioptex];

        busa = busa0;
        iopte = iopte0;
        for (i = 0; i < npages; i++) {
                iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
                iopte++;
                page++;
        }

        iommu_flush_iotlb(iopte0, npages);

        return busa0;
}

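/*
 * Map an arbitrary kernel virtual buffer for DMA: round out to whole
 * pages, map them, and add the intra-page offset back into the
 * returned bus address.
 */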
static u32 iommu_get_scsi_one(char *vaddr, unsigned int len,
    struct sbus_bus *sbus)
{
        unsigned long off;
        int npages;
        struct page *page;
        u32 busa;

        off = (unsigned long)vaddr & ~PAGE_MASK;
        npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
        page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
        busa = iommu_get_one(page, npages, sbus);
        return busa + off;
}

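/*
 * Three front ends, selected at boot in ld_mmu_iommu() according to
 * the CPU's cache behavior: "noflush" for I/O-coherent chips,
 * "gflush" where flush_page_for_dma(0) flushes the entire cache, and
 * "pflush" where each page must be flushed individually before the
 * device may see it.
 */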
static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        flush_page_for_dma(0);
        return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

        while (page < ((unsigned long)(vaddr + len))) {
                flush_page_for_dma(page);
                page += PAGE_SIZE;
        }
        return iommu_get_scsi_one(vaddr, len, sbus);
}

static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg = sg_next(sg);
        }
}

static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        flush_page_for_dma(0);
        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg = sg_next(sg);
        }
}

static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        unsigned long page, oldpage = 0;
        int n, i;

        while (sz != 0) {
                --sz;

                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

                /*
                 * We expect unmapped highmem pages not to be in the cache.
                 * XXX Is this a good assumption?
                 * XXX What if someone else unmaps it here and races us?
                 */
                if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
                        for (i = 0; i < n; i++) {
                                if (page != oldpage) {  /* Already flushed? */
                                        flush_page_for_dma(page);
                                        oldpage = page;
                                }
                                page += PAGE_SIZE;
                        }
                }

                sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg = sg_next(sg);
        }
}

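/*
 * Undo iommu_get_one(): clear the IOPTEs backing npages pages at busa,
 * invalidate the corresponding IOTLB entries, and return the range to
 * the allocation bitmap.
 */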
static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
{
        struct iommu_struct *iommu = sbus->ofdev.dev.archdata.iommu;
        int ioptex;
        int i;

        BUG_ON(busa < iommu->start);
        ioptex = (busa - iommu->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                iopte_val(iommu->page_table[ioptex + i]) = 0;
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
        }
        bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
        unsigned long off;
        int npages;

        off = vaddr & ~PAGE_MASK;
        npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
        iommu_release_one(vaddr & PAGE_MASK, npages, sbus);
}

static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        while (sz != 0) {
                --sz;

                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
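                /* Poison the stale handle: 0x21212121 is "!!!!", so a
                 * use-after-unmap stands out. */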
                sg->dvma_address = 0x21212121;
                sg = sg_next(sg);
        }
}

#ifdef CONFIG_SBUS
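/*
 * Set up a consistent ("coherent") DMA area of len bytes: each page
 * backing the kernel range at va is flushed from the cache, mapped a
 * second time at addr with the uncacheable dvma_prot protection, and
 * entered into the IOMMU with the precomputed ioperm_noc flags.  The
 * DVMA bus address is returned through *pba.
 */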
static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
    unsigned long addr, int len)
{
        unsigned long page, end;
        struct iommu_struct *iommu = sbus_root->ofdev.dev.archdata.iommu;
        iopte_t *iopte = iommu->page_table;
        iopte_t *first;
        int ioptex;

        BUG_ON((va & ~PAGE_MASK) != 0);
        BUG_ON((addr & ~PAGE_MASK) != 0);
        BUG_ON((len & ~PAGE_MASK) != 0);

        /* page color = physical address */
        ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
                addr >> PAGE_SHIFT);
        if (ioptex < 0)
                panic("iommu out");

        iopte += ioptex;
        first = iopte;
        end = addr + len;
        while (addr < end) {
                page = va;
                {
                        pgd_t *pgdp;
                        pmd_t *pmdp;
                        pte_t *ptep;

                        if (viking_mxcc_present)
                                viking_mxcc_flush_page(page);
                        else if (viking_flush)
                                viking_flush_page(page);
                        else
                                __flush_page_to_ram(page);

                        pgdp = pgd_offset(&init_mm, addr);
                        pmdp = pmd_offset(pgdp, addr);
                        ptep = pte_offset_map(pmdp, addr);

                        set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
                }
                iopte_val(*iopte++) =
                    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
                addr += PAGE_SIZE;
                va += PAGE_SIZE;
        }
        /* P3: why do we need this?
         *
         * DAVEM: Because there are several aspects, none of which
         *        are handled by a single interface.  Some cpus are
         *        completely not I/O DMA coherent, and some have
         *        virtually indexed caches.  The driver DMA flushing
         *        methods handle the former case, but here during
         *        IOMMU page table modifications, and usage of non-cacheable
         *        cpu mappings of pages potentially in the cpu caches, we have
         *        to handle the latter case as well.
         */
        flush_cache_all();
        iommu_flush_iotlb(first, len >> PAGE_SHIFT);
        flush_tlb_all();
        iommu_invalidate(iommu->regs);

        *pba = iommu->start + (ioptex << PAGE_SHIFT);
        return 0;
}

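/*
 * Tear down a consistent mapping: zero the IOPTEs, flush the MMU and
 * IOMMU TLBs, and release the range in the allocation bitmap.  Note
 * that the uncacheable cpu-side ptes set up in iommu_map_dma_area()
 * are not touched here.
 */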
static void iommu_unmap_dma_area(unsigned long busa, int len)
{
        struct iommu_struct *iommu = sbus_root->ofdev.dev.archdata.iommu;
        iopte_t *iopte = iommu->page_table;
        unsigned long end;
        int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

        BUG_ON((busa & ~PAGE_MASK) != 0);
        BUG_ON((len & ~PAGE_MASK) != 0);

        iopte += ioptex;
        end = busa + len;
        while (busa < end) {
                iopte_val(*iopte++) = 0;
                busa += PAGE_SIZE;
        }
        flush_tlb_all();
        iommu_invalidate(iommu->regs);
        bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}

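/*
 * Reverse-translate a DVMA bus address to its struct page.  IOPTE_PAGE
 * stores the physical address shifted right by 4, so shifting by
 * PAGE_SHIFT - 4 recovers the pfn.
 */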
static struct page *iommu_translate_dvma(unsigned long busa)
{
        struct iommu_struct *iommu = sbus_root->ofdev.dev.archdata.iommu;
        iopte_t *iopte = iommu->page_table;

        iopte += ((busa - iommu->start) >> PAGE_SHIFT);
        return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
}
#endif

static char *iommu_lockarea(char *vaddr, unsigned long len)
{
        return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}

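/*
 * Wire the mmu_* DMA entry points to the IOMMU versions via btfixup,
 * choosing a flush strategy by how flush_page_for_dma was patched for
 * this CPU: not at all (I/O coherent), a global flush, or a per-page
 * flush.  Also precompute the consistent-mapping pte and iopte flags:
 * cacheable on Viking/MXCC and HyperSparc, uncacheable otherwise.
 */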
void __init ld_mmu_iommu(void)
{
        viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
        BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
        BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

        if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
                /* IO coherent chip */
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
        } else if (flush_page_for_dma_global) {
                /* flush_page_for_dma flushes everything, regardless of which page it is */
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
        } else {
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
        }
        BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
        BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
#endif

        if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
                dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
                ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
        } else {
                dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
                ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
        }
}