linux/arch/sparc/kernel/ioport.c
/*
 * ioport.c:  Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *      things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *      pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *      So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *      remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables?  It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>          /* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/sbus.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>

#define mmu_inval_dma_area(p, l)        /* Anton pulled it out for 2.4.0-xx */

static struct resource *_sparc_find_resource(struct resource *r,
                                             unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);

/* Next virtual address to use for DVMA mappings. */
static struct resource _sparc_dvma = {
        .name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* Start of I/O mappings, accessible from outside this file. */
/*ext*/ struct resource sparc_iomap = {
        .name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and the interrupt controller before kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10       /* SS-10 uses 8 */

struct xresource {
        struct resource xres;   /* Must be first */
        int xflag;              /* 1 == used */
        char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
        struct xresource *xrp;
        int n;

        xrp = xresv;
        for (n = 0; n < XNRES; n++) {
                if (xrp->xflag == 0) {
                        xrp->xflag = 1;
                        return xrp;
                }
                xrp++;
        }
        return NULL;
}

static void xres_free(struct xresource *xrp)
{
        xrp->xflag = 0;
}

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
        char name[14];

        sprintf(name, "phys_%08x", (u32)offset);
        return _sparc_alloc_io(0, offset, size, name);
}

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
        unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
        struct resource *res;

        if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
                printk("free_io/iounmap: cannot free %lx\n", vaddr);
                return;
        }
        _sparc_free_io(res);

        if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
                xres_free((struct xresource *)res);
        } else {
                kfree(res);
        }
}
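
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): map a device's registers, access them with the MMIO accessors,
 * and unmap on teardown.  The physical address, size, and register
 * layout are made up for illustration.
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap(0xf0200000UL, 0x1000);
 *      if (regs == NULL)
 *              return -ENOMEM;
 *      writel(0x1, regs + 0x04);       (enable bit, hypothetical layout)
 *      status = readl(regs + 0x08);
 *      iounmap(regs);
 */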

/*
 * Map an SBus region.  The bus number is kept in the low four bits
 * of the resource flags.
 */
void __iomem *sbus_ioremap(struct resource *phyres, unsigned long offset,
    unsigned long size, char *name)
{
        return _sparc_alloc_io(phyres->flags & 0xF,
            phyres->start + offset, size, name);
}

void __iomem *of_ioremap(struct resource *res, unsigned long offset,
                         unsigned long size, char *name)
{
        return _sparc_alloc_io(res->flags & 0xF,
                               res->start + offset,
                               size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
        iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);

/*
 * Complementary to sbus_ioremap().
 */
void sbus_iounmap(volatile void __iomem *addr, unsigned long size)
{
        iounmap(addr);
}

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
        static int printed_full;
        struct xresource *xres;
        struct resource *res;
        char *tack;
        int tlen;
        void __iomem *va;       /* P3 diag */

        if (name == NULL) name = "???";

        if ((xres = xres_alloc()) != NULL) {
                tack = xres->xname;
                res = &xres->xres;
        } else {
                if (!printed_full) {
                        printk("ioremap: done with statics, switching to malloc\n");
                        printed_full = 1;
                }
                tlen = strlen(name);
                tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
                if (tack == NULL) return NULL;
                memset(tack, 0, sizeof(struct resource));
                res = (struct resource *) tack;
                tack += sizeof (struct resource);
        }

        strlcpy(tack, name, XNMLN+1);
        res->name = tack;

        va = _sparc_ioremap(res, busno, phys, size);
        /* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
        return va;
}

/*
 * Allocate a page-aligned window in sparc_iomap and establish the
 * page table entries for it.
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
        unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

        if (allocate_resource(&sparc_iomap, res,
            (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
            sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
                /* Usually we cannot see printks in this case. */
                prom_printf("alloc_io_res(%s): cannot occupy\n",
                    (res->name != NULL)? res->name: "???");
                prom_halt();
        }

        pa &= PAGE_MASK;
        sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

        return (void __iomem *)(unsigned long)(res->start + offset);
}
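
/*
 * Size rounding above, worked through (PAGE_SIZE is 4096 on sparc32):
 * mapping pa = 0x40000ffc with sz = 8 gives offset = 0xffc, and
 * (0xffc + 8 + 0xfff) & PAGE_MASK = 0x2000, so two pages are claimed
 * even though only eight bytes were requested; the returned cookie is
 * res->start + 0xffc.  The numbers are illustrative only.
 */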

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
        unsigned long plen;

        plen = res->end - res->start + 1;
        BUG_ON((plen & (PAGE_SIZE-1)) != 0);
        sparc_unmapiorange(res->start, plen);
        release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct sbus_dev *sdev, int x)
{
        printk("sbus_set_sbus64: unsupported\n");
}

extern unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq);
void __init sbus_fill_device_irq(struct sbus_dev *sdev)
{
        struct linux_prom_irqs irqs[PROMINTR_MAX];
        int len;

        len = prom_getproperty(sdev->prom_node, "intr",
                               (char *)irqs, sizeof(irqs));
        if (len != -1) {
                sdev->num_irqs = len / 8;       /* each "intr" entry is 8 bytes */
                if (sdev->num_irqs == 0) {
                        sdev->irqs[0] = 0;
                } else if (sparc_cpu_model == sun4d) {
                        for (len = 0; len < sdev->num_irqs; len++)
                                sdev->irqs[len] =
                                        sun4d_build_irq(sdev, irqs[len].pri);
                } else {
                        for (len = 0; len < sdev->num_irqs; len++)
                                sdev->irqs[len] = irqs[len].pri;
                }
        } else {
                int interrupts[PROMINTR_MAX];

                /* No "intr" property found; check for an "interrupts"
                 * property.  It contains SBus interrupt levels, not IPLs
                 * as in "intr", and no vector values.  We convert
                 * SBus interrupt levels to PILs (platform specific).
                 */
                len = prom_getproperty(sdev->prom_node, "interrupts",
                                       (char *)interrupts, sizeof(interrupts));
                if (len == -1) {
                        sdev->irqs[0] = 0;
                        sdev->num_irqs = 0;
                } else {
                        sdev->num_irqs = len / sizeof(int);
                        for (len = 0; len < sdev->num_irqs; len++) {
                                sdev->irqs[len] =
                                        sbint_to_irq(sdev, interrupts[len]);
                        }
                }
        }
}

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 *
 * XXX Some clever people know that sdev is not used and supply NULL. Watch.
 */
void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
{
        unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
        unsigned long va;
        struct resource *res;
        int order;

        /* XXX why are some lengths signed, others unsigned? */
        if (len <= 0) {
                return NULL;
        }
        /* XXX So what is maxphys for us and how do drivers know it? */
        if (len > 256*1024) {                   /* __get_free_pages() limit */
                return NULL;
        }

        order = get_order(len_total);
        if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
                goto err_nopages;

        if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
                goto err_nomem;

        if (allocate_resource(&_sparc_dvma, res, len_total,
            _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
                printk("sbus_alloc_consistent: cannot occupy 0x%lx\n", len_total);
                goto err_nova;
        }
        mmu_inval_dma_area(va, len_total);
        // XXX The mmu_map_dma_area does this for us below, see comments.
        // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
        /*
         * XXX That's where sdev would be used. Currently we load
         * all iommu tables with the same translations.
         */
        if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0)
                goto err_noiommu;

        /* Set the resource name, if known. */
        if (sdev) {
                res->name = sdev->prom_name;
        }

        return (void *)(unsigned long)res->start;

err_noiommu:
        release_resource(res);
err_nova:
        free_pages(va, order);
err_nomem:
        kfree(res);
err_nopages:
        return NULL;
}

void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
{
        struct resource *res;
        struct page *pgv;

        if ((res = _sparc_find_resource(&_sparc_dvma,
            (unsigned long)p)) == NULL) {
                printk("sbus_free_consistent: cannot free %p\n", p);
                return;
        }

        if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
                printk("sbus_free_consistent: unaligned va %p\n", p);
                return;
        }

        n = (n + PAGE_SIZE-1) & PAGE_MASK;
        if ((res->end-res->start)+1 != n) {
                printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
                    (long)((res->end-res->start)+1), n);
                return;
        }

        release_resource(res);
        kfree(res);

        /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
        pgv = mmu_translate_dvma(ba);
        mmu_unmap_dma_area(ba, n);

        __free_pages(pgv, get_order(n));
}
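
/*
 * A minimal usage sketch (hypothetical driver code): allocate a
 * descriptor ring the device can reach over DVMA, hand the bus address
 * to the hardware, and release it on shutdown.  Names and sizes are
 * made up for illustration.
 *
 *      u32 ring_ba;
 *      void *ring = sbus_alloc_consistent(sdev, RING_BYTES, &ring_ba);
 *
 *      if (ring == NULL)
 *              return -ENOMEM;
 *      (program ring_ba into the device's base-address register)
 *      ...
 *      sbus_free_consistent(sdev, RING_BYTES, ring, ring_ba);
 */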

/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction)
{
        /* XXX why are some lengths signed, others unsigned? */
        if (len <= 0) {
                return 0;
        }
        /* XXX So what is maxphys for us and how do drivers know it? */
        if (len > 256*1024) {                   /* __get_free_pages() limit */
                return 0;
        }
        return mmu_get_scsi_one(va, len, sdev->bus);
}

void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t ba, size_t n, int direction)
{
        mmu_release_scsi_one(ba, n, sdev->bus);
}

int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
        mmu_get_scsi_sgl(sg, n, sdev->bus);

        /*
         * XXX sparc64 can return a partial length here. sun4c should do this
         * but it currently panics if it can't fulfill the request - Anton
         */
        return n;
}

void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
        mmu_release_scsi_sgl(sg, n, sdev->bus);
}
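
/*
 * A minimal streaming-DMA sketch (hypothetical driver code, assuming
 * the usual SBUS_DMA_* direction constants): map a buffer for one
 * transfer, start the device, then unmap when the completion interrupt
 * arrives.  All names are illustrative.
 *
 *      dma_addr_t ba = sbus_map_single(sdev, buf, len, SBUS_DMA_TODEVICE);
 *
 *      if (ba == 0)
 *              return -EIO;
 *      (point the device at ba and kick off the transfer)
 *      ...in the completion path:
 *      sbus_unmap_single(sdev, ba, len, SBUS_DMA_TODEVICE);
 */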

/*
 * Streaming-sync hooks.  These are no-ops on sparc32 for now; the
 * cache work is stubbed out below pending the iommu rewrite.
 */
void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
{
#if 0
        unsigned long va;
        struct resource *res;

        /* We do not need the resource, just print a message if invalid. */
        res = _sparc_find_resource(&_sparc_dvma, ba);
        if (res == NULL)
                panic("sbus_dma_sync_single: 0x%x\n", ba);

        va = page_address(mmu_translate_dvma(ba)); /* XXX highmem */
        /*
         * XXX This bogosity will be fixed with the iommu rewrite coming soon
         * to a kernel near you. - Anton
         */
        /* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
#endif
}

void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
{
#if 0
        unsigned long va;
        struct resource *res;

        /* We do not need the resource, just print a message if invalid. */
        res = _sparc_find_resource(&_sparc_dvma, ba);
        if (res == NULL)
                panic("sbus_dma_sync_single: 0x%x\n", ba);

        va = page_address(mmu_translate_dvma(ba)); /* XXX highmem */
        /*
         * XXX This bogosity will be fixed with the iommu rewrite coming soon
         * to a kernel near you. - Anton
         */
        /* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
#endif
}

void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
        printk("sbus_dma_sync_sg_for_cpu: not implemented yet\n");
}

void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
{
        printk("sbus_dma_sync_sg_for_device: not implemented yet\n");
}

/* Support code for sbus_init().  */
/*
 * XXX This function appears to be a distorted version of
 * prom_sbus_ranges_init(), with all sun4d stuff cut away.
 * Ask DaveM what is going on here, how is sun4d supposed to work... XXX
 */
/* added back sun4d patch from Thomas Bogendoerfer - should be OK (crn) */
void __init sbus_arch_bus_ranges_init(struct device_node *pn, struct sbus_bus *sbus)
{
        int parent_node = pn->node;

        if (sparc_cpu_model == sun4d) {
                struct linux_prom_ranges iounit_ranges[PROMREG_MAX];
                int num_iounit_ranges, len;

                len = prom_getproperty(parent_node, "ranges",
                                       (char *) iounit_ranges,
                                       sizeof (iounit_ranges));
                if (len != -1) {
                        num_iounit_ranges =
                                (len / sizeof(struct linux_prom_ranges));
                        prom_adjust_ranges(sbus->sbus_ranges,
                                           sbus->num_sbus_ranges,
                                           iounit_ranges, num_iounit_ranges);
                }
        }
}

void __init sbus_setup_iommu(struct sbus_bus *sbus, struct device_node *dp)
{
#ifndef CONFIG_SUN4
        struct device_node *parent = dp->parent;

        if (sparc_cpu_model != sun4d &&
            parent != NULL &&
            !strcmp(parent->name, "iommu")) {
                extern void iommu_init(int iommu_node, struct sbus_bus *sbus);

                iommu_init(parent->node, sbus);
        }

        if (sparc_cpu_model == sun4d) {
                extern void iounit_init(int sbi_node, int iounit_node,
                                        struct sbus_bus *sbus);

                iounit_init(dp->node, parent->node, sbus);
        }
#endif
}

void __init sbus_setup_arch_props(struct sbus_bus *sbus, struct device_node *dp)
{
        if (sparc_cpu_model == sun4d) {
                struct device_node *parent = dp->parent;

                sbus->devid = of_getintprop_default(parent, "device-id", 0);
                sbus->board = of_getintprop_default(parent, "board#", 0);
        }
}

int __init sbus_arch_preinit(void)
{
        register_proc_sparc_ioport();

#ifdef CONFIG_SUN4
        {
                extern void sun4_dvma_init(void);
                sun4_dvma_init();
        }
        return 1;
#else
        return 0;
#endif
}

void __init sbus_arch_postinit(void)
{
        if (sparc_cpu_model == sun4d) {
                extern void sun4d_init_sbi_irq(void);
                sun4d_init_sbi_irq();
        }
}
#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
{
        unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
        unsigned long va;
        struct resource *res;
        int order;

        if (len == 0) {
                return NULL;
        }
        if (len > 256*1024) {                   /* __get_free_pages() limit */
                return NULL;
        }

        order = get_order(len_total);
        va = __get_free_pages(GFP_KERNEL, order);
        if (va == 0) {
                printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
                return NULL;
        }

        if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
                free_pages(va, order);
                printk("pci_alloc_consistent: no core\n");
                return NULL;
        }

        if (allocate_resource(&_sparc_dvma, res, len_total,
            _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
                printk("pci_alloc_consistent: cannot occupy 0x%lx\n", len_total);
                free_pages(va, order);
                kfree(res);
                return NULL;
        }
        mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
        sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

        *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
        return (void *) res->start;
}

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
{
        struct resource *res;
        unsigned long pgp;

        if ((res = _sparc_find_resource(&_sparc_dvma,
            (unsigned long)p)) == NULL) {
                printk("pci_free_consistent: cannot free %p\n", p);
                return;
        }

        if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
                printk("pci_free_consistent: unaligned va %p\n", p);
                return;
        }

        n = (n + PAGE_SIZE-1) & PAGE_MASK;
        if ((res->end-res->start)+1 != n) {
                printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
                    (long)((res->end-res->start)+1), (long)n);
                return;
        }

        pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */
        mmu_inval_dma_area(pgp, n);
        sparc_unmapiorange((unsigned long)p, n);

        release_resource(res);
        kfree(res);

        free_pages(pgp, get_order(n));
}
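
/*
 * A minimal usage sketch (hypothetical driver code): a coherent buffer
 * shared between the CPU and a PCI device, no explicit flushing needed.
 * The struct layout and names are made up for illustration.
 *
 *      dma_addr_t desc_ba;
 *      struct my_desc *desc;
 *
 *      desc = pci_alloc_consistent(pdev, sizeof(*desc), &desc_ba);
 *      if (desc == NULL)
 *              return -ENOMEM;
 *      desc->cmd = MY_CMD_START;       (device reads this via desc_ba)
 *      ...
 *      pci_free_consistent(pdev, sizeof(*desc), desc, desc_ba);
 */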

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
    int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);
        /* IIep is write-through, not flushing. */
        return virt_to_phys(ptr);
}

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
                    (size + PAGE_SIZE-1) & PAGE_MASK);
        }
}
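
/*
 * A minimal streaming sketch (hypothetical driver code): map a receive
 * buffer, let the device fill it, then unmap before the CPU reads the
 * data back.  Names are illustrative.
 *
 *      dma_addr_t ba = pci_map_single(pdev, skb->data, len,
 *                                     PCI_DMA_FROMDEVICE);
 *      (give ba to the device, wait for the RX interrupt)
 *      ...
 *      pci_unmap_single(pdev, ba, len, PCI_DMA_FROMDEVICE);
 *      (now it is safe to read skb->data)
 */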

/*
 * Same as pci_map_single, but with pages.
 */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
                        unsigned long offset, size_t size, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);
        /* IIep is write-through, not flushing. */
        return page_to_phys(page) + offset;
}

void pci_unmap_page(struct pci_dev *hwdev,
                        dma_addr_t dma_address, size_t size, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);
        /* mmu_inval_dma_area XXX */
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
        struct scatterlist *sg;
        int n;

        BUG_ON(direction == PCI_DMA_NONE);
        /* IIep is write-through, not flushing. */
        for_each_sg(sgl, sg, nents, n) {
                BUG_ON(page_address(sg_page(sg)) == NULL);
                sg->dvma_address = virt_to_phys(sg_virt(sg));
                sg->dvma_length = sg->length;
        }
        return nents;
}

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
        struct scatterlist *sg;
        int n;

        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
                        BUG_ON(page_address(sg_page(sg)) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg_page(sg)),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
                }
        }
}
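
/*
 * A minimal scatter-gather sketch (hypothetical driver code): map a
 * prebuilt list, program each mapped segment into the device, then
 * unmap after completion.  Names are illustrative.
 *
 *      struct scatterlist *sg;
 *      int i, count;
 *
 *      count = pci_map_sg(pdev, sgl, nents, PCI_DMA_FROMDEVICE);
 *      for_each_sg(sgl, sg, count, i) {
 *              (hand sg_dma_address(sg) and sg_dma_len(sg)
 *               to the device's DMA engine)
 *      }
 *      ...
 *      pci_unmap_sg(pdev, sgl, nents, PCI_DMA_FROMDEVICE);
 */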

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_single_for_device, and then the
 * device again owns the buffer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
                    (size + PAGE_SIZE-1) & PAGE_MASK);
        }
}

void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
                    (size + PAGE_SIZE-1) & PAGE_MASK);
        }
}
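
/*
 * A minimal sync sketch (hypothetical driver code): peek at a mapped
 * buffer between transfers without unmapping it.  Names are
 * illustrative.
 *
 *      pci_dma_sync_single_for_cpu(pdev, ba, len, PCI_DMA_FROMDEVICE);
 *      if (hdr_complete(buf))          (CPU may inspect the buffer now)
 *              handle(buf);
 *      pci_dma_sync_single_for_device(pdev, ba, len, PCI_DMA_FROMDEVICE);
 *      (ownership returns to the device)
 */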

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
        struct scatterlist *sg;
        int n;

        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
                        BUG_ON(page_address(sg_page(sg)) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg_page(sg)),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
                }
        }
}

void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
        struct scatterlist *sg;
        int n;

        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
                        BUG_ON(page_address(sg_page(sg)) == NULL);
                        mmu_inval_dma_area(
                            (unsigned long) page_address(sg_page(sg)),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
                }
        }
}
#endif /* CONFIG_PCI */

#ifdef CONFIG_PROC_FS

static int
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
    void *data)
{
        char *p = buf, *e = buf + length;
        struct resource *r;
        const char *nm;

        for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
                if (p + 32 >= e)        /* Better than nothing */
                        break;
                if ((nm = r->name) == NULL) nm = "???";
                p += sprintf(p, "%016llx-%016llx: %s\n",
                                (unsigned long long)r->start,
                                (unsigned long long)r->end, nm);
        }

        return p-buf;
}

#endif /* CONFIG_PROC_FS */

/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
static struct resource *_sparc_find_resource(struct resource *root,
                                             unsigned long hit)
{
        struct resource *tmp;

        for (tmp = root->child; tmp != NULL; tmp = tmp->sibling) {
                if (tmp->start <= hit && tmp->end >= hit)
                        return tmp;
        }
        return NULL;
}

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
        create_proc_read_entry("io_map", 0, NULL, _sparc_io_get_info, &sparc_iomap);
        create_proc_read_entry("dvma_map", 0, NULL, _sparc_io_get_info, &_sparc_dvma);
#endif
}