linux/arch/m68k/mm/kmap.c
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *           used by other architectures                /Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/system.h>

#undef DEBUG

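/*
 * On 020/030 a pointer-table entry can be an "early termination" page
 * descriptor that maps a contiguous 256 KiB block directly; that is the
 * granularity the 020/030 paths below work in.
 */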
#define PTRTREESIZE     (256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix them with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE         PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
        return get_vm_area(size, VM_IOREMAP);
}


static inline void free_io_area(void *addr)
{
        vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE         (256*1024)

static struct vm_struct *iolist;

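/*
 * iolist is a singly linked list of the occupied regions between
 * KMAP_START and KMAP_END, kept sorted by address. get_io_area() does a
 * first-fit scan of that list; the extra IO_SIZE added to area->size
 * keeps an unmapped guard region behind every mapping.
 */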
static struct vm_struct *get_io_area(unsigned long size)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = KMAP_START;
        for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long)tmp->addr)
                        break;
                if (addr > KMAP_END-size) {
                        kfree(area);
                        return NULL;
                }
                addr = tmp->size + (unsigned long)tmp->addr;
        }
        area->addr = (void *)addr;
        area->size = size + IO_SIZE;
        area->next = *p;
        *p = area;
        return area;
}

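/*
 * free_io_area(): round the cookie down to an IO_SIZE boundary, unlink
 * the matching region from iolist, tear down its page table entries
 * (including the guard region) and free the bookkeeping structure.
 */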
static inline void free_io_area(void *addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        addr = (void *)((unsigned long)addr & -IO_SIZE);
        for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        __iounmap(tmp->addr, tmp->size);
                        kfree(tmp);
                        return;
                }
        }
}

#endif

/*
 * Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */
/* Rewritten by Andreas Schwab to remove all races. */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
        struct vm_struct *area;
        unsigned long virtaddr, retaddr;
        long offset;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        /*
         * Don't allow mappings that wrap..
         */
        if (!size || size > physaddr + size)
                return NULL;

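/*
 * On the Amiga, physical addresses between 0x40000000 and 0x60000000
 * (Zorro III space) are assumed to be reachable without setting up a
 * mapping when a non-cached, serialized mapping is requested, so the
 * physical address is returned unchanged.
 */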
#ifdef CONFIG_AMIGA
        if (MACH_IS_AMIGA) {
                if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
                    && (cacheflag == IOMAP_NOCACHE_SER))
                        return (void __iomem *)physaddr;
        }
#endif

#ifdef DEBUG
        printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
        /*
         * Mappings have to be aligned
         */
        offset = physaddr & (IO_SIZE - 1);
        physaddr &= -IO_SIZE;
        size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

        /*
         * Ok, go for it..
         */
        area = get_io_area(size);
        if (!area)
                return NULL;

        virtaddr = (unsigned long)area->addr;
        retaddr = virtaddr + offset;
#ifdef DEBUG
        printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

        /*
         * add cache and table flags to physical address
         */
        if (CPU_IS_040_OR_060) {
                physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
                             _PAGE_ACCESSED | _PAGE_DIRTY);
                switch (cacheflag) {
                case IOMAP_FULL_CACHING:
                        physaddr |= _PAGE_CACHE040;
                        break;
                case IOMAP_NOCACHE_SER:
                default:
                        physaddr |= _PAGE_NOCACHE_S;
                        break;
                case IOMAP_NOCACHE_NONSER:
                        physaddr |= _PAGE_NOCACHE;
                        break;
                case IOMAP_WRITETHROUGH:
                        physaddr |= _PAGE_CACHE040W;
                        break;
                }
        } else {
                physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
                switch (cacheflag) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
                default:
                        physaddr |= _PAGE_NOCACHE030;
                        break;
                case IOMAP_FULL_CACHING:
                case IOMAP_WRITETHROUGH:
                        break;
                }
        }

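        /*
         * On 020/030 each iteration below stores one early termination
         * descriptor covering PTRTREESIZE (256 KiB) at once; on 040/060
         * regular page tables are filled one PAGE_SIZE entry at a time.
         */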
        while ((long)size > 0) {
#ifdef DEBUG
                if (!(virtaddr & (PTRTREESIZE-1)))
                        printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
                pgd_dir = pgd_offset_k(virtaddr);
                pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
                if (!pmd_dir) {
                        printk("ioremap: no mem for pmd_dir\n");
                        return NULL;
                }

                if (CPU_IS_020_OR_030) {
                        pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
                        physaddr += PTRTREESIZE;
                        virtaddr += PTRTREESIZE;
                        size -= PTRTREESIZE;
                } else {
                        pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
                        if (!pte_dir) {
                                printk("ioremap: no mem for pte_dir\n");
                                return NULL;
                        }

                        pte_val(*pte_dir) = physaddr;
                        virtaddr += PAGE_SIZE;
                        physaddr += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }
        }
#ifdef DEBUG
        printk("\n");
#endif
        flush_tlb_all();

        return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);

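/*
 * Illustrative sketch, not part of the original file: how a driver
 * might use __ioremap()/iounmap(). The physical address, length,
 * register offset and function names below are made up for the
 * example; it is kept under "#if 0" so the file compiles unchanged.
 */
#if 0
static void __iomem *example_regs;

static int example_probe(void)
{
        /* Map 4 KiB of device registers non-cached/serialized. */
        example_regs = __ioremap(0x00f00000, 0x1000, IOMAP_NOCACHE_SER);
        if (!example_regs)
                return -ENOMEM;

        /* Accesses go through the returned cookie plus an offset. */
        out_8(example_regs + 0x10, 0xff);
        return 0;
}

static void example_remove(void)
{
        iounmap(example_regs);
        example_regs = NULL;
}
#endif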
/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
        if ((!MACH_IS_AMIGA) ||
            (((unsigned long)addr < 0x40000000) ||
             ((unsigned long)addr > 0x60000000)))
                free_io_area((__force void *)addr);
#else
        free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * __iounmap unmaps nearly everything, so be careful.
 * Currently it does not free the pointer/page tables anymore,
 * but that wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
        unsigned long virtaddr = (unsigned long)addr;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        while ((long)size > 0) {
                pgd_dir = pgd_offset_k(virtaddr);
                if (pgd_bad(*pgd_dir)) {
                        printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
                        pgd_clear(pgd_dir);
                        return;
                }
                pmd_dir = pmd_offset(pgd_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;
                        int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

                        if (pmd_type == _PAGE_PRESENT) {
                                pmd_dir->pmd[pmd_off] = 0;
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        } else if (pmd_type == 0)
                                continue;
                }

                if (pmd_bad(*pmd_dir)) {
                        printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
                        pmd_clear(pmd_dir);
                        return;
                }
                pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                pte_val(*pte_dir) = 0;
                virtaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        flush_tlb_all();
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
        unsigned long virtaddr = (unsigned long)addr;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        if (CPU_IS_040_OR_060) {
                switch (cmode) {
                case IOMAP_FULL_CACHING:
                        cmode = _PAGE_CACHE040;
                        break;
                case IOMAP_NOCACHE_SER:
                default:
                        cmode = _PAGE_NOCACHE_S;
                        break;
                case IOMAP_NOCACHE_NONSER:
                        cmode = _PAGE_NOCACHE;
                        break;
                case IOMAP_WRITETHROUGH:
                        cmode = _PAGE_CACHE040W;
                        break;
                }
        } else {
                switch (cmode) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
                default:
                        cmode = _PAGE_NOCACHE030;
                        break;
                case IOMAP_FULL_CACHING:
                case IOMAP_WRITETHROUGH:
                        cmode = 0;
                }
        }

        while ((long)size > 0) {
                pgd_dir = pgd_offset_k(virtaddr);
                if (pgd_bad(*pgd_dir)) {
                        printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
                        pgd_clear(pgd_dir);
                        return;
                }
                pmd_dir = pmd_offset(pgd_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;

                        if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
                                pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
                                                         _CACHEMASK040) | cmode;
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        }
                }

                if (pmd_bad(*pmd_dir)) {
                        printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
                        pmd_clear(pmd_dir);
                        return;
                }
                pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
                virtaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
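
/*
 * Illustrative sketch, not part of the original file: switching an
 * already mapped kernel range to writethrough caching. The function
 * name, address and length are made up; per the comment above, data
 * for the range still in the cache must be pushed out by the caller
 * first (flush_cache_all() is used here as a blunt way to do that).
 */
#if 0
static void example_make_writethrough(void *vaddr, unsigned long len)
{
        /* Push out any cached data before changing the cache mode. */
        flush_cache_all();
        kernel_set_cachemode(vaddr, len, IOMAP_WRITETHROUGH);
}
#endif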