linux/arch/sparc/mm/highmem.c
<<
>>
Prefs
   1/*
   2 *  highmem.c: virtual kernel memory mappings for high memory
   3 *
   4 *  Provides kernel-static versions of atomic kmap functions originally
   5 *  found as inlines in include/asm-sparc/highmem.h.  These became
   6 *  needed as kmap_atomic() and kunmap_atomic() started getting
   7 *  called from within modules.
   8 *  -- Tomas Szepe <szepe@pinerecords.com>, September 2002
   9 *
  10 *  But kmap_atomic() and kunmap_atomic() cannot be inlined in
  11 *  modules because they are loaded with btfixup-ped functions.
  12 */
  13
  14/*
  15 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
  16 * gives a more generic (and caching) interface. But kmap_atomic can
  17 * be used in IRQ contexts, so in some (very limited) cases we need it.
  18 *
  19 * XXX This is an old text. Actually, it's good to use atomic kmaps,
  20 * provided you remember that they are atomic and not try to sleep
  21 * with a kmap taken, much like a spinlock. Non-atomic kmaps are
  22 * shared by CPUs, and so precious, and establishing them requires IPI.
  23 * Atomic kmaps are lightweight and we may have NCPUS more of them.
  24 */
  25#include <linux/mm.h>
  26#include <linux/highmem.h>
  27#include <asm/pgalloc.h>
  28#include <asm/cacheflush.h>
  29#include <asm/tlbflush.h>
  30#include <asm/fixmap.h>
  31
/*
 * Map a (possibly highmem) page into a per-CPU, per-type fixmap slot and
 * return its kernel virtual address.  Disables pagefaults for the duration
 * of the mapping; the caller must not sleep until kunmap_atomic().
 *
 * Lowmem pages already have a permanent mapping, so they are returned
 * directly via page_address() (still with pagefaults disabled, so the
 * kmap_atomic/kunmap_atomic pairing stays balanced).
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/*
	 * Each CPU owns KM_TYPE_NR consecutive fixmap slots; select this
	 * CPU's slot for the requested kmap type.
	 */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
/* Per-page flush of the slot would suffice; full flush used until the
 * single-entry variants are fixed for this platform. */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	/* The slot must be empty: a non-none pte means a missing kunmap. */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	/* Slots are indexed downward from kmap_pte, hence the subtraction. */
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
  65
/*
 * Tear down a mapping established by kmap_atomic() and re-enable
 * pagefaults.
 *
 * Without CONFIG_DEBUG_HIGHMEM the fixmap pte is deliberately left in
 * place (it will simply be overwritten by the next kmap_atomic of the
 * same slot); only the pagefault_enable() is performed.  With debugging
 * enabled, the pte is cleared so any stale access faults immediately.
 */
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

	/* Lowmem address: kmap_atomic() never built a fixmap entry for it. */
	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	/* The address must match the slot implied by (type, cpu). */
	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
/* Same story as in kmap_atomic(): full flush stands in for the broken
 * single-entry flush. */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif

	pagefault_enable();
}
 101
 102/* We may be fed a pagetable here by ptep_to_xxx and others. */
 103struct page *kmap_atomic_to_page(void *ptr)
 104{
 105        unsigned long idx, vaddr = (unsigned long)ptr;
 106        pte_t *pte;
 107
 108        if (vaddr < SRMMU_NOCACHE_VADDR)
 109                return virt_to_page(ptr);
 110        if (vaddr < PKMAP_BASE)
 111                return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
 112        BUG_ON(vaddr < FIXADDR_START);
 113        BUG_ON(vaddr > FIXADDR_TOP);
 114
 115        idx = virt_to_fix(vaddr);
 116        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
 117        return pte_page(*pte);
 118}
 119