linux/arch/x86/mm/highmem_32.c
#include <linux/highmem.h>
#include <linux/module.h>

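/*
 * kmap() establishes a long-lived kernel mapping for a highmem page.
 * It may sleep, so it must only be called from process context; lowmem
 * pages are permanently mapped and their address is returned directly.
 */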
void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}

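/*
 * kunmap() drops a mapping taken with kmap().  Calling it from interrupt
 * context is a bug; unmapping a lowmem page is a no-op because the
 * permanent mapping is never torn down.
 */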
void kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}

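/*
 * Sanity-check that the km_type slot handed to kmap_atomic_prot() matches
 * the context it is used from (hardirq, softirq, or irqs-disabled code),
 * warning at most ten times so a broken caller cannot flood the log.
 */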
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        static unsigned warn_count = 10;

        if (unlikely(warn_count == 0))
                return;

        if (unlikely(in_interrupt())) {
                if (in_irq()) {
                        if (type != KM_IRQ0 && type != KM_IRQ1 &&
                            type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
                            type != KM_BOUNCE_READ) {
                                WARN_ON(1);
                                warn_count--;
                        }
                } else if (!irqs_disabled()) {  /* softirq */
                        if (type != KM_IRQ0 && type != KM_IRQ1 &&
                            type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
                            type != KM_SKB_SUNRPC_DATA &&
                            type != KM_SKB_DATA_SOFTIRQ &&
                            type != KM_BOUNCE_READ) {
                                WARN_ON(1);
                                warn_count--;
                        }
                }
        }

        if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
                        type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
                if (!irqs_disabled()) {
                        WARN_ON(1);
                        warn_count--;
                }
        } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
                if (irq_count() == 0 && !irqs_disabled()) {
                        WARN_ON(1);
                        warn_count--;
                }
        }
#endif
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed, and because kmap must perform a global
 * TLB invalidation when its pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
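
/*
 * Each CPU owns a private window of KM_TYPE_NR fixmap slots, so the slot
 * used for a given mapping is computed from the km_type and the CPU:
 *
 *      idx = type + KM_TYPE_NR * smp_processor_id();
 *
 * For example (figures assumed for illustration only), with KM_TYPE_NR
 * of 20, a KM_USER0 mapping on CPU 2 lands in slot KM_USER0 + 40, so
 * concurrent atomic kmaps on different CPUs can never collide.
 */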
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();

        if (!PageHighMem(page))
                return page_address(page);

        debug_kmap_atomic_prot(type);

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte(kmap_pte-idx, mk_pte(page, prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
        return kmap_atomic_prot(page, type, kmap_prot);
}
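
/*
 * Typical pairing (illustrative sketch, not code from this file): copy
 * one highmem page to another without sleeping.  The KM_USER0/KM_USER1
 * slots are assumptions chosen for a process-context caller.
 *
 *      void *src = kmap_atomic(s_page, KM_USER0);
 *      void *dst = kmap_atomic(d_page, KM_USER1);
 *      memcpy(dst, src, PAGE_SIZE);
 *      kunmap_atomic(dst, KM_USER1);
 *      kunmap_atomic(src, KM_USER0);
 */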
void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it.  Keeping stale mappings around is
         * also a bad idea, in case the page changes cacheability
         * attributes or becomes a protected page in a hypervisor.
         */
        if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
                kpte_clear_flush(kmap_pte-idx, vaddr);
        else {
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
        }

        arch_flush_lazy_mmu_mode();
        pagefault_enable();
}

/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
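
/*
 * Illustrative sketch (assumed caller, not from this file): map a frame
 * known only by physical address, e.g. device memory that has no struct
 * page.  phys_addr and the KM_USER0 slot are example assumptions.
 *
 *      unsigned long pfn = phys_addr >> PAGE_SHIFT;
 *      void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
 *      ... access the frame through vaddr ...
 *      kunmap_atomic(vaddr, KM_USER0);
 */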
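/*
 * Translate an address returned by kmap_atomic() back to its struct page.
 * Addresses below the fixmap area are ordinary lowmem and resolve through
 * virt_to_page(); fixmap addresses resolve through the kmap pte entry.
 */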
struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);