linux/arch/arm/mm/highmem.c
/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:      Nicolas Pitre
 * Created:     September 8, 2008
 * Copyright:   Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

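/*
 * Illustrative usage sketch, not part of the original file: a hypothetical
 * helper showing how a sleepable caller pairs kmap() with kunmap() to zero
 * a possibly-highmem page. The helper name is made up for the example, and
 * memset() is assumed to be available through the existing includes.
 */
static void example_zero_page(struct page *page)
{
        void *vaddr;

        vaddr = kmap(page);             /* may sleep; lowmem pages use page_address() */
        memset(vaddr, 0, PAGE_SIZE);    /* mapping stays valid until kunmap() */
        kunmap(page);                   /* no-op for lowmem pages */
}
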
void *kmap_atomic(struct page *page, enum km_type type)
{
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;

        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        debug_kmap_atomic(type);

#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * There is no cache coherency issue on non-VIVT caches, so force
         * the dedicated kmap usage for better debugging purposes in that
         * case.
         */
        if (!cache_is_vivt())
                kmap = NULL;
        else
#endif
                kmap = kmap_high_get(page);
        if (kmap)
                return kmap;

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * With debugging enabled, kunmap_atomic forces that entry to 0.
         * Make sure it was indeed properly unmapped.
         */
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
        /*
         * When debugging is off, kunmap_atomic leaves the previous mapping
         * in place, so this TLB flush ensures the TLB is updated with the
         * new mapping.
         */
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

        if (kvaddr >= (void *)FIXADDR_START) {
                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
                local_flush_tlb_kernel_page(vaddr);
#else
                (void) idx;  /* to kill a warning */
#endif
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);

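/*
 * Illustrative usage sketch, not part of the original file: a hypothetical
 * helper showing the atomic variant with the explicit km_type slot used at
 * the time. The helper name is made up; KM_USER0 is the conventional slot
 * for process-context users. The mapping must not be held across anything
 * that can sleep, and the same slot must be passed back to kunmap_atomic().
 */
static void example_zero_page_atomic(struct page *page)
{
        void *vaddr;

        vaddr = kmap_atomic(page, KM_USER0);    /* disables pagefaults, never sleeps */
        memset(vaddr, 0, PAGE_SIZE);
        kunmap_atomic(vaddr, KM_USER0);         /* re-enables pagefaults */
}
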
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        unsigned int idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}

struct page *kmap_atomic_to_page(const void *ptr)
{
        unsigned long vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        pte = TOP_PTE(vaddr);
        return pte_page(*pte);
}

#ifdef CONFIG_CPU_CACHE_VIPT

#include <linux/percpu.h>

/*
 * The VIVT cache of a highmem page is always flushed before the page
 * is unmapped. Hence unmapped highmem pages need no cache maintenance
 * in that case.
 *
 * However unmapped pages may still be cached with a VIPT cache, and
 * it is not possible to perform cache maintenance on them using physical
 * addresses unfortunately.  So we have no choice but to set up a temporary
 * virtual mapping for that purpose.
 *
 * Yet this VIPT cache maintenance may be triggered from DMA support
 * functions which are possibly called from interrupt context. As we don't
 * want to keep interrupts disabled all the time when such maintenance is
 * taking place, we therefore allow for some reentrancy by preserving and
 * restoring the previous fixmap entry before the interrupted context is
 * resumed.  If the reentrancy depth is 0 then there is no need to restore
 * the previous fixmap, and leaving the current one in place allows it to
 * be reused the next time without a TLB flush (common with DMA).
 */

static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);

void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
        unsigned int idx, cpu;
        int *depth;
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        if (!in_interrupt())
                preempt_disable();

        cpu = smp_processor_id();
        depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        raw_local_irq_save(flags);
        (*depth)++;
        if (pte_val(*ptep) == pte_val(pte)) {
                *saved_pte = pte;
        } else {
                *saved_pte = *ptep;
                set_pte_ext(ptep, pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        return (void *)vaddr;
}

void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
        unsigned int idx, cpu = smp_processor_id();
        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        BUG_ON(pte_val(*ptep) != pte_val(pte));
        BUG_ON(*depth <= 0);

        raw_local_irq_save(flags);
        (*depth)--;
        if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
                set_pte_ext(ptep, saved_pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        if (!in_interrupt())
                preempt_enable();
}

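/*
 * Illustrative usage sketch, not part of the original file: a hypothetical
 * cache-maintenance helper showing how the two functions above pair up, in
 * the spirit of their real callers in the ARM cache flushing code. The
 * caller supplies storage for the previous fixmap entry so that a nested
 * (e.g. interrupt-context) user can restore it on the way out, as described
 * in the comment block above. The helper name is made up for the example.
 */
static void example_flush_highmem_page(struct page *page)
{
        pte_t saved_pte;
        void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

        __cpuc_flush_dcache_area(vaddr, PAGE_SIZE);     /* flush via the temporary mapping */
        kunmap_high_l1_vipt(page, saved_pte);
}
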
#endif  /* CONFIG_CPU_CACHE_VIPT */