linux-bk/include/asm-m32r/mmu_context.h
#ifndef _ASM_M32R_MMU_CONTEXT_H
#define _ASM_M32R_MMU_CONTEXT_H

#ifdef __KERNEL__

#include <linux/config.h>

#include <asm/m32r.h>

#define MMU_CONTEXT_ASID_MASK      (0x000000FF)
#define MMU_CONTEXT_VERSION_MASK   (0xFFFFFF00)
#define MMU_CONTEXT_FIRST_VERSION  (0x00000100)
#define NO_CONTEXT                 (0x00000000)

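/*
 * Illustrative sketch (not part of the original header): an MMU
 * context value packs a 24-bit generation ("version") above an 8-bit
 * hardware ASID.  For example, context 0x00000305 carries version
 * 0x00000300 and ASID 0x05.  The helpers below are hypothetical and
 * never compiled; they only demonstrate how the masks decompose a
 * context value.
 */
#if 0   /* example only */
static inline unsigned long example_ctx_version(unsigned long ctx)
{
        return ctx & MMU_CONTEXT_VERSION_MASK;  /* high 24 bits */
}
static inline unsigned long example_ctx_asid(unsigned long ctx)
{
        return ctx & MMU_CONTEXT_ASID_MASK;     /* low 8 bits */
}
#endif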

#ifndef __ASSEMBLY__

#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>

/*
 * Cache of the MMU context last used.
 */
#ifndef CONFIG_SMP
extern unsigned long mmu_context_cache_dat;
#define mmu_context_cache       mmu_context_cache_dat
#define mm_context(mm)          (mm)->context
#else /* CONFIG_SMP */
extern unsigned long mmu_context_cache_dat[];
#define mmu_context_cache       mmu_context_cache_dat[smp_processor_id()]
#define mm_context(mm)          (mm)->context[smp_processor_id()]
#endif /* CONFIG_SMP */
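/*
 * Note: on SMP each CPU owns one slot of mmu_context_cache_dat[] and
 * each mm records one context per CPU, so the accessors above always
 * index by smp_processor_id() and ASID allocation stays CPU-local.
 */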

#define set_tlb_tag(entry, tag)         (*(entry) = ((tag) & PAGE_MASK) | get_asid())
#define set_tlb_data(entry, data)       (*(entry) = ((data) | _PAGE_PRESENT))

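/*
 * Usage sketch of the two macros above (illustrative only;
 * "tag_entry", "data_entry", "vaddr" and "pte_val" are hypothetical
 * names, not part of this header):
 */
#if 0   /* example only -- never compiled */
        unsigned long *tag_entry, *data_entry;  /* TLB entry registers */
        unsigned long vaddr, pte_val;

        set_tlb_tag(tag_entry, vaddr);      /* page-aligned VA | current ASID */
        set_tlb_data(data_entry, pte_val);  /* pte value | _PAGE_PRESENT */
#endif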
#ifdef CONFIG_MMU
#define enter_lazy_tlb(mm, tsk) do { } while (0)

static inline void get_new_mmu_context(struct mm_struct *mm)
{
        unsigned long mc = ++mmu_context_cache;

        if (!(mc & MMU_CONTEXT_ASID_MASK)) {
                /* We have exhausted the ASIDs of this version.
                   Flush the whole TLB and start a new version cycle. */
                local_flush_tlb_all();
                /* Fix the version if needed.
                   Note that we avoid version #0 to distinguish a live
                   context from NO_CONTEXT. */
                if (!mc)
                        mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
        }
        mm_context(mm) = mc;
}

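/*
 * Worked example of the wrap-around above (illustrative values): with
 * mmu_context_cache == 0x000001ff, the pre-increment yields
 * mc == 0x00000200.  The ASID byte is zero, so all 256 ASIDs of
 * version 0x00000100 have been handed out: the whole TLB is flushed
 * and version 0x00000200 begins.  Only a full 32-bit wrap to zero
 * resets the cache to MMU_CONTEXT_FIRST_VERSION, which keeps a live
 * context distinct from NO_CONTEXT.
 */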
/*
 * Get an MMU context if needed.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
        if (mm) {
                unsigned long mc = mmu_context_cache;

                /* Check whether this mm holds a context of an old
                   version.  If so, allocate a new context with the
                   current version. */
                if ((mm_context(mm) ^ mc) & MMU_CONTEXT_VERSION_MASK)
                        get_new_mmu_context(mm);
        }
}

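/*
 * Example of the version test above (illustrative values): with
 * mm_context(mm) == 0x00000105 and mmu_context_cache == 0x00000203,
 * (0x00000105 ^ 0x00000203) & MMU_CONTEXT_VERSION_MASK == 0x00000300,
 * which is non-zero, so the mm's ASID predates the last TLB flush and
 * get_new_mmu_context() is called.  Contexts of the same version XOR
 * to zero in the upper 24 bits and are reused as-is.
 */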
/*
 * Initialize the context-related info for a new mm_struct
 * instance.
 */
static inline int init_new_context(struct task_struct *tsk,
        struct mm_struct *mm)
{
#ifndef CONFIG_SMP
        mm->context = NO_CONTEXT;
#else /* CONFIG_SMP */
        int num_cpus = num_online_cpus();
        int i;

        for (i = 0; i < num_cpus; i++)
                mm->context[i] = NO_CONTEXT;
#endif /* CONFIG_SMP */

        return 0;
}

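/*
 * Note: NO_CONTEXT carries version 0, while mmu_context_cache never
 * falls below MMU_CONTEXT_FIRST_VERSION, so the version test in
 * get_mmu_context() always fails for a freshly initialized mm and a
 * real context is allocated on its first activation.
 */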
/*
 * Destroy the context-related info for an mm_struct that is about
 * to be put to rest.
 */
#define destroy_context(mm)     do { } while (0)

static inline void set_asid(unsigned long asid)
{
        /* Program the hardware ASID register. */
        *(volatile unsigned long *)MASID = (asid & MMU_CONTEXT_ASID_MASK);
}

static inline unsigned long get_asid(void)
{
        unsigned long asid;

        /* Read the current ASID back from the hardware register. */
        asid = *(volatile unsigned long *)MASID;
        asid &= MMU_CONTEXT_ASID_MASK;

        return asid;
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm)
{
        get_mmu_context(mm);
        set_asid(mm_context(mm) & MMU_CONTEXT_ASID_MASK);
}

static inline void switch_mm(struct mm_struct *prev,
        struct mm_struct *next, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        int cpu = smp_processor_id();
#endif /* CONFIG_SMP */

        if (prev != next) {
#ifdef CONFIG_SMP
                cpu_set(cpu, next->cpu_vm_mask);
#endif /* CONFIG_SMP */
                /* Set MPTB (page-table base register) = next->pgd */
                *(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
                activate_context(next);
        }
#ifdef CONFIG_SMP
        else if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
                activate_context(next);
#endif /* CONFIG_SMP */
}

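/*
 * Sketch of the SMP branch above (illustrative): when prev == next the
 * page tables are unchanged, but this CPU may never have run the mm.
 * cpu_test_and_set() marks the CPU in next->cpu_vm_mask and, if the
 * bit was previously clear, activate_context() loads an ASID for it.
 */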
#define deactivate_mm(tsk, mm)  do { } while (0)

#define activate_mm(prev, next) \
        switch_mm((prev), (next), NULL)

#else /* not CONFIG_MMU */
#define get_mmu_context(mm)             do { } while (0)
#define init_new_context(tsk,mm)        (0)
#define destroy_context(mm)             do { } while (0)
#define set_asid(asid)                  do { } while (0)
#define get_asid()                      (0)
#define activate_context(mm)            do { } while (0)
#define switch_mm(prev,next,tsk)        do { } while (0)
#define deactivate_mm(tsk,mm)           do { } while (0)
#define activate_mm(prev,next)          do { } while (0)
#define enter_lazy_tlb(mm,tsk)          do { } while (0)
#endif /* CONFIG_MMU */


#endif /* not __ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_MMU_CONTEXT_H */