/* arch/powerpc/include/asm/tlbflush.h */
#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - local_flush_tlb_mm(mm) flushes the specified mm context on
 *                           the local processor
 *  - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__
#ifdef CONFIG_PPC_MMU_NOHASH
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#include <linux/mm.h>

/* Sentinel context id meaning "no MMU context assigned" */
#define MMU_NO_CONTEXT          ((unsigned int)-1)

extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

/* Flush primitives that act only on the local CPU */
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

#ifdef CONFIG_SMP
/* SMP: global flushes get real out-of-line implementations */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#else
/* UP: a "global" flush is just the local one */
#define flush_tlb_mm(mm)                local_flush_tlb_mm(mm)
#define flush_tlb_page(vma,addr)        local_flush_tlb_page(vma,addr)
#endif
/* SW-loaded TLB: the nohash variant is identical to the normal page flush */
#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
#elif defined(CONFIG_PPC_STD_MMU_32)

/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
/*
 * No local-only primitives on hash-32: the local variants simply
 * invoke the global flushes above.
 */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
{
        flush_tlb_page(vma, vmaddr);
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
        flush_tlb_mm(mm);
}
#elif defined(CONFIG_PPC_STD_MMU_64)

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

/* Capacity of one per-cpu invalidation batch */
#define PPC64_TLB_BATCH_NR 192

/*
 * Per-cpu batch of pending hash-PTE invalidations, drained by
 * __flush_tlb_pending() (see arch_leave_lazy_mmu_mode() below).
 */
struct ppc64_tlb_batch {
        int                     active;         /* batching currently enabled */
        unsigned long           index;          /* number of entries queued */
        struct mm_struct        *mm;            /* mm the queued entries belong to */
        real_pte_t              pte[PPC64_TLB_BATCH_NR];
        unsigned long           vaddr[PPC64_TLB_BATCH_NR];
        unsigned int            psize;          /* page size of the queued entries */
        int                     ssize;          /* segment size of the queued entries */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* Flush every invalidation accumulated in @batch */
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 101
 102static inline void arch_enter_lazy_mmu_mode(void)
 103{
 104        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 105
 106        batch->active = 1;
 107}
 108
 109static inline void arch_leave_lazy_mmu_mode(void)
 110{
 111        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 112
 113        if (batch->index)
 114                __flush_tlb_pending(batch);
 115        batch->active = 0;
 116}
 117
#define arch_flush_lazy_mmu_mode()      do {} while (0)


/* Flush a single hash-PTE mapping for @va (local == this cpu only) */
extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
                            int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);


/*
 * On the 64-bit hash MMU the generic TLB flush hooks are no-ops:
 * invalidation is driven through hpte_need_flush() and the per-cpu
 * batch machinery above instead of through these entry points.
 */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
                                         unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                                     unsigned long end);
 162
#else
#error Unsupported MMU type
#endif

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */