linux/arch/nios2/mm/cacheflush.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

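/*
 * Write back and invalidate the data cache lines covering [start, end).
 * The range is rounded out to cache-line boundaries and clamped to one
 * full cache size, since flushing more lines than the cache holds is
 * pointless.
 */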
static void __flush_dcache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.dcache_line_size - 1);
        end += (cpuinfo.dcache_line_size - 1);
        end &= ~(cpuinfo.dcache_line_size - 1);

        if (end > start + cpuinfo.dcache_size)
                end = start + cpuinfo.dcache_size;

        for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
                __asm__ __volatile__ ("   flushd 0(%0)\n"
                                        : /* Outputs */
                                        : /* Inputs  */ "r"(addr)
                                        /* : No clobber */);
        }
}

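/*
 * Invalidate, without writeback, the data cache lines covering
 * [start, end).  The "initda" instruction discards the line, and the
 * range is rounded out to line boundaries, so unaligned callers lose
 * any adjacent data sharing the first or last line.
 */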
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.dcache_line_size - 1);
        end += (cpuinfo.dcache_line_size - 1);
        end &= ~(cpuinfo.dcache_line_size - 1);

        for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
                __asm__ __volatile__ ("   initda 0(%0)\n"
                                        : /* Outputs */
                                        : /* Inputs  */ "r"(addr)
                                        /* : No clobber */);
        }
}

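/*
 * Invalidate the instruction cache lines covering [start, end), clamped
 * to one cache size, then issue "flushp" so no stale instructions
 * survive in the pipeline.
 */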
static void __flush_icache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.icache_line_size - 1);
        end += (cpuinfo.icache_line_size - 1);
        end &= ~(cpuinfo.icache_line_size - 1);

        if (end > start + cpuinfo.icache_size)
                end = start + cpuinfo.icache_size;

        for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
                __asm__ __volatile__ ("   flushi %0\n"
                                        : /* Outputs */
                                        : /* Inputs  */ "r"(addr)
                                        /* : No clobber */);
        }
        __asm__ __volatile__ (" flushp\n");
}

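/*
 * Flush every user-space mapping of this page cache page in the current
 * mm, making virtually-indexed cache aliases of the page coherent.
 */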
static void flush_aliases(struct address_space *mapping, struct page *page)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *mpnt;
        pgoff_t pgoff;

        pgoff = page->index;

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long offset;

                if (mpnt->vm_mm != mm)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;

                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                flush_cache_page(mpnt, mpnt->vm_start + offset,
                        page_to_pfn(page));
        }
        flush_dcache_mmap_unlock(mapping);
}

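/*
 * The caches can only be flushed by address, so the mm-wide hooks below
 * conservatively flush both caches in their entirety.
 */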
void flush_cache_all(void)
{
        __flush_dcache(0, cpuinfo.dcache_size);
        __flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

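/*
 * Write the data cache back before invalidating the instruction cache,
 * so that newly stored instructions have reached memory by the time the
 * I-cache refills from it.
 */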
void flush_icache_range(unsigned long start, unsigned long end)
{
        __flush_dcache(start, end);
        __flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
        __flush_dcache(start, end);
        __flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

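/*
 * Discard, without writeback, anything cached over the given range;
 * typically used so that stale lines do not shadow data a device has
 * just DMAed into memory.
 */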
void invalidate_dcache_range(unsigned long start, unsigned long end)
{
        __invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

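/*
 * flush_cache_range() may be called with a NULL vma, in which case the
 * range is assumed to possibly contain code and the I-cache is flushed
 * as well.
 */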
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
{
        __flush_dcache(start, end);
        if (vma == NULL || (vma->vm_flags & VM_EXEC))
                __flush_icache(start, end);
}

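/*
 * Bring the kernel mapping of a page up to date in both caches, e.g.
 * after instructions have been written into it.
 */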
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long start = (unsigned long) page_address(page);
        unsigned long end = start + PAGE_SIZE;

        __flush_dcache(start, end);
        __flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                        unsigned long pfn)
{
        unsigned long start = vmaddr;
        unsigned long end = start + PAGE_SIZE;

        __flush_dcache(start, end);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache(start, end);
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        unsigned long start = (unsigned long)page_address(page);

        __flush_dcache(start, start + PAGE_SIZE);
}

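/*
 * A page cache page with no user-space mappings yet need not be flushed
 * immediately: flush_dcache_page() clears PG_dcache_clean instead, and
 * update_mmu_cache() performs the deferred flush when the page is first
 * mapped.
 */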
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping_file(page);

        /*
         * If the page is in the page cache but not yet mapped into any
         * user address space, defer the flush: clear PG_dcache_clean and
         * let update_mmu_cache() do the work when the mapping appears.
         * Otherwise flush now and resolve any user-space aliases.
         */
        if (mapping && !mapping_mapped(mapping)) {
                clear_bit(PG_dcache_clean, &page->flags);
        } else {
                __flush_dcache_page(mapping, page);
                if (mapping) {
                        unsigned long start = (unsigned long)page_address(page);

                        flush_aliases(mapping, page);
                        flush_icache_range(start, start + PAGE_SIZE);
                }
                set_bit(PG_dcache_clean, &page->flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);

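/*
 * Called after a PTE is installed: preload the TLB, perform any D-cache
 * flush deferred by flush_dcache_page(), then fix up user-space aliases
 * and, for executable mappings, the I-cache.
 */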
void update_mmu_cache(struct vm_area_struct *vma,
                      unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        unsigned long pfn = pte_pfn(pte);
        struct page *page;
        struct address_space *mapping;

        reload_tlb_page(vma, address, pte);

        if (!pfn_valid(pfn))
                return;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        page = pfn_to_page(pfn);
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping_file(page);
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);

        if (mapping) {
                flush_aliases(mapping, page);
                if (vma->vm_flags & VM_EXEC)
                        flush_icache_page(vma, page);
        }
}

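/*
 * The user page copy/clear helpers flush the user alias (vaddr) before
 * the operation and the kernel mapping they dirtied afterwards, keeping
 * both views of the physical page coherent.
 */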
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *to)
{
        __flush_dcache(vaddr, vaddr + PAGE_SIZE);
        __flush_icache(vaddr, vaddr + PAGE_SIZE);
        copy_page(vto, vfrom);
        __flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
        __flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
        __flush_dcache(vaddr, vaddr + PAGE_SIZE);
        __flush_icache(vaddr, vaddr + PAGE_SIZE);
        clear_page(addr);
        __flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
        __flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

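/*
 * copy_{from,to}_user_page() give the kernel (e.g. ptrace) coherent
 * access to a user page through its kernel mapping: the user alias is
 * flushed first, then the side touched through the kernel mapping is
 * flushed after the copy.
 */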
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long user_vaddr,
                        void *dst, void *src, int len)
{
        flush_cache_page(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        __flush_dcache((unsigned long)src, (unsigned long)src + len);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache((unsigned long)src, (unsigned long)src + len);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long user_vaddr,
                        void *dst, void *src, int len)
{
        flush_cache_page(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        __flush_dcache((unsigned long)dst, (unsigned long)dst + len);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache((unsigned long)dst, (unsigned long)dst + len);
}