/*
 *   linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

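/*
 * Tear down whatever is currently mapped at @addr before a new file pte
 * is installed: flush and clear a present pte and drop the page's rmap,
 * page cache reference and rss accounting, or release the swap entry if
 * the pte was a (non-file) swap entry.
 */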
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        if (pte_present(pte)) {
                struct page *page;

                flush_cache_page(vma, addr, pte_pfn(pte));
                pte = ptep_clear_flush(vma, addr, ptep);
                page = vm_normal_page(vma, addr, pte);
                if (page) {
                        if (pte_dirty(pte))
                                set_page_dirty(page);
                        page_remove_rmap(page);
                        page_cache_release(page);
                        update_hiwater_rss(mm);
                        dec_mm_counter(mm, MM_FILEPAGES);
                }
        } else {
                if (!pte_file(pte))
                        free_swap_and_cache(pte_to_swp_entry(pte));
                pte_clear_not_present_full(mm, addr, ptep, 0);
        }
}

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
        int err = -ENOMEM;
        pte_t *pte;
        spinlock_t *ptl;

        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
                goto out;

        if (!pte_none(*pte))
                zap_pte(mm, vma, addr, pte);

        set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
        /*
         * We don't need to run update_mmu_cache() here because the "file pte"
         * being installed by install_file_pte() is not a real pte - it's a
         * non-present entry (like a swap entry), noting what file offset should
         * be mapped there when there's a fault (in a non-linear vma where
         * that's not obvious).
         */
        pte_unmap_unlock(pte, ptl);
        err = 0;
out:
        return err;
}

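/*
 * Walk the range one page at a time, installing a file pte for each
 * page; stop and return the error on the first failure.
 */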
static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, unsigned long size, pgoff_t pgoff)
{
        int err;

        do {
                err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
                if (err)
                        return err;

                size -= PAGE_SIZE;
                addr += PAGE_SIZE;
                pgoff++;
        } while (size);

        return 0;
}

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no I/O.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
                unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
        struct mm_struct *mm = current->mm;
        struct address_space *mapping;
        struct vm_area_struct *vma;
        int err = -EINVAL;
        int has_write_lock = 0;

        if (prot)
                return err;
        /*
         * Sanitize the syscall parameters:
         */
        start = start & PAGE_MASK;
        size = size & PAGE_MASK;

        /* Does the address range wrap, or is the span zero-sized? */
        if (start + size <= start)
                return err;

        /* Does pgoff wrap? */
        if (pgoff + (size >> PAGE_SHIFT) < pgoff)
                return err;

        /* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
        if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
                return err;
#endif

        /* We need down_write() to change vma->vm_flags. */
        down_read(&mm->mmap_sem);
 retry:
        vma = find_vma(mm, start);

        /*
         * Make sure the vma is shared, that it supports prefaulting,
         * and that the remapped range is valid and fully within
         * the single existing vma.  vm_private_data is used as a
         * swapout cursor in a VM_NONLINEAR vma.
         */
        if (!vma || !(vma->vm_flags & VM_SHARED))
                goto out;

        if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
                goto out;

        if (!(vma->vm_flags & VM_CAN_NONLINEAR))
                goto out;

        if (start < vma->vm_start || start + size > vma->vm_end)
                goto out;

        /* Must set VM_NONLINEAR before any pages are populated. */
        if (!(vma->vm_flags & VM_NONLINEAR)) {
                /* Don't need a nonlinear mapping, exit success */
                if (pgoff == linear_page_index(vma, start)) {
                        err = 0;
                        goto out;
                }

                if (!has_write_lock) {
                        up_read(&mm->mmap_sem);
                        down_write(&mm->mmap_sem);
                        has_write_lock = 1;
                        goto retry;
                }
                mapping = vma->vm_file->f_mapping;
                /*
                 * page_mkclean doesn't work on nonlinear vmas, so if
                 * dirty pages need to be accounted, emulate with linear
                 * vmas.
                 */
                if (mapping_cap_account_dirty(mapping)) {
                        unsigned long addr;
                        struct file *file = vma->vm_file;

                        flags &= MAP_NONBLOCK;
                        get_file(file);
                        addr = mmap_region(file, start, size,
                                        flags, vma->vm_flags, pgoff);
                        fput(file);
                        if (IS_ERR_VALUE(addr)) {
                                err = addr;
                        } else {
                                BUG_ON(addr != start);
                                err = 0;
                        }
                        goto out;
                }
                mutex_lock(&mapping->i_mmap_mutex);
                flush_dcache_mmap_lock(mapping);
                vma->vm_flags |= VM_NONLINEAR;
                vma_prio_tree_remove(vma, &mapping->i_mmap);
                vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                flush_dcache_mmap_unlock(mapping);
                mutex_unlock(&mapping->i_mmap_mutex);
        }

        if (vma->vm_flags & VM_LOCKED) {
                /*
                 * drop PG_Mlocked flag for over-mapped range
                 */
                vm_flags_t saved_flags = vma->vm_flags;
                munlock_vma_pages_range(vma, start, start + size);
                vma->vm_flags = saved_flags;
        }

        mmu_notifier_invalidate_range_start(mm, start, start + size);
        err = populate_range(mm, vma, start, size, pgoff);
        mmu_notifier_invalidate_range_end(mm, start, start + size);
        if (!err && !(flags & MAP_NONBLOCK)) {
                if (vma->vm_flags & VM_LOCKED) {
                        /*
                         * might be mapping previously unmapped range of file
                         */
                        mlock_vma_pages_range(vma, start, start + size);
                } else {
                        if (unlikely(has_write_lock)) {
                                downgrade_write(&mm->mmap_sem);
                                has_write_lock = 0;
                        }
                        make_pages_present(start, start+size);
                }
        }

        /*
         * We can't clear VM_NONLINEAR because we'd have to do
         * it after ->populate completes, and that would prevent
         * downgrading the lock.  (Locks can't be upgraded).
         */

out:
        if (likely(!has_write_lock))
                up_read(&mm->mmap_sem);
        else
                up_write(&mm->mmap_sem);

        return err;
}
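
/*
 * Example (userspace, illustrative only -- not part of this file): a
 * minimal sketch of driving this syscall through glibc's
 * remap_file_pages() wrapper, assuming fd refers to a file at least two
 * pages long. It maps two file pages MAP_SHARED, then swaps their order
 * inside the virtual window without creating a second vma. The first
 * call, whose pgoff no longer matches the linear index, is what sends
 * the kernel down the VM_NONLINEAR (or, for dirty-accounted mappings,
 * the linear-emulation) path above. Note prot must be 0, per the NOTE
 * in the kernel-doc comment.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	static int swap_first_two_pages(int fd)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		char *win;
 *
 *		win = mmap(NULL, 2 * page, PROT_READ, MAP_SHARED, fd, 0);
 *		if (win == MAP_FAILED)
 *			return -1;
 *		// virtual page 0 now shows file page 1 ...
 *		if (remap_file_pages(win, page, 0, 1, 0))
 *			return -1;
 *		// ... and virtual page 1 shows file page 0
 *		if (remap_file_pages(win + page, page, 0, 0, 0))
 *			return -1;
 *		return 0;
 *	}
 */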