linux/mm/mprotect.c
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code       <alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        return newprot;
}
#endif

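/*
 * Walk the PTEs in [addr, end) under the page table lock and rewrite
 * each present entry with the new protection.  When dirty_accountable
 * is set, pages already known to be dirty keep their write permission
 * so we do not take spurious write faults.  Write migration entries
 * are downgraded to read-only migration entries, since a protection
 * check is not practical at that point.
 */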
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pte_t *pte, oldpte;
        spinlock_t *ptl;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;

                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);

                        /*
                         * Avoid taking write faults for pages we know to be
                         * dirty.
                         */
                        if (dirty_accountable && pte_dirty(ptent))
                                ptent = pte_mkwrite(ptent);

                        ptep_modify_prot_commit(mm, addr, pte, ptent);
#ifdef CONFIG_MIGRATION
                } else if (!pte_file(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
                                set_pte_at(mm, addr, pte,
                                        swp_entry_to_pte(entry));
                        }
#endif
                }

        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
}

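/*
 * change_pmd_range() and change_pud_range() walk the intermediate page
 * table levels, skipping empty or bad entries, and hand each populated
 * range down one level until change_pte_range() does the real work.
 */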
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
        } while (pmd++, addr = next, addr != end);
}

static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
        } while (pud++, addr = next, addr != end);
}

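/*
 * Rewrite the page protections for [addr, end) in the given VMA: flush
 * the cache for the range, walk the page tables from the PGD down, and
 * finish with a TLB flush so stale translations cannot survive.
 */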
static void change_protection(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_range(vma, start, end);
}

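/*
 * Apply the new flags to [start, end) within a single VMA: account for
 * a private mapping becoming writable, try to merge with neighbouring
 * VMAs, split off the head and/or tail if only part of the VMA is
 * affected, and finally rewrite the page protections.
 */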
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again.
         */
        if (newflags & VM_WRITE) {
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory(charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
        vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
                                          vm_get_page_prot(newflags));

        if (vma_wants_writenotify(vma)) {
                vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
                dirty_accountable = 1;
        }

        mmu_notifier_invalidate_range_start(mm, start, end);
        if (is_vm_hugetlb_page(vma))
                hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
        else
                change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
        mmu_notifier_invalidate_range_end(mm, start, end);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}

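/*
 * The mprotect(2) entry point: validate the arguments, translate the
 * PROT_* bits into VM_* flags, then walk the affected VMAs with
 * mmap_sem held for writing and apply mprotect_fixup() to each one.
 */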
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
        unsigned long vm_flags, nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (!arch_validate_prot(prot))
                return -EINVAL;

        reqprot = prot;
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        vm_flags = calc_vm_prot_bits(prot);

        down_write(&current->mm->mmap_sem);

        vma = find_vma_prev(current->mm, start, &prev);
        error = -ENOMEM;
        if (!vma)
                goto out;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        }
        else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

                /* newflags >> 4 shifts VM_MAY% in place of VM_% */
                if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
                        error = -EACCES;
                        goto out;
                }

                error = security_file_mprotect(vma, reqprot, prot);
                if (error)
                        goto out;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}
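
/*
 * Illustrative userspace sketch (a separate program, not part of this
 * file): a minimal example exercising the mprotect(2) syscall handled
 * above.  It maps an anonymous read/write page, drops write permission,
 * and then restores it via the usual libc wrappers.
 */
#if 0   /* example only; do not build into the kernel */
#define _GNU_SOURCE             /* MAP_ANONYMOUS on older libcs */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        strcpy(p, "hello");                     /* page is writable */

        if (mprotect(p, page, PROT_READ)) {     /* drop write permission */
                perror("mprotect");
                return 1;
        }
        printf("read-only contents: %s\n", p);  /* reads still succeed */

        if (mprotect(p, page, PROT_READ | PROT_WRITE))  /* restore write */
                perror("mprotect");

        munmap(p, page);
        return 0;
}
#endif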