// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code       <alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                unsigned long cp_flags)
{
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;
        int target_node = NUMA_NO_NODE;
        bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
        bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
        bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
        bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

        /*
         * Can be called with only the mmap_lock for reading by
         * prot_numa so we must check the pmd isn't constantly
         * changing from under us from pmd_none to pmd_trans_huge
         * and/or the other way around.
         */
        if (pmd_trans_unstable(pmd))
                return 0;

        /*
         * The pmd points to a regular pte so the pmd can't change
         * from under us even if the mmap_lock is only held for
         * reading.
         */
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

        /* Get target node for single threaded private VMAs */
        if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
            atomic_read(&vma->vm_mm->mm_users) == 1)
                target_node = numa_node_id();

        flush_tlb_batched_pending(vma->vm_mm);
        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;
                        bool preserve_write = prot_numa && pte_write(oldpte);

                        /*
                         * Avoid trapping faults against the zero or KSM
                         * pages. See similar comment in change_huge_pmd.
                         */
                        if (prot_numa) {
                                struct page *page;

                                /* Avoid TLB flush if possible */
                                if (pte_protnone(oldpte))
                                        continue;

                                page = vm_normal_page(vma, addr, oldpte);
                                if (!page || PageKsm(page))
                                        continue;

                                /* Also skip shared copy-on-write pages */
                                if (is_cow_mapping(vma->vm_flags) &&
                                    page_mapcount(page) != 1)
                                        continue;

                                /*
                                 * While migration can move some dirty pages,
                                 * it cannot move them all from MIGRATE_ASYNC
                                 * context.
                                 */
                                if (page_is_file_lru(page) && PageDirty(page))
                                        continue;

                                /*
                                 * Don't mess with PTEs if page is already on the node
                                 * a single-threaded process is running on.
                                 */
                                if (target_node == page_to_nid(page))
                                        continue;
                        }

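                        /*
                         * Descriptive note: ptep_modify_prot_start/commit
                         * bracket a read-modify-write of the PTE.  In the
                         * generic implementation, _start clears the PTE (so
                         * concurrent hardware access/dirty bit updates can't
                         * be lost while the new value is computed) and
                         * _commit installs the result; architectures may
                         * override both, e.g. to batch the updates.
                         */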
                        oldpte = ptep_modify_prot_start(vma, addr, pte);
                        ptent = pte_modify(oldpte, newprot);
                        if (preserve_write)
                                ptent = pte_mk_savedwrite(ptent);

                        if (uffd_wp) {
                                ptent = pte_wrprotect(ptent);
                                ptent = pte_mkuffd_wp(ptent);
                        } else if (uffd_wp_resolve) {
                                /*
                                 * Leave the write bit to be handled by the
                                 * page fault handler so that things like
                                 * COW can be handled properly.
                                 */
                                ptent = pte_clear_uffd_wp(ptent);
                        }

                        /* Avoid taking write faults for known dirty pages */
                        if (dirty_accountable && pte_dirty(ptent) &&
                                        (pte_soft_dirty(ptent) ||
                                         !(vma->vm_flags & VM_SOFTDIRTY))) {
                                ptent = pte_mkwrite(ptent);
                        }
                        ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
                        pages++;
                } else if (is_swap_pte(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
                        pte_t newpte;

                        if (is_writable_migration_entry(entry)) {
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                entry = make_readable_migration_entry(
                                                        swp_offset(entry));
                                newpte = swp_entry_to_pte(entry);
                                if (pte_swp_soft_dirty(oldpte))
                                        newpte = pte_swp_mksoft_dirty(newpte);
                                if (pte_swp_uffd_wp(oldpte))
                                        newpte = pte_swp_mkuffd_wp(newpte);
                        } else if (is_writable_device_private_entry(entry)) {
                                /*
                                 * We do not preserve soft-dirtiness. See
                                 * copy_one_pte() for explanation.
                                 */
                                entry = make_readable_device_private_entry(
                                                        swp_offset(entry));
                                newpte = swp_entry_to_pte(entry);
                                if (pte_swp_uffd_wp(oldpte))
                                        newpte = pte_swp_mkuffd_wp(newpte);
                        } else if (is_writable_device_exclusive_entry(entry)) {
                                entry = make_readable_device_exclusive_entry(
                                                        swp_offset(entry));
                                newpte = swp_entry_to_pte(entry);
                                if (pte_swp_soft_dirty(oldpte))
                                        newpte = pte_swp_mksoft_dirty(newpte);
                                if (pte_swp_uffd_wp(oldpte))
                                        newpte = pte_swp_mkuffd_wp(newpte);
                        } else {
                                newpte = oldpte;
                        }

                        if (uffd_wp)
                                newpte = pte_swp_mkuffd_wp(newpte);
                        else if (uffd_wp_resolve)
                                newpte = pte_swp_clear_uffd_wp(newpte);

                        if (!pte_same(oldpte, newpte)) {
                                set_pte_at(vma->vm_mm, addr, pte, newpte);
                                pages++;
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);

        return pages;
}

/*
 * Used when setting automatic NUMA hinting protection where it is
 * critical that a numa hinting PMD is not confused with a bad PMD.
 */
static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
{
        pmd_t pmdval = pmd_read_atomic(pmd);

        /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        barrier();
#endif

        if (pmd_none(pmdval))
                return 1;
        if (pmd_trans_huge(pmdval))
                return 0;
        if (unlikely(pmd_bad(pmdval))) {
                pmd_clear_bad(pmd);
                return 1;
        }

        return 0;
}

static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                pud_t *pud, unsigned long addr, unsigned long end,
                pgprot_t newprot, unsigned long cp_flags)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long pages = 0;
        unsigned long nr_huge_updates = 0;
        struct mmu_notifier_range range;

        range.start = 0;

        pmd = pmd_offset(pud, addr);
        do {
                unsigned long this_pages;

                next = pmd_addr_end(addr, end);

                /*
                 * Automatic NUMA balancing walks the tables with mmap_lock
                 * held for read. It's possible for a parallel update to occur
                 * between the pmd_trans_huge() and pmd_none_or_clear_bad()
                 * checks, leading to a false positive and clearing.
                 * Hence, it's necessary to atomically read the PMD value
                 * for all the checks.
                 */
                if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
                     pmd_none_or_clear_bad_unless_trans_huge(pmd))
                        goto next;

                /* invoke the mmu notifier if the pmd is populated */
                if (!range.start) {
                        mmu_notifier_range_init(&range,
                                MMU_NOTIFY_PROTECTION_VMA, 0,
                                vma, vma->vm_mm, addr, end);
                        mmu_notifier_invalidate_range_start(&range);
                }

                if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE) {
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
                        } else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
                                                              newprot, cp_flags);

                                if (nr_ptes) {
                                        if (nr_ptes == HPAGE_PMD_NR) {
                                                pages += HPAGE_PMD_NR;
                                                nr_huge_updates++;
                                        }

                                        /* huge pmd was handled */
                                        goto next;
                                }
                        }
                        /* fall through, the trans huge pmd just split */
                }
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
                                              cp_flags);
                pages += this_pages;
next:
                cond_resched();
        } while (pmd++, addr = next, addr != end);

        if (range.start)
                mmu_notifier_invalidate_range_end(&range);

        if (nr_huge_updates)
                count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
        return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
                p4d_t *p4d, unsigned long addr, unsigned long end,
                pgprot_t newprot, unsigned long cp_flags)
{
        pud_t *pud;
        unsigned long next;
        unsigned long pages = 0;

        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                pages += change_pmd_range(vma, pud, addr, next, newprot,
                                          cp_flags);
        } while (pud++, addr = next, addr != end);

        return pages;
}

static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
                pgd_t *pgd, unsigned long addr, unsigned long end,
                pgprot_t newprot, unsigned long cp_flags)
{
        p4d_t *p4d;
        unsigned long next;
        unsigned long pages = 0;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                pages += change_pud_range(vma, p4d, addr, next, newprot,
                                          cp_flags);
        } while (p4d++, addr = next, addr != end);

        return pages;
}

static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                unsigned long cp_flags)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
        unsigned long pages = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        inc_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pages += change_p4d_range(vma, pgd, addr, next, newprot,
                                          cp_flags);
        } while (pgd++, addr = next, addr != end);

        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
        dec_tlb_flush_pending(mm);

        return pages;
}

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end, pgprot_t newprot,
                       unsigned long cp_flags)
{
        unsigned long pages;

        BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

        if (is_vm_hugetlb_page(vma))
                pages = hugetlb_change_protection(vma, start, end, newprot);
        else
                pages = change_protection_range(vma, start, end, newprot,
                                                cp_flags);

        return pages;
}

static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
                               unsigned long next, struct mm_walk *walk)
{
        return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
                0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                   unsigned long addr, unsigned long next,
                                   struct mm_walk *walk)
{
        return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
                0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
                          struct mm_walk *walk)
{
        return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
        .pte_entry              = prot_none_pte_entry,
        .hugetlb_entry          = prot_none_hugetlb_entry,
        .test_walk              = prot_none_test,
};

int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * Do PROT_NONE PFN permission checks here when we can still
         * bail out without undoing a lot of state. This is a rather
         * uncommon case, so doesn't need to be very optimized.
         */
        if (arch_has_pfn_modify_check() &&
            (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
            (newflags & VM_ACCESS_FLAGS) == 0) {
                pgprot_t new_pgprot = vm_get_page_prot(newflags);

                error = walk_page_range(current->mm, start, end,
                                &prot_none_walk_ops, &new_pgprot);
                if (error)
                        return error;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again. hugetlb mappings were accounted for
         * even if read-only, so there is no need to account for them here.
         */
        if (newflags & VM_WRITE) {
                /* Check space limits when area turns into data. */
                if (!may_expand_vm(mm, newflags, nrpages) &&
                                may_expand_vm(mm, oldflags, nrpages))
                        return -ENOMEM;
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory_mm(mm, charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                           vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
                           vma->vm_userfaultfd_ctx);
        if (*pprev) {
                vma = *pprev;
                VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_lock
         * held in write mode.
         */
        vma->vm_flags = newflags;
        dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
        vma_set_page_prot(vma);

        change_protection(vma, start, end, vma->vm_page_prot,
                          dirty_accountable ? MM_CP_DIRTY_ACCT : 0);

        /*
         * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
         * fault on access.
         */
        if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
                        (newflags & VM_WRITE)) {
                populate_vma_page_range(vma, start, end, NULL);
        }

        vm_stat_account(mm, oldflags, -nrpages);
        vm_stat_account(mm, newflags, nrpages);
        perf_event_mmap(vma);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
                unsigned long prot, int pkey)
{
        unsigned long nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
        const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
                                (prot & PROT_READ);

        start = untagged_addr(start);

        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (!arch_validate_prot(prot, start))
                return -EINVAL;

        reqprot = prot;

        if (mmap_write_lock_killable(current->mm))
                return -EINTR;

        /*
         * If userspace did not allocate the pkey, do not let
         * them use it here.
         */
        error = -EINVAL;
        if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
                goto out;

        vma = find_vma(current->mm, start);
        error = -ENOMEM;
        if (!vma)
                goto out;
        prev = vma->vm_prev;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        } else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long mask_off_old_flags;
                unsigned long newflags;
                int new_vma_pkey;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                /* Does the application expect PROT_READ to imply PROT_EXEC */
                if (rier && (vma->vm_flags & VM_MAYEXEC))
                        prot |= PROT_EXEC;

                /*
                 * Each mprotect() call explicitly passes r/w/x permissions.
                 * If a permission is not passed to mprotect(), it must be
                 * cleared from the VMA.
                 */
                mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
                                        VM_FLAGS_CLEAR;

                new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
                newflags = calc_vm_prot_bits(prot, new_vma_pkey);
                newflags |= (vma->vm_flags & ~mask_off_old_flags);

                /* newflags >> 4 shift VM_MAY% in place of VM_% */
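                /*
                 * The VM_MAY* bits sit four bits above the corresponding
                 * VM_* bits (e.g. VM_WRITE is 0x2, VM_MAYWRITE 0x20), so
                 * shifting newflags right by 4 lines each VM_MAY* bit up
                 * under its VM_* bit.  Worked example: requesting PROT_WRITE
                 * on a mapping that lacks VM_MAYWRITE leaves VM_WRITE set in
                 * newflags & ~(newflags >> 4), and the check below fails
                 * with -EACCES.
                 */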
                if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
                        error = -EACCES;
                        goto out;
                }

                /* Allow architectures to sanity-check the new flags */
                if (!arch_validate_flags(newflags)) {
                        error = -EINVAL;
                        goto out;
                }

                error = security_file_mprotect(vma, reqprot, prot);
                if (error)
                        goto out;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;

                if (vma->vm_ops && vma->vm_ops->mprotect) {
                        error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
                        if (error)
                                goto out;
                }

                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;

                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
                prot = reqprot;
        }
out:
        mmap_write_unlock(current->mm);
        return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                unsigned long, prot)
{
        return do_mprotect_pkey(start, len, prot, -1);
}

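/*
 * Illustrative sketch (not part of the kernel build): how the mprotect()
 * syscall defined above is typically driven from a Linux/glibc userspace
 * program.  Kept under "#if 0" purely as documentation; names and error
 * handling are minimal.
 */
#if 0
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;

        /* Map one page read-write, fill it, then drop write permission. */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        memset(p, 'x', len);

        /* start must be page-aligned; len is rounded up to whole pages. */
        if (mprotect(p, len, PROT_READ) != 0)
                return 1;

        /* Any write through p now faults with SIGSEGV. */
        return 0;
}
#endif
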
#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
                unsigned long, prot, int, pkey)
{
        return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
        int pkey;
        int ret;

        /* No flags supported yet. */
        if (flags)
                return -EINVAL;
        /* check for unsupported init values */
        if (init_val & ~PKEY_ACCESS_MASK)
                return -EINVAL;

        mmap_write_lock(current->mm);
        pkey = mm_pkey_alloc(current->mm);

        ret = -ENOSPC;
        if (pkey == -1)
                goto out;

        ret = arch_set_user_pkey_access(current, pkey, init_val);
        if (ret) {
                mm_pkey_free(current->mm, pkey);
                goto out;
        }
        ret = pkey;
out:
        mmap_write_unlock(current->mm);
        return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
        int ret;

        mmap_write_lock(current->mm);
        ret = mm_pkey_free(current->mm, pkey);
        mmap_write_unlock(current->mm);

        /*
         * We could provide warnings or errors if any VMA still
         * has the pkey set here.
         */
        return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */
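
/*
 * Illustrative sketch (not part of the kernel build): allocating a
 * protection key with pkey_alloc(), attaching it to a mapping with
 * pkey_mprotect() and releasing it with pkey_free().  Kept under "#if 0"
 * purely as documentation; it assumes a glibc that provides the pkey_*()
 * wrappers and hardware with protection-key support (see pkeys(7)).
 */
#if 0
#define _GNU_SOURCE
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;
        int pkey;

        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* Ask the kernel for an unused key that initially denies writes. */
        pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
        if (pkey < 0)
                return 1;

        /*
         * Tag the mapping with the key.  The page protections stay
         * PROT_READ|PROT_WRITE, but writes through p fault until this
         * thread changes the key's access rights (e.g. with pkey_set()).
         */
        if (pkey_mprotect(p, len, PROT_READ | PROT_WRITE, pkey) != 0)
                return 1;

        pkey_free(pkey);
        return 0;
}
#endif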