linux/fs/exec.c
   1/*
   2 *  linux/fs/exec.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 */
   6
   7/*
   8 * #!-checking implemented by tytso.
   9 */
  10/*
  11 * Demand-loading implemented 01.12.91 - no need to read anything but
  12 * the header into memory. The inode of the executable is put into
  13 * "current->executable", and page faults do the actual loading. Clean.
  14 *
  15 * Once more I can proudly say that linux stood up to being changed: it
  16 * was less than 2 hours work to get demand-loading completely implemented.
  17 *
  18 * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
  19 * current->executable is only used by the procfs.  This allows a dispatch
  20 * table to check for several different types  of binary formats.  We keep
  21 * trying until we recognize the file or we run out of supported binary
  22 * formats. 
  23 */
  24
  25#include <linux/slab.h>
  26#include <linux/file.h>
  27#include <linux/fdtable.h>
  28#include <linux/mm.h>
  29#include <linux/stat.h>
  30#include <linux/fcntl.h>
  31#include <linux/swap.h>
  32#include <linux/string.h>
  33#include <linux/init.h>
  34#include <linux/pagemap.h>
  35#include <linux/perf_event.h>
  36#include <linux/highmem.h>
  37#include <linux/spinlock.h>
  38#include <linux/key.h>
  39#include <linux/personality.h>
  40#include <linux/binfmts.h>
  41#include <linux/utsname.h>
  42#include <linux/pid_namespace.h>
  43#include <linux/module.h>
  44#include <linux/namei.h>
  45#include <linux/mount.h>
  46#include <linux/security.h>
  47#include <linux/syscalls.h>
  48#include <linux/tsacct_kern.h>
  49#include <linux/cn_proc.h>
  50#include <linux/audit.h>
  51#include <linux/tracehook.h>
  52#include <linux/kmod.h>
  53#include <linux/fsnotify.h>
  54#include <linux/fs_struct.h>
  55#include <linux/pipe_fs_i.h>
  56#include <linux/oom.h>
  57#include <linux/compat.h>
  58
  59#include <asm/uaccess.h>
  60#include <asm/mmu_context.h>
  61#include <asm/tlb.h>
  62
  63#include <trace/events/task.h>
  64#include "internal.h"
  65#include "coredump.h"
  66
  67#include <trace/events/sched.h>
  68
  69int suid_dumpable = 0;
  70
  71static LIST_HEAD(formats);
  72static DEFINE_RWLOCK(binfmt_lock);
  73
  74void __register_binfmt(struct linux_binfmt * fmt, int insert)
  75{
  76        BUG_ON(!fmt);
  77        if (WARN_ON(!fmt->load_binary))
  78                return;
  79        write_lock(&binfmt_lock);
  80        insert ? list_add(&fmt->lh, &formats) :
  81                 list_add_tail(&fmt->lh, &formats);
  82        write_unlock(&binfmt_lock);
  83}
  84
  85EXPORT_SYMBOL(__register_binfmt);
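
/*
 * Illustrative usage (not part of this file): a binary-format handler
 * typically registers itself from its module init code through the
 * wrappers in <linux/binfmts.h>.  The names below are made up.
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = load_example_binary,
 *	};
 *
 *	register_binfmt(&example_format);	(append to the list)
 *	insert_binfmt(&example_format);		(or: try this format first)
 */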
  86
  87void unregister_binfmt(struct linux_binfmt * fmt)
  88{
  89        write_lock(&binfmt_lock);
  90        list_del(&fmt->lh);
  91        write_unlock(&binfmt_lock);
  92}
  93
  94EXPORT_SYMBOL(unregister_binfmt);
  95
  96static inline void put_binfmt(struct linux_binfmt * fmt)
  97{
  98        module_put(fmt->module);
  99}
 100
 101/*
 102 * Note that a shared library must be both readable and executable due to
 103 * security reasons.
 104 *
  105 * Also note that the address to load at is taken from the file itself.
 106 */
 107SYSCALL_DEFINE1(uselib, const char __user *, library)
 108{
 109        struct file *file;
 110        struct filename *tmp = getname(library);
 111        int error = PTR_ERR(tmp);
 112        static const struct open_flags uselib_flags = {
 113                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
 114                .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
 115                .intent = LOOKUP_OPEN,
 116                .lookup_flags = LOOKUP_FOLLOW,
 117        };
 118
 119        if (IS_ERR(tmp))
 120                goto out;
 121
 122        file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
 123        putname(tmp);
 124        error = PTR_ERR(file);
 125        if (IS_ERR(file))
 126                goto out;
 127
 128        error = -EINVAL;
 129        if (!S_ISREG(file_inode(file)->i_mode))
 130                goto exit;
 131
 132        error = -EACCES;
 133        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
 134                goto exit;
 135
 136        fsnotify_open(file);
 137
 138        error = -ENOEXEC;
  139        if (file->f_op) {
 140                struct linux_binfmt * fmt;
 141
 142                read_lock(&binfmt_lock);
 143                list_for_each_entry(fmt, &formats, lh) {
 144                        if (!fmt->load_shlib)
 145                                continue;
 146                        if (!try_module_get(fmt->module))
 147                                continue;
 148                        read_unlock(&binfmt_lock);
 149                        error = fmt->load_shlib(file);
 150                        read_lock(&binfmt_lock);
 151                        put_binfmt(fmt);
 152                        if (error != -ENOEXEC)
 153                                break;
 154                }
 155                read_unlock(&binfmt_lock);
 156        }
 157exit:
 158        fput(file);
 159out:
 160        return error;
 161}
 162
 163#ifdef CONFIG_MMU
 164/*
 165 * The nascent bprm->mm is not visible until exec_mmap() but it can
  166 * use a lot of memory, so account these pages in current->mm temporarily
 167 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 168 * change the counter back via acct_arg_size(0).
 169 */
 170static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
 171{
 172        struct mm_struct *mm = current->mm;
 173        long diff = (long)(pages - bprm->vma_pages);
 174
 175        if (!mm || !diff)
 176                return;
 177
 178        bprm->vma_pages = pages;
 179        add_mm_counter(mm, MM_ANONPAGES, diff);
 180}
 181
 182static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 183                int write)
 184{
 185        struct page *page;
 186        int ret;
 187
 188#ifdef CONFIG_STACK_GROWSUP
 189        if (write) {
 190                ret = expand_downwards(bprm->vma, pos);
 191                if (ret < 0)
 192                        return NULL;
 193        }
 194#endif
 195        ret = get_user_pages(current, bprm->mm, pos,
 196                        1, write, 1, &page, NULL);
 197        if (ret <= 0)
 198                return NULL;
 199
 200        if (write) {
 201                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
 202                struct rlimit *rlim;
 203
 204                acct_arg_size(bprm, size / PAGE_SIZE);
 205
 206                /*
 207                 * We've historically supported up to 32 pages (ARG_MAX)
 208                 * of argument strings even with small stacks
 209                 */
 210                if (size <= ARG_MAX)
 211                        return page;
 212
 213                /*
 214                 * Limit to 1/4-th the stack size for the argv+env strings.
 215                 * This ensures that:
 216                 *  - the remaining binfmt code will not run out of stack space,
 217                 *  - the program will have a reasonable amount of stack left
 218                 *    to work from.
 219                 */
 220                rlim = current->signal->rlim;
 221                if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
 222                        put_page(page);
 223                        return NULL;
 224                }
 225        }
 226
 227        return page;
 228}
 229
 230static void put_arg_page(struct page *page)
 231{
 232        put_page(page);
 233}
 234
 235static void free_arg_page(struct linux_binprm *bprm, int i)
 236{
 237}
 238
 239static void free_arg_pages(struct linux_binprm *bprm)
 240{
 241}
 242
 243static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
 244                struct page *page)
 245{
 246        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
 247}
 248
 249static int __bprm_mm_init(struct linux_binprm *bprm)
 250{
 251        int err;
 252        struct vm_area_struct *vma = NULL;
 253        struct mm_struct *mm = bprm->mm;
 254
 255        bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 256        if (!vma)
 257                return -ENOMEM;
 258
 259        down_write(&mm->mmap_sem);
 260        vma->vm_mm = mm;
 261
 262        /*
 263         * Place the stack at the largest stack address the architecture
 264         * supports. Later, we'll move this to an appropriate place. We don't
 265         * use STACK_TOP because that can depend on attributes which aren't
 266         * configured yet.
 267         */
 268        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
 269        vma->vm_end = STACK_TOP_MAX;
 270        vma->vm_start = vma->vm_end - PAGE_SIZE;
 271        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
 272        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 273        INIT_LIST_HEAD(&vma->anon_vma_chain);
 274
 275        err = insert_vm_struct(mm, vma);
 276        if (err)
 277                goto err;
 278
 279        mm->stack_vm = mm->total_vm = 1;
 280        up_write(&mm->mmap_sem);
 281        bprm->p = vma->vm_end - sizeof(void *);
 282        return 0;
 283err:
 284        up_write(&mm->mmap_sem);
 285        bprm->vma = NULL;
 286        kmem_cache_free(vm_area_cachep, vma);
 287        return err;
 288}
 289
 290static bool valid_arg_len(struct linux_binprm *bprm, long len)
 291{
 292        return len <= MAX_ARG_STRLEN;
 293}
 294
 295#else
 296
 297static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
 298{
 299}
 300
 301static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 302                int write)
 303{
 304        struct page *page;
 305
 306        page = bprm->page[pos / PAGE_SIZE];
 307        if (!page && write) {
 308                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
 309                if (!page)
 310                        return NULL;
 311                bprm->page[pos / PAGE_SIZE] = page;
 312        }
 313
 314        return page;
 315}
 316
 317static void put_arg_page(struct page *page)
 318{
 319}
 320
 321static void free_arg_page(struct linux_binprm *bprm, int i)
 322{
 323        if (bprm->page[i]) {
 324                __free_page(bprm->page[i]);
 325                bprm->page[i] = NULL;
 326        }
 327}
 328
 329static void free_arg_pages(struct linux_binprm *bprm)
 330{
 331        int i;
 332
 333        for (i = 0; i < MAX_ARG_PAGES; i++)
 334                free_arg_page(bprm, i);
 335}
 336
 337static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
 338                struct page *page)
 339{
 340}
 341
 342static int __bprm_mm_init(struct linux_binprm *bprm)
 343{
 344        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
 345        return 0;
 346}
 347
 348static bool valid_arg_len(struct linux_binprm *bprm, long len)
 349{
 350        return len <= bprm->p;
 351}
 352
 353#endif /* CONFIG_MMU */
 354
 355/*
 356 * Create a new mm_struct and populate it with a temporary stack
 357 * vm_area_struct.  We don't have enough context at this point to set the stack
 358 * flags, permissions, and offset, so we use temporary values.  We'll update
 359 * them later in setup_arg_pages().
 360 */
 361static int bprm_mm_init(struct linux_binprm *bprm)
 362{
 363        int err;
 364        struct mm_struct *mm = NULL;
 365
 366        bprm->mm = mm = mm_alloc();
 367        err = -ENOMEM;
 368        if (!mm)
 369                goto err;
 370
 371        err = init_new_context(current, mm);
 372        if (err)
 373                goto err;
 374
 375        err = __bprm_mm_init(bprm);
 376        if (err)
 377                goto err;
 378
 379        return 0;
 380
 381err:
 382        if (mm) {
 383                bprm->mm = NULL;
 384                mmdrop(mm);
 385        }
 386
 387        return err;
 388}
 389
 390struct user_arg_ptr {
 391#ifdef CONFIG_COMPAT
 392        bool is_compat;
 393#endif
 394        union {
 395                const char __user *const __user *native;
 396#ifdef CONFIG_COMPAT
 397                const compat_uptr_t __user *compat;
 398#endif
 399        } ptr;
 400};
 401
 402static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
 403{
 404        const char __user *native;
 405
 406#ifdef CONFIG_COMPAT
 407        if (unlikely(argv.is_compat)) {
 408                compat_uptr_t compat;
 409
 410                if (get_user(compat, argv.ptr.compat + nr))
 411                        return ERR_PTR(-EFAULT);
 412
 413                return compat_ptr(compat);
 414        }
 415#endif
 416
 417        if (get_user(native, argv.ptr.native + nr))
 418                return ERR_PTR(-EFAULT);
 419
 420        return native;
 421}
 422
 423/*
 424 * count() counts the number of strings in array ARGV.
 425 */
 426static int count(struct user_arg_ptr argv, int max)
 427{
 428        int i = 0;
 429
 430        if (argv.ptr.native != NULL) {
 431                for (;;) {
 432                        const char __user *p = get_user_arg_ptr(argv, i);
 433
 434                        if (!p)
 435                                break;
 436
 437                        if (IS_ERR(p))
 438                                return -EFAULT;
 439
 440                        if (i >= max)
 441                                return -E2BIG;
 442                        ++i;
 443
 444                        if (fatal_signal_pending(current))
 445                                return -ERESTARTNOHAND;
 446                        cond_resched();
 447                }
 448        }
 449        return i;
 450}
 451
 452/*
 453 * 'copy_strings()' copies argument/environment strings from the old
  454 * process's memory to the new process's stack.  The call to get_user_pages()
 455 * ensures the destination page is created and not swapped out.
 456 */
 457static int copy_strings(int argc, struct user_arg_ptr argv,
 458                        struct linux_binprm *bprm)
 459{
 460        struct page *kmapped_page = NULL;
 461        char *kaddr = NULL;
 462        unsigned long kpos = 0;
 463        int ret;
 464
 465        while (argc-- > 0) {
 466                const char __user *str;
 467                int len;
 468                unsigned long pos;
 469
 470                ret = -EFAULT;
 471                str = get_user_arg_ptr(argv, argc);
 472                if (IS_ERR(str))
 473                        goto out;
 474
 475                len = strnlen_user(str, MAX_ARG_STRLEN);
 476                if (!len)
 477                        goto out;
 478
 479                ret = -E2BIG;
 480                if (!valid_arg_len(bprm, len))
 481                        goto out;
 482
  483                /* We're going to work our way backwards. */
 484                pos = bprm->p;
 485                str += len;
 486                bprm->p -= len;
 487
 488                while (len > 0) {
 489                        int offset, bytes_to_copy;
 490
 491                        if (fatal_signal_pending(current)) {
 492                                ret = -ERESTARTNOHAND;
 493                                goto out;
 494                        }
 495                        cond_resched();
 496
 497                        offset = pos % PAGE_SIZE;
 498                        if (offset == 0)
 499                                offset = PAGE_SIZE;
 500
 501                        bytes_to_copy = offset;
 502                        if (bytes_to_copy > len)
 503                                bytes_to_copy = len;
 504
 505                        offset -= bytes_to_copy;
 506                        pos -= bytes_to_copy;
 507                        str -= bytes_to_copy;
 508                        len -= bytes_to_copy;
 509
 510                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
 511                                struct page *page;
 512
 513                                page = get_arg_page(bprm, pos, 1);
 514                                if (!page) {
 515                                        ret = -E2BIG;
 516                                        goto out;
 517                                }
 518
 519                                if (kmapped_page) {
 520                                        flush_kernel_dcache_page(kmapped_page);
 521                                        kunmap(kmapped_page);
 522                                        put_arg_page(kmapped_page);
 523                                }
 524                                kmapped_page = page;
 525                                kaddr = kmap(kmapped_page);
 526                                kpos = pos & PAGE_MASK;
 527                                flush_arg_page(bprm, kpos, kmapped_page);
 528                        }
 529                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
 530                                ret = -EFAULT;
 531                                goto out;
 532                        }
 533                }
 534        }
 535        ret = 0;
 536out:
 537        if (kmapped_page) {
 538                flush_kernel_dcache_page(kmapped_page);
 539                kunmap(kmapped_page);
 540                put_arg_page(kmapped_page);
 541        }
 542        return ret;
 543}
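
/*
 * Rough sketch (descriptive note, addresses illustrative): strings are
 * packed downwards from the initial bprm->p, last array element first, so
 * after do_execve_common() has copied the filename, envp and argv the top
 * of the new stack looks roughly like:
 *
 *	STACK_TOP_MAX - sizeof(void *)
 *		"<filename>\0"                  <- bprm->exec points here
 *		envp[envc-1] ... envp[0]
 *		argv[argc-1] ... argv[0]        <- bprm->p points at argv[0]
 *
 * The argv/envp pointer arrays themselves are laid out later by the
 * binfmt loader (for ELF, create_elf_tables() in fs/binfmt_elf.c).
 */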
 544
 545/*
  546 * Like copy_strings, but gets argv and its values from kernel memory.
 547 */
 548int copy_strings_kernel(int argc, const char *const *__argv,
 549                        struct linux_binprm *bprm)
 550{
 551        int r;
 552        mm_segment_t oldfs = get_fs();
 553        struct user_arg_ptr argv = {
 554                .ptr.native = (const char __user *const  __user *)__argv,
 555        };
 556
 557        set_fs(KERNEL_DS);
 558        r = copy_strings(argc, argv, bprm);
 559        set_fs(oldfs);
 560
 561        return r;
 562}
 563EXPORT_SYMBOL(copy_strings_kernel);
 564
 565#ifdef CONFIG_MMU
 566
 567/*
 568 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 569 * the binfmt code determines where the new stack should reside, we shift it to
 570 * its final location.  The process proceeds as follows:
 571 *
 572 * 1) Use shift to calculate the new vma endpoints.
 573 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 574 *    arguments passed to subsequent functions are consistent.
 575 * 3) Move vma's page tables to the new range.
 576 * 4) Free up any cleared pgd range.
 577 * 5) Shrink the vma to cover only the new range.
 578 */
 579static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 580{
 581        struct mm_struct *mm = vma->vm_mm;
 582        unsigned long old_start = vma->vm_start;
 583        unsigned long old_end = vma->vm_end;
 584        unsigned long length = old_end - old_start;
 585        unsigned long new_start = old_start - shift;
 586        unsigned long new_end = old_end - shift;
 587        struct mmu_gather tlb;
 588
 589        BUG_ON(new_start > new_end);
 590
 591        /*
 592         * ensure there are no vmas between where we want to go
 593         * and where we are
 594         */
 595        if (vma != find_vma(mm, new_start))
 596                return -EFAULT;
 597
 598        /*
 599         * cover the whole range: [new_start, old_end)
 600         */
 601        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
 602                return -ENOMEM;
 603
 604        /*
 605         * move the page tables downwards, on failure we rely on
 606         * process cleanup to remove whatever mess we made.
 607         */
 608        if (length != move_page_tables(vma, old_start,
 609                                       vma, new_start, length, false))
 610                return -ENOMEM;
 611
 612        lru_add_drain();
 613        tlb_gather_mmu(&tlb, mm, old_start, old_end);
 614        if (new_end > old_start) {
 615                /*
  616                 * when the old and new regions overlap, clear from new_end.
 617                 */
 618                free_pgd_range(&tlb, new_end, old_end, new_end,
 619                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 620        } else {
 621                /*
 622                 * otherwise, clean from old_start; this is done to not touch
  623                 * the address space in [new_end, old_start); some architectures
 624                 * have constraints on va-space that make this illegal (IA64) -
  625                 * for the others it's just a little faster.
 626                 */
 627                free_pgd_range(&tlb, old_start, old_end, new_end,
 628                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 629        }
 630        tlb_finish_mmu(&tlb, old_start, old_end);
 631
 632        /*
 633         * Shrink the vma to just the new range.  Always succeeds.
 634         */
 635        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
 636
 637        return 0;
 638}
 639
 640/*
 641 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 642 * the stack is optionally relocated, and some extra space is added.
 643 */
 644int setup_arg_pages(struct linux_binprm *bprm,
 645                    unsigned long stack_top,
 646                    int executable_stack)
 647{
 648        unsigned long ret;
 649        unsigned long stack_shift;
 650        struct mm_struct *mm = current->mm;
 651        struct vm_area_struct *vma = bprm->vma;
 652        struct vm_area_struct *prev = NULL;
 653        unsigned long vm_flags;
 654        unsigned long stack_base;
 655        unsigned long stack_size;
 656        unsigned long stack_expand;
 657        unsigned long rlim_stack;
 658
 659#ifdef CONFIG_STACK_GROWSUP
 660        /* Limit stack size to 1GB */
 661        stack_base = rlimit_max(RLIMIT_STACK);
 662        if (stack_base > (1 << 30))
 663                stack_base = 1 << 30;
 664
 665        /* Make sure we didn't let the argument array grow too large. */
 666        if (vma->vm_end - vma->vm_start > stack_base)
 667                return -ENOMEM;
 668
 669        stack_base = PAGE_ALIGN(stack_top - stack_base);
 670
 671        stack_shift = vma->vm_start - stack_base;
 672        mm->arg_start = bprm->p - stack_shift;
 673        bprm->p = vma->vm_end - stack_shift;
 674#else
 675        stack_top = arch_align_stack(stack_top);
 676        stack_top = PAGE_ALIGN(stack_top);
 677
 678        if (unlikely(stack_top < mmap_min_addr) ||
 679            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
 680                return -ENOMEM;
 681
 682        stack_shift = vma->vm_end - stack_top;
 683
 684        bprm->p -= stack_shift;
 685        mm->arg_start = bprm->p;
 686#endif
 687
 688        if (bprm->loader)
 689                bprm->loader -= stack_shift;
 690        bprm->exec -= stack_shift;
 691
 692        down_write(&mm->mmap_sem);
 693        vm_flags = VM_STACK_FLAGS;
 694
 695        /*
 696         * Adjust stack execute permissions; explicitly enable for
 697         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
 698         * (arch default) otherwise.
 699         */
 700        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
 701                vm_flags |= VM_EXEC;
 702        else if (executable_stack == EXSTACK_DISABLE_X)
 703                vm_flags &= ~VM_EXEC;
 704        vm_flags |= mm->def_flags;
 705        vm_flags |= VM_STACK_INCOMPLETE_SETUP;
 706
 707        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
 708                        vm_flags);
 709        if (ret)
 710                goto out_unlock;
 711        BUG_ON(prev != vma);
 712
 713        /* Move stack pages down in memory. */
 714        if (stack_shift) {
 715                ret = shift_arg_pages(vma, stack_shift);
 716                if (ret)
 717                        goto out_unlock;
 718        }
 719
 720        /* mprotect_fixup is overkill to remove the temporary stack flags */
 721        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
 722
 723        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
 724        stack_size = vma->vm_end - vma->vm_start;
 725        /*
 726         * Align this down to a page boundary as expand_stack
 727         * will align it up.
 728         */
 729        rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
 730#ifdef CONFIG_STACK_GROWSUP
 731        if (stack_size + stack_expand > rlim_stack)
 732                stack_base = vma->vm_start + rlim_stack;
 733        else
 734                stack_base = vma->vm_end + stack_expand;
 735#else
 736        if (stack_size + stack_expand > rlim_stack)
 737                stack_base = vma->vm_end - rlim_stack;
 738        else
 739                stack_base = vma->vm_start - stack_expand;
 740#endif
 741        current->mm->start_stack = bprm->p;
 742        ret = expand_stack(vma, stack_base);
 743        if (ret)
 744                ret = -EFAULT;
 745
 746out_unlock:
 747        up_write(&mm->mmap_sem);
 748        return ret;
 749}
 750EXPORT_SYMBOL(setup_arg_pages);
 751
 752#endif /* CONFIG_MMU */
 753
 754struct file *open_exec(const char *name)
 755{
 756        struct file *file;
 757        int err;
 758        struct filename tmp = { .name = name };
 759        static const struct open_flags open_exec_flags = {
 760                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
 761                .acc_mode = MAY_EXEC | MAY_OPEN,
 762                .intent = LOOKUP_OPEN,
 763                .lookup_flags = LOOKUP_FOLLOW,
 764        };
 765
 766        file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags);
 767        if (IS_ERR(file))
 768                goto out;
 769
 770        err = -EACCES;
 771        if (!S_ISREG(file_inode(file)->i_mode))
 772                goto exit;
 773
 774        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
 775                goto exit;
 776
 777        fsnotify_open(file);
 778
 779        err = deny_write_access(file);
 780        if (err)
 781                goto exit;
 782
 783out:
 784        return file;
 785
 786exit:
 787        fput(file);
 788        return ERR_PTR(err);
 789}
 790EXPORT_SYMBOL(open_exec);
 791
 792int kernel_read(struct file *file, loff_t offset,
 793                char *addr, unsigned long count)
 794{
 795        mm_segment_t old_fs;
 796        loff_t pos = offset;
 797        int result;
 798
 799        old_fs = get_fs();
 800        set_fs(get_ds());
 801        /* The cast to a user pointer is valid due to the set_fs() */
 802        result = vfs_read(file, (void __user *)addr, count, &pos);
 803        set_fs(old_fs);
 804        return result;
 805}
 806
 807EXPORT_SYMBOL(kernel_read);
 808
 809ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
 810{
 811        ssize_t res = file->f_op->read(file, (void __user *)addr, len, &pos);
 812        if (res > 0)
 813                flush_icache_range(addr, addr + len);
 814        return res;
 815}
 816EXPORT_SYMBOL(read_code);
 817
 818static int exec_mmap(struct mm_struct *mm)
 819{
 820        struct task_struct *tsk;
 821        struct mm_struct * old_mm, *active_mm;
 822
 823        /* Notify parent that we're no longer interested in the old VM */
 824        tsk = current;
 825        old_mm = current->mm;
 826        mm_release(tsk, old_mm);
 827
 828        if (old_mm) {
 829                sync_mm_rss(old_mm);
 830                /*
 831                 * Make sure that if there is a core dump in progress
 832                 * for the old mm, we get out and die instead of going
 833                 * through with the exec.  We must hold mmap_sem around
 834                 * checking core_state and changing tsk->mm.
 835                 */
 836                down_read(&old_mm->mmap_sem);
 837                if (unlikely(old_mm->core_state)) {
 838                        up_read(&old_mm->mmap_sem);
 839                        return -EINTR;
 840                }
 841        }
 842        task_lock(tsk);
 843        active_mm = tsk->active_mm;
 844        tsk->mm = mm;
 845        tsk->active_mm = mm;
 846        activate_mm(active_mm, mm);
 847        task_unlock(tsk);
 848        arch_pick_mmap_layout(mm);
 849        if (old_mm) {
 850                up_read(&old_mm->mmap_sem);
 851                BUG_ON(active_mm != old_mm);
 852                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
 853                mm_update_next_owner(old_mm);
 854                mmput(old_mm);
 855                return 0;
 856        }
 857        mmdrop(active_mm);
 858        return 0;
 859}
 860
 861/*
 862 * This function makes sure the current process has its own signal table,
 863 * so that flush_signal_handlers can later reset the handlers without
 864 * disturbing other processes.  (Other processes might share the signal
 865 * table via the CLONE_SIGHAND option to clone().)
 866 */
 867static int de_thread(struct task_struct *tsk)
 868{
 869        struct signal_struct *sig = tsk->signal;
 870        struct sighand_struct *oldsighand = tsk->sighand;
 871        spinlock_t *lock = &oldsighand->siglock;
 872
 873        if (thread_group_empty(tsk))
 874                goto no_thread_group;
 875
 876        /*
 877         * Kill all other threads in the thread group.
 878         */
 879        spin_lock_irq(lock);
 880        if (signal_group_exit(sig)) {
 881                /*
 882                 * Another group action in progress, just
 883                 * return so that the signal is processed.
 884                 */
 885                spin_unlock_irq(lock);
 886                return -EAGAIN;
 887        }
 888
 889        sig->group_exit_task = tsk;
 890        sig->notify_count = zap_other_threads(tsk);
 891        if (!thread_group_leader(tsk))
 892                sig->notify_count--;
 893
 894        while (sig->notify_count) {
 895                __set_current_state(TASK_KILLABLE);
 896                spin_unlock_irq(lock);
 897                schedule();
 898                if (unlikely(__fatal_signal_pending(tsk)))
 899                        goto killed;
 900                spin_lock_irq(lock);
 901        }
 902        spin_unlock_irq(lock);
 903
 904        /*
 905         * At this point all other threads have exited, all we have to
 906         * do is to wait for the thread group leader to become inactive,
 907         * and to assume its PID:
 908         */
 909        if (!thread_group_leader(tsk)) {
 910                struct task_struct *leader = tsk->group_leader;
 911
 912                sig->notify_count = -1; /* for exit_notify() */
 913                for (;;) {
 914                        threadgroup_change_begin(tsk);
 915                        write_lock_irq(&tasklist_lock);
 916                        if (likely(leader->exit_state))
 917                                break;
 918                        __set_current_state(TASK_KILLABLE);
 919                        write_unlock_irq(&tasklist_lock);
 920                        threadgroup_change_end(tsk);
 921                        schedule();
 922                        if (unlikely(__fatal_signal_pending(tsk)))
 923                                goto killed;
 924                }
 925
 926                /*
 927                 * The only record we have of the real-time age of a
 928                 * process, regardless of execs it's done, is start_time.
 929                 * All the past CPU time is accumulated in signal_struct
 930                 * from sister threads now dead.  But in this non-leader
 931                 * exec, nothing survives from the original leader thread,
 932                 * whose birth marks the true age of this process now.
 933                 * When we take on its identity by switching to its PID, we
 934                 * also take its birthdate (always earlier than our own).
 935                 */
 936                tsk->start_time = leader->start_time;
 937                tsk->real_start_time = leader->real_start_time;
 938
 939                BUG_ON(!same_thread_group(leader, tsk));
 940                BUG_ON(has_group_leader_pid(tsk));
 941                /*
 942                 * An exec() starts a new thread group with the
 943                 * TGID of the previous thread group. Rehash the
 944                 * two threads with a switched PID, and release
 945                 * the former thread group leader:
 946                 */
 947
 948                /* Become a process group leader with the old leader's pid.
  949                 * The old leader becomes a thread of this thread group.
 950                 * Note: The old leader also uses this pid until release_task
 951                 *       is called.  Odd but simple and correct.
 952                 */
 953                tsk->pid = leader->pid;
 954                change_pid(tsk, PIDTYPE_PID, task_pid(leader));
 955                transfer_pid(leader, tsk, PIDTYPE_PGID);
 956                transfer_pid(leader, tsk, PIDTYPE_SID);
 957
 958                list_replace_rcu(&leader->tasks, &tsk->tasks);
 959                list_replace_init(&leader->sibling, &tsk->sibling);
 960
 961                tsk->group_leader = tsk;
 962                leader->group_leader = tsk;
 963
 964                tsk->exit_signal = SIGCHLD;
 965                leader->exit_signal = -1;
 966
 967                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
 968                leader->exit_state = EXIT_DEAD;
 969
 970                /*
  971                 * We are going to release_task()->ptrace_unlink() silently;
 972                 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
  973                 * the tracer won't block again waiting for this thread.
 974                 */
 975                if (unlikely(leader->ptrace))
 976                        __wake_up_parent(leader, leader->parent);
 977                write_unlock_irq(&tasklist_lock);
 978                threadgroup_change_end(tsk);
 979
 980                release_task(leader);
 981        }
 982
 983        sig->group_exit_task = NULL;
 984        sig->notify_count = 0;
 985
 986no_thread_group:
 987        /* we have changed execution domain */
 988        tsk->exit_signal = SIGCHLD;
 989
 990        exit_itimers(sig);
 991        flush_itimer_signals();
 992
 993        if (atomic_read(&oldsighand->count) != 1) {
 994                struct sighand_struct *newsighand;
 995                /*
  996                 * This ->sighand is shared with a CLONE_SIGHAND but not
  997                 * CLONE_THREAD task; switch to a new one.
 998                 */
 999                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1000                if (!newsighand)
1001                        return -ENOMEM;
1002
1003                atomic_set(&newsighand->count, 1);
1004                memcpy(newsighand->action, oldsighand->action,
1005                       sizeof(newsighand->action));
1006
1007                write_lock_irq(&tasklist_lock);
1008                spin_lock(&oldsighand->siglock);
1009                rcu_assign_pointer(tsk->sighand, newsighand);
1010                spin_unlock(&oldsighand->siglock);
1011                write_unlock_irq(&tasklist_lock);
1012
1013                __cleanup_sighand(oldsighand);
1014        }
1015
1016        BUG_ON(!thread_group_leader(tsk));
1017        return 0;
1018
1019killed:
1020        /* protects against exit_notify() and __exit_signal() */
1021        read_lock(&tasklist_lock);
1022        sig->group_exit_task = NULL;
1023        sig->notify_count = 0;
1024        read_unlock(&tasklist_lock);
1025        return -EAGAIN;
1026}
1027
1028char *get_task_comm(char *buf, struct task_struct *tsk)
1029{
1030        /* buf must be at least sizeof(tsk->comm) in size */
1031        task_lock(tsk);
1032        strncpy(buf, tsk->comm, sizeof(tsk->comm));
1033        task_unlock(tsk);
1034        return buf;
1035}
1036EXPORT_SYMBOL_GPL(get_task_comm);
1037
1038/*
 1039 * These functions flush out all traces of the currently running executable
1040 * so that a new one can be started
1041 */
1042
1043void set_task_comm(struct task_struct *tsk, char *buf)
1044{
1045        task_lock(tsk);
1046        trace_task_rename(tsk, buf);
1047        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1048        task_unlock(tsk);
1049        perf_event_comm(tsk);
1050}
1051
1052static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
1053{
1054        int i, ch;
1055
 1056        /* Copies the binary name from after the last slash */
1057        for (i = 0; (ch = *(fn++)) != '\0';) {
1058                if (ch == '/')
1059                        i = 0; /* overwrite what we wrote */
1060                else
1061                        if (i < len - 1)
1062                                tcomm[i++] = ch;
1063        }
1064        tcomm[i] = '\0';
1065}
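
/*
 * Worked example (with len == sizeof(tsk->comm) == 16):
 *
 *	filename_to_taskname(tcomm, "/usr/bin/very_long_program_name", 16);
 *
 * keeps only the component after the last '/' and truncates it to 15
 * characters plus the terminating NUL, leaving tcomm == "very_long_progr".
 */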
1066
1067int flush_old_exec(struct linux_binprm * bprm)
1068{
1069        int retval;
1070
1071        /*
1072         * Make sure we have a private signal table and that
1073         * we are unassociated from the previous thread group.
1074         */
1075        retval = de_thread(current);
1076        if (retval)
1077                goto out;
1078
1079        set_mm_exe_file(bprm->mm, bprm->file);
1080
1081        filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
1082        /*
1083         * Release all of the old mmap stuff
1084         */
1085        acct_arg_size(bprm, 0);
1086        retval = exec_mmap(bprm->mm);
1087        if (retval)
1088                goto out;
1089
1090        bprm->mm = NULL;                /* We're using it now */
1091
1092        set_fs(USER_DS);
1093        current->flags &=
1094                ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE);
1095        flush_thread();
1096        current->personality &= ~bprm->per_clear;
1097
1098        return 0;
1099
1100out:
1101        return retval;
1102}
1103EXPORT_SYMBOL(flush_old_exec);
1104
1105void would_dump(struct linux_binprm *bprm, struct file *file)
1106{
1107        if (inode_permission(file_inode(file), MAY_READ) < 0)
1108                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1109}
1110EXPORT_SYMBOL(would_dump);
1111
1112void setup_new_exec(struct linux_binprm * bprm)
1113{
1114        arch_pick_mmap_layout(current->mm);
1115
1116        /* This is the point of no return */
1117        current->sas_ss_sp = current->sas_ss_size = 0;
1118
1119        if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
1120                set_dumpable(current->mm, SUID_DUMP_USER);
1121        else
1122                set_dumpable(current->mm, suid_dumpable);
1123
1124        set_task_comm(current, bprm->tcomm);
1125
1126        /* Set the new mm task size. We have to do that late because it may
1127         * depend on TIF_32BIT which is only updated in flush_thread() on
1128         * some architectures like powerpc
1129         */
1130        current->mm->task_size = TASK_SIZE;
1131
1132        /* install the new credentials */
1133        if (!uid_eq(bprm->cred->uid, current_euid()) ||
1134            !gid_eq(bprm->cred->gid, current_egid())) {
1135                current->pdeath_signal = 0;
1136        } else {
1137                would_dump(bprm, bprm->file);
1138                if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
1139                        set_dumpable(current->mm, suid_dumpable);
1140        }
1141
1142        /* An exec changes our domain. We are no longer part of the thread
1143           group */
1144
1145        current->self_exec_id++;
1146                        
1147        flush_signal_handlers(current, 0);
1148        do_close_on_exec(current->files);
1149}
1150EXPORT_SYMBOL(setup_new_exec);
1151
1152/*
1153 * Prepare credentials and lock ->cred_guard_mutex.
1154 * install_exec_creds() commits the new creds and drops the lock.
 1155 * Or, if exec fails before, free_bprm() should release ->cred
 1156 * and unlock.
1157 */
1158int prepare_bprm_creds(struct linux_binprm *bprm)
1159{
1160        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1161                return -ERESTARTNOINTR;
1162
1163        bprm->cred = prepare_exec_creds();
1164        if (likely(bprm->cred))
1165                return 0;
1166
1167        mutex_unlock(&current->signal->cred_guard_mutex);
1168        return -ENOMEM;
1169}
1170
1171void free_bprm(struct linux_binprm *bprm)
1172{
1173        free_arg_pages(bprm);
1174        if (bprm->cred) {
1175                mutex_unlock(&current->signal->cred_guard_mutex);
1176                abort_creds(bprm->cred);
1177        }
1178        /* If a binfmt changed the interp, free it. */
1179        if (bprm->interp != bprm->filename)
1180                kfree(bprm->interp);
1181        kfree(bprm);
1182}
1183
1184int bprm_change_interp(char *interp, struct linux_binprm *bprm)
1185{
1186        /* If a binfmt changed the interp, free it first. */
1187        if (bprm->interp != bprm->filename)
1188                kfree(bprm->interp);
1189        bprm->interp = kstrdup(interp, GFP_KERNEL);
1190        if (!bprm->interp)
1191                return -ENOMEM;
1192        return 0;
1193}
1194EXPORT_SYMBOL(bprm_change_interp);
1195
1196/*
1197 * install the new credentials for this executable
1198 */
1199void install_exec_creds(struct linux_binprm *bprm)
1200{
1201        security_bprm_committing_creds(bprm);
1202
1203        commit_creds(bprm->cred);
1204        bprm->cred = NULL;
1205
1206        /*
1207         * Disable monitoring for regular users
1208         * when executing setuid binaries. Must
1209         * wait until new credentials are committed
1210         * by commit_creds() above
1211         */
1212        if (get_dumpable(current->mm) != SUID_DUMP_USER)
1213                perf_event_exit_task(current);
1214        /*
1215         * cred_guard_mutex must be held at least to this point to prevent
1216         * ptrace_attach() from altering our determination of the task's
1217         * credentials; any time after this it may be unlocked.
1218         */
1219        security_bprm_committed_creds(bprm);
1220        mutex_unlock(&current->signal->cred_guard_mutex);
1221}
1222EXPORT_SYMBOL(install_exec_creds);
1223
1224/*
1225 * determine how safe it is to execute the proposed program
1226 * - the caller must hold ->cred_guard_mutex to protect against
1227 *   PTRACE_ATTACH
1228 */
1229static int check_unsafe_exec(struct linux_binprm *bprm)
1230{
1231        struct task_struct *p = current, *t;
1232        unsigned n_fs;
1233        int res = 0;
1234
1235        if (p->ptrace) {
1236                if (p->ptrace & PT_PTRACE_CAP)
1237                        bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
1238                else
1239                        bprm->unsafe |= LSM_UNSAFE_PTRACE;
1240        }
1241
1242        /*
1243         * This isn't strictly necessary, but it makes it harder for LSMs to
1244         * mess up.
1245         */
1246        if (current->no_new_privs)
1247                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1248
1249        n_fs = 1;
1250        spin_lock(&p->fs->lock);
1251        rcu_read_lock();
1252        for (t = next_thread(p); t != p; t = next_thread(t)) {
1253                if (t->fs == p->fs)
1254                        n_fs++;
1255        }
1256        rcu_read_unlock();
1257
1258        if (p->fs->users > n_fs) {
1259                bprm->unsafe |= LSM_UNSAFE_SHARE;
1260        } else {
1261                res = -EAGAIN;
1262                if (!p->fs->in_exec) {
1263                        p->fs->in_exec = 1;
1264                        res = 1;
1265                }
1266        }
1267        spin_unlock(&p->fs->lock);
1268
1269        return res;
1270}
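
/*
 * Return convention (descriptive note, not from the original source):
 * 1  - this call claimed fs->in_exec; the caller must clear it again if
 *      the exec fails (see clear_in_exec in do_execve_common()),
 * 0  - the fs_struct is shared outside the thread group, so LSM_UNSAFE_SHARE
 *      was set and there is nothing to clear,
 * -EAGAIN - a sibling thread is already in the middle of an exec on the
 *      same fs_struct.
 */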
1271
1272/* 
1273 * Fill the binprm structure from the inode. 
1274 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
1275 *
1276 * This may be called multiple times for binary chains (scripts for example).
1277 */
1278int prepare_binprm(struct linux_binprm *bprm)
1279{
1280        umode_t mode;
1281        struct inode * inode = file_inode(bprm->file);
1282        int retval;
1283
1284        mode = inode->i_mode;
1285        if (bprm->file->f_op == NULL)
1286                return -EACCES;
1287
1288        /* clear any previous set[ug]id data from a previous binary */
1289        bprm->cred->euid = current_euid();
1290        bprm->cred->egid = current_egid();
1291
1292        if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
1293            !current->no_new_privs &&
1294            kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
1295            kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
1296                /* Set-uid? */
1297                if (mode & S_ISUID) {
1298                        bprm->per_clear |= PER_CLEAR_ON_SETID;
1299                        bprm->cred->euid = inode->i_uid;
1300                }
1301
1302                /* Set-gid? */
1303                /*
1304                 * If setgid is set but no group execute bit then this
1305                 * is a candidate for mandatory locking, not a setgid
1306                 * executable.
1307                 */
1308                if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1309                        bprm->per_clear |= PER_CLEAR_ON_SETID;
1310                        bprm->cred->egid = inode->i_gid;
1311                }
1312        }
1313
1314        /* fill in binprm security blob */
1315        retval = security_bprm_set_creds(bprm);
1316        if (retval)
1317                return retval;
1318        bprm->cred_prepared = 1;
1319
1320        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1321        return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
1322}
1323
1324EXPORT_SYMBOL(prepare_binprm);
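
/*
 * Illustrative example (not part of the original source): for a root-owned
 * binary with mode 04755 on a mount without MNT_NOSUID, the S_ISUID branch
 * above sets bprm->cred->euid to root while bprm->cred->uid keeps the
 * caller's uid; nothing takes effect until install_exec_creds() commits
 * the prepared credentials.
 */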
1325
1326/*
1327 * Arguments are '\0' separated strings found at the location bprm->p
 1328 * points to; chop off the first by relocating bprm->p to right after
1329 * the first '\0' encountered.
1330 */
1331int remove_arg_zero(struct linux_binprm *bprm)
1332{
1333        int ret = 0;
1334        unsigned long offset;
1335        char *kaddr;
1336        struct page *page;
1337
1338        if (!bprm->argc)
1339                return 0;
1340
1341        do {
1342                offset = bprm->p & ~PAGE_MASK;
1343                page = get_arg_page(bprm, bprm->p, 0);
1344                if (!page) {
1345                        ret = -EFAULT;
1346                        goto out;
1347                }
1348                kaddr = kmap_atomic(page);
1349
1350                for (; offset < PAGE_SIZE && kaddr[offset];
1351                                offset++, bprm->p++)
1352                        ;
1353
1354                kunmap_atomic(kaddr);
1355                put_arg_page(page);
1356
1357                if (offset == PAGE_SIZE)
1358                        free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
1359        } while (offset == PAGE_SIZE);
1360
1361        bprm->p++;
1362        bprm->argc--;
1363        ret = 0;
1364
1365out:
1366        return ret;
1367}
1368EXPORT_SYMBOL(remove_arg_zero);
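
/*
 * Typical caller (descriptive note): the #! handler in fs/binfmt_script.c
 * uses remove_arg_zero() to drop the original argv[0] before pushing the
 * script name and interpreter arguments back with copy_strings_kernel().
 */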
1369
1370#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1371/*
 1372 * cycle through the list of binary format handlers until one recognizes the image
1373 */
1374int search_binary_handler(struct linux_binprm *bprm)
1375{
1376        bool need_retry = IS_ENABLED(CONFIG_MODULES);
1377        struct linux_binfmt *fmt;
1378        int retval;
1379
1380        /* This allows 4 levels of binfmt rewrites before failing hard. */
1381        if (bprm->recursion_depth > 5)
1382                return -ELOOP;
1383
1384        retval = security_bprm_check(bprm);
1385        if (retval)
1386                return retval;
1387
1388        retval = audit_bprm(bprm);
1389        if (retval)
1390                return retval;
1391
1392        retval = -ENOENT;
1393 retry:
1394        read_lock(&binfmt_lock);
1395        list_for_each_entry(fmt, &formats, lh) {
1396                if (!try_module_get(fmt->module))
1397                        continue;
1398                read_unlock(&binfmt_lock);
1399                bprm->recursion_depth++;
1400                retval = fmt->load_binary(bprm);
1401                bprm->recursion_depth--;
1402                if (retval >= 0 || retval != -ENOEXEC ||
1403                    bprm->mm == NULL || bprm->file == NULL) {
1404                        put_binfmt(fmt);
1405                        return retval;
1406                }
1407                read_lock(&binfmt_lock);
1408                put_binfmt(fmt);
1409        }
1410        read_unlock(&binfmt_lock);
1411
1412        if (need_retry && retval == -ENOEXEC) {
1413                if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1414                    printable(bprm->buf[2]) && printable(bprm->buf[3]))
1415                        return retval;
1416                if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1417                        return retval;
1418                need_retry = false;
1419                goto retry;
1420        }
1421
1422        return retval;
1423}
1424EXPORT_SYMBOL(search_binary_handler);
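
/*
 * Worked example for the retry path above (descriptive note): an ELF image
 * starts with 0x7f 'E' 'L' 'F', and 0x7f is not printable, so if no
 * registered handler accepts it the kernel (with CONFIG_MODULES) asks for
 * request_module("binfmt-464c") on a little-endian machine (0x464c being
 * the u16 read from bytes 2-3 of the header) and then rescans the list once.
 */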
1425
1426static int exec_binprm(struct linux_binprm *bprm)
1427{
1428        pid_t old_pid, old_vpid;
1429        int ret;
1430
1431        /* Need to fetch pid before load_binary changes it */
1432        old_pid = current->pid;
1433        rcu_read_lock();
1434        old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1435        rcu_read_unlock();
1436
1437        ret = search_binary_handler(bprm);
1438        if (ret >= 0) {
1439                trace_sched_process_exec(current, old_pid, bprm);
1440                ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1441                current->did_exec = 1;
1442                proc_exec_connector(current);
1443
1444                if (bprm->file) {
1445                        allow_write_access(bprm->file);
1446                        fput(bprm->file);
1447                        bprm->file = NULL; /* to catch use-after-free */
1448                }
1449        }
1450
1451        return ret;
1452}
1453
1454/*
1455 * sys_execve() executes a new program.
1456 */
1457static int do_execve_common(const char *filename,
1458                                struct user_arg_ptr argv,
1459                                struct user_arg_ptr envp)
1460{
1461        struct linux_binprm *bprm;
1462        struct file *file;
1463        struct files_struct *displaced;
1464        bool clear_in_exec;
1465        int retval;
1466
1467        /*
1468         * We move the actual failure in case of RLIMIT_NPROC excess from
1469         * set*uid() to execve() because too many poorly written programs
1470         * don't check setuid() return code.  Here we additionally recheck
1471         * whether NPROC limit is still exceeded.
1472         */
1473        if ((current->flags & PF_NPROC_EXCEEDED) &&
1474            atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
1475                retval = -EAGAIN;
1476                goto out_ret;
1477        }
1478
1479        /* We're below the limit (still or again), so we don't want to make
1480         * further execve() calls fail. */
1481        current->flags &= ~PF_NPROC_EXCEEDED;
1482
1483        retval = unshare_files(&displaced);
1484        if (retval)
1485                goto out_ret;
1486
1487        retval = -ENOMEM;
1488        bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1489        if (!bprm)
1490                goto out_files;
1491
1492        retval = prepare_bprm_creds(bprm);
1493        if (retval)
1494                goto out_free;
1495
1496        retval = check_unsafe_exec(bprm);
1497        if (retval < 0)
1498                goto out_free;
1499        clear_in_exec = retval;
1500        current->in_execve = 1;
1501
1502        file = open_exec(filename);
1503        retval = PTR_ERR(file);
1504        if (IS_ERR(file))
1505                goto out_unmark;
1506
1507        sched_exec();
1508
1509        bprm->file = file;
1510        bprm->filename = filename;
1511        bprm->interp = filename;
1512
1513        retval = bprm_mm_init(bprm);
1514        if (retval)
1515                goto out_file;
1516
1517        bprm->argc = count(argv, MAX_ARG_STRINGS);
1518        if ((retval = bprm->argc) < 0)
1519                goto out;
1520
1521        bprm->envc = count(envp, MAX_ARG_STRINGS);
1522        if ((retval = bprm->envc) < 0)
1523                goto out;
1524
1525        retval = prepare_binprm(bprm);
1526        if (retval < 0)
1527                goto out;
1528
1529        retval = copy_strings_kernel(1, &bprm->filename, bprm);
1530        if (retval < 0)
1531                goto out;
1532
1533        bprm->exec = bprm->p;
1534        retval = copy_strings(bprm->envc, envp, bprm);
1535        if (retval < 0)
1536                goto out;
1537
1538        retval = copy_strings(bprm->argc, argv, bprm);
1539        if (retval < 0)
1540                goto out;
1541
1542        retval = exec_binprm(bprm);
1543        if (retval < 0)
1544                goto out;
1545
1546        /* execve succeeded */
1547        current->fs->in_exec = 0;
1548        current->in_execve = 0;
1549        acct_update_integrals(current);
1550        free_bprm(bprm);
1551        if (displaced)
1552                put_files_struct(displaced);
1553        return retval;
1554
1555out:
1556        if (bprm->mm) {
1557                acct_arg_size(bprm, 0);
1558                mmput(bprm->mm);
1559        }
1560
1561out_file:
1562        if (bprm->file) {
1563                allow_write_access(bprm->file);
1564                fput(bprm->file);
1565        }
1566
1567out_unmark:
1568        if (clear_in_exec)
1569                current->fs->in_exec = 0;
1570        current->in_execve = 0;
1571
1572out_free:
1573        free_bprm(bprm);
1574
1575out_files:
1576        if (displaced)
1577                reset_files_struct(displaced);
1578out_ret:
1579        return retval;
1580}
1581
1582int do_execve(const char *filename,
1583        const char __user *const __user *__argv,
1584        const char __user *const __user *__envp)
1585{
1586        struct user_arg_ptr argv = { .ptr.native = __argv };
1587        struct user_arg_ptr envp = { .ptr.native = __envp };
1588        return do_execve_common(filename, argv, envp);
1589}
1590
1591#ifdef CONFIG_COMPAT
1592static int compat_do_execve(const char *filename,
1593        const compat_uptr_t __user *__argv,
1594        const compat_uptr_t __user *__envp)
1595{
1596        struct user_arg_ptr argv = {
1597                .is_compat = true,
1598                .ptr.compat = __argv,
1599        };
1600        struct user_arg_ptr envp = {
1601                .is_compat = true,
1602                .ptr.compat = __envp,
1603        };
1604        return do_execve_common(filename, argv, envp);
1605}
1606#endif
1607
1608void set_binfmt(struct linux_binfmt *new)
1609{
1610        struct mm_struct *mm = current->mm;
1611
1612        if (mm->binfmt)
1613                module_put(mm->binfmt->module);
1614
1615        mm->binfmt = new;
1616        if (new)
1617                __module_get(new->module);
1618}
1619
1620EXPORT_SYMBOL(set_binfmt);
1621
1622/*
1623 * set_dumpable converts traditional three-value dumpable to two flags and
1624 * stores them into mm->flags.  It modifies lower two bits of mm->flags, but
1625 * these bits are not changed atomically.  So get_dumpable can observe the
 1626 * intermediate state.  To avoid unexpected behavior, get_dumpable returns
 1627 * either the old dumpable or the new one by paying attention to the order of
1628 * modifying the bits.
1629 *
1630 * dumpable |   mm->flags (binary)
1631 * old  new | initial interim  final
1632 * ---------+-----------------------
1633 *  0    1  |   00      01      01
1634 *  0    2  |   00      10(*)   11
1635 *  1    0  |   01      00      00
1636 *  1    2  |   01      11      11
1637 *  2    0  |   11      10(*)   00
1638 *  2    1  |   11      11      01
1639 *
1640 * (*) get_dumpable regards interim value of 10 as 11.
1641 */
1642void set_dumpable(struct mm_struct *mm, int value)
1643{
1644        switch (value) {
1645        case SUID_DUMP_DISABLE:
1646                clear_bit(MMF_DUMPABLE, &mm->flags);
1647                smp_wmb();
1648                clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1649                break;
1650        case SUID_DUMP_USER:
1651                set_bit(MMF_DUMPABLE, &mm->flags);
1652                smp_wmb();
1653                clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1654                break;
1655        case SUID_DUMP_ROOT:
1656                set_bit(MMF_DUMP_SECURELY, &mm->flags);
1657                smp_wmb();
1658                set_bit(MMF_DUMPABLE, &mm->flags);
1659                break;
1660        }
1661}
1662
1663int __get_dumpable(unsigned long mm_flags)
1664{
1665        int ret;
1666
1667        ret = mm_flags & MMF_DUMPABLE_MASK;
1668        return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret;
1669}
1670
1671int get_dumpable(struct mm_struct *mm)
1672{
1673        return __get_dumpable(mm->flags);
1674}
1675
1676SYSCALL_DEFINE3(execve,
1677                const char __user *, filename,
1678                const char __user *const __user *, argv,
1679                const char __user *const __user *, envp)
1680{
1681        struct filename *path = getname(filename);
1682        int error = PTR_ERR(path);
1683        if (!IS_ERR(path)) {
1684                error = do_execve(path->name, argv, envp);
1685                putname(path);
1686        }
1687        return error;
1688}
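
/*
 * User-space view (illustrative, not part of this file): the syscall above
 * backs the execve(2) wrapper; on success it does not return.
 *
 *	char *argv[] = { "/bin/ls", "-l", NULL };
 *	char *envp[] = { "PATH=/bin:/usr/bin", NULL };
 *	execve("/bin/ls", argv, envp);
 *	perror("execve");	(reached only on failure)
 */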
1689#ifdef CONFIG_COMPAT
1690asmlinkage long compat_sys_execve(const char __user * filename,
1691        const compat_uptr_t __user * argv,
1692        const compat_uptr_t __user * envp)
1693{
1694        struct filename *path = getname(filename);
1695        int error = PTR_ERR(path);
1696        if (!IS_ERR(path)) {
1697                error = compat_do_execve(path->name, argv, envp);
1698                putname(path);
1699        }
1700        return error;
1701}
1702#endif
1703