// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/kernel_read_file.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/syscall_user_dispatch.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static int bprm_creds_from_file(struct linux_binprm *bprm);

int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
        BUG_ON(!fmt);
        if (WARN_ON(!fmt->load_binary))
                return;
        write_lock(&binfmt_lock);
        insert ? list_add(&fmt->lh, &formats) :
                 list_add_tail(&fmt->lh, &formats);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
        write_lock(&binfmt_lock);
        list_del(&fmt->lh);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
        module_put(fmt->module);
}

bool path_noexec(const struct path *path)
{
        return (path->mnt->mnt_flags & MNT_NOEXEC) ||
               (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
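
/*
 * Example (illustrative): MNT_NOEXEC is the per-mount flag set by
 * "mount -o noexec", while SB_I_NOEXEC is set internally by filesystems
 * that never host executables (procfs, for instance).  An execve() of a
 * file on such a mount is rejected with -EACCES in may_open() before any
 * binfmt handler runs.
 */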

#ifdef CONFIG_USELIB
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
        struct linux_binfmt *fmt;
        struct file *file;
        struct filename *tmp = getname(library);
        int error = PTR_ERR(tmp);
        static const struct open_flags uselib_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_READ | MAY_EXEC,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if (IS_ERR(tmp))
                goto out;

        file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
        putname(tmp);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;

        /*
         * may_open() has already checked for this, so it should be
         * impossible to trip now. But we need to be extra cautious
         * and check again at the very end too.
         */
        error = -EACCES;
        if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
                         path_noexec(&file->f_path)))
                goto exit;

        fsnotify_open(file);

        error = -ENOEXEC;

        read_lock(&binfmt_lock);
        list_for_each_entry(fmt, &formats, lh) {
                if (!fmt->load_shlib)
                        continue;
                if (!try_module_get(fmt->module))
                        continue;
                read_unlock(&binfmt_lock);
                error = fmt->load_shlib(file);
                read_lock(&binfmt_lock);
                put_binfmt(fmt);
                if (error != -ENOEXEC)
                        break;
        }
        read_unlock(&binfmt_lock);
exit:
        fput(file);
out:
        return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap(), but it can
 * use a lot of memory, so account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
        struct mm_struct *mm = current->mm;
        long diff = (long)(pages - bprm->vma_pages);

        if (!mm || !diff)
                return;

        bprm->vma_pages = pages;
        add_mm_counter(mm, MM_ANONPAGES, diff);
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;
        int ret;
        unsigned int gup_flags = FOLL_FORCE;

#ifdef CONFIG_STACK_GROWSUP
        if (write) {
                ret = expand_downwards(bprm->vma, pos);
                if (ret < 0)
                        return NULL;
        }
#endif

        if (write)
                gup_flags |= FOLL_WRITE;

        /*
         * We are doing an exec().  'current' is the process
         * doing the exec and bprm->mm is the new process's mm.
         */
        ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
                        &page, NULL, NULL);
        if (ret <= 0)
                return NULL;

        if (write)
                acct_arg_size(bprm, vma_pages(bprm->vma));

        return page;
}

static void put_arg_page(struct page *page)
{
        put_page(page);
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;

        bprm->vma = vma = vm_area_alloc(mm);
        if (!vma)
                return -ENOMEM;
        vma_set_anonymous(vma);

        if (mmap_write_lock_killable(mm)) {
                err = -EINTR;
                goto err_free;
        }

        /*
         * Place the stack at the largest stack address the architecture
         * supports. Later, we'll move this to an appropriate place. We don't
         * use STACK_TOP because that can depend on attributes which aren't
         * configured yet.
         */
        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

        err = insert_vm_struct(mm, vma);
        if (err)
                goto err;

        mm->stack_vm = mm->total_vm = 1;
        mmap_write_unlock(mm);
        bprm->p = vma->vm_end - sizeof(void *);
        return 0;
err:
        mmap_write_unlock(mm);
err_free:
        bprm->vma = NULL;
        vm_area_free(vma);
        return err;
}
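
/*
 * Sketch of the resulting layout: one anonymous page parked at the
 * highest stack address the architecture allows, filled top-down later:
 *
 *   STACK_TOP_MAX         ----------------------  <- vma->vm_end
 *                          (void *) sized slot
 *   bprm->p            ->  argument/environment
 *                          strings grow downward
 *   vm_end - PAGE_SIZE    ----------------------  <- vma->vm_start
 *
 * setup_arg_pages() relocates and resizes this vma once the binfmt
 * handler has chosen the final stack location.
 */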

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= MAX_ARG_STRLEN;
}

#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;

        page = bprm->page[pos / PAGE_SIZE];
        if (!page && write) {
                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
                if (!page)
                        return NULL;
                bprm->page[pos / PAGE_SIZE] = page;
        }

        return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
        if (bprm->page[i]) {
                __free_page(bprm->page[i]);
                bprm->page[i] = NULL;
        }
}

static void free_arg_pages(struct linux_binprm *bprm)
{
        int i;

        for (i = 0; i < MAX_ARG_PAGES; i++)
                free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
        return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct mm_struct *mm = NULL;

        bprm->mm = mm = mm_alloc();
        err = -ENOMEM;
        if (!mm)
                goto err;

        /* Save current stack limit for all calculations made during exec. */
        task_lock(current->group_leader);
        bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
        task_unlock(current->group_leader);

        err = __bprm_mm_init(bprm);
        if (err)
                goto err;

        return 0;

err:
        if (mm) {
                bprm->mm = NULL;
                mmdrop(mm);
        }

        return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
        bool is_compat;
#endif
        union {
                const char __user *const __user *native;
#ifdef CONFIG_COMPAT
                const compat_uptr_t __user *compat;
#endif
        } ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
        const char __user *native;

#ifdef CONFIG_COMPAT
        if (unlikely(argv.is_compat)) {
                compat_uptr_t compat;

                if (get_user(compat, argv.ptr.compat + nr))
                        return ERR_PTR(-EFAULT);

                return compat_ptr(compat);
        }
#endif

        if (get_user(native, argv.ptr.native + nr))
                return ERR_PTR(-EFAULT);

        return native;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
        int i = 0;

        if (argv.ptr.native != NULL) {
                for (;;) {
                        const char __user *p = get_user_arg_ptr(argv, i);

                        if (!p)
                                break;

                        if (IS_ERR(p))
                                return -EFAULT;

                        if (i >= max)
                                return -E2BIG;
                        ++i;

                        if (fatal_signal_pending(current))
                                return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
        return i;
}

static int count_strings_kernel(const char *const *argv)
{
        int i;

        if (!argv)
                return 0;

        for (i = 0; argv[i]; ++i) {
                if (i >= MAX_ARG_STRINGS)
                        return -E2BIG;
                if (fatal_signal_pending(current))
                        return -ERESTARTNOHAND;
                cond_resched();
        }
        return i;
}

static int bprm_stack_limits(struct linux_binprm *bprm)
{
        unsigned long limit, ptr_size;

        /*
         * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
         * (whichever is smaller) for the argv+env strings.
         * This ensures that:
         *  - the remaining binfmt code will not run out of stack space,
         *  - the program will have a reasonable amount of stack left
         *    to work from.
         */
        limit = _STK_LIM / 4 * 3;
        limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
        /*
         * We've historically supported up to 32 pages (ARG_MAX)
         * of argument strings even with small stacks
         */
        limit = max_t(unsigned long, limit, ARG_MAX);
        /*
         * We must account for the size of all the argv and envp pointers to
         * the argv and envp strings, since they will also take up space in
         * the stack. They aren't stored until much later when we can't
         * signal to the parent that the child has run out of stack space.
         * Instead, calculate it here so it's possible to fail gracefully.
         */
        ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
        if (limit <= ptr_size)
                return -E2BIG;
        limit -= ptr_size;

        bprm->argmin = bprm->p - limit;
        return 0;
}
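
/*
 * Worked example (assuming _STK_LIM = 8 MiB and RLIMIT_STACK = 8 MiB):
 * limit = min(6 MiB, 2 MiB) = 2 MiB, and the ARG_MAX floor (128 KiB)
 * changes nothing.  With argc + envc = 100 on a 64-bit machine, another
 * 800 bytes are reserved for the pointer arrays, so bprm->argmin lands
 * just under 2 MiB below the initial bprm->p.
 */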

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
                        struct linux_binprm *bprm)
{
        struct page *kmapped_page = NULL;
        char *kaddr = NULL;
        unsigned long kpos = 0;
        int ret;

        while (argc-- > 0) {
                const char __user *str;
                int len;
                unsigned long pos;

                ret = -EFAULT;
                str = get_user_arg_ptr(argv, argc);
                if (IS_ERR(str))
                        goto out;

                len = strnlen_user(str, MAX_ARG_STRLEN);
                if (!len)
                        goto out;

                ret = -E2BIG;
                if (!valid_arg_len(bprm, len))
                        goto out;

                /* We're going to work our way backwards. */
                pos = bprm->p;
                str += len;
                bprm->p -= len;
#ifdef CONFIG_MMU
                if (bprm->p < bprm->argmin)
                        goto out;
#endif

                while (len > 0) {
                        int offset, bytes_to_copy;

                        if (fatal_signal_pending(current)) {
                                ret = -ERESTARTNOHAND;
                                goto out;
                        }
                        cond_resched();

                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;

                        bytes_to_copy = offset;
                        if (bytes_to_copy > len)
                                bytes_to_copy = len;

                        offset -= bytes_to_copy;
                        pos -= bytes_to_copy;
                        str -= bytes_to_copy;
                        len -= bytes_to_copy;

                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
                                struct page *page;

                                page = get_arg_page(bprm, pos, 1);
                                if (!page) {
                                        ret = -E2BIG;
                                        goto out;
                                }

                                if (kmapped_page) {
                                        flush_kernel_dcache_page(kmapped_page);
                                        kunmap(kmapped_page);
                                        put_arg_page(kmapped_page);
                                }
                                kmapped_page = page;
                                kaddr = kmap(kmapped_page);
                                kpos = pos & PAGE_MASK;
                                flush_arg_page(bprm, kpos, kmapped_page);
                        }
                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
                                ret = -EFAULT;
                                goto out;
                        }
                }
        }
        ret = 0;
out:
        if (kmapped_page) {
                flush_kernel_dcache_page(kmapped_page);
                kunmap(kmapped_page);
                put_arg_page(kmapped_page);
        }
        return ret;
}

/*
 * Copy an argument/environment string from the kernel to the process's stack.
 */
int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
        int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
        unsigned long pos = bprm->p;

        if (len == 0)
                return -EFAULT;
        if (!valid_arg_len(bprm, len))
                return -E2BIG;

        /* We're going to work our way backwards. */
        arg += len;
        bprm->p -= len;
        if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
                return -E2BIG;

        while (len > 0) {
                unsigned int bytes_to_copy = min_t(unsigned int, len,
                                min_not_zero(offset_in_page(pos), PAGE_SIZE));
                struct page *page;
                char *kaddr;

                pos -= bytes_to_copy;
                arg -= bytes_to_copy;
                len -= bytes_to_copy;

                page = get_arg_page(bprm, pos, 1);
                if (!page)
                        return -E2BIG;
                kaddr = kmap_atomic(page);
                flush_arg_page(bprm, pos & PAGE_MASK, page);
                memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
                flush_kernel_dcache_page(page);
                kunmap_atomic(kaddr);
                put_arg_page(page);
        }

        return 0;
}
EXPORT_SYMBOL(copy_string_kernel);
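
/*
 * binfmt handlers push strings with this as well; fs/binfmt_script.c,
 * for example, uses copy_string_kernel() to insert the "#!" interpreter
 * path and its optional argument before the binfmt search is re-entered.
 */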

static int copy_strings_kernel(int argc, const char *const *argv,
                               struct linux_binprm *bprm)
{
        while (argc-- > 0) {
                int ret = copy_string_kernel(argv[argc], bprm);
                if (ret < 0)
                        return ret;
                if (fatal_signal_pending(current))
                        return -ERESTARTNOHAND;
                cond_resched();
        }
        return 0;
}

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long old_start = vma->vm_start;
        unsigned long old_end = vma->vm_end;
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
        struct mmu_gather tlb;

        BUG_ON(new_start > new_end);

        /*
         * ensure there are no vmas between where we want to go
         * and where we are
         */
        if (vma != find_vma(mm, new_start))
                return -EFAULT;

        /*
         * cover the whole range: [new_start, old_end)
         */
        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
                return -ENOMEM;

        /*
         * move the page tables downwards, on failure we rely on
         * process cleanup to remove whatever mess we made.
         */
        if (length != move_page_tables(vma, old_start,
                                       vma, new_start, length, false))
                return -ENOMEM;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
                 * the address space in [new_end, old_start): some architectures
                 * have constraints on va-space that make this illegal (IA64) -
                 * for the others it's just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb);

        /*
         * Shrink the vma to just the new range.  Always succeeds.
         */
        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

        return 0;
}
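
/*
 * Sketch of the shift (shift = S, new_start = old_start - S):
 *
 *   before:            [ old_start ................ old_end )
 *   after vma_adjust:  [ new_start ................ old_end )
 *   after move + free: [ new_start .. new_end )
 *
 * i.e. the vma briefly covers both ranges so the helpers above always
 * see consistent arguments, then shrinks to the new range only.
 */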

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
                    unsigned long stack_top,
                    int executable_stack)
{
        unsigned long ret;
        unsigned long stack_shift;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = bprm->vma;
        struct vm_area_struct *prev = NULL;
        unsigned long vm_flags;
        unsigned long stack_base;
        unsigned long stack_size;
        unsigned long stack_expand;
        unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
        /* Limit stack size */
        stack_base = bprm->rlim_stack.rlim_max;

        stack_base = calc_max_stack_size(stack_base);

        /* Add space for stack randomization. */
        stack_base += (STACK_RND_MASK << PAGE_SHIFT);

        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
                return -ENOMEM;

        stack_base = PAGE_ALIGN(stack_top - stack_base);

        stack_shift = vma->vm_start - stack_base;
        mm->arg_start = bprm->p - stack_shift;
        bprm->p = vma->vm_end - stack_shift;
#else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);

        if (unlikely(stack_top < mmap_min_addr) ||
            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
                return -ENOMEM;

        stack_shift = vma->vm_end - stack_top;

        bprm->p -= stack_shift;
        mm->arg_start = bprm->p;
#endif

        if (bprm->loader)
                bprm->loader -= stack_shift;
        bprm->exec -= stack_shift;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        vm_flags = VM_STACK_FLAGS;

        /*
         * Adjust stack execute permissions; explicitly enable for
         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
         * (arch default) otherwise.
         */
        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
                vm_flags |= VM_EXEC;
        else if (executable_stack == EXSTACK_DISABLE_X)
                vm_flags &= ~VM_EXEC;
        vm_flags |= mm->def_flags;
        vm_flags |= VM_STACK_INCOMPLETE_SETUP;

        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
                        vm_flags);
        if (ret)
                goto out_unlock;
        BUG_ON(prev != vma);

        if (unlikely(vm_flags & VM_EXEC)) {
                pr_warn_once("process '%pD4' started with executable stack\n",
                             bprm->file);
        }

        /* Move stack pages down in memory. */
        if (stack_shift) {
                ret = shift_arg_pages(vma, stack_shift);
                if (ret)
                        goto out_unlock;
        }

        /* mprotect_fixup is overkill to remove the temporary stack flags */
        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
        stack_size = vma->vm_end - vma->vm_start;
        /*
         * Align this down to a page boundary as expand_stack
         * will align it up.
         */
        rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_start + rlim_stack;
        else
                stack_base = vma->vm_end + stack_expand;
#else
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_end - rlim_stack;
        else
                stack_base = vma->vm_start - stack_expand;
#endif
        current->mm->start_stack = bprm->p;
        ret = expand_stack(vma, stack_base);
        if (ret)
                ret = -EFAULT;

out_unlock:
        mmap_write_unlock(mm);
        return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#else

/*
 * Transfer the program arguments and environment from the holding pages
 * onto the stack. The provided stack pointer is adjusted accordingly.
 */
int transfer_args_to_stack(struct linux_binprm *bprm,
                           unsigned long *sp_location)
{
        unsigned long index, stop, sp;
        int ret = 0;

        stop = bprm->p >> PAGE_SHIFT;
        sp = *sp_location;

        for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
                unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
                char *src = kmap(bprm->page[index]) + offset;
                sp -= PAGE_SIZE - offset;
                if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
                        ret = -EFAULT;
                kunmap(bprm->page[index]);
                if (ret)
                        goto out;
        }

        *sp_location = sp;

out:
        return ret;
}
EXPORT_SYMBOL(transfer_args_to_stack);

#endif /* CONFIG_MMU */

static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
        struct file *file;
        int err;
        struct open_flags open_exec_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_EXEC,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
                return ERR_PTR(-EINVAL);
        if (flags & AT_SYMLINK_NOFOLLOW)
                open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
        if (flags & AT_EMPTY_PATH)
                open_exec_flags.lookup_flags |= LOOKUP_EMPTY;

        file = do_filp_open(fd, name, &open_exec_flags);
        if (IS_ERR(file))
                goto out;

        /*
         * may_open() has already checked for this, so it should be
         * impossible to trip now. But we need to be extra cautious
         * and check again at the very end too.
         */
        err = -EACCES;
        if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
                         path_noexec(&file->f_path)))
                goto exit;

        err = deny_write_access(file);
        if (err)
                goto exit;

        if (name->name[0] != '\0')
                fsnotify_open(file);

out:
        return file;

exit:
        fput(file);
        return ERR_PTR(err);
}

struct file *open_exec(const char *name)
{
        struct filename *filename = getname_kernel(name);
        struct file *f = ERR_CAST(filename);

        if (!IS_ERR(filename)) {
                f = do_open_execat(AT_FDCWD, filename, 0);
                putname(filename);
        }
        return f;
}
EXPORT_SYMBOL(open_exec);
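
/*
 * open_exec() is how binfmt handlers open secondary executables; for
 * example, fs/binfmt_elf.c uses it to open the dynamic linker named by
 * a binary's PT_INTERP segment (typically /lib/ld-linux*.so.*).
 */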

#if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
    defined(CONFIG_BINFMT_ELF_FDPIC)
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
        ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
        if (res > 0)
                flush_icache_user_range(addr, addr + len);
        return res;
}
EXPORT_SYMBOL(read_code);
#endif

/*
 * Maps the mm_struct mm into the current task struct.
 * On success, this function returns with exec_update_lock
 * held for writing.
 */
static int exec_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk;
        struct mm_struct *old_mm, *active_mm;
        int ret;

        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
        exec_mm_release(tsk, old_mm);
        if (old_mm)
                sync_mm_rss(old_mm);

        ret = down_write_killable(&tsk->signal->exec_update_lock);
        if (ret)
                return ret;

        if (old_mm) {
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec.  We must hold mmap_lock around
                 * checking core_state and changing tsk->mm.
                 */
                mmap_read_lock(old_mm);
                if (unlikely(old_mm->core_state)) {
                        mmap_read_unlock(old_mm);
                        up_write(&tsk->signal->exec_update_lock);
                        return -EINTR;
                }
        }

        task_lock(tsk);
        membarrier_exec_mmap(mm);

        local_irq_disable();
        active_mm = tsk->active_mm;
        tsk->active_mm = mm;
        tsk->mm = mm;
        /*
         * This prevents preemption while active_mm is being loaded and
         * it and mm are being updated, which could cause problems for
         * lazy tlb mm refcounting when these are updated by context
         * switches. Not all architectures can handle irqs off over
         * activate_mm yet.
         */
        if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
                local_irq_enable();
        activate_mm(active_mm, mm);
        if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
                local_irq_enable();
        tsk->mm->vmacache_seqnum = 0;
        vmacache_flush(tsk);
        task_unlock(tsk);
        if (old_mm) {
                mmap_read_unlock(old_mm);
                BUG_ON(active_mm != old_mm);
                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
                mm_update_next_owner(old_mm);
                mmput(old_mm);
                return 0;
        }
        mmdrop(active_mm);
        return 0;
}

static int de_thread(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;

        if (thread_group_empty(tsk))
                goto no_thread_group;

        /*
         * Kill all other threads in the thread group.
         */
        spin_lock_irq(lock);
        if (signal_group_exit(sig)) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
                 */
                spin_unlock_irq(lock);
                return -EAGAIN;
        }

        sig->group_exit_task = tsk;
        sig->notify_count = zap_other_threads(tsk);
        if (!thread_group_leader(tsk))
                sig->notify_count--;

        while (sig->notify_count) {
                __set_current_state(TASK_KILLABLE);
                spin_unlock_irq(lock);
                schedule();
                if (__fatal_signal_pending(tsk))
                        goto killed;
                spin_lock_irq(lock);
        }
        spin_unlock_irq(lock);

        /*
         * At this point all other threads have exited, all we have to
         * do is to wait for the thread group leader to become inactive,
         * and to assume its PID:
         */
        if (!thread_group_leader(tsk)) {
                struct task_struct *leader = tsk->group_leader;

                for (;;) {
                        cgroup_threadgroup_change_begin(tsk);
                        write_lock_irq(&tasklist_lock);
                        /*
                         * Do this under tasklist_lock to ensure that
                         * exit_notify() can't miss ->group_exit_task
                         */
                        sig->notify_count = -1;
                        if (likely(leader->exit_state))
                                break;
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
                        cgroup_threadgroup_change_end(tsk);
                        schedule();
                        if (__fatal_signal_pending(tsk))
                                goto killed;
                }

                /*
                 * The only record we have of the real-time age of a
                 * process, regardless of execs it's done, is start_time.
                 * All the past CPU time is accumulated in signal_struct
                 * from sister threads now dead.  But in this non-leader
                 * exec, nothing survives from the original leader thread,
                 * whose birth marks the true age of this process now.
                 * When we take on its identity by switching to its PID, we
                 * also take its birthdate (always earlier than our own).
                 */
                tsk->start_time = leader->start_time;
                tsk->start_boottime = leader->start_boottime;

                BUG_ON(!same_thread_group(leader, tsk));
                /*
                 * An exec() starts a new thread group with the
                 * TGID of the previous thread group. Rehash the
                 * two threads with a switched PID, and release
                 * the former thread group leader:
                 */

                /* Become a process group leader with the old leader's pid.
                 * The old leader becomes a thread of this thread group.
                 */
                exchange_tids(tsk, leader);
                transfer_pid(leader, tsk, PIDTYPE_TGID);
                transfer_pid(leader, tsk, PIDTYPE_PGID);
                transfer_pid(leader, tsk, PIDTYPE_SID);

                list_replace_rcu(&leader->tasks, &tsk->tasks);
                list_replace_init(&leader->sibling, &tsk->sibling);

                tsk->group_leader = tsk;
                leader->group_leader = tsk;

                tsk->exit_signal = SIGCHLD;
                leader->exit_signal = -1;

                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;

                /*
                 * We are going to release_task()->ptrace_unlink() silently,
                 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
                 * the tracer won't block again waiting for this thread.
                 */
                if (unlikely(leader->ptrace))
                        __wake_up_parent(leader, leader->parent);
                write_unlock_irq(&tasklist_lock);
                cgroup_threadgroup_change_end(tsk);

                release_task(leader);
        }

        sig->group_exit_task = NULL;
        sig->notify_count = 0;

no_thread_group:
        /* we have changed execution domain */
        tsk->exit_signal = SIGCHLD;

        BUG_ON(!thread_group_leader(tsk));
        return 0;

killed:
        /* protects against exit_notify() and __exit_signal() */
        read_lock(&tasklist_lock);
        sig->group_exit_task = NULL;
        sig->notify_count = 0;
        read_unlock(&tasklist_lock);
        return -EAGAIN;
}


/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int unshare_sighand(struct task_struct *me)
{
        struct sighand_struct *oldsighand = me->sighand;

        if (refcount_read(&oldsighand->count) != 1) {
                struct sighand_struct *newsighand;
                /*
                 * This ->sighand is shared with the CLONE_SIGHAND
                 * but not CLONE_THREAD task, switch to the new one.
                 */
                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
                if (!newsighand)
                        return -ENOMEM;

                refcount_set(&newsighand->count, 1);
                memcpy(newsighand->action, oldsighand->action,
                       sizeof(newsighand->action));

                write_lock_irq(&tasklist_lock);
                spin_lock(&oldsighand->siglock);
                rcu_assign_pointer(me->sighand, newsighand);
                spin_unlock(&oldsighand->siglock);
                write_unlock_irq(&tasklist_lock);

                __cleanup_sighand(oldsighand);
        }
        return 0;
}

char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
        task_lock(tsk);
        strncpy(buf, tsk->comm, buf_size);
        task_unlock(tsk);
        return buf;
}
EXPORT_SYMBOL_GPL(__get_task_comm);
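
/*
 * Most callers use the get_task_comm() wrapper from <linux/sched.h>,
 * which supplies sizeof(buf) as buf_size; the buffer is expected to be
 * at least TASK_COMM_LEN (16) bytes, the fixed size of tsk->comm.
 */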

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */

void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
        task_lock(tsk);
        trace_task_rename(tsk, buf);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
        perf_event_comm(tsk, exec);
}

/*
 * Calling this is the point of no return. None of the failures will be
 * seen by userspace since either the process is already taking a fatal
 * signal (via de_thread() or coredump), or will have SEGV raised
 * (after exec_mmap()) by search_binary_handler (see below).
 */
int begin_new_exec(struct linux_binprm * bprm)
{
        struct task_struct *me = current;
        int retval;

        /* Once we are committed compute the creds */
        retval = bprm_creds_from_file(bprm);
        if (retval)
                return retval;

        /*
         * Ensure all future errors are fatal.
         */
        bprm->point_of_no_return = true;

        /*
         * Make this the only thread in the thread group.
         */
        retval = de_thread(me);
        if (retval)
                goto out;

        /*
         * Cancel any io_uring activity across execve
         */
        io_uring_task_cancel();

        /* Ensure the files table is not shared. */
        retval = unshare_files();
        if (retval)
                goto out;

        /*
         * Must be called _before_ exec_mmap() as bprm->mm is
         * not visible until then. This also enables the update
         * to be lockless.
         */
        set_mm_exe_file(bprm->mm, bprm->file);

        /* If the binary is not readable then enforce mm->dumpable=0 */
        would_dump(bprm, bprm->file);
        if (bprm->have_execfd)
                would_dump(bprm, bprm->executable);

        /*
         * Release all of the old mmap stuff
         */
        acct_arg_size(bprm, 0);
        retval = exec_mmap(bprm->mm);
        if (retval)
                goto out;

        bprm->mm = NULL;

#ifdef CONFIG_POSIX_TIMERS
        exit_itimers(me->signal);
        flush_itimer_signals();
#endif

        /*
         * Make the signal table private.
         */
        retval = unshare_sighand(me);
        if (retval)
                goto out_unlock;

        /*
         * Ensure that the uaccess routines can actually operate on userspace
         * pointers:
         */
        force_uaccess_begin();

        me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
                                        PF_NOFREEZE | PF_NO_SETAFFINITY);
        flush_thread();
        me->personality &= ~bprm->per_clear;

        clear_syscall_work_syscall_user_dispatch(me);

        /*
         * We have to apply CLOEXEC before we change whether the process is
         * dumpable (in setup_new_exec) to avoid a race with a process in userspace
         * trying to access the should-be-closed file descriptors of a process
         * undergoing exec(2).
         */
        do_close_on_exec(me->files);

        if (bprm->secureexec) {
                /* Make sure parent cannot signal privileged process. */
                me->pdeath_signal = 0;

                /*
                 * For secureexec, reset the stack limit to sane default to
                 * avoid bad behavior from the prior rlimits. This has to
                 * happen before arch_pick_mmap_layout(), which examines
                 * RLIMIT_STACK, but after the point of no return to avoid
                 * needing to clean up the change on failure.
                 */
                if (bprm->rlim_stack.rlim_cur > _STK_LIM)
                        bprm->rlim_stack.rlim_cur = _STK_LIM;
        }

        me->sas_ss_sp = me->sas_ss_size = 0;

        /*
         * Figure out dumpability. Note that this checking only of current
         * is wrong, but userspace depends on it. This should be testing
         * bprm->secureexec instead.
         */
        if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
            !(uid_eq(current_euid(), current_uid()) &&
              gid_eq(current_egid(), current_gid())))
                set_dumpable(current->mm, suid_dumpable);
        else
                set_dumpable(current->mm, SUID_DUMP_USER);

        perf_event_exec();
        __set_task_comm(me, kbasename(bprm->filename), true);

        /* An exec changes our domain. We are no longer part of the thread
           group */
        WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
        flush_signal_handlers(me, 0);

        retval = set_cred_ucounts(bprm->cred);
        if (retval < 0)
                goto out_unlock;

        /*
         * install the new credentials for this executable
         */
        security_bprm_committing_creds(bprm);

        commit_creds(bprm->cred);
        bprm->cred = NULL;

        /*
         * Disable monitoring for regular users
         * when executing setuid binaries. Must
         * wait until new credentials are committed
         * by commit_creds() above
         */
        if (get_dumpable(me->mm) != SUID_DUMP_USER)
                perf_event_exit_task(me);
        /*
         * cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
         * credentials; any time after this it may be unlocked.
         */
        security_bprm_committed_creds(bprm);

        /* Pass the opened binary to the interpreter. */
        if (bprm->have_execfd) {
                retval = get_unused_fd_flags(0);
                if (retval < 0)
                        goto out_unlock;
                fd_install(retval, bprm->executable);
                bprm->executable = NULL;
                bprm->execfd = retval;
        }
        return 0;

out_unlock:
        up_write(&me->signal->exec_update_lock);
out:
        return retval;
}
EXPORT_SYMBOL(begin_new_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
        struct inode *inode = file_inode(file);
        struct user_namespace *mnt_userns = file_mnt_user_ns(file);
        if (inode_permission(mnt_userns, inode, MAY_READ) < 0) {
                struct user_namespace *old, *user_ns;
                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

                /* Ensure mm->user_ns contains the executable */
                user_ns = old = bprm->mm->user_ns;
                while ((user_ns != &init_user_ns) &&
                       !privileged_wrt_inode_uidgid(user_ns, mnt_userns, inode))
                        user_ns = user_ns->parent;

                if (old != user_ns) {
                        bprm->mm->user_ns = get_user_ns(user_ns);
                        put_user_ns(old);
                }
        }
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm * bprm)
{
        /* Setup things that can depend upon the personality */
        struct task_struct *me = current;

        arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);

        arch_setup_new_exec();

        /* Set the new mm task size. We have to do that late because it may
         * depend on TIF_32BIT which is only updated in flush_thread() on
         * some architectures like powerpc
         */
        me->mm->task_size = TASK_SIZE;
        up_write(&me->signal->exec_update_lock);
        mutex_unlock(&me->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(setup_new_exec);

/* Runs immediately before start_thread() takes over. */
void finalize_exec(struct linux_binprm *bprm)
{
        /* Store any stack rlimit changes before starting thread. */
        task_lock(current->group_leader);
        current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
        task_unlock(current->group_leader);
}
EXPORT_SYMBOL(finalize_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * setup_new_exec() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred
 * and unlock.
 */
static int prepare_bprm_creds(struct linux_binprm *bprm)
{
        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
                return -ERESTARTNOINTR;

        bprm->cred = prepare_exec_creds();
        if (likely(bprm->cred))
                return 0;

        mutex_unlock(&current->signal->cred_guard_mutex);
        return -ENOMEM;
}

static void free_bprm(struct linux_binprm *bprm)
{
        if (bprm->mm) {
                acct_arg_size(bprm, 0);
                mmput(bprm->mm);
        }
        free_arg_pages(bprm);
        if (bprm->cred) {
                mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
        if (bprm->file) {
                allow_write_access(bprm->file);
                fput(bprm->file);
        }
        if (bprm->executable)
                fput(bprm->executable);
        /* If a binfmt changed the interp, free it. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        kfree(bprm->fdpath);
        kfree(bprm);
}

static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
{
        struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
        int retval = -ENOMEM;
        if (!bprm)
                goto out;

        if (fd == AT_FDCWD || filename->name[0] == '/') {
                bprm->filename = filename->name;
        } else {
                if (filename->name[0] == '\0')
                        bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
                else
                        bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
                                                  fd, filename->name);
                if (!bprm->fdpath)
                        goto out_free;

                bprm->filename = bprm->fdpath;
        }
        bprm->interp = bprm->filename;

        retval = bprm_mm_init(bprm);
        if (retval)
                goto out_free;
        return bprm;

out_free:
        free_bprm(bprm);
out:
        return ERR_PTR(retval);
}

int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
{
        /* If a binfmt changed the interp, free it first. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        bprm->interp = kstrdup(interp, GFP_KERNEL);
        if (!bprm->interp)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(bprm_change_interp);
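
/*
 * Used by handlers that redirect execution to another binary; for
 * example, fs/binfmt_script.c points bprm->interp at the "#!"
 * interpreter so the next pass of the binfmt search loads it instead.
 */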
1546
1547/*
1548 * determine how safe it is to execute the proposed program
1549 * - the caller must hold ->cred_guard_mutex to protect against
1550 *   PTRACE_ATTACH or seccomp thread-sync
1551 */
1552static void check_unsafe_exec(struct linux_binprm *bprm)
1553{
1554        struct task_struct *p = current, *t;
1555        unsigned n_fs;
1556
1557        if (p->ptrace)
1558                bprm->unsafe |= LSM_UNSAFE_PTRACE;
1559
1560        /*
1561         * This isn't strictly necessary, but it makes it harder for LSMs to
1562         * mess up.
1563         */
1564        if (task_no_new_privs(current))
1565                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1566
1567        t = p;
1568        n_fs = 1;
1569        spin_lock(&p->fs->lock);
1570        rcu_read_lock();
1571        while_each_thread(p, t) {
1572                if (t->fs == p->fs)
1573                        n_fs++;
1574        }
1575        rcu_read_unlock();
1576
1577        if (p->fs->users > n_fs)
1578                bprm->unsafe |= LSM_UNSAFE_SHARE;
1579        else
1580                p->fs->in_exec = 1;
1581        spin_unlock(&p->fs->lock);
1582}
1583
1584static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
1585{
1586        /* Handle suid and sgid on files */
1587        struct user_namespace *mnt_userns;
1588        struct inode *inode;
1589        unsigned int mode;
1590        kuid_t uid;
1591        kgid_t gid;
1592
1593        if (!mnt_may_suid(file->f_path.mnt))
1594                return;
1595
1596        if (task_no_new_privs(current))
1597                return;
1598
1599        inode = file->f_path.dentry->d_inode;
1600        mode = READ_ONCE(inode->i_mode);
1601        if (!(mode & (S_ISUID|S_ISGID)))
1602                return;
1603
1604        mnt_userns = file_mnt_user_ns(file);
1605
1606        /* Be careful if suid/sgid is set */
1607        inode_lock(inode);
1608
1609        /* reload atomically mode/uid/gid now that lock held */
1610        mode = inode->i_mode;
1611        uid = i_uid_into_mnt(mnt_userns, inode);
1612        gid = i_gid_into_mnt(mnt_userns, inode);
1613        inode_unlock(inode);
1614
1615        /* We ignore suid/sgid if there are no mappings for them in the ns */
1616        if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1617                 !kgid_has_mapping(bprm->cred->user_ns, gid))
1618                return;
1619
1620        if (mode & S_ISUID) {
1621                bprm->per_clear |= PER_CLEAR_ON_SETID;
1622                bprm->cred->euid = uid;
1623        }
1624
1625        if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1626                bprm->per_clear |= PER_CLEAR_ON_SETID;
1627                bprm->cred->egid = gid;
1628        }
1629}
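
/*
 * Editor's note on the S_ISGID test above: setgid without group-execute
 * historically marked mandatory locking rather than setgid-on-exec, so
 * both bits must be present.  A small userspace illustration:
 */
#if 0
#include <stdio.h>
#include <sys/stat.h>

static int is_setgid_exec(mode_t mode)
{
        /* Mirrors the kernel check: S_ISGID alone is not enough. */
        return (mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP);
}

int main(void)
{
        printf("%d\n", is_setgid_exec(02755)); /* 1: setgid + group execute */
        printf("%d\n", is_setgid_exec(02645)); /* 0: no group execute */
        return 0;
}
#endif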
1630
1631/*
1632 * Compute bprm->cred based upon the final binary.
1633 */
1634static int bprm_creds_from_file(struct linux_binprm *bprm)
1635{
1636        /* Compute creds based on which file? */
1637        struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;
1638
1639        bprm_fill_uid(bprm, file);
1640        return security_bprm_creds_from_file(bprm, file);
1641}
1642
1643/*
1644 * Fill the binprm structure from the inode.
1645 * Read the first BINPRM_BUF_SIZE bytes
1646 *
1647 * This may be called multiple times for binary chains (scripts for example).
1648 */
1649static int prepare_binprm(struct linux_binprm *bprm)
1650{
1651        loff_t pos = 0;
1652
1653        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1654        return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
1655}
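
/*
 * Editor's sketch: the buffer prepared above is what each load_binary
 * implementation inspects first; binfmt_elf, for instance, effectively
 * begins with a magic check like this (simplified):
 */
#if 0
#include <linux/elf.h>
#include <linux/string.h>
#include <linux/types.h>

static bool looks_like_elf(const struct linux_binprm *bprm)
{
        /* bprm->buf holds the first BINPRM_BUF_SIZE bytes of the file,
         * which for ELF begin with e_ident[] and the "\177ELF" magic. */
        return memcmp(bprm->buf, ELFMAG, SELFMAG) == 0;
}
#endif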
1656
1657/*
1658 * Arguments are '\0' separated strings found at the location bprm->p
1659 * points to; chop off the first by relocating bprm->p to right after
1660 * the first '\0' encountered.
1661 */
1662int remove_arg_zero(struct linux_binprm *bprm)
1663{
1664        int ret = 0;
1665        unsigned long offset;
1666        char *kaddr;
1667        struct page *page;
1668
1669        if (!bprm->argc)
1670                return 0;
1671
1672        do {
1673                offset = bprm->p & ~PAGE_MASK;
1674                page = get_arg_page(bprm, bprm->p, 0);
1675                if (!page) {
1676                        ret = -EFAULT;
1677                        goto out;
1678                }
1679                kaddr = kmap_atomic(page);
1680
1681                for (; offset < PAGE_SIZE && kaddr[offset];
1682                                offset++, bprm->p++)
1683                        ;
1684
1685                kunmap_atomic(kaddr);
1686                put_arg_page(page);
1687        } while (offset == PAGE_SIZE);
1688
1689        bprm->p++;
1690        bprm->argc--;
1691        ret = 0;
1692
1693out:
1694        return ret;
1695}
1696EXPORT_SYMBOL(remove_arg_zero);
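
/*
 * Editor's sketch: the canonical caller is fs/binfmt_script.c, which
 * (simplified) drops the original argv[0] and, because the argument
 * stack grows down, pushes replacements in reverse so the interpreter
 * ends up as the new argv[0].  i_name here is the interpreter parsed
 * from the #! line:
 */
#if 0
        retval = remove_arg_zero(bprm);
        if (retval)
                return retval;
        retval = copy_string_kernel(bprm->interp, bprm); /* script path */
        if (retval < 0)
                return retval;
        bprm->argc++;
        retval = copy_string_kernel(i_name, bprm);      /* #! interpreter */
        if (retval < 0)
                return retval;
        bprm->argc++;
#endif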
1697
1698#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1699/*
1700 * cycle through the list of binary format handlers until one recognizes the image
1701 */
1702static int search_binary_handler(struct linux_binprm *bprm)
1703{
1704        bool need_retry = IS_ENABLED(CONFIG_MODULES);
1705        struct linux_binfmt *fmt;
1706        int retval;
1707
1708        retval = prepare_binprm(bprm);
1709        if (retval < 0)
1710                return retval;
1711
1712        retval = security_bprm_check(bprm);
1713        if (retval)
1714                return retval;
1715
1716        retval = -ENOENT;
1717 retry:
1718        read_lock(&binfmt_lock);
1719        list_for_each_entry(fmt, &formats, lh) {
1720                if (!try_module_get(fmt->module))
1721                        continue;
1722                read_unlock(&binfmt_lock);
1723
1724                retval = fmt->load_binary(bprm);
1725
1726                read_lock(&binfmt_lock);
1727                put_binfmt(fmt);
1728                if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
1729                        read_unlock(&binfmt_lock);
1730                        return retval;
1731                }
1732        }
1733        read_unlock(&binfmt_lock);
1734
1735        if (need_retry) {
1736                if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1737                    printable(bprm->buf[2]) && printable(bprm->buf[3]))
1738                        return retval;
1739                if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1740                        return retval;
1741                need_retry = false;
1742                goto retry;
1743        }
1744
1745        return retval;
1746}
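
/*
 * Editor's sketch: a minimal out-of-tree binfmt module that would take
 * part in the loop above.  Returning -ENOEXEC defers to the next
 * handler; any other value ends the search.  "DEMO" is a made-up magic.
 */
#if 0
#include <linux/binfmts.h>
#include <linux/module.h>
#include <linux/string.h>

static int demo_load_binary(struct linux_binprm *bprm)
{
        if (memcmp(bprm->buf, "DEMO", 4) != 0)
                return -ENOEXEC;        /* not recognized; keep cycling */
        /* A real handler would set up the new mm and image here. */
        return -ENOEXEC;
}

static struct linux_binfmt demo_format = {
        .module         = THIS_MODULE,
        .load_binary    = demo_load_binary,
};

static int __init demo_init(void)
{
        register_binfmt(&demo_format);
        return 0;
}

static void __exit demo_exit(void)
{
        unregister_binfmt(&demo_format);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
#endif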
1747
1748static int exec_binprm(struct linux_binprm *bprm)
1749{
1750        pid_t old_pid, old_vpid;
1751        int ret, depth;
1752
1753        /* Need to fetch pid before load_binary changes it */
1754        old_pid = current->pid;
1755        rcu_read_lock();
1756        old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1757        rcu_read_unlock();
1758
1759        /* This allows up to 5 levels of binfmt rewrites before failing hard. */
1760        for (depth = 0;; depth++) {
1761                struct file *exec;
1762                if (depth > 5)
1763                        return -ELOOP;
1764
1765                ret = search_binary_handler(bprm);
1766                if (ret < 0)
1767                        return ret;
1768                if (!bprm->interpreter)
1769                        break;
1770
1771                exec = bprm->file;
1772                bprm->file = bprm->interpreter;
1773                bprm->interpreter = NULL;
1774
1775                allow_write_access(exec);
1776                if (unlikely(bprm->have_execfd)) {
1777                        if (bprm->executable) {
1778                                fput(exec);
1779                                return -ENOEXEC;
1780                        }
1781                        bprm->executable = exec;
1782                } else
1783                        fput(exec);
1784        }
1785
1786        audit_bprm(bprm);
1787        trace_sched_process_exec(current, old_pid, bprm);
1788        ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1789        proc_exec_connector(current);
1790        return 0;
1791}
1792
1793/*
1794 * Execute a new program; this is the common path shared by execve(),
1794 * execveat() and kernel_execve().
1795 */
1796static int bprm_execve(struct linux_binprm *bprm,
1797                       int fd, struct filename *filename, int flags)
1798{
1799        struct file *file;
1800        int retval;
1801
1802        retval = prepare_bprm_creds(bprm);
1803        if (retval)
1804                return retval;
1805
1806        check_unsafe_exec(bprm);
1807        current->in_execve = 1;
1808
1809        file = do_open_execat(fd, filename, flags);
1810        retval = PTR_ERR(file);
1811        if (IS_ERR(file))
1812                goto out_unmark;
1813
1814        sched_exec();
1815
1816        bprm->file = file;
1817        /*
1818         * Record that a name derived from an O_CLOEXEC fd will be
1819         * inaccessible after exec.  This allows the code in exec to
1820         * choose to fail when the executable is not mmapped into the
1821         * interpreter and an open file descriptor is not passed to
1822         * the interpreter.  This makes for a better user experience
1823         * than having the interpreter start and then immediately fail
1824         * when it finds the executable is inaccessible.
1825         */
1826        if (bprm->fdpath && get_close_on_exec(fd))
1827                bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1828
1829        /* Set the unchanging part of bprm->cred */
1830        retval = security_bprm_creds_for_exec(bprm);
1831        if (retval)
1832                goto out;
1833
1834        retval = exec_binprm(bprm);
1835        if (retval < 0)
1836                goto out;
1837
1838        /* execve succeeded */
1839        current->fs->in_exec = 0;
1840        current->in_execve = 0;
1841        rseq_execve(current);
1842        acct_update_integrals(current);
1843        task_numa_free(current, false);
1844        return retval;
1845
1846out:
1847        /*
1848         * If past the point of no return ensure the code never
1849         * returns to the userspace process.  Use an existing fatal
1850         * signal if present otherwise terminate the process with
1851         * SIGSEGV.
1852         */
1853        if (bprm->point_of_no_return && !fatal_signal_pending(current))
1854                force_sigsegv(SIGSEGV);
1855
1856out_unmark:
1857        current->fs->in_exec = 0;
1858        current->in_execve = 0;
1859
1860        return retval;
1861}
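
/*
 * Editor's sketch (userspace): the O_CLOEXEC situation described above
 * arises from fexecve(), which glibc implements via execveat() with
 * AT_EMPTY_PATH.  For a #! script whose only name is a close-on-exec
 * /dev/fd entry, the exec fails early with ENOENT instead of starting
 * an interpreter that could never reopen its input.  "./script.sh" is
 * a hypothetical path.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char *const argv[] = { "script", NULL };
        char *const envp[] = { NULL };
        int fd = open("./script.sh", O_RDONLY | O_CLOEXEC);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        fexecve(fd, argv, envp);        /* ENOENT for #! scripts */
        perror("fexecve");
        return 1;
}
#endif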
1862
1863static int do_execveat_common(int fd, struct filename *filename,
1864                              struct user_arg_ptr argv,
1865                              struct user_arg_ptr envp,
1866                              int flags)
1867{
1868        struct linux_binprm *bprm;
1869        int retval;
1870
1871        if (IS_ERR(filename))
1872                return PTR_ERR(filename);
1873
1874        /*
1875         * We move the actual failure in case of RLIMIT_NPROC excess from
1876         * set*uid() to execve() because too many poorly written programs
1877         * don't check setuid() return code.  Here we additionally recheck
1878         * whether NPROC limit is still exceeded.
1879         */
1880        if ((current->flags & PF_NPROC_EXCEEDED) &&
1881            atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
1882                retval = -EAGAIN;
1883                goto out_ret;
1884        }
1885
1886        /* We're below the limit (still or again), so we don't want to make
1887         * further execve() calls fail. */
1888        current->flags &= ~PF_NPROC_EXCEEDED;
1889
1890        bprm = alloc_bprm(fd, filename);
1891        if (IS_ERR(bprm)) {
1892                retval = PTR_ERR(bprm);
1893                goto out_ret;
1894        }
1895
1896        retval = count(argv, MAX_ARG_STRINGS);
1897        if (retval < 0)
1898                goto out_free;
1899        bprm->argc = retval;
1900
1901        retval = count(envp, MAX_ARG_STRINGS);
1902        if (retval < 0)
1903                goto out_free;
1904        bprm->envc = retval;
1905
1906        retval = bprm_stack_limits(bprm);
1907        if (retval < 0)
1908                goto out_free;
1909
1910        retval = copy_string_kernel(bprm->filename, bprm);
1911        if (retval < 0)
1912                goto out_free;
1913        bprm->exec = bprm->p;
1914
1915        retval = copy_strings(bprm->envc, envp, bprm);
1916        if (retval < 0)
1917                goto out_free;
1918
1919        retval = copy_strings(bprm->argc, argv, bprm);
1920        if (retval < 0)
1921                goto out_free;
1922
1923        retval = bprm_execve(bprm, fd, filename, flags);
1924out_free:
1925        free_bprm(bprm);
1926
1927out_ret:
1928        putname(filename);
1929        return retval;
1930}
1931
1932int kernel_execve(const char *kernel_filename,
1933                  const char *const *argv, const char *const *envp)
1934{
1935        struct filename *filename;
1936        struct linux_binprm *bprm;
1937        int fd = AT_FDCWD;
1938        int retval;
1939
1940        filename = getname_kernel(kernel_filename);
1941        if (IS_ERR(filename))
1942                return PTR_ERR(filename);
1943
1944        bprm = alloc_bprm(fd, filename);
1945        if (IS_ERR(bprm)) {
1946                retval = PTR_ERR(bprm);
1947                goto out_ret;
1948        }
1949
1950        retval = count_strings_kernel(argv);
1951        if (retval < 0)
1952                goto out_free;
1953        bprm->argc = retval;
1954
1955        retval = count_strings_kernel(envp);
1956        if (retval < 0)
1957                goto out_free;
1958        bprm->envc = retval;
1959
1960        retval = bprm_stack_limits(bprm);
1961        if (retval < 0)
1962                goto out_free;
1963
1964        retval = copy_string_kernel(bprm->filename, bprm);
1965        if (retval < 0)
1966                goto out_free;
1967        bprm->exec = bprm->p;
1968
1969        retval = copy_strings_kernel(bprm->envc, envp, bprm);
1970        if (retval < 0)
1971                goto out_free;
1972
1973        retval = copy_strings_kernel(bprm->argc, argv, bprm);
1974        if (retval < 0)
1975                goto out_free;
1976
1977        retval = bprm_execve(bprm, fd, filename, 0);
1978out_free:
1979        free_bprm(bprm);
1980out_ret:
1981        putname(filename);
1982        return retval;
1983}
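
/*
 * Editor's sketch: in-kernel callers pass kernel pointers directly;
 * init/main.c's run_init_process(), for example, boils down to:
 */
#if 0
        const char *argv_init[] = { "/sbin/init", NULL };
        const char *envp_init[] = { "HOME=/", "TERM=linux", NULL };

        return kernel_execve("/sbin/init", argv_init, envp_init);
#endif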
1984
1985static int do_execve(struct filename *filename,
1986        const char __user *const __user *__argv,
1987        const char __user *const __user *__envp)
1988{
1989        struct user_arg_ptr argv = { .ptr.native = __argv };
1990        struct user_arg_ptr envp = { .ptr.native = __envp };
1991        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1992}
1993
1994static int do_execveat(int fd, struct filename *filename,
1995                const char __user *const __user *__argv,
1996                const char __user *const __user *__envp,
1997                int flags)
1998{
1999        struct user_arg_ptr argv = { .ptr.native = __argv };
2000        struct user_arg_ptr envp = { .ptr.native = __envp };
2001
2002        return do_execveat_common(fd, filename, argv, envp, flags);
2003}
2004
2005#ifdef CONFIG_COMPAT
2006static int compat_do_execve(struct filename *filename,
2007        const compat_uptr_t __user *__argv,
2008        const compat_uptr_t __user *__envp)
2009{
2010        struct user_arg_ptr argv = {
2011                .is_compat = true,
2012                .ptr.compat = __argv,
2013        };
2014        struct user_arg_ptr envp = {
2015                .is_compat = true,
2016                .ptr.compat = __envp,
2017        };
2018        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
2019}
2020
2021static int compat_do_execveat(int fd, struct filename *filename,
2022                              const compat_uptr_t __user *__argv,
2023                              const compat_uptr_t __user *__envp,
2024                              int flags)
2025{
2026        struct user_arg_ptr argv = {
2027                .is_compat = true,
2028                .ptr.compat = __argv,
2029        };
2030        struct user_arg_ptr envp = {
2031                .is_compat = true,
2032                .ptr.compat = __envp,
2033        };
2034        return do_execveat_common(fd, filename, argv, envp, flags);
2035}
2036#endif
2037
2038void set_binfmt(struct linux_binfmt *new)
2039{
2040        struct mm_struct *mm = current->mm;
2041
2042        if (mm->binfmt)
2043                module_put(mm->binfmt->module);
2044
2045        mm->binfmt = new;
2046        if (new)
2047                __module_get(new->module);
2048}
2049EXPORT_SYMBOL(set_binfmt);
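
/*
 * Editor's sketch: a load_binary implementation that has committed to
 * the new image records its format on the mm; binfmt_elf, for example,
 * simply does:
 */
#if 0
        set_binfmt(&elf_format);
#endif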
2050
2051/*
2052 * set_dumpable stores a three-value SUID_DUMP_* setting into mm->flags.
2053 */
2054void set_dumpable(struct mm_struct *mm, int value)
2055{
2056        if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
2057                return;
2058
2059        set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
2060}
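
/*
 * Editor's sketch (userspace): the same tri-state is visible through
 * prctl(); SUID_DUMP_DISABLE and SUID_DUMP_USER correspond to 0 and 1,
 * while the suid_dumpable sysctl governs what setid execs get.
 */
#if 0
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        if (prctl(PR_SET_DUMPABLE, 0L) != 0)
                perror("prctl");
        printf("dumpable: %d\n", prctl(PR_GET_DUMPABLE));
        return 0;
}
#endif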
2061
2062SYSCALL_DEFINE3(execve,
2063                const char __user *, filename,
2064                const char __user *const __user *, argv,
2065                const char __user *const __user *, envp)
2066{
2067        return do_execve(getname(filename), argv, envp);
2068}
2069
2070SYSCALL_DEFINE5(execveat,
2071                int, fd, const char __user *, filename,
2072                const char __user *const __user *, argv,
2073                const char __user *const __user *, envp,
2074                int, flags)
2075{
2076        int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
2077
2078        return do_execveat(fd,
2079                           getname_flags(filename, lookup_flags, NULL),
2080                           argv, envp, flags);
2081}
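
/*
 * Editor's sketch (userspace): glibc exposes the AT_EMPTY_PATH case as
 * fexecve(); other execveat() uses go through syscall(2) directly,
 * e.g. a dirfd-relative exec:
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        char *const argv[] = { "true", NULL };
        char *const envp[] = { NULL };
        int dirfd = open("/usr/bin", O_RDONLY | O_DIRECTORY);

        if (dirfd < 0)
                return 1;
        syscall(SYS_execveat, dirfd, "true", argv, envp, 0);
        return 1;       /* reached only if the exec failed */
}
#endif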
2082
2083#ifdef CONFIG_COMPAT
2084COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
2085        const compat_uptr_t __user *, argv,
2086        const compat_uptr_t __user *, envp)
2087{
2088        return compat_do_execve(getname(filename), argv, envp);
2089}
2090
2091COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
2092                       const char __user *, filename,
2093                       const compat_uptr_t __user *, argv,
2094                       const compat_uptr_t __user *, envp,
2095                       int,  flags)
2096{
2097        int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
2098
2099        return compat_do_execveat(fd,
2100                                  getname_flags(filename, lookup_flags, NULL),
2101                                  argv, envp, flags);
2102}
2103#endif
2104