linux/fs/exec.c
/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.  Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#ifdef __alpha__
/* for /sbin/loader handling in search_binary_handler() */
#include <linux/a.out.h>
#endif

int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
int suid_dumpable = 0;

/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

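/*
 * Register a binary format handler on the global formats list so that
 * search_binary_handler() will try it.  New formats are added at the
 * head of the list, so they are tried before previously registered ones.
 */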
int register_binfmt(struct linux_binfmt * fmt)
{
        if (!fmt)
                return -EINVAL;
        write_lock(&binfmt_lock);
        list_add(&fmt->lh, &formats);
        write_unlock(&binfmt_lock);
        return 0;
}

EXPORT_SYMBOL(register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
        write_lock(&binfmt_lock);
        list_del(&fmt->lh);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
        module_put(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that the address to load at is taken from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
        struct file *file;
        struct nameidata nd;
        char *tmp = getname(library);
        int error = PTR_ERR(tmp);

        if (!IS_ERR(tmp)) {
                error = path_lookup_open(AT_FDCWD, tmp,
                                         LOOKUP_FOLLOW, &nd,
                                         FMODE_READ|FMODE_EXEC);
                putname(tmp);
        }
        if (error)
                goto out;

        error = -EINVAL;
        if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
                goto exit;

        error = -EACCES;
        if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
                goto exit;

        error = vfs_permission(&nd, MAY_READ | MAY_EXEC | MAY_OPEN);
        if (error)
                goto exit;

        file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;

        error = -ENOEXEC;
        if (file->f_op) {
                struct linux_binfmt * fmt;

                read_lock(&binfmt_lock);
                list_for_each_entry(fmt, &formats, lh) {
                        if (!fmt->load_shlib)
                                continue;
                        if (!try_module_get(fmt->module))
                                continue;
                        read_unlock(&binfmt_lock);
                        error = fmt->load_shlib(file);
                        read_lock(&binfmt_lock);
                        put_binfmt(fmt);
                        if (error != -ENOEXEC)
                                break;
                }
                read_unlock(&binfmt_lock);
        }
        fput(file);
out:
        return error;
exit:
        release_open_intent(&nd);
        path_put(&nd.path);
        goto out;
}

#ifdef CONFIG_MMU

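/*
 * Track how many pages the argument strings currently occupy and charge
 * the difference against the mm's total_vm accounting.
 */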
void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
        struct mm_struct *mm = current->mm;
        long diff = (long)(pages - bprm->vma_pages);

        if (!mm || !diff)
                return;

        bprm->vma_pages = pages;

        down_write(&mm->mmap_sem);
        mm->total_vm += diff;
        up_write(&mm->mmap_sem);
}

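/*
 * Return the page backing the new stack at 'pos', faulting it in and
 * pinning it via get_user_pages().  For writes this also enforces the
 * limit on how much of the stack the argument strings may consume.
 */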
struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;
        int ret;

#ifdef CONFIG_STACK_GROWSUP
        if (write) {
                ret = expand_stack_downwards(bprm->vma, pos);
                if (ret < 0)
                        return NULL;
        }
#endif
        ret = get_user_pages(current, bprm->mm, pos,
                        1, write, 1, &page, NULL);
        if (ret <= 0)
                return NULL;

        if (write) {
                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
                struct rlimit *rlim;

                acct_arg_size(bprm, size / PAGE_SIZE);

                /*
                 * We've historically supported up to 32 pages (ARG_MAX)
                 * of argument strings even with small stacks
                 */
                if (size <= ARG_MAX)
                        return page;

                /*
                 * Limit to 1/4-th the stack size for the argv+env strings.
                 * This ensures that:
                 *  - the remaining binfmt code will not run out of stack space,
                 *  - the program will have a reasonable amount of stack left
                 *    to work from.
                 */
                rlim = current->signal->rlim;
                if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
                        put_page(page);
                        return NULL;
                }
        }

        return page;
}

static void put_arg_page(struct page *page)
{
        put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

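/*
 * Set up the initial single-page stack VMA for the new image.  It is
 * grown and moved to its final address later, in setup_arg_pages().
 */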
static int __bprm_mm_init(struct linux_binprm *bprm)
{
        int err = -ENOMEM;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;

        bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma)
                goto err;

        down_write(&mm->mmap_sem);
        vma->vm_mm = mm;

        /*
         * Place the stack at the largest stack address the architecture
         * supports. Later, we'll move this to an appropriate place. We don't
         * use STACK_TOP because that can depend on attributes which aren't
         * configured yet.
         */
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;

        vma->vm_flags = VM_STACK_FLAGS;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

        err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
        if (err) {
                up_write(&mm->mmap_sem);
                goto err;
        }

        err = insert_vm_struct(mm, vma);
        if (err) {
                up_write(&mm->mmap_sem);
                goto err;
        }

        mm->stack_vm = mm->total_vm = 1;
        up_write(&mm->mmap_sem);

        bprm->p = vma->vm_end - sizeof(void *);

        return 0;

err:
        if (vma) {
                bprm->vma = NULL;
                kmem_cache_free(vm_area_cachep, vma);
        }

        return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= MAX_ARG_STRLEN;
}

#else

void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;

        page = bprm->page[pos / PAGE_SIZE];
        if (!page && write) {
                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
                if (!page)
                        return NULL;
                bprm->page[pos / PAGE_SIZE] = page;
        }

        return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
        if (bprm->page[i]) {
                __free_page(bprm->page[i]);
                bprm->page[i] = NULL;
        }
}

static void free_arg_pages(struct linux_binprm *bprm)
{
        int i;

        for (i = 0; i < MAX_ARG_PAGES; i++)
                free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
        return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct mm_struct *mm = NULL;

        bprm->mm = mm = mm_alloc();
        err = -ENOMEM;
        if (!mm)
                goto err;

        err = init_new_context(current, mm);
        if (err)
                goto err;

        err = __bprm_mm_init(bprm);
        if (err)
                goto err;

        return 0;

err:
        if (mm) {
                bprm->mm = NULL;
                mmdrop(mm);
        }

        return err;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
        int i = 0;

        if (argv != NULL) {
                for (;;) {
                        char __user * p;

                        if (get_user(p, argv))
                                return -EFAULT;
                        if (!p)
                                break;
                        argv++;
                        if (++i > max)
                                return -E2BIG;

                        if (fatal_signal_pending(current))
                                return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
        return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, char __user * __user * argv,
                        struct linux_binprm *bprm)
{
        struct page *kmapped_page = NULL;
        char *kaddr = NULL;
        unsigned long kpos = 0;
        int ret;

        while (argc-- > 0) {
                char __user *str;
                int len;
                unsigned long pos;

                if (get_user(str, argv+argc) ||
                                !(len = strnlen_user(str, MAX_ARG_STRLEN))) {
                        ret = -EFAULT;
                        goto out;
                }

                if (!valid_arg_len(bprm, len)) {
                        ret = -E2BIG;
                        goto out;
                }

                /* We're going to work our way backwards. */
                pos = bprm->p;
                str += len;
                bprm->p -= len;

                while (len > 0) {
                        int offset, bytes_to_copy;

                        if (fatal_signal_pending(current)) {
                                ret = -ERESTARTNOHAND;
                                goto out;
                        }
                        cond_resched();

                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;

                        bytes_to_copy = offset;
                        if (bytes_to_copy > len)
                                bytes_to_copy = len;

                        offset -= bytes_to_copy;
                        pos -= bytes_to_copy;
                        str -= bytes_to_copy;
                        len -= bytes_to_copy;

                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
                                struct page *page;

                                page = get_arg_page(bprm, pos, 1);
                                if (!page) {
                                        ret = -E2BIG;
                                        goto out;
                                }

                                if (kmapped_page) {
                                        flush_kernel_dcache_page(kmapped_page);
                                        kunmap(kmapped_page);
                                        put_arg_page(kmapped_page);
                                }
                                kmapped_page = page;
                                kaddr = kmap(kmapped_page);
                                kpos = pos & PAGE_MASK;
                                flush_arg_page(bprm, kpos, kmapped_page);
                        }
                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
                                ret = -EFAULT;
                                goto out;
                        }
                }
        }
        ret = 0;
out:
        if (kmapped_page) {
                flush_kernel_dcache_page(kmapped_page);
                kunmap(kmapped_page);
                put_arg_page(kmapped_page);
        }
        return ret;
}

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char **argv, struct linux_binprm *bprm)
{
        int r;
        mm_segment_t oldfs = get_fs();
        set_fs(KERNEL_DS);
        r = copy_strings(argc, (char __user * __user *)argv, bprm);
        set_fs(oldfs);
        return r;
}
EXPORT_SYMBOL(copy_strings_kernel);

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long old_start = vma->vm_start;
        unsigned long old_end = vma->vm_end;
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
        struct mmu_gather *tlb;

        BUG_ON(new_start > new_end);

        /*
         * ensure there are no vmas between where we want to go
         * and where we are
         */
        if (vma != find_vma(mm, new_start))
                return -EFAULT;

        /*
         * cover the whole range: [new_start, old_end)
         */
        vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);

        /*
         * move the page tables downwards, on failure we rely on
         * process cleanup to remove whatever mess we made.
         */
        if (length != move_page_tables(vma, old_start,
                                       vma, new_start, length))
                return -ENOMEM;

        lru_add_drain();
        tlb = tlb_gather_mmu(mm, 0);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap, clear from new_end.
                 */
                free_pgd_range(tlb, new_end, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : 0);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to avoid
                 * touching the address space in [new_end, old_start).  Some
                 * architectures have constraints on va-space that make this
                 * illegal (IA64); for the others it's just a little faster.
                 */
                free_pgd_range(tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : 0);
        }
        tlb_finish_mmu(tlb, new_end, old_end);

        /*
         * shrink the vma to just the new range.
         */
        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

        return 0;
}

#define EXTRA_STACK_VM_PAGES    20      /* random */

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
                    unsigned long stack_top,
                    int executable_stack)
{
        unsigned long ret;
        unsigned long stack_shift;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = bprm->vma;
        struct vm_area_struct *prev = NULL;
        unsigned long vm_flags;
        unsigned long stack_base;

#ifdef CONFIG_STACK_GROWSUP
        /* Limit stack size to 1GB */
        stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
        if (stack_base > (1 << 30))
                stack_base = 1 << 30;

        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
                return -ENOMEM;

        stack_base = PAGE_ALIGN(stack_top - stack_base);

        stack_shift = vma->vm_start - stack_base;
        mm->arg_start = bprm->p - stack_shift;
        bprm->p = vma->vm_end - stack_shift;
#else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);

        if (unlikely(stack_top < mmap_min_addr) ||
            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
                return -ENOMEM;

        stack_shift = vma->vm_end - stack_top;

        bprm->p -= stack_shift;
        mm->arg_start = bprm->p;
#endif

        if (bprm->loader)
                bprm->loader -= stack_shift;
        bprm->exec -= stack_shift;

        down_write(&mm->mmap_sem);
        vm_flags = VM_STACK_FLAGS;

        /*
         * Adjust stack execute permissions; explicitly enable for
         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
         * (arch default) otherwise.
         */
        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
                vm_flags |= VM_EXEC;
        else if (executable_stack == EXSTACK_DISABLE_X)
                vm_flags &= ~VM_EXEC;
        vm_flags |= mm->def_flags;

        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
                        vm_flags);
        if (ret)
                goto out_unlock;
        BUG_ON(prev != vma);

        /* Move stack pages down in memory. */
        if (stack_shift) {
                ret = shift_arg_pages(vma, stack_shift);
                if (ret) {
                        up_write(&mm->mmap_sem);
                        return ret;
                }
        }

#ifdef CONFIG_STACK_GROWSUP
        stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#else
        stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#endif
        ret = expand_stack(vma, stack_base);
        if (ret)
                ret = -EFAULT;

out_unlock:
        up_write(&mm->mmap_sem);
        return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */

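/*
 * Open a file for exec(): check that it is a regular file on a mount
 * that permits execution and that we have MAY_EXEC permission, then
 * deny write access to it for as long as it is being executed.
 */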
struct file *open_exec(const char *name)
{
        struct nameidata nd;
        struct file *file;
        int err;

        err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd,
                                FMODE_READ|FMODE_EXEC);
        if (err)
                goto out;

        err = -EACCES;
        if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
                goto out_path_put;

        if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
                goto out_path_put;

        err = vfs_permission(&nd, MAY_EXEC | MAY_OPEN);
        if (err)
                goto out_path_put;

        file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
        if (IS_ERR(file))
                return file;

        err = deny_write_access(file);
        if (err) {
                fput(file);
                goto out;
        }

        return file;

 out_path_put:
        release_open_intent(&nd);
        path_put(&nd.path);
 out:
        return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);

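/*
 * Read from a file into a kernel buffer; the temporary set_fs(get_ds())
 * lets vfs_read() accept the kernel-space pointer.
 */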
int kernel_read(struct file *file, unsigned long offset,
        char *addr, unsigned long count)
{
        mm_segment_t old_fs;
        loff_t pos = offset;
        int result;

        old_fs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
        result = vfs_read(file, (void __user *)addr, count, &pos);
        set_fs(old_fs);
        return result;
}

EXPORT_SYMBOL(kernel_read);

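/*
 * Install the new mm as current's address space and drop our reference
 * to the old one.  Bails out with -EINTR if a core dump of the old mm
 * is in progress.
 */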
static int exec_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk;
        struct mm_struct *old_mm, *active_mm;

        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
        mm_release(tsk, old_mm);

        if (old_mm) {
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec.  We must hold mmap_sem around
                 * checking core_state and changing tsk->mm.
                 */
                down_read(&old_mm->mmap_sem);
                if (unlikely(old_mm->core_state)) {
                        up_read(&old_mm->mmap_sem);
                        return -EINTR;
                }
        }
        task_lock(tsk);
        active_mm = tsk->active_mm;
        tsk->mm = mm;
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
        task_unlock(tsk);
        arch_pick_mmap_layout(mm);
        if (old_mm) {
                up_read(&old_mm->mmap_sem);
                BUG_ON(active_mm != old_mm);
                mm_update_next_owner(old_mm);
                mmput(old_mm);
                return 0;
        }
        mmdrop(active_mm);
        return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;
        struct task_struct *leader = NULL;
        int count;

        if (thread_group_empty(tsk))
                goto no_thread_group;

        /*
         * Kill all other threads in the thread group.
         */
        spin_lock_irq(lock);
        if (signal_group_exit(sig)) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
                 */
                spin_unlock_irq(lock);
                return -EAGAIN;
        }
        sig->group_exit_task = tsk;
        zap_other_threads(tsk);

        /* Account for the thread group leader hanging around: */
        count = thread_group_leader(tsk) ? 1 : 2;
        sig->notify_count = count;
        while (atomic_read(&sig->count) > count) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(lock);
                schedule();
                spin_lock_irq(lock);
        }
        spin_unlock_irq(lock);

        /*
         * At this point all other threads have exited, all we have to
         * do is to wait for the thread group leader to become inactive,
         * and to assume its PID:
         */
        if (!thread_group_leader(tsk)) {
                leader = tsk->group_leader;

                sig->notify_count = -1; /* for exit_notify() */
                for (;;) {
                        write_lock_irq(&tasklist_lock);
                        if (likely(leader->exit_state))
                                break;
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        write_unlock_irq(&tasklist_lock);
                        schedule();
                }

                if (unlikely(task_child_reaper(tsk) == leader))
                        task_active_pid_ns(tsk)->child_reaper = tsk;
                /*
                 * The only record we have of the real-time age of a
                 * process, regardless of execs it's done, is start_time.
                 * All the past CPU time is accumulated in signal_struct
                 * from sister threads now dead.  But in this non-leader
                 * exec, nothing survives from the original leader thread,
                 * whose birth marks the true age of this process now.
                 * When we take on its identity by switching to its PID, we
                 * also take its birthdate (always earlier than our own).
                 */
                tsk->start_time = leader->start_time;

                BUG_ON(!same_thread_group(leader, tsk));
                BUG_ON(has_group_leader_pid(tsk));
                /*
                 * An exec() starts a new thread group with the
                 * TGID of the previous thread group. Rehash the
                 * two threads with a switched PID, and release
                 * the former thread group leader:
                 */

                /* Become a process group leader with the old leader's pid.
                 * The old leader becomes a thread of this thread group.
                 * Note: The old leader also uses this pid until release_task
                 *       is called.  Odd but simple and correct.
                 */
                detach_pid(tsk, PIDTYPE_PID);
                tsk->pid = leader->pid;
                attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
                transfer_pid(leader, tsk, PIDTYPE_PGID);
                transfer_pid(leader, tsk, PIDTYPE_SID);
                list_replace_rcu(&leader->tasks, &tsk->tasks);

                tsk->group_leader = tsk;
                leader->group_leader = tsk;

                tsk->exit_signal = SIGCHLD;

                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;

                write_unlock_irq(&tasklist_lock);
        }

        sig->group_exit_task = NULL;
        sig->notify_count = 0;

no_thread_group:
        exit_itimers(sig);
        flush_itimer_signals();
        if (leader)
                release_task(leader);

        if (atomic_read(&oldsighand->count) != 1) {
                struct sighand_struct *newsighand;
                /*
                 * This ->sighand is shared with the CLONE_SIGHAND
                 * but not CLONE_THREAD task, switch to the new one.
                 */
                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
                if (!newsighand)
                        return -ENOMEM;

                atomic_set(&newsighand->count, 1);
                memcpy(newsighand->action, oldsighand->action,
                       sizeof(newsighand->action));

                write_lock_irq(&tasklist_lock);
                spin_lock(&oldsighand->siglock);
                rcu_assign_pointer(tsk->sighand, newsighand);
                spin_unlock(&oldsighand->siglock);
                write_unlock_irq(&tasklist_lock);

                __cleanup_sighand(oldsighand);
        }

        BUG_ON(!thread_group_leader(tsk));
        return 0;
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static void flush_old_files(struct files_struct * files)
{
        long j = -1;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
        for (;;) {
                unsigned long set, i;

                j++;
                i = j * __NFDBITS;
                fdt = files_fdtable(files);
                if (i >= fdt->max_fds)
                        break;
                set = fdt->close_on_exec->fds_bits[j];
                if (!set)
                        continue;
                fdt->close_on_exec->fds_bits[j] = 0;
                spin_unlock(&files->file_lock);
                for (; set; i++, set >>= 1) {
                        if (set & 1) {
                                sys_close(i);
                        }
                }
                spin_lock(&files->file_lock);
        }
        spin_unlock(&files->file_lock);
}

char *get_task_comm(char *buf, struct task_struct *tsk)
{
        /* buf must be at least sizeof(tsk->comm) in size */
        task_lock(tsk);
        strncpy(buf, tsk->comm, sizeof(tsk->comm));
        task_unlock(tsk);
        return buf;
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
        task_lock(tsk);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
}

int flush_old_exec(struct linux_binprm * bprm)
{
        char * name;
        int i, ch, retval;
        char tcomm[sizeof(current->comm)];

        /*
         * Make sure we have a private signal table and that
         * we are unassociated from the previous thread group.
         */
        retval = de_thread(current);
        if (retval)
                goto out;

        set_mm_exe_file(bprm->mm, bprm->file);

        /*
         * Release all of the old mmap stuff
         */
        acct_arg_size(bprm, 0);
        retval = exec_mmap(bprm->mm);
        if (retval)
                goto out;

        bprm->mm = NULL;                /* We're using it now */

        /* This is the point of no return */
        current->sas_ss_sp = current->sas_ss_size = 0;

        if (current->euid == current->uid && current->egid == current->gid)
                set_dumpable(current->mm, 1);
        else
                set_dumpable(current->mm, suid_dumpable);

        name = bprm->filename;

        /* Copy the binary name from after the last slash */
        for (i = 0; (ch = *(name++)) != '\0';) {
                if (ch == '/')
                        i = 0; /* overwrite what we wrote */
                else
                        if (i < (sizeof(tcomm) - 1))
                                tcomm[i++] = ch;
        }
        tcomm[i] = '\0';
        set_task_comm(current, tcomm);

        current->flags &= ~PF_RANDOMIZE;
        flush_thread();

        /* Set the new mm task size. We have to do that late because it may
         * depend on TIF_32BIT which is only updated in flush_thread() on
         * some architectures like powerpc
         */
        current->mm->task_size = TASK_SIZE;

        if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
                suid_keys(current);
                set_dumpable(current->mm, suid_dumpable);
                current->pdeath_signal = 0;
        } else if (file_permission(bprm->file, MAY_READ) ||
                        (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
                suid_keys(current);
                set_dumpable(current->mm, suid_dumpable);
        }

        /* An exec changes our domain. We are no longer part of the thread
           group */

        current->self_exec_id++;

        flush_signal_handlers(current, 0);
        flush_old_files(current->files);

        return 0;

out:
        return retval;
}

EXPORT_SYMBOL(flush_old_exec);

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
        int mode;
        struct inode * inode = bprm->file->f_path.dentry->d_inode;
        int retval;

        mode = inode->i_mode;
        if (bprm->file->f_op == NULL)
                return -EACCES;

        bprm->e_uid = current->euid;
        bprm->e_gid = current->egid;

        if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
                /* Set-uid? */
                if (mode & S_ISUID) {
                        current->personality &= ~PER_CLEAR_ON_SETID;
                        bprm->e_uid = inode->i_uid;
                }

                /* Set-gid? */
                /*
                 * If setgid is set but no group execute bit then this
                 * is a candidate for mandatory locking, not a setgid
                 * executable.
                 */
                if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
                        current->personality &= ~PER_CLEAR_ON_SETID;
                        bprm->e_gid = inode->i_gid;
                }
        }

        /* fill in binprm security blob */
        retval = security_bprm_set(bprm);
        if (retval)
                return retval;

        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
        return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);

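/*
 * Collect the LSM_UNSAFE_* flags for this exec: ptrace-related unsafety
 * reported by the tracehook, plus sharing of fs_struct with another
 * process.
 */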
static int unsafe_exec(struct task_struct *p)
{
        int unsafe = tracehook_unsafe_exec(p);

        if (atomic_read(&p->fs->count) > 1)
                unsafe |= LSM_UNSAFE_SHARE;

        return unsafe;
}

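/*
 * Install the credentials the new image will run with: refresh the
 * task's keyrings and let the security module apply (or restrict) the
 * computed creds, taking the unsafe_exec() conditions into account.
 */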
void compute_creds(struct linux_binprm *bprm)
{
        int unsafe;

        if (bprm->e_uid != current->uid) {
                suid_keys(current);
                current->pdeath_signal = 0;
        }
        exec_keys(current);

        task_lock(current);
        unsafe = unsafe_exec(current);
        security_bprm_apply_creds(bprm, unsafe);
        task_unlock(current);
        security_bprm_post_apply_creds(bprm);
}
EXPORT_SYMBOL(compute_creds);

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
        int ret = 0;
        unsigned long offset;
        char *kaddr;
        struct page *page;

        if (!bprm->argc)
                return 0;

        do {
                offset = bprm->p & ~PAGE_MASK;
                page = get_arg_page(bprm, bprm->p, 0);
                if (!page) {
                        ret = -EFAULT;
                        goto out;
                }
                kaddr = kmap_atomic(page, KM_USER0);

                for (; offset < PAGE_SIZE && kaddr[offset];
                                offset++, bprm->p++)
                        ;

                kunmap_atomic(kaddr, KM_USER0);
                put_arg_page(page);

                if (offset == PAGE_SIZE)
                        free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
        } while (offset == PAGE_SIZE);

        bprm->p++;
        bprm->argc--;
        ret = 0;

out:
        return ret;
}
EXPORT_SYMBOL(remove_arg_zero);

/*
 * Cycle through the list of binary format handlers until one recognizes
 * the image.
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
        unsigned int depth = bprm->recursion_depth;
        int try, retval;
        struct linux_binfmt *fmt;
#ifdef __alpha__
        /* handle /sbin/loader.. */
        {
            struct exec * eh = (struct exec *) bprm->buf;

            if (!bprm->loader && eh->fh.f_magic == 0x183 &&
                (eh->fh.f_flags & 0x3000) == 0x3000)
            {
                struct file * file;
                unsigned long loader;

                allow_write_access(bprm->file);
                fput(bprm->file);
                bprm->file = NULL;

                loader = bprm->vma->vm_end - sizeof(void *);

                file = open_exec("/sbin/loader");
                retval = PTR_ERR(file);
                if (IS_ERR(file))
                        return retval;

                /* Remember if the application is TASO.  */
                bprm->sh_bang = eh->ah.entry < 0x100000000UL;

                bprm->file = file;
                bprm->loader = loader;
                retval = prepare_binprm(bprm);
                if (retval < 0)
                        return retval;
                /* should call search_binary_handler recursively here,
                   but it does not matter */
            }
        }
#endif
        retval = security_bprm_check(bprm);
        if (retval)
                return retval;

        /* kernel module loader fixup */
        /* so we don't try to run modprobe in kernel space. */
        set_fs(USER_DS);

        retval = audit_bprm(bprm);
        if (retval)
                return retval;

        retval = -ENOENT;
        for (try = 0; try < 2; try++) {
                read_lock(&binfmt_lock);
                list_for_each_entry(fmt, &formats, lh) {
                        int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
                        if (!fn)
                                continue;
                        if (!try_module_get(fmt->module))
                                continue;
                        read_unlock(&binfmt_lock);
                        retval = fn(bprm, regs);
                        /*
                         * Restore the depth counter to its starting value
                         * in this call, so we don't have to rely on every
                         * load_binary function to restore it on return.
                         */
                        bprm->recursion_depth = depth;
                        if (retval >= 0) {
                                if (depth == 0)
                                        tracehook_report_exec(fmt, bprm, regs);
                                put_binfmt(fmt);
                                allow_write_access(bprm->file);
                                if (bprm->file)
                                        fput(bprm->file);
                                bprm->file = NULL;
                                current->did_exec = 1;
                                proc_exec_connector(current);
                                return retval;
                        }
                        read_lock(&binfmt_lock);
                        put_binfmt(fmt);
                        if (retval != -ENOEXEC || bprm->mm == NULL)
                                break;
                        if (!bprm->file) {
                                read_unlock(&binfmt_lock);
                                return retval;
                        }
                }
                read_unlock(&binfmt_lock);
                if (retval != -ENOEXEC || bprm->mm == NULL) {
                        break;
#ifdef CONFIG_KMOD
                } else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
                        if (printable(bprm->buf[0]) &&
                            printable(bprm->buf[1]) &&
                            printable(bprm->buf[2]) &&
                            printable(bprm->buf[3]))
                                break; /* -ENOEXEC */
                        request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
                }
        }
        return retval;
}

EXPORT_SYMBOL(search_binary_handler);

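/*
 * Free a linux_binprm and any argument pages it still owns (arg pages
 * are only held here in the !CONFIG_MMU case; with an MMU they live in
 * the new mm's stack VMA).
 */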
void free_bprm(struct linux_binprm *bprm)
{
        free_arg_pages(bprm);
        kfree(bprm);
}

/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
        char __user *__user *argv,
        char __user *__user *envp,
        struct pt_regs * regs)
{
        struct linux_binprm *bprm;
        struct file *file;
        struct files_struct *displaced;
        int retval;

        retval = unshare_files(&displaced);
        if (retval)
                goto out_ret;

        retval = -ENOMEM;
        bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
        if (!bprm)
                goto out_files;

        file = open_exec(filename);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_kfree;

        sched_exec();

        bprm->file = file;
        bprm->filename = filename;
        bprm->interp = filename;

        retval = bprm_mm_init(bprm);
        if (retval)
                goto out_file;

        bprm->argc = count(argv, MAX_ARG_STRINGS);
        if ((retval = bprm->argc) < 0)
                goto out_mm;

        bprm->envc = count(envp, MAX_ARG_STRINGS);
        if ((retval = bprm->envc) < 0)
                goto out_mm;

        retval = security_bprm_alloc(bprm);
        if (retval)
                goto out;

        retval = prepare_binprm(bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings_kernel(1, &bprm->filename, bprm);
        if (retval < 0)
                goto out;

        bprm->exec = bprm->p;
        retval = copy_strings(bprm->envc, envp, bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings(bprm->argc, argv, bprm);
        if (retval < 0)
                goto out;

        current->flags &= ~PF_KTHREAD;
        retval = search_binary_handler(bprm, regs);
        if (retval >= 0) {
                /* execve success */
                security_bprm_free(bprm);
                acct_update_integrals(current);
                free_bprm(bprm);
                if (displaced)
                        put_files_struct(displaced);
                return retval;
        }

out:
        if (bprm->security)
                security_bprm_free(bprm);

out_mm:
        if (bprm->mm) {
                acct_arg_size(bprm, 0);
                mmput(bprm->mm);
        }

out_file:
        if (bprm->file) {
                allow_write_access(bprm->file);
                fput(bprm->file);
        }
out_kfree:
        free_bprm(bprm);

out_files:
        if (displaced)
                reset_files_struct(displaced);
out_ret:
        return retval;
}

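/*
 * Switch current->binfmt to the handler that claimed this image, taking
 * a module reference on the new handler and dropping the old one's.
 */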
int set_binfmt(struct linux_binfmt *new)
{
        struct linux_binfmt *old = current->binfmt;

        if (new) {
                if (!try_module_get(new->module))
                        return -1;
        }
        current->binfmt = new;
        if (old)
                module_put(old->module);
        return 0;
}

EXPORT_SYMBOL(set_binfmt);

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(char *corename, int nr_threads, long signr)
{
        const char *pat_ptr = core_pattern;
        int ispipe = (*pat_ptr == '|');
        char *out_ptr = corename;
        char *const out_end = corename + CORENAME_MAX_SIZE;
        int rc;
        int pid_in_pattern = 0;

        /* Repeat as long as we have more pattern to process and more output
           space */
        while (*pat_ptr) {
                if (*pat_ptr != '%') {
                        if (out_ptr == out_end)
                                goto out;
                        *out_ptr++ = *pat_ptr++;
                } else {
                        switch (*++pat_ptr) {
                        case 0:
                                goto out;
                        /* Double percent, output one percent */
                        case '%':
                                if (out_ptr == out_end)
                                        goto out;
                                *out_ptr++ = '%';
                                break;
                        /* pid */
                        case 'p':
                                pid_in_pattern = 1;
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%d", task_tgid_vnr(current));
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* uid */
                        case 'u':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%d", current->uid);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* gid */
                        case 'g':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%d", current->gid);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* signal that caused the coredump */
                        case 's':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%ld", signr);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* UNIX time of coredump */
                        case 't': {
                                struct timeval tv;
                                do_gettimeofday(&tv);
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%lu", tv.tv_sec);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        }
                        /* hostname */
                        case 'h':
                                down_read(&uts_sem);
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%s", utsname()->nodename);
                                up_read(&uts_sem);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* executable */
                        case 'e':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%s", current->comm);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* core limit size */
                        case 'c':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        default:
                                break;
                        }
                        ++pat_ptr;
                }
        }
        /* Backward compatibility with core_uses_pid:
         *
         * If core_pattern does not include a %p (as is the default)
         * and core_uses_pid is set, then .%pid will be appended to
         * the filename. Do not do this for piped commands. */
        if (!ispipe && !pid_in_pattern
            && (core_uses_pid || nr_threads)) {
                rc = snprintf(out_ptr, out_end - out_ptr,
                              ".%d", task_tgid_vnr(current));
                if (rc > out_end - out_ptr)
                        goto out;
                out_ptr += rc;
        }
out:
        *out_ptr = 0;
        return ispipe;
}

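/*
 * Mark the group as exiting and send SIGKILL to every other thread in
 * 'start's thread group that still has an mm; returns how many were
 * signalled, i.e. how many the core dumper will have to wait for.
 */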
static int zap_process(struct task_struct *start)
{
        struct task_struct *t;
        int nr = 0;

        start->signal->flags = SIGNAL_GROUP_EXIT;
        start->signal->group_stop_count = 0;

        t = start;
        do {
                if (t != current && t->mm) {
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
                        nr++;
                }
        } while_each_thread(start, t);

        return nr;
}

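/*
 * Kill every task, in any thread group, that shares this mm, and record
 * the total in core_state->nr_threads for coredump_wait().
 */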
1583static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
1584                                struct core_state *core_state, int exit_code)
1585{
1586        struct task_struct *g, *p;
1587        unsigned long flags;
1588        int nr = -EAGAIN;
1589
1590        spin_lock_irq(&tsk->sighand->siglock);
1591        if (!signal_group_exit(tsk->signal)) {
1592                mm->core_state = core_state;
1593                tsk->signal->group_exit_code = exit_code;
1594                nr = zap_process(tsk);
1595        }
1596        spin_unlock_irq(&tsk->sighand->siglock);
1597        if (unlikely(nr < 0))
1598                return nr;
1599
1600        if (atomic_read(&mm->mm_users) == nr + 1)
1601                goto done;
1602        /*
1603         * We should find and kill all tasks which use this mm, and we should
1604         * count them correctly into ->nr_threads. We don't take tasklist
1605         * lock, but this is safe wrt:
1606         *
1607         * fork:
1608         *      None of sub-threads can fork after zap_process(leader). All
1609         *      processes which were created before this point should be
1610         *      visible to zap_threads() because copy_process() adds the new
1611         *      process to the tail of init_task.tasks list, and lock/unlock
1612         *      of ->siglock provides a memory barrier.
1613         *
1614         * do_exit:
1615         *      The caller holds mm->mmap_sem. This means that the task which
1616         *      uses this mm can't pass exit_mm(), so it can't exit or clear
1617         *      its ->mm.
1618         *
1619         * de_thread:
1620         *      It does list_replace_rcu(&leader->tasks, &current->tasks),
1621         *      so we see either the old or the new leader; either is fine.
1622         *      However, it can change p->sighand, so lock_task_sighand(p)
1623         *      must be used. Since p->mm != NULL and we hold ->mmap_sem
1624         *      it can't fail.
1625         *
1626         *      Note also that "g" can be the old leader with ->mm == NULL
1627         *      and already unhashed and thus removed from ->thread_group.
1628         *      This is OK, __unhash_process()->list_del_rcu() does not
1629         *      clear the ->next pointer, we will find the new leader via
1630         *      next_thread().
1631         */
1632        rcu_read_lock();
1633        for_each_process(g) {
1634                if (g == tsk->group_leader)
1635                        continue;
1636                if (g->flags & PF_KTHREAD)
1637                        continue;
1638                p = g;
1639                do {
1640                        if (p->mm) {
1641                                if (unlikely(p->mm == mm)) {
1642                                        lock_task_sighand(p, &flags);
1643                                        nr += zap_process(p);
1644                                        unlock_task_sighand(p, &flags);
1645                                }
1646                                break;
1647                        }
1648                } while_each_thread(g, p);
1649        }
1650        rcu_read_unlock();
1651done:
1652        atomic_set(&core_state->nr_threads, nr);
1653        return nr;
1654}
1655
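    /*
     * coredump_wait - quiesce all other users of current->mm before the
     * dump is written.  Called with mmap_sem held for write; the lock is
     * dropped here.  Returns the number of threads that were put to wait
     * (possibly 0), or a negative errno if the dump must be abandoned.
     */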
1656static int coredump_wait(int exit_code, struct core_state *core_state)
1657{
1658        struct task_struct *tsk = current;
1659        struct mm_struct *mm = tsk->mm;
1660        struct completion *vfork_done;
1661        int core_waiters;
1662
1663        init_completion(&core_state->startup);
1664        core_state->dumper.task = tsk;
1665        core_state->dumper.next = NULL;
1666        core_waiters = zap_threads(tsk, mm, core_state, exit_code);
1667        up_write(&mm->mmap_sem);
1668
1669        if (unlikely(core_waiters < 0))
1670                goto fail;
1671
1672        /*
1673         * Make sure nobody is waiting for us to release the VM,
1674         * otherwise we can deadlock, each side waiting on the other.
1675         */
1676        vfork_done = tsk->vfork_done;
1677        if (vfork_done) {
1678                tsk->vfork_done = NULL;
1679                complete(vfork_done);
1680        }
1681
1682        if (core_waiters)
1683                wait_for_completion(&core_state->startup);
1684fail:
1685        return core_waiters;
1686}
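
    /*
     * Illustrative deadlock avoided by completing ->vfork_done above:
     * a parent blocked in vfork() sleeps uninterruptibly on that
     * completion and still counts as a user of our mm, so it could not
     * react to the SIGKILL from zap_threads(); meanwhile we would sit
     * in wait_for_completion(&core_state->startup) waiting for it.
     */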
1687
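    /*
     * coredump_finish - wake every thread parked in exit_mm() on the
     * mm->core_state list.  Each entry's ->task is cleared only after
     * ->next has been read (hence the barrier), because the woken
     * thread may free its stack-resident core_thread as soon as it
     * sees ->task == NULL.  Finally mm->core_state is reset so a later
     * dump can proceed.
     */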
1688static void coredump_finish(struct mm_struct *mm)
1689{
1690        struct core_thread *curr, *next;
1691        struct task_struct *task;
1692
1693        next = mm->core_state->dumper.next;
1694        while ((curr = next) != NULL) {
1695                next = curr->next;
1696                task = curr->task;
1697                /*
1698                 * see exit_mm(): the waiting task must not see
1699                 * ->task == NULL before we have read ->next.
1700                 */
1701                smp_mb();
1702                curr->task = NULL;
1703                wake_up_process(task);
1704        }
1705
1706        mm->core_state = NULL;
1707}
1708
1709/*
1710 * set_dumpable converts traditional three-value dumpable to two flags and
1711 * stores them into mm->flags.  It modifies the lower two bits of mm->flags,
1712 * but these bits are not changed atomically, so get_dumpable can observe an
1713 * intermediate state.  To avoid unexpected behavior, the bits are modified
1714 * in an order that guarantees get_dumpable returns either the old value or
1715 * the new one, never anything else.
1716 *
1717 * dumpable |   mm->flags (binary)
1718 * old  new | initial interim  final
1719 * ---------+-----------------------
1720 *  0    1  |   00      01      01
1721 *  0    2  |   00      10(*)   11
1722 *  1    0  |   01      00      00
1723 *  1    2  |   01      11      11
1724 *  2    0  |   11      10(*)   00
1725 *  2    1  |   11      11      01
1726 *
1727 * (*) get_dumpable regards interim value of 10 as 11.
1728 */
1729void set_dumpable(struct mm_struct *mm, int value)
1730{
1731        switch (value) {
1732        case 0:
1733                clear_bit(MMF_DUMPABLE, &mm->flags);
1734                smp_wmb();
1735                clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1736                break;
1737        case 1:
1738                set_bit(MMF_DUMPABLE, &mm->flags);
1739                smp_wmb();
1740                clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1741                break;
1742        case 2:
1743                set_bit(MMF_DUMP_SECURELY, &mm->flags);
1744                smp_wmb();
1745                set_bit(MMF_DUMPABLE, &mm->flags);
1746                break;
1747        }
1748}
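
    /*
     * Worked example of the table above: dropping from 2 to 0 clears
     * MMF_DUMPABLE first, leaving the interim value 10, which
     * get_dumpable() still reports as 2 (the old value); only then is
     * MMF_DUMP_SECURELY cleared.  A concurrent reader therefore sees
     * 2 or 0, but never 1.
     */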
1749
1750int get_dumpable(struct mm_struct *mm)
1751{
1752        int ret;
1753
1754        ret = mm->flags & 0x3;          /* MMF_DUMPABLE | MMF_DUMP_SECURELY */
1755        return (ret >= 2) ? 2 : ret;
1756}
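
    /*
     * Userspace view (a sketch, assuming the usual prctl(2) wiring of
     * these helpers; not part of this file):
     *
     *	prctl(PR_SET_DUMPABLE, 0);	ends up in set_dumpable()
     *	int d = prctl(PR_GET_DUMPABLE);	reads back via get_dumpable()
     *
     * Together with the suid_dumpable sysctl this decides whether
     * do_coredump() below writes anything at all.
     */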
1757
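    /*
     * do_coredump - top-level core dump entry point, called from signal
     * delivery for a fatal signal.  @signr is the signal number,
     * @exit_code becomes the group exit code, and @regs is the register
     * state handed to the binfmt's ->core_dump() method.
     */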
1758int do_coredump(long signr, int exit_code, struct pt_regs * regs)
1759{
1760        struct core_state core_state;
1761        char corename[CORENAME_MAX_SIZE + 1];
1762        struct mm_struct *mm = current->mm;
1763        struct linux_binfmt * binfmt;
1764        struct inode * inode;
1765        struct file * file;
1766        int retval = 0;
1767        int fsuid = current->fsuid;
1768        int flag = 0;
1769        int ispipe = 0;
1770        unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
1771        char **helper_argv = NULL;
1772        int helper_argc = 0;
1773        char *delimit;
1774
1775        audit_core_dumps(signr);
1776
1777        binfmt = current->binfmt;
1778        if (!binfmt || !binfmt->core_dump)
1779                goto fail;
1780        down_write(&mm->mmap_sem);
1781        /*
1782         * If another thread got here first, or we are not dumpable, bail out.
1783         */
1784        if (mm->core_state || !get_dumpable(mm)) {
1785                up_write(&mm->mmap_sem);
1786                goto fail;
1787        }
1788
1789        /*
1790         *      We cannot trust fsuid as being the "true" uid of the
1791         *      process, nor do we know its entire history.  We only know
1792         *      it was tainted, so in mode 2 we dump it as root.
1793         */
1794        if (get_dumpable(mm) == 2) {    /* Setuid core dump mode */
1795                flag = O_EXCL;          /* Stop rewrite attacks */
1796                current->fsuid = 0;     /* Dump root private */
1797        }
1798
1799        retval = coredump_wait(exit_code, &core_state);
1800        if (retval < 0)
1801                goto fail;
1802
1803        /*
1804         * Clear any false indication of pending signals that might
1805         * be seen by the filesystem code called to write the core file.
1806         */
1807        clear_thread_flag(TIF_SIGPENDING);
1808
1809        /*
1810         * lock_kernel() because format_corename() is controlled by sysctl, which
1811         * uses lock_kernel()
1812         */
1813        lock_kernel();
1814        ispipe = format_corename(corename, retval, signr);
1815        unlock_kernel();
1816        /*
1817         * Don't bother to check the RLIMIT_CORE value if core_pattern points
1818         * to a pipe.  Since we're not writing directly to the filesystem
1819         * RLIMIT_CORE doesn't really apply, as no actual core file will be
1820         * created unless the pipe reader chooses to write out the core file,
1821         * at which point file size limits and permissions will be imposed
1822         * on it just as for any other process.
1823         */
1824        if ((!ispipe) && (core_limit < binfmt->min_coredump))
1825                goto fail_unlock;
1826
1827        if (ispipe) {
1828                helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
                    if (!helper_argv) {     /* argv_split() returns NULL on allocation failure */
                            printk(KERN_WARNING "%s failed to allocate memory\n", __func__);
                            goto fail_unlock;
                    }
1829                /* Terminate the string before the first option */
1830                delimit = strchr(corename, ' ');
1831                if (delimit)
1832                        *delimit = '\0';
1833                delimit = strrchr(helper_argv[0], '/');
1834                if (delimit)
1835                        delimit++;
1836                else
1837                        delimit = helper_argv[0];
1838                if (!strcmp(delimit, current->comm)) {
1839                        printk(KERN_NOTICE "Recursive core dump detected, "
1840                                        "aborting\n");
1841                        goto fail_unlock;
1842                }
1843
1844                core_limit = RLIM_INFINITY;
1845
1846                /* SIGPIPE can happen, but it's just never processed */
1847                if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
1848                                &file)) {
1849                        printk(KERN_INFO "Core dump to %s pipe failed\n",
1850                               corename);
1851                        goto fail_unlock;
1852                }
1853        } else
1854                file = filp_open(corename,
1855                                 O_CREAT | O_RDWR | O_NOFOLLOW | O_LARGEFILE | flag,
1856                                 0600);
1857        if (IS_ERR(file))
1858                goto fail_unlock;
1859        inode = file->f_path.dentry->d_inode;
1860        if (inode->i_nlink > 1)
1861                goto close_fail;        /* multiple links - don't dump */
1862        if (!ispipe && d_unhashed(file->f_path.dentry))
1863                goto close_fail;
1864
1865        /* AK: actually I see no reason not to allow this for named pipes
1866           etc., but keep the previous behaviour for now. */
1867        if (!ispipe && !S_ISREG(inode->i_mode))
1868                goto close_fail;
1869        /*
1870         * Don't allow local users to get cute and trick others into
1871         * dumping core into files the local user pre-created.
1872         * Note, this is not relevant for pipes.
1873         */
1874        if (!ispipe && (inode->i_uid != current->fsuid))
1875                goto close_fail;
1876        if (!file->f_op)
1877                goto close_fail;
1878        if (!file->f_op->write)
1879                goto close_fail;
1880        if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
1881                goto close_fail;
1882
1883        retval = binfmt->core_dump(signr, regs, file, core_limit);
1884
1885        if (retval)
1886                current->signal->group_exit_code |= 0x80;
1887close_fail:
1888        filp_close(file, NULL);
1889fail_unlock:
1890        if (helper_argv)
1891                argv_free(helper_argv);
1892
1893        current->fsuid = fsuid;
1894        coredump_finish(mm);
1895fail:
1896        return retval;
1897}
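
    /*
     * Illustrative pipe flow (helper path hypothetical): with
     * core_pattern = "|/usr/libexec/cdump %p" and a fatal signal in
     * tgid 4242, format_corename() produces "|/usr/libexec/cdump 4242",
     * argv_split() turns corename+1 into { "/usr/libexec/cdump", "4242" },
     * RLIMIT_CORE is ignored, and binfmt->core_dump() writes into the
     * helper's stdin via the file from call_usermodehelper_pipe().
     */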
1898