linux/kernel/fork.c
   1/*
   2 *  linux/kernel/fork.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 */
   6
   7/*
   8 *  'fork.c' contains the help-routines for the 'fork' system call
   9 * (see also entry.S and others).
  10 * Fork is rather simple, once you get the hang of it, but the memory
  11 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
  12 */
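/*
 * Illustrative userspace sketch (not part of the original file): a minimal
 * fork()/waitpid() pair exercising the path implemented below.  Assumes a
 * hosted Linux C environment; kept under #if 0 so the listing still reads
 * as one translation unit.
 */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();

        if (pid == -1) {
                perror("fork");
                return 1;
        }
        if (pid == 0) {
                /* child: runs with copies of the parent's resources */
                printf("child %d running\n", (int)getpid());
                return 0;
        }
        waitpid(pid, NULL, 0);          /* reap the child */
        printf("parent %d reaped child %d\n", (int)getpid(), (int)pid);
        return 0;
}
#endif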
  13
  14#include <linux/slab.h>
  15#include <linux/init.h>
  16#include <linux/unistd.h>
  17#include <linux/module.h>
  18#include <linux/vmalloc.h>
  19#include <linux/completion.h>
  20#include <linux/personality.h>
  21#include <linux/mempolicy.h>
  22#include <linux/sem.h>
  23#include <linux/file.h>
  24#include <linux/fdtable.h>
  25#include <linux/iocontext.h>
  26#include <linux/key.h>
  27#include <linux/binfmts.h>
  28#include <linux/mman.h>
  29#include <linux/mmu_notifier.h>
  30#include <linux/fs.h>
  31#include <linux/nsproxy.h>
  32#include <linux/capability.h>
  33#include <linux/cpu.h>
  34#include <linux/cgroup.h>
  35#include <linux/security.h>
  36#include <linux/hugetlb.h>
  37#include <linux/seccomp.h>
  38#include <linux/swap.h>
  39#include <linux/syscalls.h>
  40#include <linux/jiffies.h>
  41#include <linux/futex.h>
  42#include <linux/compat.h>
  43#include <linux/kthread.h>
  44#include <linux/task_io_accounting_ops.h>
  45#include <linux/rcupdate.h>
  46#include <linux/ptrace.h>
  47#include <linux/mount.h>
  48#include <linux/audit.h>
  49#include <linux/memcontrol.h>
  50#include <linux/ftrace.h>
  51#include <linux/proc_fs.h>
  52#include <linux/profile.h>
  53#include <linux/rmap.h>
  54#include <linux/ksm.h>
  55#include <linux/acct.h>
  56#include <linux/tsacct_kern.h>
  57#include <linux/cn_proc.h>
  58#include <linux/freezer.h>
  59#include <linux/delayacct.h>
  60#include <linux/taskstats_kern.h>
  61#include <linux/random.h>
  62#include <linux/tty.h>
  63#include <linux/blkdev.h>
  64#include <linux/fs_struct.h>
  65#include <linux/magic.h>
  66#include <linux/perf_event.h>
  67#include <linux/posix-timers.h>
  68#include <linux/user-return-notifier.h>
  69#include <linux/oom.h>
  70#include <linux/khugepaged.h>
  71#include <linux/signalfd.h>
  72#include <linux/uprobes.h>
  73
  74#include <asm/pgtable.h>
  75#include <asm/pgalloc.h>
  76#include <asm/uaccess.h>
  77#include <asm/mmu_context.h>
  78#include <asm/cacheflush.h>
  79#include <asm/tlbflush.h>
  80
  81#include <trace/events/sched.h>
  82
  83#define CREATE_TRACE_POINTS
  84#include <trace/events/task.h>
  85
  86/*
   87 * Counters protected by write_lock_irq(&tasklist_lock)
  88 */
  89unsigned long total_forks;      /* Handle normal Linux uptimes. */
  90int nr_threads;                 /* The idle threads do not count.. */
  91
  92int max_threads;                /* tunable limit on nr_threads */
  93
  94DEFINE_PER_CPU(unsigned long, process_counts) = 0;
  95
  96__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
  97
  98#ifdef CONFIG_PROVE_RCU
  99int lockdep_tasklist_lock_is_held(void)
 100{
 101        return lockdep_is_held(&tasklist_lock);
 102}
 103EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
 104#endif /* #ifdef CONFIG_PROVE_RCU */
 105
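/*
 * nr_processes() below sums the per-CPU process_counts counters, which
 * copy_process() increments for thread group leaders only.  The walk over
 * the possible CPUs is lockless, so the total is approximate.
 */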
 106int nr_processes(void)
 107{
 108        int cpu;
 109        int total = 0;
 110
 111        for_each_possible_cpu(cpu)
 112                total += per_cpu(process_counts, cpu);
 113
 114        return total;
 115}
 116
 117void __weak arch_release_task_struct(struct task_struct *tsk)
 118{
 119}
 120
 121#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
 122static struct kmem_cache *task_struct_cachep;
 123
 124static inline struct task_struct *alloc_task_struct_node(int node)
 125{
 126        return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
 127}
 128
 129static inline void free_task_struct(struct task_struct *tsk)
 130{
 131        kmem_cache_free(task_struct_cachep, tsk);
 132}
 133#endif
 134
 135void __weak arch_release_thread_info(struct thread_info *ti)
 136{
 137}
 138
 139#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
 140
 141/*
 142 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 143 * kmemcache based allocator.
 144 */
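/*
 * On common configurations (e.g. x86-64 with 4 KiB pages and an 8 KiB
 * kernel stack) THREAD_SIZE is a whole multiple of PAGE_SIZE, so the
 * page-allocator branch below is the one that gets built.
 */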
 145# if THREAD_SIZE >= PAGE_SIZE
 146static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 147                                                  int node)
 148{
 149        struct page *page = alloc_pages_node(node, THREADINFO_GFP,
 150                                             THREAD_SIZE_ORDER);
 151
 152        return page ? page_address(page) : NULL;
 153}
 154
 155static inline void free_thread_info(struct thread_info *ti)
 156{
 157        free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
 158}
 159# else
 160static struct kmem_cache *thread_info_cache;
 161
 162static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 163                                                  int node)
 164{
 165        return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
 166}
 167
 168static void free_thread_info(struct thread_info *ti)
 169{
 170        kmem_cache_free(thread_info_cache, ti);
 171}
 172
 173void thread_info_cache_init(void)
 174{
 175        thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
 176                                              THREAD_SIZE, 0, NULL);
 177        BUG_ON(thread_info_cache == NULL);
 178}
 179# endif
 180#endif
 181
 182/* SLAB cache for signal_struct structures (tsk->signal) */
 183static struct kmem_cache *signal_cachep;
 184
 185/* SLAB cache for sighand_struct structures (tsk->sighand) */
 186struct kmem_cache *sighand_cachep;
 187
 188/* SLAB cache for files_struct structures (tsk->files) */
 189struct kmem_cache *files_cachep;
 190
 191/* SLAB cache for fs_struct structures (tsk->fs) */
 192struct kmem_cache *fs_cachep;
 193
 194/* SLAB cache for vm_area_struct structures */
 195struct kmem_cache *vm_area_cachep;
 196
 197/* SLAB cache for mm_struct structures (tsk->mm) */
 198static struct kmem_cache *mm_cachep;
 199
 200static void account_kernel_stack(struct thread_info *ti, int account)
 201{
 202        struct zone *zone = page_zone(virt_to_page(ti));
 203
 204        mod_zone_page_state(zone, NR_KERNEL_STACK, account);
 205}
 206
 207void free_task(struct task_struct *tsk)
 208{
 209        account_kernel_stack(tsk->stack, -1);
 210        arch_release_thread_info(tsk->stack);
 211        free_thread_info(tsk->stack);
 212        rt_mutex_debug_task_free(tsk);
 213        ftrace_graph_exit_task(tsk);
 214        put_seccomp_filter(tsk);
 215        arch_release_task_struct(tsk);
 216        free_task_struct(tsk);
 217}
 218EXPORT_SYMBOL(free_task);
 219
 220static inline void free_signal_struct(struct signal_struct *sig)
 221{
 222        taskstats_tgid_free(sig);
 223        sched_autogroup_exit(sig);
 224        kmem_cache_free(signal_cachep, sig);
 225}
 226
 227static inline void put_signal_struct(struct signal_struct *sig)
 228{
 229        if (atomic_dec_and_test(&sig->sigcnt))
 230                free_signal_struct(sig);
 231}
 232
 233void __put_task_struct(struct task_struct *tsk)
 234{
 235        WARN_ON(!tsk->exit_state);
 236        WARN_ON(atomic_read(&tsk->usage));
 237        WARN_ON(tsk == current);
 238
 239        security_task_free(tsk);
 240        exit_creds(tsk);
 241        delayacct_tsk_free(tsk);
 242        put_signal_struct(tsk->signal);
 243
 244        if (!profile_handoff_task(tsk))
 245                free_task(tsk);
 246}
 247EXPORT_SYMBOL_GPL(__put_task_struct);
 248
 249void __init __weak arch_task_cache_init(void) { }
 250
 251void __init fork_init(unsigned long mempages)
 252{
 253#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
 254#ifndef ARCH_MIN_TASKALIGN
 255#define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
 256#endif
 257        /* create a slab on which task_structs can be allocated */
 258        task_struct_cachep =
 259                kmem_cache_create("task_struct", sizeof(struct task_struct),
 260                        ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 261#endif
 262
 263        /* do the arch specific task caches init */
 264        arch_task_cache_init();
 265
 266        /*
 267         * The default maximum number of threads is set to a safe
 268         * value: the thread structures can take up at most half
 269         * of memory.
 270         */
 271        max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
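        /*
         * Worked example: with 4 KiB pages and THREAD_SIZE = 8 KiB the
         * divisor is 16, so 1 GiB of memory (262144 pages) gives
         * max_threads = 16384, whose kernel stacks consume at most
         * 16384 * 8 KiB = 128 MiB, i.e. one eighth of memory.
         */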
 272
 273        /*
 274         * we need to allow at least 20 threads to boot a system
 275         */
 276        if (max_threads < 20)
 277                max_threads = 20;
 278
 279        init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
 280        init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
 281        init_task.signal->rlim[RLIMIT_SIGPENDING] =
 282                init_task.signal->rlim[RLIMIT_NPROC];
 283}
 284
 285int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
 286                                               struct task_struct *src)
 287{
 288        *dst = *src;
 289        return 0;
 290}
 291
 292static struct task_struct *dup_task_struct(struct task_struct *orig)
 293{
 294        struct task_struct *tsk;
 295        struct thread_info *ti;
 296        unsigned long *stackend;
 297        int node = tsk_fork_get_node(orig);
 298        int err;
 299
 300        tsk = alloc_task_struct_node(node);
 301        if (!tsk)
 302                return NULL;
 303
 304        ti = alloc_thread_info_node(tsk, node);
 305        if (!ti)
 306                goto free_tsk;
 307
 308        err = arch_dup_task_struct(tsk, orig);
 309        if (err)
 310                goto free_ti;
 311
 312        tsk->stack = ti;
 313
 314        setup_thread_stack(tsk, orig);
 315        clear_user_return_notifier(tsk);
 316        clear_tsk_need_resched(tsk);
 317        stackend = end_of_stack(tsk);
 318        *stackend = STACK_END_MAGIC;    /* for overflow detection */
 319
 320#ifdef CONFIG_CC_STACKPROTECTOR
 321        tsk->stack_canary = get_random_int();
 322#endif
 323
 324        /*
 325         * One for us, one for whoever does the "release_task()" (usually
 326         * parent)
 327         */
 328        atomic_set(&tsk->usage, 2);
 329#ifdef CONFIG_BLK_DEV_IO_TRACE
 330        tsk->btrace_seq = 0;
 331#endif
 332        tsk->splice_pipe = NULL;
 333
 334        account_kernel_stack(ti, 1);
 335
 336        return tsk;
 337
 338free_ti:
 339        free_thread_info(ti);
 340free_tsk:
 341        free_task_struct(tsk);
 342        return NULL;
 343}
 344
 345#ifdef CONFIG_MMU
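/*
 * dup_mmap() copies the parent's VMA list, rbtree and page table entries
 * into the new mm.  It is called from dup_mm() when the parent's address
 * space is not shared (no CLONE_VM), and takes both mmap_sems for writing.
 */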
 346static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 347{
 348        struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
 349        struct rb_node **rb_link, *rb_parent;
 350        int retval;
 351        unsigned long charge;
 352        struct mempolicy *pol;
 353
 354        down_write(&oldmm->mmap_sem);
 355        flush_cache_dup_mm(oldmm);
 356        /*
 357         * Not linked in yet - no deadlock potential:
 358         */
 359        down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
 360
 361        mm->locked_vm = 0;
 362        mm->mmap = NULL;
 363        mm->mmap_cache = NULL;
 364        mm->free_area_cache = oldmm->mmap_base;
 365        mm->cached_hole_size = ~0UL;
 366        mm->map_count = 0;
 367        cpumask_clear(mm_cpumask(mm));
 368        mm->mm_rb = RB_ROOT;
 369        rb_link = &mm->mm_rb.rb_node;
 370        rb_parent = NULL;
 371        pprev = &mm->mmap;
 372        retval = ksm_fork(mm, oldmm);
 373        if (retval)
 374                goto out;
 375        retval = khugepaged_fork(mm, oldmm);
 376        if (retval)
 377                goto out;
 378
 379        prev = NULL;
 380        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
 381                struct file *file;
 382
 383                if (mpnt->vm_flags & VM_DONTCOPY) {
 384                        vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
 385                                                        -vma_pages(mpnt));
 386                        continue;
 387                }
 388                charge = 0;
 389                if (mpnt->vm_flags & VM_ACCOUNT) {
 390                        unsigned long len = vma_pages(mpnt);
 391
 392                        if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
 393                                goto fail_nomem;
 394                        charge = len;
 395                }
 396                tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 397                if (!tmp)
 398                        goto fail_nomem;
 399                *tmp = *mpnt;
 400                INIT_LIST_HEAD(&tmp->anon_vma_chain);
 401                pol = mpol_dup(vma_policy(mpnt));
 402                retval = PTR_ERR(pol);
 403                if (IS_ERR(pol))
 404                        goto fail_nomem_policy;
 405                vma_set_policy(tmp, pol);
 406                tmp->vm_mm = mm;
 407                if (anon_vma_fork(tmp, mpnt))
 408                        goto fail_nomem_anon_vma_fork;
 409                tmp->vm_flags &= ~VM_LOCKED;
 410                tmp->vm_next = tmp->vm_prev = NULL;
 411                file = tmp->vm_file;
 412                if (file) {
 413                        struct inode *inode = file->f_path.dentry->d_inode;
 414                        struct address_space *mapping = file->f_mapping;
 415
 416                        get_file(file);
 417                        if (tmp->vm_flags & VM_DENYWRITE)
 418                                atomic_dec(&inode->i_writecount);
 419                        mutex_lock(&mapping->i_mmap_mutex);
 420                        if (tmp->vm_flags & VM_SHARED)
 421                                mapping->i_mmap_writable++;
 422                        flush_dcache_mmap_lock(mapping);
 423                        /* insert tmp into the share list, just after mpnt */
 424                        vma_prio_tree_add(tmp, mpnt);
 425                        flush_dcache_mmap_unlock(mapping);
 426                        mutex_unlock(&mapping->i_mmap_mutex);
 427                }
 428
 429                /*
 430                 * Clear hugetlb-related page reserves for children. This only
 431                 * affects MAP_PRIVATE mappings. Faults generated by the child
 432                 * are not guaranteed to succeed, even if read-only
 433                 */
 434                if (is_vm_hugetlb_page(tmp))
 435                        reset_vma_resv_huge_pages(tmp);
 436
 437                /*
 438                 * Link in the new vma and copy the page table entries.
 439                 */
 440                *pprev = tmp;
 441                pprev = &tmp->vm_next;
 442                tmp->vm_prev = prev;
 443                prev = tmp;
 444
 445                __vma_link_rb(mm, tmp, rb_link, rb_parent);
 446                rb_link = &tmp->vm_rb.rb_right;
 447                rb_parent = &tmp->vm_rb;
 448
 449                mm->map_count++;
 450                retval = copy_page_range(mm, oldmm, mpnt);
 451
 452                if (tmp->vm_ops && tmp->vm_ops->open)
 453                        tmp->vm_ops->open(tmp);
 454
 455                if (retval)
 456                        goto out;
 457
 458                if (file)
 459                        uprobe_mmap(tmp);
 460        }
 461        /* a new mm has just been created */
 462        arch_dup_mmap(oldmm, mm);
 463        retval = 0;
 464out:
 465        up_write(&mm->mmap_sem);
 466        flush_tlb_mm(oldmm);
 467        up_write(&oldmm->mmap_sem);
 468        return retval;
 469fail_nomem_anon_vma_fork:
 470        mpol_put(pol);
 471fail_nomem_policy:
 472        kmem_cache_free(vm_area_cachep, tmp);
 473fail_nomem:
 474        retval = -ENOMEM;
 475        vm_unacct_memory(charge);
 476        goto out;
 477}
 478
 479static inline int mm_alloc_pgd(struct mm_struct *mm)
 480{
 481        mm->pgd = pgd_alloc(mm);
 482        if (unlikely(!mm->pgd))
 483                return -ENOMEM;
 484        return 0;
 485}
 486
 487static inline void mm_free_pgd(struct mm_struct *mm)
 488{
 489        pgd_free(mm, mm->pgd);
 490}
 491#else
 492#define dup_mmap(mm, oldmm)     (0)
 493#define mm_alloc_pgd(mm)        (0)
 494#define mm_free_pgd(mm)
 495#endif /* CONFIG_MMU */
 496
 497__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
 498
 499#define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 500#define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
 501
 502static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
 503
 504static int __init coredump_filter_setup(char *s)
 505{
 506        default_dump_filter =
 507                (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
 508                MMF_DUMP_FILTER_MASK;
 509        return 1;
 510}
 511
 512__setup("coredump_filter=", coredump_filter_setup);
 513
 514#include <linux/init_task.h>
 515
 516static void mm_init_aio(struct mm_struct *mm)
 517{
 518#ifdef CONFIG_AIO
 519        spin_lock_init(&mm->ioctx_lock);
 520        INIT_HLIST_HEAD(&mm->ioctx_list);
 521#endif
 522}
 523
 524static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 525{
 526        atomic_set(&mm->mm_users, 1);
 527        atomic_set(&mm->mm_count, 1);
 528        init_rwsem(&mm->mmap_sem);
 529        INIT_LIST_HEAD(&mm->mmlist);
 530        mm->flags = (current->mm) ?
 531                (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
 532        mm->core_state = NULL;
 533        mm->nr_ptes = 0;
 534        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
 535        spin_lock_init(&mm->page_table_lock);
 536        mm->free_area_cache = TASK_UNMAPPED_BASE;
 537        mm->cached_hole_size = ~0UL;
 538        mm_init_aio(mm);
 539        mm_init_owner(mm, p);
 540
 541        if (likely(!mm_alloc_pgd(mm))) {
 542                mm->def_flags = 0;
 543                mmu_notifier_mm_init(mm);
 544                return mm;
 545        }
 546
 547        free_mm(mm);
 548        return NULL;
 549}
 550
 551static void check_mm(struct mm_struct *mm)
 552{
 553        int i;
 554
 555        for (i = 0; i < NR_MM_COUNTERS; i++) {
 556                long x = atomic_long_read(&mm->rss_stat.count[i]);
 557
 558                if (unlikely(x))
 559                        printk(KERN_ALERT "BUG: Bad rss-counter state "
 560                                          "mm:%p idx:%d val:%ld\n", mm, i, x);
 561        }
 562
 563#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 564        VM_BUG_ON(mm->pmd_huge_pte);
 565#endif
 566}
 567
 568/*
 569 * Allocate and initialize an mm_struct.
 570 */
 571struct mm_struct *mm_alloc(void)
 572{
 573        struct mm_struct *mm;
 574
 575        mm = allocate_mm();
 576        if (!mm)
 577                return NULL;
 578
 579        memset(mm, 0, sizeof(*mm));
 580        mm_init_cpumask(mm);
 581        return mm_init(mm, current);
 582}
 583
 584/*
 585 * Called when the last reference to the mm
 586 * is dropped: either by a lazy thread or by
 587 * mmput. Free the page directory and the mm.
 588 */
 589void __mmdrop(struct mm_struct *mm)
 590{
 591        BUG_ON(mm == &init_mm);
 592        mm_free_pgd(mm);
 593        destroy_context(mm);
 594        mmu_notifier_mm_destroy(mm);
 595        check_mm(mm);
 596        free_mm(mm);
 597}
 598EXPORT_SYMBOL_GPL(__mmdrop);
 599
 600/*
 601 * Decrement the use count and release all resources for an mm.
 602 */
 603void mmput(struct mm_struct *mm)
 604{
 605        might_sleep();
 606
 607        if (atomic_dec_and_test(&mm->mm_users)) {
 608                uprobe_clear_state(mm);
 609                exit_aio(mm);
 610                ksm_exit(mm);
 611                khugepaged_exit(mm); /* must run before exit_mmap */
 612                exit_mmap(mm);
 613                set_mm_exe_file(mm, NULL);
 614                if (!list_empty(&mm->mmlist)) {
 615                        spin_lock(&mmlist_lock);
 616                        list_del(&mm->mmlist);
 617                        spin_unlock(&mmlist_lock);
 618                }
 619                if (mm->binfmt)
 620                        module_put(mm->binfmt->module);
 621                mmdrop(mm);
 622        }
 623}
 624EXPORT_SYMBOL_GPL(mmput);
 625
 626/*
 627 * We added or removed a vma mapping the executable. The vmas are only mapped
 628 * during exec and are not mapped with the mmap system call.
 629 * Callers must hold down_write() on the mm's mmap_sem for these
 630 */
 631void added_exe_file_vma(struct mm_struct *mm)
 632{
 633        mm->num_exe_file_vmas++;
 634}
 635
 636void removed_exe_file_vma(struct mm_struct *mm)
 637{
 638        mm->num_exe_file_vmas--;
 639        if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
 640                fput(mm->exe_file);
 641                mm->exe_file = NULL;
 642        }
 643
 644}
 645
 646void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
 647{
 648        if (new_exe_file)
 649                get_file(new_exe_file);
 650        if (mm->exe_file)
 651                fput(mm->exe_file);
 652        mm->exe_file = new_exe_file;
 653        mm->num_exe_file_vmas = 0;
 654}
 655
 656struct file *get_mm_exe_file(struct mm_struct *mm)
 657{
 658        struct file *exe_file;
 659
 660        /* We need mmap_sem to protect against races with removal of
 661         * VM_EXECUTABLE vmas */
 662        down_read(&mm->mmap_sem);
 663        exe_file = mm->exe_file;
 664        if (exe_file)
 665                get_file(exe_file);
 666        up_read(&mm->mmap_sem);
 667        return exe_file;
 668}
 669
 670static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
 671{
 672        /* It's safe to write the exe_file pointer without exe_file_lock because
 673         * this is called during fork when the task is not yet in /proc */
 674        newmm->exe_file = get_mm_exe_file(oldmm);
 675}
 676
 677/**
 678 * get_task_mm - acquire a reference to the task's mm
 679 *
  680 * Returns %NULL if the task has no mm or if PF_KTHREAD is set (meaning
  681 * this kernel thread has only transiently adopted a user mm with use_mm,
  682 * e.g. to do its AIO).  Otherwise returns the mm with its use count
  683 * bumped; the caller must release it via mmput() after use.  Typically
  684 * used by /proc and ptrace.  See the usage sketch below.
 685 */
 686struct mm_struct *get_task_mm(struct task_struct *task)
 687{
 688        struct mm_struct *mm;
 689
 690        task_lock(task);
 691        mm = task->mm;
 692        if (mm) {
 693                if (task->flags & PF_KTHREAD)
 694                        mm = NULL;
 695                else
 696                        atomic_inc(&mm->mm_users);
 697        }
 698        task_unlock(task);
 699        return mm;
 700}
 701EXPORT_SYMBOL_GPL(get_task_mm);
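/*
 * Usage sketch (not part of the original file): how a hypothetical in-kernel
 * caller, e.g. a /proc handler, would pair get_task_mm() with mmput().  The
 * function name is invented for illustration; kept under #if 0.
 */
#if 0
static int example_report_rss(struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);
        unsigned long rss = 0;

        if (mm) {
                rss = get_mm_rss(mm);  /* safe: we hold an mm_users reference */
                mmput(mm);             /* drop the reference taken above */
        }
        pr_info("pid %d: %lu resident pages\n", task_pid_nr(task), rss);
        return 0;
}
#endif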
 702
 703struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 704{
 705        struct mm_struct *mm;
 706        int err;
 707
 708        err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
 709        if (err)
 710                return ERR_PTR(err);
 711
 712        mm = get_task_mm(task);
 713        if (mm && mm != current->mm &&
 714                        !ptrace_may_access(task, mode)) {
 715                mmput(mm);
 716                mm = ERR_PTR(-EACCES);
 717        }
 718        mutex_unlock(&task->signal->cred_guard_mutex);
 719
 720        return mm;
 721}
 722
 723static void complete_vfork_done(struct task_struct *tsk)
 724{
 725        struct completion *vfork;
 726
 727        task_lock(tsk);
 728        vfork = tsk->vfork_done;
 729        if (likely(vfork)) {
 730                tsk->vfork_done = NULL;
 731                complete(vfork);
 732        }
 733        task_unlock(tsk);
 734}
 735
 736static int wait_for_vfork_done(struct task_struct *child,
 737                                struct completion *vfork)
 738{
 739        int killed;
 740
 741        freezer_do_not_count();
 742        killed = wait_for_completion_killable(vfork);
 743        freezer_count();
 744
 745        if (killed) {
 746                task_lock(child);
 747                child->vfork_done = NULL;
 748                task_unlock(child);
 749        }
 750
 751        put_task_struct(child);
 752        return killed;
 753}
 754
 755/* Please note the differences between mmput and mm_release.
 756 * mmput is called whenever we stop holding onto a mm_struct,
  757 * error or success, whatever.
 758 *
 759 * mm_release is called after a mm_struct has been removed
 760 * from the current process.
 761 *
 762 * This difference is important for error handling, when we
 763 * only half set up a mm_struct for a new process and need to restore
 764 * the old one.  Because we mmput the new mm_struct before
 765 * restoring the old one. . .
 766 * Eric Biederman 10 January 1998
 767 */
 768void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 769{
 770        /* Get rid of any futexes when releasing the mm */
 771#ifdef CONFIG_FUTEX
 772        if (unlikely(tsk->robust_list)) {
 773                exit_robust_list(tsk);
 774                tsk->robust_list = NULL;
 775        }
 776#ifdef CONFIG_COMPAT
 777        if (unlikely(tsk->compat_robust_list)) {
 778                compat_exit_robust_list(tsk);
 779                tsk->compat_robust_list = NULL;
 780        }
 781#endif
 782        if (unlikely(!list_empty(&tsk->pi_state_list)))
 783                exit_pi_state_list(tsk);
 784#endif
 785
 786        uprobe_free_utask(tsk);
 787
 788        /* Get rid of any cached register state */
 789        deactivate_mm(tsk, mm);
 790
 791        /*
 792         * If we're exiting normally, clear a user-space tid field if
 793         * requested.  We leave this alone when dying by signal, to leave
  794         * the value intact in a core dump, and to save unnecessary
  795         * trouble: a killed vfork parent, say, shouldn't touch this mm.
  796         * Userland only wants this done for a sys_exit (sketch below).
 797         */
 798        if (tsk->clear_child_tid) {
 799                if (!(tsk->flags & PF_SIGNALED) &&
 800                    atomic_read(&mm->mm_users) > 1) {
 801                        /*
 802                         * We don't check the error code - if userspace has
 803                         * not set up a proper pointer then tough luck.
 804                         */
 805                        put_user(0, tsk->clear_child_tid);
 806                        sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
 807                                        1, NULL, NULL, 0);
 808                }
 809                tsk->clear_child_tid = NULL;
 810        }
 811
 812        /*
 813         * All done, finally we can wake up parent and return this mm to him.
 814         * Also kthread_stop() uses this completion for synchronization.
 815         */
 816        if (tsk->vfork_done)
 817                complete_vfork_done(tsk);
 818}
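/*
 * Userspace sketch (not part of the original file) of the clear_child_tid
 * machinery above, which is what makes pthread_join()-style waiting work:
 * CLONE_PARENT_SETTID publishes the child's TID before clone() returns, and
 * CLONE_CHILD_CLEARTID makes mm_release() zero that word and FUTEX_WAKE any
 * waiter when the child exits.  Assumes glibc's clone() wrapper; kept under
 * #if 0.
 */
#if 0
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile pid_t child_tid;
static char child_stack[64 * 1024] __attribute__((aligned(16)));

static int child_fn(void *arg)
{
        return 0;       /* on exit the kernel clears child_tid and wakes us */
}

int main(void)
{
        pid_t tid = clone(child_fn, child_stack + sizeof(child_stack),
                          CLONE_VM | CLONE_PARENT_SETTID |
                          CLONE_CHILD_CLEARTID | SIGCHLD,
                          NULL, &child_tid, NULL, &child_tid);
        pid_t seen;

        if (tid == -1)
                return 1;
        /* wait until mm_release() has stored 0 and issued FUTEX_WAKE */
        while ((seen = child_tid) != 0)
                syscall(SYS_futex, &child_tid, FUTEX_WAIT, seen, NULL, NULL, 0);
        waitpid(tid, NULL, 0);          /* reap the child */
        printf("child %d has exited\n", (int)tid);
        return 0;
}
#endif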
 819
 820/*
 821 * Allocate a new mm structure and copy contents from the
 822 * mm structure of the passed in task structure.
 823 */
 824struct mm_struct *dup_mm(struct task_struct *tsk)
 825{
 826        struct mm_struct *mm, *oldmm = current->mm;
 827        int err;
 828
 829        if (!oldmm)
 830                return NULL;
 831
 832        mm = allocate_mm();
 833        if (!mm)
 834                goto fail_nomem;
 835
 836        memcpy(mm, oldmm, sizeof(*mm));
 837        mm_init_cpumask(mm);
 838
 839#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 840        mm->pmd_huge_pte = NULL;
 841#endif
 842        uprobe_reset_state(mm);
 843
 844        if (!mm_init(mm, tsk))
 845                goto fail_nomem;
 846
 847        if (init_new_context(tsk, mm))
 848                goto fail_nocontext;
 849
 850        dup_mm_exe_file(oldmm, mm);
 851
 852        err = dup_mmap(mm, oldmm);
 853        if (err)
 854                goto free_pt;
 855
 856        mm->hiwater_rss = get_mm_rss(mm);
 857        mm->hiwater_vm = mm->total_vm;
 858
 859        if (mm->binfmt && !try_module_get(mm->binfmt->module))
 860                goto free_pt;
 861
 862        return mm;
 863
 864free_pt:
 865        /* don't put binfmt in mmput, we haven't got module yet */
 866        mm->binfmt = NULL;
 867        mmput(mm);
 868
 869fail_nomem:
 870        return NULL;
 871
 872fail_nocontext:
 873        /*
 874         * If init_new_context() failed, we cannot use mmput() to free the mm
 875         * because it calls destroy_context()
 876         */
 877        mm_free_pgd(mm);
 878        free_mm(mm);
 879        return NULL;
 880}
 881
 882static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 883{
 884        struct mm_struct *mm, *oldmm;
 885        int retval;
 886
 887        tsk->min_flt = tsk->maj_flt = 0;
 888        tsk->nvcsw = tsk->nivcsw = 0;
 889#ifdef CONFIG_DETECT_HUNG_TASK
 890        tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
 891#endif
 892
 893        tsk->mm = NULL;
 894        tsk->active_mm = NULL;
 895
 896        /*
 897         * Are we cloning a kernel thread?
 898         *
  899         * We need to steal an active VM for that.
 900         */
 901        oldmm = current->mm;
 902        if (!oldmm)
 903                return 0;
 904
 905        if (clone_flags & CLONE_VM) {
 906                atomic_inc(&oldmm->mm_users);
 907                mm = oldmm;
 908                goto good_mm;
 909        }
 910
 911        retval = -ENOMEM;
 912        mm = dup_mm(tsk);
 913        if (!mm)
 914                goto fail_nomem;
 915
 916good_mm:
 917        tsk->mm = mm;
 918        tsk->active_mm = mm;
 919        return 0;
 920
 921fail_nomem:
 922        return retval;
 923}
 924
 925static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 926{
 927        struct fs_struct *fs = current->fs;
 928        if (clone_flags & CLONE_FS) {
 929                /* tsk->fs is already what we want */
 930                spin_lock(&fs->lock);
 931                if (fs->in_exec) {
 932                        spin_unlock(&fs->lock);
 933                        return -EAGAIN;
 934                }
 935                fs->users++;
 936                spin_unlock(&fs->lock);
 937                return 0;
 938        }
 939        tsk->fs = copy_fs_struct(fs);
 940        if (!tsk->fs)
 941                return -ENOMEM;
 942        return 0;
 943}
 944
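/*
 * copy_files() either shares the parent's descriptor table by bumping its
 * refcount (CLONE_FILES) or duplicates it with dup_fd().
 */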
 945static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
 946{
 947        struct files_struct *oldf, *newf;
 948        int error = 0;
 949
 950        /*
 951         * A background process may not have any files ...
 952         */
 953        oldf = current->files;
 954        if (!oldf)
 955                goto out;
 956
 957        if (clone_flags & CLONE_FILES) {
 958                atomic_inc(&oldf->count);
 959                goto out;
 960        }
 961
 962        newf = dup_fd(oldf, &error);
 963        if (!newf)
 964                goto out;
 965
 966        tsk->files = newf;
 967        error = 0;
 968out:
 969        return error;
 970}
 971
 972static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 973{
 974#ifdef CONFIG_BLOCK
 975        struct io_context *ioc = current->io_context;
 976        struct io_context *new_ioc;
 977
 978        if (!ioc)
 979                return 0;
 980        /*
 981         * Share io context with parent, if CLONE_IO is set
 982         */
 983        if (clone_flags & CLONE_IO) {
 984                ioc_task_link(ioc);
 985                tsk->io_context = ioc;
 986        } else if (ioprio_valid(ioc->ioprio)) {
 987                new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
 988                if (unlikely(!new_ioc))
 989                        return -ENOMEM;
 990
 991                new_ioc->ioprio = ioc->ioprio;
 992                put_io_context(new_ioc);
 993        }
 994#endif
 995        return 0;
 996}
 997
 998static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 999{
1000        struct sighand_struct *sig;
1001
1002        if (clone_flags & CLONE_SIGHAND) {
1003                atomic_inc(&current->sighand->count);
1004                return 0;
1005        }
1006        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1007        rcu_assign_pointer(tsk->sighand, sig);
1008        if (!sig)
1009                return -ENOMEM;
1010        atomic_set(&sig->count, 1);
1011        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
1012        return 0;
1013}
1014
1015void __cleanup_sighand(struct sighand_struct *sighand)
1016{
1017        if (atomic_dec_and_test(&sighand->count)) {
1018                signalfd_cleanup(sighand);
1019                kmem_cache_free(sighand_cachep, sighand);
1020        }
1021}
1022
1023
1024/*
1025 * Initialize POSIX timer handling for a thread group.
1026 */
1027static void posix_cpu_timers_init_group(struct signal_struct *sig)
1028{
1029        unsigned long cpu_limit;
1030
1031        /* Thread group counters. */
1032        thread_group_cputime_init(sig);
1033
1034        cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1035        if (cpu_limit != RLIM_INFINITY) {
1036                sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
1037                sig->cputimer.running = 1;
1038        }
1039
1040        /* The timer lists. */
1041        INIT_LIST_HEAD(&sig->cpu_timers[0]);
1042        INIT_LIST_HEAD(&sig->cpu_timers[1]);
1043        INIT_LIST_HEAD(&sig->cpu_timers[2]);
1044}
1045
1046static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1047{
1048        struct signal_struct *sig;
1049
1050        if (clone_flags & CLONE_THREAD)
1051                return 0;
1052
1053        sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1054        tsk->signal = sig;
1055        if (!sig)
1056                return -ENOMEM;
1057
1058        sig->nr_threads = 1;
1059        atomic_set(&sig->live, 1);
1060        atomic_set(&sig->sigcnt, 1);
1061        init_waitqueue_head(&sig->wait_chldexit);
1062        if (clone_flags & CLONE_NEWPID)
1063                sig->flags |= SIGNAL_UNKILLABLE;
1064        sig->curr_target = tsk;
1065        init_sigpending(&sig->shared_pending);
1066        INIT_LIST_HEAD(&sig->posix_timers);
1067
1068        hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1069        sig->real_timer.function = it_real_fn;
1070
1071        task_lock(current->group_leader);
1072        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1073        task_unlock(current->group_leader);
1074
1075        posix_cpu_timers_init_group(sig);
1076
1077        tty_audit_fork(sig);
1078        sched_autogroup_fork(sig);
1079
1080#ifdef CONFIG_CGROUPS
1081        init_rwsem(&sig->group_rwsem);
1082#endif
1083
1084        sig->oom_adj = current->signal->oom_adj;
1085        sig->oom_score_adj = current->signal->oom_score_adj;
1086        sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1087
1088        sig->has_child_subreaper = current->signal->has_child_subreaper ||
1089                                   current->signal->is_child_subreaper;
1090
1091        mutex_init(&sig->cred_guard_mutex);
1092
1093        return 0;
1094}
1095
1096static void copy_flags(unsigned long clone_flags, struct task_struct *p)
1097{
1098        unsigned long new_flags = p->flags;
1099
1100        new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
1101        new_flags |= PF_FORKNOEXEC;
1102        p->flags = new_flags;
1103}
1104
1105SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1106{
1107        current->clear_child_tid = tidptr;
1108
1109        return task_pid_vnr(current);
1110}
1111
1112static void rt_mutex_init_task(struct task_struct *p)
1113{
1114        raw_spin_lock_init(&p->pi_lock);
1115#ifdef CONFIG_RT_MUTEXES
1116        plist_head_init(&p->pi_waiters);
1117        p->pi_blocked_on = NULL;
1118#endif
1119}
1120
1121#ifdef CONFIG_MM_OWNER
1122void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
1123{
1124        mm->owner = p;
1125}
1126#endif /* CONFIG_MM_OWNER */
1127
1128/*
1129 * Initialize POSIX timer handling for a single task.
1130 */
1131static void posix_cpu_timers_init(struct task_struct *tsk)
1132{
1133        tsk->cputime_expires.prof_exp = 0;
1134        tsk->cputime_expires.virt_exp = 0;
1135        tsk->cputime_expires.sched_exp = 0;
1136        INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1137        INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1138        INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1139}
1140
1141/*
1142 * This creates a new process as a copy of the old one,
1143 * but does not actually start it yet.
1144 *
1145 * It copies the registers, and all the appropriate
1146 * parts of the process environment (as per the clone
1147 * flags). The actual kick-off is left to the caller.
1148 */
1149static struct task_struct *copy_process(unsigned long clone_flags,
1150                                        unsigned long stack_start,
1151                                        struct pt_regs *regs,
1152                                        unsigned long stack_size,
1153                                        int __user *child_tidptr,
1154                                        struct pid *pid,
1155                                        int trace)
1156{
1157        int retval;
1158        struct task_struct *p;
1159        int cgroup_callbacks_done = 0;
1160
1161        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1162                return ERR_PTR(-EINVAL);
1163
1164        /*
1165         * Thread groups must share signals as well, and detached threads
1166         * can only be started up within the thread group.
1167         */
1168        if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1169                return ERR_PTR(-EINVAL);
1170
1171        /*
1172         * Shared signal handlers imply shared VM. By way of the above,
1173         * thread groups also imply shared VM. Blocking this case allows
1174         * for various simplifications in other code.
1175         */
1176        if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1177                return ERR_PTR(-EINVAL);
1178
1179        /*
1180         * Siblings of global init remain as zombies on exit since they are
1181         * not reaped by their parent (swapper). To solve this and to avoid
1182         * multi-rooted process trees, prevent global and container-inits
1183         * from creating siblings.
1184         */
1185        if ((clone_flags & CLONE_PARENT) &&
1186                                current->signal->flags & SIGNAL_UNKILLABLE)
1187                return ERR_PTR(-EINVAL);
1188
1189        retval = security_task_create(clone_flags);
1190        if (retval)
1191                goto fork_out;
1192
1193        retval = -ENOMEM;
1194        p = dup_task_struct(current);
1195        if (!p)
1196                goto fork_out;
1197
1198        ftrace_graph_init_task(p);
1199        get_seccomp_filter(p);
1200
1201        rt_mutex_init_task(p);
1202
1203#ifdef CONFIG_PROVE_LOCKING
1204        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1205        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1206#endif
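        /*
         * Enforce RLIMIT_NPROC: an unprivileged user may not exceed their
         * per-user process limit.
         */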
1207        retval = -EAGAIN;
1208        if (atomic_read(&p->real_cred->user->processes) >=
1209                        task_rlimit(p, RLIMIT_NPROC)) {
1210                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
1211                    p->real_cred->user != INIT_USER)
1212                        goto bad_fork_free;
1213        }
1214        current->flags &= ~PF_NPROC_EXCEEDED;
1215
1216        retval = copy_creds(p, clone_flags);
1217        if (retval < 0)
1218                goto bad_fork_free;
1219
1220        /*
1221         * If multiple threads are within copy_process(), then this check
1222         * triggers too late. This doesn't hurt, the check is only there
1223         * to stop root fork bombs.
1224         */
1225        retval = -EAGAIN;
1226        if (nr_threads >= max_threads)
1227                goto bad_fork_cleanup_count;
1228
1229        if (!try_module_get(task_thread_info(p)->exec_domain->module))
1230                goto bad_fork_cleanup_count;
1231
1232        p->did_exec = 0;
1233        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
1234        copy_flags(clone_flags, p);
1235        INIT_LIST_HEAD(&p->children);
1236        INIT_LIST_HEAD(&p->sibling);
1237        rcu_copy_process(p);
1238        p->vfork_done = NULL;
1239        spin_lock_init(&p->alloc_lock);
1240
1241        init_sigpending(&p->pending);
1242
1243        p->utime = p->stime = p->gtime = 0;
1244        p->utimescaled = p->stimescaled = 0;
1245#ifndef CONFIG_VIRT_CPU_ACCOUNTING
1246        p->prev_utime = p->prev_stime = 0;
1247#endif
1248#if defined(SPLIT_RSS_COUNTING)
1249        memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1250#endif
1251
1252        p->default_timer_slack_ns = current->timer_slack_ns;
1253
1254        task_io_accounting_init(&p->ioac);
1255        acct_clear_integrals(p);
1256
1257        posix_cpu_timers_init(p);
1258
1259        do_posix_clock_monotonic_gettime(&p->start_time);
1260        p->real_start_time = p->start_time;
1261        monotonic_to_bootbased(&p->real_start_time);
1262        p->io_context = NULL;
1263        p->audit_context = NULL;
1264        if (clone_flags & CLONE_THREAD)
1265                threadgroup_change_begin(current);
1266        cgroup_fork(p);
1267#ifdef CONFIG_NUMA
1268        p->mempolicy = mpol_dup(p->mempolicy);
1269        if (IS_ERR(p->mempolicy)) {
1270                retval = PTR_ERR(p->mempolicy);
1271                p->mempolicy = NULL;
1272                goto bad_fork_cleanup_cgroup;
1273        }
1274        mpol_fix_fork_child_flag(p);
1275#endif
1276#ifdef CONFIG_CPUSETS
1277        p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1278        p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
1279        seqcount_init(&p->mems_allowed_seq);
1280#endif
1281#ifdef CONFIG_TRACE_IRQFLAGS
1282        p->irq_events = 0;
1283#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1284        p->hardirqs_enabled = 1;
1285#else
1286        p->hardirqs_enabled = 0;
1287#endif
1288        p->hardirq_enable_ip = 0;
1289        p->hardirq_enable_event = 0;
1290        p->hardirq_disable_ip = _THIS_IP_;
1291        p->hardirq_disable_event = 0;
1292        p->softirqs_enabled = 1;
1293        p->softirq_enable_ip = _THIS_IP_;
1294        p->softirq_enable_event = 0;
1295        p->softirq_disable_ip = 0;
1296        p->softirq_disable_event = 0;
1297        p->hardirq_context = 0;
1298        p->softirq_context = 0;
1299#endif
1300#ifdef CONFIG_LOCKDEP
1301        p->lockdep_depth = 0; /* no locks held yet */
1302        p->curr_chain_key = 0;
1303        p->lockdep_recursion = 0;
1304#endif
1305
1306#ifdef CONFIG_DEBUG_MUTEXES
1307        p->blocked_on = NULL; /* not blocked yet */
1308#endif
1309#ifdef CONFIG_MEMCG
1310        p->memcg_batch.do_batch = 0;
1311        p->memcg_batch.memcg = NULL;
1312#endif
1313
1314        /* Perform scheduler related setup. Assign this task to a CPU. */
1315        sched_fork(p);
1316
1317        retval = perf_event_init_task(p);
1318        if (retval)
1319                goto bad_fork_cleanup_policy;
1320        retval = audit_alloc(p);
1321        if (retval)
1322                goto bad_fork_cleanup_policy;
1323        /* copy all the process information */
1324        retval = copy_semundo(clone_flags, p);
1325        if (retval)
1326                goto bad_fork_cleanup_audit;
1327        retval = copy_files(clone_flags, p);
1328        if (retval)
1329                goto bad_fork_cleanup_semundo;
1330        retval = copy_fs(clone_flags, p);
1331        if (retval)
1332                goto bad_fork_cleanup_files;
1333        retval = copy_sighand(clone_flags, p);
1334        if (retval)
1335                goto bad_fork_cleanup_fs;
1336        retval = copy_signal(clone_flags, p);
1337        if (retval)
1338                goto bad_fork_cleanup_sighand;
1339        retval = copy_mm(clone_flags, p);
1340        if (retval)
1341                goto bad_fork_cleanup_signal;
1342        retval = copy_namespaces(clone_flags, p);
1343        if (retval)
1344                goto bad_fork_cleanup_mm;
1345        retval = copy_io(clone_flags, p);
1346        if (retval)
1347                goto bad_fork_cleanup_namespaces;
1348        retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
1349        if (retval)
1350                goto bad_fork_cleanup_io;
1351
1352        if (pid != &init_struct_pid) {
1353                retval = -ENOMEM;
1354                pid = alloc_pid(p->nsproxy->pid_ns);
1355                if (!pid)
1356                        goto bad_fork_cleanup_io;
1357        }
1358
1359        p->pid = pid_nr(pid);
1360        p->tgid = p->pid;
1361        if (clone_flags & CLONE_THREAD)
1362                p->tgid = current->tgid;
1363
1364        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1365        /*
1366         * Clear TID on mm_release()?
1367         */
1368        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1369#ifdef CONFIG_BLOCK
1370        p->plug = NULL;
1371#endif
1372#ifdef CONFIG_FUTEX
1373        p->robust_list = NULL;
1374#ifdef CONFIG_COMPAT
1375        p->compat_robust_list = NULL;
1376#endif
1377        INIT_LIST_HEAD(&p->pi_state_list);
1378        p->pi_state_cache = NULL;
1379#endif
1380        uprobe_copy_process(p);
1381        /*
1382         * sigaltstack should be cleared when sharing the same VM
1383         */
1384        if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1385                p->sas_ss_sp = p->sas_ss_size = 0;
1386
1387        /*
1388         * Syscall tracing and stepping should be turned off in the
1389         * child regardless of CLONE_PTRACE.
1390         */
1391        user_disable_single_step(p);
1392        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1393#ifdef TIF_SYSCALL_EMU
1394        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1395#endif
1396        clear_all_latency_tracing(p);
1397
1398        /* ok, now we should be set up.. */
1399        if (clone_flags & CLONE_THREAD)
1400                p->exit_signal = -1;
1401        else if (clone_flags & CLONE_PARENT)
1402                p->exit_signal = current->group_leader->exit_signal;
1403        else
1404                p->exit_signal = (clone_flags & CSIGNAL);
1405
1406        p->pdeath_signal = 0;
1407        p->exit_state = 0;
1408
1409        p->nr_dirtied = 0;
1410        p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
1411        p->dirty_paused_when = 0;
1412
1413        /*
1414         * Ok, make it visible to the rest of the system.
 1415         * We don't wake it up yet.
1416         */
1417        p->group_leader = p;
1418        INIT_LIST_HEAD(&p->thread_group);
1419        p->task_works = NULL;
1420
1421        /* Now that the task is set up, run cgroup callbacks if
1422         * necessary. We need to run them before the task is visible
1423         * on the tasklist. */
1424        cgroup_fork_callbacks(p);
1425        cgroup_callbacks_done = 1;
1426
1427        /* Need tasklist lock for parent etc handling! */
1428        write_lock_irq(&tasklist_lock);
1429
1430        /* CLONE_PARENT re-uses the old parent */
1431        if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1432                p->real_parent = current->real_parent;
1433                p->parent_exec_id = current->parent_exec_id;
1434        } else {
1435                p->real_parent = current;
1436                p->parent_exec_id = current->self_exec_id;
1437        }
1438
1439        spin_lock(&current->sighand->siglock);
1440
1441        /*
1442         * Process group and session signals need to be delivered to just the
1443         * parent before the fork or both the parent and the child after the
1444         * fork. Restart if a signal comes in before we add the new process to
 1445         * its process group.
1446         * A fatal signal pending means that current will exit, so the new
1447         * thread can't slip out of an OOM kill (or normal SIGKILL).
1448        */
1449        recalc_sigpending();
1450        if (signal_pending(current)) {
1451                spin_unlock(&current->sighand->siglock);
1452                write_unlock_irq(&tasklist_lock);
1453                retval = -ERESTARTNOINTR;
1454                goto bad_fork_free_pid;
1455        }
1456
1457        if (clone_flags & CLONE_THREAD) {
1458                current->signal->nr_threads++;
1459                atomic_inc(&current->signal->live);
1460                atomic_inc(&current->signal->sigcnt);
1461                p->group_leader = current->group_leader;
1462                list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1463        }
1464
1465        if (likely(p->pid)) {
1466                ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
1467
1468                if (thread_group_leader(p)) {
1469                        if (is_child_reaper(pid))
1470                                p->nsproxy->pid_ns->child_reaper = p;
1471
1472                        p->signal->leader_pid = pid;
1473                        p->signal->tty = tty_kref_get(current->signal->tty);
1474                        attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1475                        attach_pid(p, PIDTYPE_SID, task_session(current));
1476                        list_add_tail(&p->sibling, &p->real_parent->children);
1477                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
1478                        __this_cpu_inc(process_counts);
1479                }
1480                attach_pid(p, PIDTYPE_PID, pid);
1481                nr_threads++;
1482        }
1483
1484        total_forks++;
1485        spin_unlock(&current->sighand->siglock);
1486        write_unlock_irq(&tasklist_lock);
1487        proc_fork_connector(p);
1488        cgroup_post_fork(p);
1489        if (clone_flags & CLONE_THREAD)
1490                threadgroup_change_end(current);
1491        perf_event_fork(p);
1492
1493        trace_task_newtask(p, clone_flags);
1494
1495        return p;
1496
1497bad_fork_free_pid:
1498        if (pid != &init_struct_pid)
1499                free_pid(pid);
1500bad_fork_cleanup_io:
1501        if (p->io_context)
1502                exit_io_context(p);
1503bad_fork_cleanup_namespaces:
1504        if (unlikely(clone_flags & CLONE_NEWPID))
1505                pid_ns_release_proc(p->nsproxy->pid_ns);
1506        exit_task_namespaces(p);
1507bad_fork_cleanup_mm:
1508        if (p->mm)
1509                mmput(p->mm);
1510bad_fork_cleanup_signal:
1511        if (!(clone_flags & CLONE_THREAD))
1512                free_signal_struct(p->signal);
1513bad_fork_cleanup_sighand:
1514        __cleanup_sighand(p->sighand);
1515bad_fork_cleanup_fs:
1516        exit_fs(p); /* blocking */
1517bad_fork_cleanup_files:
1518        exit_files(p); /* blocking */
1519bad_fork_cleanup_semundo:
1520        exit_sem(p);
1521bad_fork_cleanup_audit:
1522        audit_free(p);
1523bad_fork_cleanup_policy:
1524        perf_event_free_task(p);
1525#ifdef CONFIG_NUMA
1526        mpol_put(p->mempolicy);
1527bad_fork_cleanup_cgroup:
1528#endif
1529        if (clone_flags & CLONE_THREAD)
1530                threadgroup_change_end(current);
1531        cgroup_exit(p, cgroup_callbacks_done);
1532        delayacct_tsk_free(p);
1533        module_put(task_thread_info(p)->exec_domain->module);
1534bad_fork_cleanup_count:
1535        atomic_dec(&p->cred->user->processes);
1536        exit_creds(p);
1537bad_fork_free:
1538        free_task(p);
1539fork_out:
1540        return ERR_PTR(retval);
1541}
1542
1543noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
1544{
1545        memset(regs, 0, sizeof(struct pt_regs));
1546        return regs;
1547}
1548
1549static inline void init_idle_pids(struct pid_link *links)
1550{
1551        enum pid_type type;
1552
1553        for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1554                INIT_HLIST_NODE(&links[type].node); /* not really needed */
1555                links[type].pid = &init_struct_pid;
1556        }
1557}
1558
1559struct task_struct * __cpuinit fork_idle(int cpu)
1560{
1561        struct task_struct *task;
1562        struct pt_regs regs;
1563
1564        task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
1565                            &init_struct_pid, 0);
1566        if (!IS_ERR(task)) {
1567                init_idle_pids(task->pids);
1568                init_idle(task, cpu);
1569        }
1570
1571        return task;
1572}
1573
1574/*
1575 *  Ok, this is the main fork-routine.
1576 *
1577 * It copies the process, and if successful kick-starts
1578 * it and waits for it to finish using the VM if required.
1579 */
1580long do_fork(unsigned long clone_flags,
1581              unsigned long stack_start,
1582              struct pt_regs *regs,
1583              unsigned long stack_size,
1584              int __user *parent_tidptr,
1585              int __user *child_tidptr)
1586{
1587        struct task_struct *p;
1588        int trace = 0;
1589        long nr;
1590
1591        /*
1592         * Do some preliminary argument and permissions checking before we
1593         * actually start allocating stuff
1594         */
1595        if (clone_flags & CLONE_NEWUSER) {
1596                if (clone_flags & CLONE_THREAD)
1597                        return -EINVAL;
1598                /* hopefully this check will go away when userns support is
1599                 * complete
1600                 */
1601                if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
1602                                !capable(CAP_SETGID))
1603                        return -EPERM;
1604        }
1605
1606        /*
1607         * Determine whether and which event to report to ptracer.  When
1608         * called from kernel_thread or CLONE_UNTRACED is explicitly
1609         * requested, no event is reported; otherwise, report if the event
1610         * for the type of forking is enabled.
1611         */
1612        if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
1613                if (clone_flags & CLONE_VFORK)
1614                        trace = PTRACE_EVENT_VFORK;
1615                else if ((clone_flags & CSIGNAL) != SIGCHLD)
1616                        trace = PTRACE_EVENT_CLONE;
1617                else
1618                        trace = PTRACE_EVENT_FORK;
1619
1620                if (likely(!ptrace_event_enabled(current, trace)))
1621                        trace = 0;
1622        }
1623
1624        p = copy_process(clone_flags, stack_start, regs, stack_size,
1625                         child_tidptr, NULL, trace);
1626        /*
 1627         * Do this prior to waking up the new thread - the thread pointer
1628         * might get invalid after that point, if the thread exits quickly.
1629         */
1630        if (!IS_ERR(p)) {
1631                struct completion vfork;
1632
1633                trace_sched_process_fork(current, p);
1634
1635                nr = task_pid_vnr(p);
1636
1637                if (clone_flags & CLONE_PARENT_SETTID)
1638                        put_user(nr, parent_tidptr);
1639
1640                if (clone_flags & CLONE_VFORK) {
1641                        p->vfork_done = &vfork;
1642                        init_completion(&vfork);
1643                        get_task_struct(p);
1644                }
1645
1646                wake_up_new_task(p);
1647
1648                /* forking complete and child started to run, tell ptracer */
1649                if (unlikely(trace))
1650                        ptrace_event(trace, nr);
1651
1652                if (clone_flags & CLONE_VFORK) {
1653                        if (!wait_for_vfork_done(p, &vfork))
1654                                ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
1655                }
1656        } else {
1657                nr = PTR_ERR(p);
1658        }
1659        return nr;
1660}
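/*
 * Userspace sketch (not part of the original file) of the CLONE_VFORK path
 * handled in do_fork() above: vfork() lends the parent's mm to the child and
 * blocks the parent in wait_for_vfork_done() until the child execs or exits.
 * Kept under #if 0.
 */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = vfork();

        if (pid == -1) {
                perror("vfork");
                return 1;
        }
        if (pid == 0) {
                /* child: only execve()/_exit() are safe while borrowing the mm */
                execlp("true", "true", (char *)NULL);
                _exit(127);
        }
        /* the parent only gets here once the child is done with the mm */
        waitpid(pid, NULL, 0);
        printf("vfork child %d finished with the borrowed mm\n", (int)pid);
        return 0;
}
#endif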
1661
1662#ifndef ARCH_MIN_MMSTRUCT_ALIGN
1663#define ARCH_MIN_MMSTRUCT_ALIGN 0
1664#endif
1665
1666static void sighand_ctor(void *data)
1667{
1668        struct sighand_struct *sighand = data;
1669
1670        spin_lock_init(&sighand->siglock);
1671        init_waitqueue_head(&sighand->signalfd_wqh);
1672}
1673
1674void __init proc_caches_init(void)
1675{
1676        sighand_cachep = kmem_cache_create("sighand_cache",
1677                        sizeof(struct sighand_struct), 0,
1678                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
1679                        SLAB_NOTRACK, sighand_ctor);
1680        signal_cachep = kmem_cache_create("signal_cache",
1681                        sizeof(struct signal_struct), 0,
1682                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1683        files_cachep = kmem_cache_create("files_cache",
1684                        sizeof(struct files_struct), 0,
1685                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1686        fs_cachep = kmem_cache_create("fs_cache",
1687                        sizeof(struct fs_struct), 0,
1688                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1689        /*
1690         * FIXME! The "sizeof(struct mm_struct)" currently includes the
1691         * whole struct cpumask for the OFFSTACK case. We could change
1692         * this to *only* allocate as much of it as required by the
1693         * maximum number of CPUs we can ever have.  The cpumask_allocation
1694         * is at the end of the structure, exactly for that reason.
1695         */
1696        mm_cachep = kmem_cache_create("mm_struct",
1697                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1698                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1699        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
1700        mmap_init();
1701        nsproxy_cache_init();
1702}
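
/*
 * Minimal sketch of the slab pattern used for sighand_cachep above: a
 * dedicated cache whose constructor runs when a new slab of objects is
 * populated, so locks and waitqueues are already initialised by the time
 * kmem_cache_alloc() hands an object out.  "foo" and its helpers are
 * made-up names for illustration only; guarded by #if 0 so it is never
 * built as part of this file.
 */
#if 0
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	int value;
};

static struct kmem_cache *foo_cachep;

static void foo_ctor(void *data)
{
	struct foo *f = data;

	spin_lock_init(&f->lock);
}

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, foo_ctor);
	return foo_cachep ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(void)
{
	/* ->lock was initialised by foo_ctor(), not here. */
	return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}
#endif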
1703
1704/*
1705 * Check constraints on flags passed to the unshare system call.
1706 */
1707static int check_unshare_flags(unsigned long unshare_flags)
1708{
1709        if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1710                                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1711                                CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
1712                return -EINVAL;
1713        /*
1714         * Not implemented, but pretend it works if there is nothing to
1715         * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
1716         * also requires unsharing the VM.
1717         */
1718        if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
1719                /* FIXME: get_task_mm() increments ->mm_users */
1720                if (atomic_read(&current->mm->mm_users) > 1)
1721                        return -EINVAL;
1722        }
1723
1724        return 0;
1725}
1726
1727/*
1728 * Unshare the filesystem structure if it is being shared
1729 */
1730static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1731{
1732        struct fs_struct *fs = current->fs;
1733
1734        if (!(unshare_flags & CLONE_FS) || !fs)
1735                return 0;
1736
1737        /* don't need lock here; in the worst case we'll do a useless copy */
1738        if (fs->users == 1)
1739                return 0;
1740
1741        *new_fsp = copy_fs_struct(fs);
1742        if (!*new_fsp)
1743                return -ENOMEM;
1744
1745        return 0;
1746}
1747
1748/*
1749 * Unshare file descriptor table if it is being shared
1750 */
1751static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
1752{
1753        struct files_struct *fd = current->files;
1754        int error = 0;
1755
1756        if ((unshare_flags & CLONE_FILES) &&
1757            (fd && atomic_read(&fd->count) > 1)) {
1758                *new_fdp = dup_fd(fd, &error);
1759                if (!*new_fdp)
1760                        return error;
1761        }
1762
1763        return 0;
1764}
1765
1766/*
1767 * unshare allows a process to 'unshare' part of the process
1768 * context which was originally shared using clone.  copy_*
1769 * functions used by do_fork() cannot be used here directly
1770 * because they modify an inactive task_struct that is being
1771 * constructed. Here we are modifying the current, active
1772 * task_struct.
1773 */
1774SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1775{
1776        struct fs_struct *fs, *new_fs = NULL;
1777        struct files_struct *fd, *new_fd = NULL;
1778        struct nsproxy *new_nsproxy = NULL;
1779        int do_sysvsem = 0;
1780        int err;
1781
1782        err = check_unshare_flags(unshare_flags);
1783        if (err)
1784                goto bad_unshare_out;
1785
1786        /*
1787         * If unsharing namespace, must also unshare filesystem information.
1788         */
1789        if (unshare_flags & CLONE_NEWNS)
1790                unshare_flags |= CLONE_FS;
1791        /*
1792         * CLONE_NEWIPC must also detach from the undolist: after switching
1793         * to a new ipc namespace, the semaphore arrays from the old
1794         * namespace are unreachable.
1795         */
1796        if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
1797                do_sysvsem = 1;
1798        err = unshare_fs(unshare_flags, &new_fs);
1799        if (err)
1800                goto bad_unshare_out;
1801        err = unshare_fd(unshare_flags, &new_fd);
1802        if (err)
1803                goto bad_unshare_cleanup_fs;
1804        err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs);
1805        if (err)
1806                goto bad_unshare_cleanup_fd;
1807
1808        if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
1809                if (do_sysvsem) {
1810                        /*
1811                         * CLONE_SYSVSEM is equivalent to sys_exit().
1812                         */
1813                        exit_sem(current);
1814                }
1815
1816                if (new_nsproxy) {
1817                        switch_task_namespaces(current, new_nsproxy);
1818                        new_nsproxy = NULL;
1819                }
1820
1821                task_lock(current);
1822
1823                if (new_fs) {
1824                        fs = current->fs;
1825                        spin_lock(&fs->lock);
1826                        current->fs = new_fs;
1827                        if (--fs->users)
1828                                new_fs = NULL;
1829                        else
1830                                new_fs = fs;
1831                        spin_unlock(&fs->lock);
1832                }
1833
1834                if (new_fd) {
1835                        fd = current->files;
1836                        current->files = new_fd;
1837                        new_fd = fd;
1838                }
1839
1840                task_unlock(current);
1841        }
1842
1843        if (new_nsproxy)
1844                put_nsproxy(new_nsproxy);
1845
1846bad_unshare_cleanup_fd:
1847        if (new_fd)
1848                put_files_struct(new_fd);
1849
1850bad_unshare_cleanup_fs:
1851        if (new_fs)
1852                free_fs_struct(new_fs);
1853
1854bad_unshare_out:
1855        return err;
1856}
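
/*
 * Illustrative userspace sketch (not part of fork.c) of the unshare(2)
 * call handled by sys_unshare() above.  CLONE_FILES gives the caller a
 * private descriptor table; CLONE_NEWNS implies CLONE_FS (as enforced
 * above) and needs CAP_SYS_ADMIN.  Guarded by #if 0; error handling is
 * reduced to perror().
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* Stop sharing the fd table with any clone(CLONE_FILES) siblings. */
	if (unshare(CLONE_FILES) == -1)
		perror("unshare(CLONE_FILES)");

	/* Get a private mount namespace; requires CAP_SYS_ADMIN. */
	if (unshare(CLONE_NEWNS) == -1)
		perror("unshare(CLONE_NEWNS)");

	return 0;
}
#endif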
1857
1858/*
1859 *      Helper to unshare the files of the current task.
1860 *      We don't want to expose copy_files internals to
1861 *      the exec layer of the kernel.
1862 */
1863
1864int unshare_files(struct files_struct **displaced)
1865{
1866        struct task_struct *task = current;
1867        struct files_struct *copy = NULL;
1868        int error;
1869
1870        error = unshare_fd(CLONE_FILES, &copy);
1871        if (error || !copy) {
1872                *displaced = NULL;
1873                return error;
1874        }
1875        *displaced = task->files;
1876        task_lock(task);
1877        task->files = copy;
1878        task_unlock(task);
1879        return 0;
1880}
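
/*
 * Sketch of the intended caller pattern for unshare_files(), assuming the
 * usual exec-style usage: take a private fd table for the duration of the
 * work, then drop the displaced one.  example_exec_like_path() is a
 * made-up name; guarded by #if 0 so it is never built.
 */
#if 0
#include <linux/fdtable.h>

static int example_exec_like_path(void)
{
	struct files_struct *displaced;
	int retval;

	retval = unshare_files(&displaced);
	if (retval)
		return retval;

	/* ... work with a file descriptor table that is now private ... */

	if (displaced)
		put_files_struct(displaced);
	return 0;
}
#endif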
1881