linux/arch/x86/kernel/process_32.c
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/dmi.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/smp.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        return ((unsigned long *)tsk->thread.sp)[3];
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

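        /*
         * TS_POLLING tells other CPUs that this idle task polls
         * need_resched(), so a remote CPU that wants us to reschedule
         * can simply set the flag instead of sending a reschedule IPI.
         */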
        current_thread_info()->status |= TS_POLLING;

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick(1);
                while (!need_resched()) {

                        check_pgt_cache();
                        rmb();

                        if (rcu_pending(cpu))
                                rcu_check_callbacks(cpu, 0);

                        if (cpu_is_offline(cpu))
                                play_dead();

                        local_irq_disable();
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
                        pm_idle();
                        start_critical_timings();
                }
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

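/*
 * Register dump used for oopses, sysrq and the like.  When 'all' is
 * non-zero the control and debug registers are printed as well;
 * show_regs() below always asks for everything.
 */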
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned long sp;
        unsigned short ss, gs;
        const char *board;

        if (user_mode_vm(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
                savesegment(gs, gs);
        } else {
                sp = (unsigned long) (&regs->sp);
                savesegment(ss, ss);
                savesegment(gs, gs);
        }

        printk("\n");

        board = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (!board)
                board = "";
        printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
                        task_pid_nr(current), current->comm,
                        print_tainted(), init_utsname()->release,
                        (int)strcspn(init_utsname()->version, " "),
                        init_utsname()->version, board);

        printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
                        (u16)regs->cs, regs->ip, regs->flags,
                        smp_processor_id());
        print_symbol("EIP is at %s\n", regs->ip);

        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->ax, regs->bx, regs->cx, regs->dx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
                regs->si, regs->di, regs->bp, sp);
        printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
               (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4_safe();
        printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                        cr0, cr2, cr3, cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
                        d0, d1, d2, d3);

        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk("DR6: %08lx DR7: %08lx\n",
                        d6, d7);
}

void show_regs(struct pt_regs *regs)
{
        __show_regs(regs, 1);
        show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

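/*
 * A rough, hypothetical usage example for kernel_thread() (the names
 * are illustrative, not taken from a real caller):
 *
 *      static int worker(void *data)
 *      {
 *              do_something(data);
 *              return 0;
 *      }
 *
 *      pid = kernel_thread(worker, ctx, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * The child starts in kernel_thread_helper (entry_32.S), which calls
 * fn(arg) and passes the return value to do_exit().
 */
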
/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.bx = (unsigned long) fn;
        regs.dx = (unsigned long) arg;

        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
        regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        /* The process may have allocated an io port bitmap... nuke it. */
        if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
                struct task_struct *tsk = current;
                struct thread_struct *t = &tsk->thread;
                int cpu = get_cpu();
                struct tss_struct *tss = &per_cpu(init_tss, cpu);

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
                t->io_bitmap_max = 0;
                tss->io_bitmap_owner = NULL;
                tss->io_bitmap_max = 0;
                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                put_cpu();
        }
#ifdef CONFIG_X86_DS
        /* Free any DS contexts that have not been properly released. */
        if (unlikely(current->thread.ds_ctx)) {
                /* we clear debugctl to make sure DS is not used. */
                update_debugctlmsr(0);
                ds_free(current->thread.ds_ctx);
        }
#endif /* CONFIG_X86_DS */
}

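/*
 * Called on execve(): the new program must not inherit the old one's
 * hardware breakpoints, TLS entries or FPU state, so reset them here.
 */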
void flush_thread(void)
{
        struct task_struct *tsk = current;

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        clear_tsk_thread_flag(tsk, TIF_DEBUG);
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
        BUG_ON(dead_task->mm);
        release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

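/*
 * Set up the kernel stack and thread structure of a newly forked
 * child: it gets a copy of the parent's user register frame with
 * %eax forced to 0 (the child's fork() return value) and will first
 * run ret_from_fork when it is scheduled in.
 */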
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        unsigned long unused,
        struct task_struct * p, struct pt_regs * regs)
{
        struct pt_regs * childregs;
        struct task_struct *tsk;
        int err;

        childregs = task_pt_regs(p);
        *childregs = *regs;
        childregs->ax = 0;
        childregs->sp = sp;

        p->thread.sp = (unsigned long) childregs;
        p->thread.sp0 = (unsigned long) (childregs+1);

        p->thread.ip = (unsigned long) ret_from_fork;

        savesegment(gs, p->thread.gs);

        tsk = current;
        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
                                                IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        err = 0;

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS)
                err = do_set_thread_area(p, -1,
                        (struct user_desc __user *)childregs->si, 0);

        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}

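/*
 * Called from the binary format loaders (e.g. the ELF loader) at the
 * end of execve(): load the flat user segments and point the register
 * frame at the new program's entry point and stack.
 */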
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        __asm__("movl %0, %%gs" :: "r"(0));
        regs->fs                = 0;
        set_fs(USER_DS);
        regs->ds                = __USER_DS;
        regs->es                = __USER_DS;
        regs->ss                = __USER_DS;
        regs->cs                = __USER_CS;
        regs->ip                = new_ip;
        regs->sp                = new_sp;
        /*
         * Free the old FP and other extended state
         */
        free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

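/*
 * Setting CR4.TSD makes RDTSC a privileged instruction, so a user
 * space RDTSC raises #GP and the task gets SIGSEGV; clearing the bit
 * makes RDTSC available to user space again.
 */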
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

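/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC and PR_SET_TSC
 * prctl() operations.  For example, a process can revoke its own
 * access to the TSC with:
 *
 *      prctl(PR_SET_TSC, PR_TSC_SIGSEGV);
 *
 * after which executing RDTSC delivers SIGSEGV instead of a counter
 * value.
 */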
int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}

#ifdef CONFIG_X86_DS
static int update_debugctl(struct thread_struct *prev,
                        struct thread_struct *next, unsigned long debugctl)
{
        unsigned long ds_prev = 0;
        unsigned long ds_next = 0;

        if (prev->ds_ctx)
                ds_prev = (unsigned long)prev->ds_ctx->ds;
        if (next->ds_ctx)
                ds_next = (unsigned long)next->ds_ctx->ds;

        if (ds_next != ds_prev) {
                /* we clear debugctl to make sure DS
                 * is not in use when we change it */
                debugctl = 0;
                update_debugctlmsr(0);
                wrmsr(MSR_IA32_DS_AREA, ds_next, 0);
        }
        return debugctl;
}
#else
static int update_debugctl(struct thread_struct *prev,
                        struct thread_struct *next, unsigned long debugctl)
{
        return debugctl;
}
#endif /* CONFIG_X86_DS */

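/*
 * Slow path of the context switch: only taken when the outgoing or
 * the incoming task has one of the "extra work" flags set (hardware
 * breakpoints, I/O bitmap, TSC restrictions, BTS tracing); see the
 * _TIF_WORK_CTXSW test in __switch_to() below.
 */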
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                 struct tss_struct *tss)
{
        struct thread_struct *prev, *next;
        unsigned long debugctl;

        prev = &prev_p->thread;
        next = &next_p->thread;

        debugctl = update_debugctl(prev, next, prev->debugctlmsr);

        if (next->debugctlmsr != debugctl)
                update_debugctlmsr(next->debugctlmsr);

        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                set_debugreg(next->debugreg0, 0);
                set_debugreg(next->debugreg1, 1);
                set_debugreg(next->debugreg2, 2);
                set_debugreg(next->debugreg3, 3);
                /* no 4 and 5 */
                set_debugreg(next->debugreg6, 6);
                set_debugreg(next->debugreg7, 7);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

#ifdef CONFIG_X86_PTRACE_BTS
        if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

        if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif /* CONFIG_X86_PTRACE_BTS */


        if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Disable the bitmap via an invalid offset. We still cache
                 * the previous bitmap owner and the IO bitmap contents:
                 */
                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                return;
        }

        if (likely(next == tss->io_bitmap_owner)) {
                /*
                 * Previous owner of the bitmap (hence the bitmap content)
                 * matches the next task, we don't have to do anything but
                 * set a valid offset in the TSS:
                 */
                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                return;
        }
        /*
         * Lazy TSS I/O bitmap copy. We set an invalid offset here and
         * let the task get a GPF in case an I/O instruction is
         * performed.  The GPF handler verifies that the faulting task
         * has a valid I/O bitmap and, if true, does the real copy and
         * restarts the instruction.  This saves us redundant copies
         * when the currently switched task does not perform any I/O
         * during its timeslice.
         */
        tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

/*
 *      switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is something of a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                                 *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

        __unlazy_fpu(prev_p);


        /* we're going to use this soon, after a few expensive things */
        if (next_p->fpu_counter > 5)
                prefetch(next->xstate);

        /*
         * Reload esp0.
         */
        load_sp0(tss, next);

        /*
         * Save away %gs. No need to save %fs, as it was saved on the
         * stack on entry.  No need to save %es and %ds, as those are
         * always kernel segments while inside the kernel.  Doing this
         * before setting the new TLS descriptors avoids the situation
         * where we temporarily have non-reloadable segments in %fs
         * and %gs.  This could be an issue if the NMI handler ever
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
        savesegment(gs, prev->gs);

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         */
        load_TLS(next, cpu);

        /*
         * Restore IOPL if needed.  In normal use, the flags restore
         * in the switch assembly will handle this.  But if the kernel
         * is running virtualized at a non-zero CPL, the popf will
         * not restore flags, so it must be done in a separate step.
         */
        if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);

        /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
        if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
                     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
                __switch_to_xtra(prev_p, next_p, tss);

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_leave_lazy_cpu_mode();

        /* If the task has used the FPU in the last 5 timeslices, just do a
         * full restore of the math state immediately to avoid the trap; the
         * chances of needing the FPU soon are obviously high now.
         *
         * The tsk_used_math() check prevents calling math_state_restore(),
         * which can sleep in the case of !tsk_used_math().
         */
        if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
                math_state_restore();

        /*
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
                loadsegment(gs, next->gs);

        x86_write_percpu(current_task, next_p);

        return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
        return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

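/*
 * On 32-bit x86 the fork/clone/vfork/execve stubs in entry_32.S push
 * the full register frame, so these syscalls take a struct pt_regs by
 * value.  For clone() the arguments arrive in the registers the user
 * side loaded for the syscall: %ebx = clone_flags, %ecx = new stack
 * pointer, %edx = parent tid pointer, %edi = child tid pointer (and,
 * for CLONE_SETTLS, the user_desc pointer in %esi, which copy_thread()
 * above picks up from the child's register frame).
 */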
asmlinkage int sys_clone(struct pt_regs regs)
{
        unsigned long clone_flags;
        unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;

        clone_flags = regs.bx;
        newsp = regs.cx;
        parent_tidptr = (int __user *)regs.dx;
        child_tidptr = (int __user *)regs.di;
        if (!newsp)
                newsp = regs.sp;
        return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
        int error;
        char * filename;

        filename = getname((char __user *) regs.bx);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                        (char __user * __user *) regs.cx,
                        (char __user * __user *) regs.dx,
                        &regs);
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
        putname(filename);
out:
        return error;
}

#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

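/*
 * get_wchan() reports where a blocked task is sleeping (this is what
 * shows up in /proc/<pid>/wchan).  It walks the saved frame-pointer
 * chain on the task's kernel stack and returns the first return
 * address that is not inside the scheduler itself.
 */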
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long bp, sp, ip;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack_page = (unsigned long)task_stack_page(p);
        sp = p->thread.sp;
        if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
                return 0;
        /* include/asm-i386/system.h:switch_to() pushes bp last. */
        bp = *(unsigned long *) sp;
        do {
                if (bp < stack_page || bp > top_ebp+stack_page)
                        return 0;
                ip = *(unsigned long *) (bp+4);
                if (!in_sched_functions(ip))
                        return ip;
                bp = *(unsigned long *) bp;
        } while (count++ < 16);
        return 0;
}

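/*
 * Stack and heap randomization for new processes: arch_align_stack()
 * shifts the initial user stack down by up to 8 kB (16-byte aligned)
 * when address space randomization is enabled, and
 * arch_randomize_brk() picks a random heap start within 32 MB above
 * the binary's brk.
 */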
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}