/* linux/arch/x86/kernel/process.c */
   1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   2
   3#include <linux/errno.h>
   4#include <linux/kernel.h>
   5#include <linux/mm.h>
   6#include <linux/smp.h>
   7#include <linux/prctl.h>
   8#include <linux/slab.h>
   9#include <linux/sched.h>
  10#include <linux/module.h>
  11#include <linux/pm.h>
  12#include <linux/clockchips.h>
  13#include <linux/random.h>
  14#include <linux/user-return-notifier.h>
  15#include <linux/dmi.h>
  16#include <linux/utsname.h>
  17#include <linux/stackprotector.h>
  18#include <linux/tick.h>
  19#include <linux/cpuidle.h>
  20#include <trace/events/power.h>
  21#include <linux/hw_breakpoint.h>
  22#include <asm/cpu.h>
  23#include <asm/apic.h>
  24#include <asm/syscalls.h>
  25#include <asm/idle.h>
  26#include <asm/uaccess.h>
  27#include <asm/i387.h>
  28#include <asm/fpu-internal.h>
  29#include <asm/debugreg.h>
  30#include <asm/nmi.h>
  31
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
  40
#ifdef CONFIG_X86_64
/* Per-cpu flag: nonzero while this CPU is inside the idle loop (see enter_idle()). */
static DEFINE_PER_CPU(unsigned char, is_idle);
/* Notifier chain run on IDLE_START/IDLE_END transitions. */
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
  44
/* Register @n to be called on idle entry/exit (x86-64 only). */
void idle_notifier_register(struct notifier_block *n)
{
        atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);
  50
/* Remove @n from the idle notifier chain. */
void idle_notifier_unregister(struct notifier_block *n)
{
        atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
  56#endif
  57
/* Slab cache for per-task extended FPU state; created in arch_task_cache_init(). */
struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);
  60
  61/*
  62 * this gets called so that we can store lazy state into memory and copy the
  63 * current task into the new thread.
  64 */
  65int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
  66{
  67        int ret;
  68
  69        unlazy_fpu(src);
  70
  71        *dst = *src;
  72        if (fpu_allocated(&src->thread.fpu)) {
  73                memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
  74                ret = fpu_alloc(&dst->thread.fpu);
  75                if (ret)
  76                        return ret;
  77                fpu_copy(&dst->thread.fpu, &src->thread.fpu);
  78        }
  79        return 0;
  80}
  81
/* Release @tsk's extended FPU state buffer, if one was allocated. */
void free_thread_xstate(struct task_struct *tsk)
{
        fpu_free(&tsk->thread.fpu);
}
  86
/* Arch hook run when a task_struct is released: free its xstate buffer. */
void arch_release_task_struct(struct task_struct *tsk)
{
        free_thread_xstate(tsk);
}
  91
/*
 * Create the slab cache backing per-task FPU/extended state buffers.
 * SLAB_PANIC: boot cannot usefully continue if this allocation fails.
 */
void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC | SLAB_NOTRACK, NULL);
}
  99
/*
 * Drop the task's FPU state: reset the lazy-restore counter, clear any
 * live FPU context and mark the task as never having used math.
 */
static inline void drop_fpu(struct task_struct *tsk)
{
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}
 109
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                /* get_cpu() disables preemption while we edit this CPU's TSS */
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 * (set bits deny port access, so 0xff revokes everything)
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }

        drop_fpu(me);
}
 135
 136void show_regs_common(void)
 137{
 138        const char *vendor, *product, *board;
 139
 140        vendor = dmi_get_system_info(DMI_SYS_VENDOR);
 141        if (!vendor)
 142                vendor = "";
 143        product = dmi_get_system_info(DMI_PRODUCT_NAME);
 144        if (!product)
 145                product = "";
 146
 147        /* Board Name is optional */
 148        board = dmi_get_system_info(DMI_BOARD_NAME);
 149
 150        printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
 151               current->pid, current->comm, print_tainted(),
 152               init_utsname()->release,
 153               (int)strcspn(init_utsname()->version, " "),
 154               init_utsname()->version,
 155               vendor, product,
 156               board ? "/" : "",
 157               board ? board : "");
 158}
 159
/*
 * Reset per-thread hardware state for the current task: ptrace hw
 * breakpoints, TLS descriptors and FPU state are all dropped.
 */
void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        drop_fpu(tsk);
}
 168
/* Set CR4.TSD on this CPU so user-mode RDTSC faults. */
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}
 173
/*
 * Set TIF_NOTSC for the current task; on the flag's 0->1 transition also
 * disable user RDTSC on this CPU. Preemption is held off so the flag and
 * the CR4 change happen on the same CPU.
 */
void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}
 185
/* Clear CR4.TSD on this CPU so user-mode RDTSC is allowed again. */
static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}
 190
/*
 * Clear TIF_NOTSC for the current task; on the flag's 1->0 transition also
 * re-enable user RDTSC on this CPU (mirror image of disable_TSC()).
 */
static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}
 202
 203int get_tsc_mode(unsigned long adr)
 204{
 205        unsigned int val;
 206
 207        if (test_thread_flag(TIF_NOTSC))
 208                val = PR_TSC_SIGSEGV;
 209        else
 210                val = PR_TSC_ENABLE;
 211
 212        return put_user(val, (unsigned int __user *)adr);
 213}
 214
 215int set_tsc_mode(unsigned int val)
 216{
 217        if (val == PR_TSC_SIGSEGV)
 218                disable_TSC();
 219        else if (val == PR_TSC_ENABLE)
 220                enable_TSC();
 221        else
 222                return -EINVAL;
 223
 224        return 0;
 225}
 226
/*
 * Handle the rarely-used per-task hardware state on a context switch:
 * block-step (DEBUGCTLMSR.BTF), user RDTSC disable (CR4.TSD), I/O
 * permission bitmaps and user-return notifiers. @tss is the outgoing
 * CPU's TSS, whose io_bitmap must track the incoming task.
 */
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        /* Touch the MSR only when the two tasks disagree on TIF_BLOCKSTEP */
        if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
            test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl &= ~DEBUGCTLMSR_BTF;
                if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
                        debugctl |= DEBUGCTLMSR_BTF;

                update_debugctlmsr(debugctl);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 * (max() also overwrites ports the previous task had open)
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
        propagate_user_return_notify(prev_p, next_p);
}
 270
/* Classic fork(): SIGCHLD on exit, child starts on the same stack (regs->sp). */
int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}
 275
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        /* CLONE_VM: share the address space; CLONE_VFORK: parent sleeps
         * until the child execs or exits. */
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}
 291
 292long
 293sys_clone(unsigned long clone_flags, unsigned long newsp,
 294          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
 295{
 296        if (!newsp)
 297                newsp = regs->sp;
 298        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
 299}
 300
 301/*
 302 * This gets run with %si containing the
 303 * function to call, and %di containing
 304 * the "args".
 305 */
 306extern void kernel_thread_helper(void);
 307
/*
 * Create a kernel thread: build a synthetic pt_regs that makes the child
 * start in kernel_thread_helper(), which calls fn(arg) (see the %si/%di
 * convention documented above), then clone it with CLONE_VM.
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        /* kernel_thread_helper() expects: %si = function, %di = argument */
        regs.si = (unsigned long) fn;
        regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
        regs.gs = __KERNEL_STACK_CANARY;
#else
        regs.ss = __KERNEL_DS;
#endif

        /* orig_ax = -1: presumably marks "not in a syscall" - TODO confirm */
        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
        regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
 338
/*
 * sys_execve() executes a new program.
 * Returns 0 on success (the caller then returns into the new image) or a
 * negative errno from getname()/do_execve().
 */
long sys_execve(const char __user *name,
                const char __user *const __user *argv,
                const char __user *const __user *envp, struct pt_regs *regs)
{
        long error;
        char *filename;

        /* Copy the pathname in from user space; returns ERR_PTR on failure. */
        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
#endif

        putname(filename);
        return error;
}
 365
/*
 * Idle related variables and functions
 */
/* Records any "idle=" boot override; IDLE_NO_OVERRIDE means auto-select. */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Powermanagement idle function, if any..
 * Chosen in select_idle_routine() or forced by the "idle=" parameter.
 */
void (*pm_idle)(void);
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif
 379
/* Always true here; default_idle() keys its HLT path off this predicate. */
static inline int hlt_use_halt(void)
{
        return 1;
}
 384
#ifndef CONFIG_SMP
/* On UP kernels a CPU can never go offline, so reaching this is a bug. */
static inline void play_dead(void)
{
        BUG();
}
#endif
 391
#ifdef CONFIG_X86_64
/* Mark this CPU as idle and run the IDLE_START notifier chain. */
void enter_idle(void)
{
        this_cpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}
 398
/*
 * Clear the per-cpu idle flag; IDLE_END notifiers fire only on an actual
 * 1->0 transition, so this is safe to call redundantly.
 */
static void __exit_idle(void)
{
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
 405
 406/* Called from interrupts to signify idle end */
 407void exit_idle(void)
 408{
 409        /* idle loop has pid 0 */
 410        if (current->pid)
 411                return;
 412        __exit_idle();
 413}
 414#endif
 415
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us.  CPU0 already has it initialized but no harm in
         * doing it again.  This is a good place for updating it, as
         * we wont ever return from this function (so the invalid
         * canaries already on the stack wont ever trigger).
         */
        boot_init_stack_canary();
        current_thread_info()->status |= TS_POLLING;

        while (1) {
                tick_nohz_idle_enter();

                while (!need_resched()) {
                        /*
                         * NOTE(review): barrier before re-reading state;
                         * presumably part of the TS_POLLING wakeup protocol
                         * with the scheduler - confirm.
                         */
                        rmb();

                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();

                        /*
                         * Idle routines should keep interrupts disabled
                         * from here on, until they go to idle.
                         * Otherwise, idle callbacks can misfire.
                         */
                        local_touch_nmi();
                        local_irq_disable();

                        enter_idle();

                        /* Don't trace irqs off for idle */
                        stop_critical_timings();

                        /* enter_idle() needs rcu for notifiers */
                        rcu_idle_enter();

                        /* Prefer cpuidle; nonzero return falls back to pm_idle */
                        if (cpuidle_idle_call())
                                pm_idle();

                        rcu_idle_exit();
                        start_critical_timings();

                        /* In many cases the interrupt that ended idle
                           has already called exit_idle. But some idle
                           loops can be woken up without interrupt. */
                        __exit_idle();
                }

                tick_nohz_idle_exit();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}
 477
/*
 * We use this if we don't have any better
 * idle routine..
 * Called with interrupts disabled (see cpu_idle()); the HLT path
 * re-enables them via safe_halt().
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle_rcuidle(1, smp_processor_id());
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end_rcuidle(smp_processor_id());
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
 507#ifdef CONFIG_APM_MODULE
 508EXPORT_SYMBOL(default_idle);
 509#endif
 510
 511bool set_pm_idle_to_default(void)
 512{
 513        bool ret = !!pm_idle;
 514
 515        pm_idle = default_idle;
 516
 517        return ret;
 518}
/*
 * Park the calling CPU forever: mark it offline, shut off its local APIC
 * and spin in HLT with interrupts disabled. @dummy is unused
 * (smp_call_function-style signature - NOTE(review): callers not visible here).
 */
void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}
 533
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        if (!need_resched()) {
                trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle_rcuidle(1, smp_processor_id());
                /* Erratum workaround: flush the monitored line first on
                 * CPUs flagged with CLFLUSH_MONITOR. */
                if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                /* Arm the monitor on the thread-flags word... */
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                /* ...and only mwait if no reschedule slipped in meanwhile. */
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end_rcuidle(smp_processor_id());
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else
                local_irq_enable();
}
 554
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        /* Busy-wait with interrupts enabled until a reschedule is needed. */
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end_rcuidle(smp_processor_id());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
 570
 571/*
 572 * mwait selection logic:
 573 *
 574 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 575 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 576 * then depend on a clock divisor and current Pstate of the core. If
 577 * all cores of a processor are in halt state (C1) the processor can
 578 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 579 * happen.
 580 *
 581 * idle=mwait overrides this decision and forces the usage of mwait.
 582 */
 583
/* CPUID leaf 5: MONITOR/MWAIT feature enumeration */
#define MWAIT_INFO                      0x05
/* ECX bit 0: extended MWAIT info (C-state sub-states in EDX) is valid */
#define MWAIT_ECX_EXTENDED_INFO         0x01
/* EDX bits 4-7: number of C1 sub-states supported using MWAIT */
#define MWAIT_EDX_C1                    0xf0
 587
 588int mwait_usable(const struct cpuinfo_x86 *c)
 589{
 590        u32 eax, ebx, ecx, edx;
 591
 592        /* Use mwait if idle=mwait boot option is given */
 593        if (boot_option_idle_override == IDLE_FORCE_MWAIT)
 594                return 1;
 595
 596        /*
 597         * Any idle= boot option other than idle=mwait means that we must not
 598         * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
 599         */
 600        if (boot_option_idle_override != IDLE_NO_OVERRIDE)
 601                return 0;
 602
 603        if (c->cpuid_level < MWAIT_INFO)
 604                return 0;
 605
 606        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
 607        /* Check, whether EDX has extended info about MWAIT */
 608        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
 609                return 1;
 610
 611        /*
 612         * edx enumeratios MONITOR/MWAIT extensions. Check, whether
 613         * C1  supports MWAIT
 614         */
 615        return (edx & MWAIT_EDX_C1);
 616}
 617
/* Set (sticky, never cleared) once C1E activity is first observed. */
bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

/* CPUs already switched to broadcast timer mode (see amd_e400_idle()). */
static cpumask_var_t amd_e400_c1e_mask;
 622
/* Forget that @cpu was put in broadcast mode (mask may not be allocated yet). */
void amd_e400_remove_cpu(int cpu)
{
        if (amd_e400_c1e_mask != NULL)
                cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}
 628
/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
        if (need_resched())
                return;

        /* Detection is sticky: once C1E is seen, every CPU takes the slow path */
        if (!amd_e400_c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        amd_e400_c1e_detected = true;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        pr_info("System has AMD C1E enabled\n");
                }
        }

        if (amd_e400_c1e_detected) {
                int cpu = smp_processor_id();

                /* First time on this CPU: switch it to broadcast timer mode */
                if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
                        cpumask_set_cpu(cpu, amd_e400_c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere.
                         */
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        pr_info("Switch to broadcast mode on CPU%d\n", cpu);
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}
 678
/*
 * Pick pm_idle based on the boot CPU's capabilities. Selection happens
 * only once: later CPUs hit the early return because pm_idle is set.
 */
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        /* Polling idle on an HT sibling starves the other thread. */
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => All CPUs supports mwait
                 */
                pr_info("using mwait in idle threads\n");
                pm_idle = mwait_idle;
        } else if (cpu_has_amd_erratum(amd_erratum_400)) {
                /* E400: APIC timer interrupt does not wake up CPU from C1e */
                pr_info("using AMD E400 aware idle routine\n");
                pm_idle = amd_e400_idle;
        } else
                pm_idle = default_idle;
}
 702
/* Allocate the broadcast-mode cpumask, but only if the E400 idle routine won. */
void __init init_amd_e400_c1e_mask(void)
{
        /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
        if (pm_idle == amd_e400_idle)
                zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}
 709
 710static int __init idle_setup(char *str)
 711{
 712        if (!str)
 713                return -EINVAL;
 714
 715        if (!strcmp(str, "poll")) {
 716                pr_info("using polling idle threads\n");
 717                pm_idle = poll_idle;
 718                boot_option_idle_override = IDLE_POLL;
 719        } else if (!strcmp(str, "mwait")) {
 720                boot_option_idle_override = IDLE_FORCE_MWAIT;
 721                WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
 722        } else if (!strcmp(str, "halt")) {
 723                /*
 724                 * When the boot option of idle=halt is added, halt is
 725                 * forced to be used for CPU idle. In such case CPU C2/C3
 726                 * won't be used again.
 727                 * To continue to load the CPU idle driver, don't touch
 728                 * the boot_option_idle_override.
 729                 */
 730                pm_idle = default_idle;
 731                boot_option_idle_override = IDLE_HALT;
 732        } else if (!strcmp(str, "nomwait")) {
 733                /*
 734                 * If the boot option of "idle=nomwait" is added,
 735                 * it means that mwait will be disabled for CPU C2/C3
 736                 * states. In such case it won't touch the variable
 737                 * of boot_option_idle_override.
 738                 */
 739                boot_option_idle_override = IDLE_NOMWAIT;
 740        } else
 741                return -1;
 742
 743        return 0;
 744}
 745early_param("idle", idle_setup);
 746
 747unsigned long arch_align_stack(unsigned long sp)
 748{
 749        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
 750                sp -= get_random_int() % 8192;
 751        return sp & ~0xf;
 752}
 753
 754unsigned long arch_randomize_brk(struct mm_struct *mm)
 755{
 756        unsigned long range_end = mm->brk + 0x02000000;
 757        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 758}
 759
 760
/* Source originally viewed via lxr.linux.no (hosted by Redpill Linpro AS). */