linux/arch/um/kernel/process.c
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"

/*
 * This is a per-cpu array.  A processor only modifies its own entry and
 * only reads its own entry, so it doesn't matter if other processors
 * are concurrently modifying theirs.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
        /* FIXME: Need to look up userspace_pid by cpu */
        return userspace_pid[0];
}

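/*
 * Map a host process ID back to a virtual CPU number, or -1 if the pid
 * doesn't belong to any of the ncpus processors.
 */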
int pid_to_processor_id(int pid)
{
        int i;

        for (i = 0; i < ncpus; i++) {
                if (cpu_tasks[i].pid == pid)
                        return i;
        }
        return -1;
}

void free_stack(unsigned long stack, int order)
{
        free_pages(stack, order);
}

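/*
 * Allocate 2^order pages for a kernel stack.  The atomic flag selects
 * GFP_ATOMIC for callers that cannot sleep in the allocator.
 */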
unsigned long alloc_stack(int order, int atomic)
{
        unsigned long page;
        gfp_t flags = GFP_KERNEL;

        if (atomic)
                flags = GFP_ATOMIC;
        page = __get_free_pages(flags, order);

        return page;
}

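/*
 * UML doesn't start kernel threads by handing the host an entry point;
 * the function and argument are stashed in the caller's thread request
 * and the child picks them up in new_thread_handler() below.
 */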
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        int pid;

        current->thread.request.u.thread.proc = fn;
        current->thread.request.u.thread.arg = arg;
        pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
                      &current->thread.regs, 0, NULL, NULL);
        return pid;
}
EXPORT_SYMBOL(kernel_thread);

static inline void set_current(struct task_struct *task)
{
        cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
                { external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

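/*
 * Switch execution to a new task.  When switch_threads() returns, this
 * task is running again; if whoever resumed us left a task in
 * saved_task, keep switching until no further switch is requested.
 */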
void *_switch_to(void *prev, void *next, void *last)
{
        struct task_struct *from = prev;
        struct task_struct *to = next;

        to->thread.prev_sched = from;
        set_current(to);

        do {
                current->thread.saved_task = NULL;

                switch_threads(&from->thread.switch_buf,
                               &to->thread.switch_buf);

                arch_switch_to(current);

                if (current->thread.saved_task)
                        show_regs(&(current->thread.regs));
                to = current->thread.saved_task;
                from = current;
        } while (current->thread.saved_task);

        return current->thread.prev_sched;
}

void interrupt_end(void)
{
        if (need_resched())
                schedule();
        if (test_tsk_thread_flag(current, TIF_SIGPENDING))
                do_signal();
}

void exit_thread(void)
{
}

void *get_current(void)
{
        return current;
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
        int (*fn)(void *), n;
        void *arg;

        if (current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        fn = current->thread.request.u.thread.proc;
        arg = current->thread.request.u.thread.arg;

        /*
         * The return value is 1 if the kernel thread execs a process,
         * 0 if it just exits
         */
        n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
        if (n == 1) {
                /* Handle any immediate reschedules or signals */
                interrupt_end();
                userspace(&current->thread.regs.regs);
        }
        else do_exit(0);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
        force_flush_all();

        schedule_tail(current->thread.prev_sched);

        /*
         * XXX: if interrupt_end() calls schedule, this call to
         * arch_switch_to isn't needed.  We may want to exploit this
         * to improve performance. -bb
         */
        arch_switch_to(current);

        current->thread.prev_sched = NULL;

        /* Handle any immediate reschedules or signals */
        interrupt_end();

        userspace(&current->thread.regs.regs);
}

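/*
 * Set up a child's thread state.  A forked process gets a copy of the
 * parent's registers with the syscall return value forced to 0 (and an
 * optional new stack pointer) and resumes in fork_handler(); a kernel
 * thread inherits the fn/arg request and resumes in new_thread_handler().
 */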
int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long stack_top, struct task_struct *p,
                struct pt_regs *regs)
{
        void (*handler)(void);
        int ret = 0;

        p->thread = (struct thread_struct) INIT_THREAD;

        if (current->thread.forking) {
                memcpy(&p->thread.regs.regs, &regs->regs,
                       sizeof(p->thread.regs.regs));
                REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
                if (sp != 0)
                        REGS_SP(p->thread.regs.regs.gp) = sp;

                handler = fork_handler;

                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        } else {
                get_safe_registers(p->thread.regs.regs.gp,
                                   p->thread.regs.regs.fp);
                p->thread.request.u.thread = current->thread.request.u.thread;
                handler = new_thread_handler;
        }

        new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

        if (current->thread.forking) {
                clear_flushed_tls(p);

                /* Set a new TLS for the child thread? */
                if (clone_flags & CLONE_SETTLS)
                        ret = arch_copy_tls(p);
        }

        return ret;
}

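/*
 * Run a callback on the initial (host) thread.  kmalloc_ok is cleared
 * around the call; presumably allocation isn't safe while execution is
 * off on the initial thread's stack rather than in a normal task.
 */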
void initial_thread_cb(void (*proc)(void *), void *arg)
{
        int save_kmalloc_ok = kmalloc_ok;

        kmalloc_ok = 0;
        initial_thread_cb_skas(proc, arg);
        kmalloc_ok = save_kmalloc_ok;
}

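/*
 * The idle loop.  Instead of executing a halt instruction, UML disables
 * its timer, asks the host to sleep until the next event is due
 * (idle_sleep()) and brackets the sleep with the usual nohz/RCU idle
 * notifications.
 */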
void default_idle(void)
{
        unsigned long long nsecs;

        while (1) {
                /* endless idle loop with no priority at all */

                /*
                 * although we are an idle CPU, we do not want to
                 * get into the scheduler unnecessarily.
                 */
                if (need_resched())
                        schedule();

                tick_nohz_idle_enter();
                rcu_idle_enter();
                nsecs = disable_timer();
                idle_sleep(nsecs);
                rcu_idle_exit();
                tick_nohz_idle_exit();
        }
}

void cpu_idle(void)
{
        cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
        default_idle();
}

int __cant_sleep(void)
{
        /* Is in_interrupt() really needed? */
        return in_atomic() || irqs_disabled() || in_interrupt();
}

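/*
 * Return nonzero if sp is not on the current kernel stack, i.e. it
 * belongs to a userspace context.  Masking sp down to the stack base
 * works because thread_info sits at the bottom of the kernel stack.
 */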
int user_context(unsigned long sp)
{
        unsigned long stack;

        stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
        return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

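/*
 * Run the UML-specific exitcalls, most recently registered first.  The
 * linker script gathers them between __uml_exitcall_begin and
 * __uml_exitcall_end, so the section is simply walked backwards.
 */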
void do_uml_exitcalls(void)
{
        exitcall_t *call;

        call = &__uml_exitcall_end;
        while (--call >= &__uml_exitcall_begin)
                (*call)();
}

char *uml_strdup(const char *string)
{
        return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

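/*
 * Thin wrappers around the uaccess helpers.  These appear to exist so
 * that UML code built outside the kernel proper (the os-Linux side,
 * which can't include kernel headers) can still reach them.
 */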
int copy_to_user_proc(void __user *to, void *from, int size)
{
        return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
        return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
        return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
        return strlen_user(str);
}

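/*
 * SIGIO can arrive on any virtual CPU.  CPUs other than 0 run only the
 * IPI handler and return 1, which apparently tells the caller to skip
 * the normal SIGIO processing; only CPU 0 (return 0) performs it.
 */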
int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
        int cpu = current_thread_info()->cpu;
        IPI_handler(cpu);
        if (cpu != 0)
                return 1;
#endif
        return 0;
}

int cpu(void)
{
        return current_thread_info()->cpu;
}

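/*
 * sysemu is the host ptrace extension (PTRACE_SYSEMU) that lets UML
 * intercept a child's system calls without the host executing them.
 * The level is 0 (off), 1 (PTRACE_SYSEMU) or 2 (PTRACE_SYSEMU_SINGLESTEP)
 * and is capped at whatever level the host was found to support.
 */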
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
        if (value > sysemu_supported)
                return;
        atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
        return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", get_using_sysemu());
        return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *pos)
{
        char tmp[2];

        if (copy_from_user(tmp, buf, 1))
                return -EFAULT;

        if (tmp[0] >= '0' && tmp[0] <= '2')
                set_using_sysemu(tmp[0] - '0');
        /* We use the first char, but pretend to write everything */
        return count;
}

static const struct file_operations sysemu_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = sysemu_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
        struct proc_dir_entry *ent;

        if (!sysemu_supported)
                return 0;

        ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

        if (ent == NULL) {
                printk(KERN_WARNING "Failed to register /proc/sysemu\n");
                return 0;
        }

        return 0;
}

late_initcall(make_proc_sysemu);

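/*
 * Report how a task is being single-stepped: 0 if it isn't being
 * ptraced with PT_DTRACE, 1 if it is stopped in a single-stepped
 * system call, 2 for plain instruction single-stepping.
 */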
int singlestepping(void *t)
{
        struct task_struct *task = t ? t : current;

        if (!(task->ptrace & PT_DTRACE))
                return 0;

        if (task->thread.singlestep_syscall)
                return 1;

        return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}
#endif

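/*
 * Find where a sleeping task is waiting: scan its kernel stack upwards
 * from the saved stack pointer, skip everything inside the scheduler,
 * and return the first kernel text address found above it.  This is a
 * heuristic, since every word on the stack is treated as a potential
 * return address.
 */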
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack_page, sp, ip;
        bool seen_sched = false;

        if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
                return 0;

        stack_page = (unsigned long) task_stack_page(p);
        /* Bail if the process has no kernel stack for some reason */
        if (stack_page == 0)
                return 0;

        sp = p->thread.switch_buf->JB_SP;
        /*
         * Bail if the stack pointer is below the bottom of the kernel
         * stack for some reason
         */
        if (sp < stack_page)
                return 0;

        while (sp < stack_page + THREAD_SIZE) {
                ip = *((unsigned long *) sp);
                if (in_sched_functions(ip))
                        /* Ignore everything until we're above the scheduler */
                        seen_sched = true;
                else if (kernel_text_address(ip) && seen_sched)
                        return ip;

                sp += sizeof(unsigned long);
        }

        return 0;
}

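/*
 * Copy the FPU state for an ELF core dump.  The registers live in the
 * host process backing this virtual CPU, so they are read back from it.
 */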
int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
        int cpu = current_thread_info()->cpu;

        return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}