linux/kernel/ptrace.c
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>


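/*
 * Sleep action passed to wait_on_bit() in ptrace_attach(): just yield the
 * CPU and let wait_on_bit() re-check JOBCTL_TRAPPING_BIT.
 */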
static int ptrace_trapping_sleep_fn(void *flags)
{
        schedule();
        return 0;
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);

        spin_lock(&child->sighand->siglock);

        /*
         * Clear all pending traps and TRAPPING.  TRAPPING should be
         * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
         */
        task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
        task_clear_jobctl_trapping(child);

        /*
         * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
         * @child isn't dead.
         */
        if (!(child->flags & PF_EXITING) &&
            (child->signal->flags & SIGNAL_STOP_STOPPED ||
             child->signal->group_stop_count))
                child->jobctl |= JOBCTL_STOP_PENDING;

        /*
         * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
         * @child in the butt.  Note that @resume should be used iff @child
         * is in TASK_TRACED; otherwise, we might unduly disrupt
         * TASK_KILLABLE sleeps.
         */
        if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
                signal_wake_up(child, task_is_traced(child));

        spin_unlock(&child->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks.  After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                spin_lock_irq(&child->sighand->siglock);
                WARN_ON_ONCE(task_is_stopped(child));
                if (ignore_state || (task_is_traced(child) &&
                                     !(child->jobctl & JOBCTL_LISTENING)))
                        ret = 0;
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!ret && !ignore_state)
                ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

        /* All systems go.. */
        return ret;
}

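/*
 * Core permission check shared by ptrace attach and /proc access.
 * Returns 0 if access is allowed and -errno otherwise.  Callers in this
 * file take task_lock(task) around it (see ptrace_may_access() and
 * ptrace_attach()) so the credential and ->mm checks see a stable view
 * of the target.
 */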
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        const struct cred *cred = current_cred(), *tcred;

        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
        int dumpable = 0;
        /* Don't let security modules deny introspection */
        if (task == current)
                return 0;
        rcu_read_lock();
        tcred = __task_cred(task);
        if (cred->user->user_ns == tcred->user->user_ns &&
            (cred->uid == tcred->euid &&
             cred->uid == tcred->suid &&
             cred->uid == tcred->uid  &&
             cred->gid == tcred->egid &&
             cred->gid == tcred->sgid &&
             cred->gid == tcred->gid))
                goto ok;
        if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
                goto ok;
        rcu_read_unlock();
        return -EPERM;
ok:
        rcu_read_unlock();
        smp_rmb();
        if (task->mm)
                dumpable = get_dumpable(task->mm);
        if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
                return -EPERM;

        return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;
        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}

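/*
 * Attach to @task as its new ptracer.  PTRACE_ATTACH queues SIGSTOP so
 * the tracee traps; PTRACE_SEIZE leaves it running until an explicit
 * PTRACE_INTERRUPT or other trap condition.  Returns 0 on success or a
 * negative error code.
 */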
static int ptrace_attach(struct task_struct *task, long request,
                         unsigned long flags)
{
        bool seize = (request == PTRACE_SEIZE);
        int retval;

        /*
         * SEIZE will enable new ptrace behaviors which will be implemented
         * gradually.  SEIZE_DEVEL is used to prevent applications
         * expecting full SEIZE behaviors trapping on kernel commits which
         * are still in the process of implementing them.
         *
         * Only test programs for new ptrace behaviors being implemented
         * should set SEIZE_DEVEL.  If unset, SEIZE will fail with -EIO.
         *
         * Once SEIZE behaviors are completely implemented, this flag and
         * the following test will be removed.
         */
        retval = -EIO;
        if (seize && !(flags & PTRACE_SEIZE_DEVEL))
                goto out;

        audit_ptrace(task);

        retval = -EPERM;
        if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;

        /*
         * Protect exec's credential calculations against our
         * interference; SUID, SGID and LSM creds get determined
         * differently under ptrace.
         */
        retval = -ERESTARTNOINTR;
        if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;

        task_lock(task);
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
        task_unlock(task);
        if (retval)
                goto unlock_creds;

        write_lock_irq(&tasklist_lock);
        retval = -EPERM;
        if (unlikely(task->exit_state))
                goto unlock_tasklist;
        if (task->ptrace)
                goto unlock_tasklist;

        task->ptrace = PT_PTRACED;
        if (seize)
                task->ptrace |= PT_SEIZED;
        if (task_ns_capable(task, CAP_SYS_PTRACE))
                task->ptrace |= PT_PTRACE_CAP;

        __ptrace_link(task, current);

        /* SEIZE doesn't trap tracee on attach */
        if (!seize)
                send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

        spin_lock(&task->sighand->siglock);

        /*
         * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
         * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
         * will be cleared if the child completes the transition or any
         * event which clears the group stop states happens.  We'll wait
         * for the transition to complete before returning from this
         * function.
         *
         * This hides STOPPED -> RUNNING -> TRACED transition from the
         * attaching thread but a different thread in the same group can
         * still observe the transient RUNNING state.  IOW, if another
         * thread's WNOHANG wait(2) on the stopped tracee races against
         * ATTACH, the wait(2) may fail due to the transient RUNNING.
         *
         * The following task_is_stopped() test is safe as both transitions
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task) &&
            task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
                signal_wake_up(task, 1);

        spin_unlock(&task->sighand->siglock);

        retval = 0;
unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
out:
        if (!retval) {
                wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
                            ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
                proc_ptrace_connector(task, PTRACE_ATTACH);
        }

        return retval;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
        int ret = -EPERM;

        write_lock_irq(&tasklist_lock);
        /* Are we already being traced? */
        if (!current->ptrace) {
                ret = security_ptrace_traceme(current->parent);
                /*
                 * Check PF_EXITING to ensure ->real_parent has not passed
                 * exit_ptrace(). Otherwise we don't report the error but
                 * pretend ->real_parent untraces us right after return.
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
                        __ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);

        return ret;
}

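/*
 * Illustrative userspace counterpart of the PTRACE_TRACEME path above
 * (sketch only; headers and error handling omitted).  The child marks
 * itself traced before exec, stops with SIGTRAP when the exec completes,
 * and the parent then drives it with waitpid()/ptrace():
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execlp("ls", "ls", NULL);
 *	} else {
 *		int status;
 *		waitpid(pid, &status, 0);	// observe the exec stop
 *		ptrace(PTRACE_CONT, pid, NULL, 0);
 *	}
 */
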
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
        int ret;
        spin_lock(&sigh->siglock);
        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
        spin_unlock(&sigh->siglock);
        return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
        bool dead;

        __ptrace_unlink(p);

        if (p->exit_state != EXIT_ZOMBIE)
                return false;

        dead = !thread_group_leader(p);

        if (!dead && thread_group_empty(p)) {
                if (!same_thread_group(p->real_parent, tracer))
                        dead = do_notify_parent(p, p->exit_signal);
                else if (ignoring_children(tracer->sighand)) {
                        __wake_up_parent(p, tracer);
                        dead = true;
                }
        }
        /* Mark it as in the process of being reaped. */
        if (dead)
                p->exit_state = EXIT_DEAD;
        return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
        bool dead = false;

        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        write_lock_irq(&tasklist_lock);
        /*
         * This child can be already killed. Make sure de_thread() or
         * our sub-thread doing do_wait() didn't do release_task() yet.
         */
        if (child->ptrace) {
                child->exit_code = data;
                dead = __ptrace_detach(current, child);
        }
        write_unlock_irq(&tasklist_lock);

        proc_ptrace_connector(child, PTRACE_DETACH);
        if (unlikely(dead))
                release_task(child);

        return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);

        if (likely(list_empty(&tracer->ptraced)))
                return;

        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, &ptrace_dead);
        }

        write_unlock_irq(&tasklist_lock);
        BUG_ON(!list_empty(&tracer->ptraced));

        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }

        write_lock_irq(&tasklist_lock);
}

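/*
 * Copy @len bytes from the tracee's address space at @src into the user
 * buffer @dst, bouncing through a small on-stack buffer.  Returns the
 * number of bytes copied, -EIO if nothing could be read, or -EFAULT if
 * the copy back to userspace faulted.
 */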
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

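/*
 * Mirror image of ptrace_readdata(): copy @len bytes from the user
 * buffer @src into the tracee's address space at @dst.  Returns the
 * number of bytes written, -EIO if nothing could be written, or
 * -EFAULT if reading the source buffer faulted.
 */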
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

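/*
 * Translate the PTRACE_O_* bits in @data into PT_* flags on
 * @child->ptrace.  Note that unknown bits yield -EINVAL only after the
 * recognized options have already been applied.
 */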
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
        child->ptrace &= ~PT_TRACE_MASK;

        if (data & PTRACE_O_TRACESYSGOOD)
                child->ptrace |= PT_TRACESYSGOOD;

        if (data & PTRACE_O_TRACEFORK)
                child->ptrace |= PT_TRACE_FORK;

        if (data & PTRACE_O_TRACEVFORK)
                child->ptrace |= PT_TRACE_VFORK;

        if (data & PTRACE_O_TRACECLONE)
                child->ptrace |= PT_TRACE_CLONE;

        if (data & PTRACE_O_TRACEEXEC)
                child->ptrace |= PT_TRACE_EXEC;

        if (data & PTRACE_O_TRACEVFORKDONE)
                child->ptrace |= PT_TRACE_VFORK_DONE;

        if (data & PTRACE_O_TRACEEXIT)
                child->ptrace |= PT_TRACE_EXIT;

        return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

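/*
 * PTRACE_GETSIGINFO/PTRACE_SETSIGINFO back ends: access the siginfo of
 * the signal that put the tracee into its current trap.  -ESRCH if the
 * sighand is already gone, -EINVAL if the tracee is not in a trap with
 * ->last_siginfo set.
 */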
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *info = *child->last_siginfo;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = *info;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}


#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)          0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)         0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)   0
#endif

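/*
 * Common resume path for PTRACE_CONT, PTRACE_SYSCALL, PTRACE_KILL and
 * the single-step/block-step/sysemu variants: program the syscall
 * tracing/emulation flags and stepping state, stash the signal to be
 * delivered in ->exit_code and wake the tracee out of TASK_TRACED.
 */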
static int ptrace_resume(struct task_struct *child, long request,
                         unsigned long data)
{
        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else {
                user_disable_single_step(child);
        }

        child->exit_code = data;
        wake_up_state(child, __TASK_TRACED);

        return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
        const struct user_regset *regset;
        int n;

        for (n = 0; n < view->n; ++n) {
                regset = view->regsets + n;
                if (regset->core_note_type == type)
                        return regset;
        }

        return NULL;
}

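/*
 * Back end for PTRACE_GETREGSET/PTRACE_SETREGSET.  @type is an NT_*
 * core note type selecting one regset from the task's regset view and
 * @kiov describes the user buffer; iov_len is clamped to the regset
 * size so the caller can report back how much was transferred.
 */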
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
                         struct iovec *kiov)
{
        const struct user_regset_view *view = task_user_regset_view(task);
        const struct user_regset *regset = find_regset(view, type);
        int regset_no;

        if (!regset || (kiov->iov_len % regset->size) != 0)
                return -EINVAL;

        regset_no = regset - view->regsets;
        kiov->iov_len = min(kiov->iov_len,
                            (__kernel_size_t) (regset->n * regset->size));

        if (req == PTRACE_GETREGSET)
                return copy_regset_to_user(task, view, regset_no, 0,
                                           kiov->iov_len, kiov->iov_base);
        else
                return copy_regset_from_user(task, view, regset_no, 0,
                                             kiov->iov_len, kiov->iov_base);
}

#endif

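/*
 * Handle the ptrace requests that are common to all architectures.
 * Architecture code typically falls back to this from arch_ptrace()
 * for anything it does not handle itself; unknown requests fail with
 * -EIO.
 */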
int ptrace_request(struct task_struct *child, long request,
                   unsigned long addr, unsigned long data)
{
        bool seized = child->ptrace & PT_SEIZED;
        int ret = -EIO;
        siginfo_t siginfo, *si;
        void __user *datavp = (void __user *) data;
        unsigned long __user *datalp = datavp;
        unsigned long flags;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, datalp);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user(datavp, &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                if (copy_from_user(&siginfo, datavp, sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_INTERRUPT:
                /*
                 * Stop tracee without any side-effect on signal or job
                 * control.  At least one trap is guaranteed to happen
                 * after this request.  If @child is already trapped, the
                 * current trap is not disturbed and another trap will
                 * happen after the current trap is ended with PTRACE_CONT.
                 *
                 * The actual trap might not be PTRACE_EVENT_STOP trap but
                 * the pending condition is cleared regardless.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                /*
                 * INTERRUPT doesn't disturb existing trap sans one
                 * exception.  If ptracer issued LISTEN for the current
                 * STOP, this INTERRUPT should clear LISTEN and re-trap
                 * tracee into STOP.
                 */
                if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
                        signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

                unlock_task_sighand(child, &flags);
                ret = 0;
                break;

        case PTRACE_LISTEN:
                /*
                 * Listen for events.  Tracee must be in STOP.  It's not
                 * resumed per-se but is not considered to be in TRACED by
                 * wait(2) or ptrace(2).  If an async event (e.g. group
                 * stop state change) happens, tracee will enter STOP trap
                 * again.  Alternatively, ptracer can issue INTERRUPT to
                 * finish listening and re-trap tracee into STOP.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                si = child->last_siginfo;
                if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
                        child->jobctl |= JOBCTL_LISTENING;
                        /*
                         * If NOTIFY is set, it means event happened between
                         * start of this trap and now.  Trigger re-trap.
                         */
                        if (child->jobctl & JOBCTL_TRAP_NOTIFY)
                                signal_wake_up(child, true);
                        ret = 0;
                }
                unlock_task_sighand(child, &flags);
                break;

        case PTRACE_DETACH:      /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
        case PTRACE_GETFDPIC: {
                struct mm_struct *mm = get_task_mm(child);
                unsigned long tmp = 0;

                ret = -ESRCH;
                if (!mm)
                        break;

                switch (addr) {
                case PTRACE_GETFDPIC_EXEC:
                        tmp = mm->context.exec_fdpic_loadmap;
                        break;
                case PTRACE_GETFDPIC_INTERP:
                        tmp = mm->context.interp_fdpic_loadmap;
                        break;
                default:
                        break;
                }
                mmput(mm);

                ret = put_user(tmp, datalp);
                break;
        }
#endif

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state)  /* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct iovec __user *uiov = datavp;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(kiov.iov_base, &uiov->iov_base) ||
                    __get_user(kiov.iov_len, &uiov->iov_len))
                        return -EFAULT;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif
        default:
                break;
        }

        return ret;
}

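/*
 * Look up the tracee by PID in the caller's namespace and take a
 * reference on it.  Returns ERR_PTR(-ESRCH) if there is no such task;
 * otherwise the caller must drop the reference with put_task_struct().
 */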
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        rcu_read_lock();
        child = find_task_by_vpid(pid);
        if (child)
                get_task_struct(child);
        rcu_read_unlock();

        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)       do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                unsigned long, data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}

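/*
 * Default PTRACE_PEEKTEXT/PEEKDATA and POKETEXT/POKEDATA helpers: move a
 * single word between the tracee's address space and the tracer.  A
 * partial transfer is treated as failure and returns -EIO.
 */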
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        unsigned long tmp;
        int copied;

        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        int copied;

        copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
        return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = access_process_vm(child, addr, &word, sizeof(word), 0);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = access_process_vm(child, addr, &data, sizeof(data), 1);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                memset(&siginfo, 0, sizeof siginfo);
                if (copy_siginfo_from_user32(
                            &siginfo, (struct compat_siginfo __user *) datap))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct compat_iovec __user *uiov =
                        (struct compat_iovec __user *) datap;
                compat_uptr_t ptr;
                compat_size_t len;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(ptr, &uiov->iov_base) ||
                    __get_user(len, &uiov->iov_len))
                        return -EFAULT;

                kiov.iov_base = compat_ptr(ptr);
                kiov.iov_len = len;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (!ret)
                ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}
#endif  /* CONFIG_COMPAT */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
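/*
 * Pin/unpin the tracee's hardware breakpoint state around ptrace
 * operations that touch it: ptrace_get_breakpoints() fails if the
 * refcount has already dropped to zero, and the final
 * ptrace_put_breakpoints() flushes the breakpoints.
 */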
int ptrace_get_breakpoints(struct task_struct *tsk)
{
        if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
                return 0;

        return -1;
}

void ptrace_put_breakpoints(struct task_struct *tsk)
{
        if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
                flush_ptrace_hw_breakpoint(tsk);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
