linux/kernel/ptrace.c
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>


static int ptrace_trapping_sleep_fn(void *flags)
{
        schedule();
        return 0;
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);

        spin_lock(&child->sighand->siglock);

        /*
         * Clear all pending traps and TRAPPING.  TRAPPING should be
         * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
         */
        task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
        task_clear_jobctl_trapping(child);

        /*
         * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
         * @child isn't dead.
         */
        if (!(child->flags & PF_EXITING) &&
            (child->signal->flags & SIGNAL_STOP_STOPPED ||
             child->signal->group_stop_count)) {
                child->jobctl |= JOBCTL_STOP_PENDING;

                /*
                 * This is only possible if this thread was cloned by the
                 * traced task running in the stopped group; set the signal
                 * for future reports.
                 * FIXME: we should change ptrace_init_task() to handle this
                 * case.
                 */
                if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
                        child->jobctl |= SIGSTOP;
        }

        /*
         * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
         * @child in the butt.  Note that @resume should be used iff @child
         * is in TASK_TRACED; otherwise, we might unduly disrupt
         * TASK_KILLABLE sleeps.
         */
        if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
                ptrace_signal_wake_up(child, true);

        spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
        bool ret = false;

        /* Lockless, nobody but us can set this flag */
        if (task->jobctl & JOBCTL_LISTENING)
                return ret;

        spin_lock_irq(&task->sighand->siglock);
        if (task_is_traced(task) && !__fatal_signal_pending(task)) {
                task->state = __TASK_TRACED;
                ret = true;
        }
        spin_unlock_irq(&task->sighand->siglock);

        return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
        if (task->state != __TASK_TRACED)
                return;

        WARN_ON(!task->ptrace || task->parent != current);

        spin_lock_irq(&task->sighand->siglock);
        if (__fatal_signal_pending(task))
                wake_up_state(task, __TASK_TRACED);
        else
                task->state = TASK_TRACED;
        spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks.  After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if (child->ptrace && child->parent == current) {
                WARN_ON(child->state == __TASK_TRACED);
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                if (ignore_state || ptrace_freeze_traced(child))
                        ret = 0;
        }
        read_unlock(&tasklist_lock);

        if (!ret && !ignore_state) {
                if (!wait_task_inactive(child, __TASK_TRACED)) {
                        /*
                         * This can only happen if may_ptrace_stop() fails and
                         * ptrace_stop() changes ->state back to TASK_RUNNING,
                         * so we should not worry about leaking __TASK_TRACED.
                         */
                        WARN_ON(child->state == __TASK_TRACED);
                        ret = -ESRCH;
                }
        }

        return ret;
}

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
        if (mode & PTRACE_MODE_NOAUDIT)
                return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
        else
                return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        const struct cred *cred = current_cred(), *tcred;

        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
        int dumpable = 0;
        /* Don't let security modules deny introspection */
        if (task == current)
                return 0;
        rcu_read_lock();
        tcred = __task_cred(task);
        if (uid_eq(cred->uid, tcred->euid) &&
            uid_eq(cred->uid, tcred->suid) &&
            uid_eq(cred->uid, tcred->uid)  &&
            gid_eq(cred->gid, tcred->egid) &&
            gid_eq(cred->gid, tcred->sgid) &&
            gid_eq(cred->gid, tcred->gid))
                goto ok;
        if (ptrace_has_cap(tcred->user_ns, mode))
                goto ok;
        rcu_read_unlock();
        return -EPERM;
ok:
        rcu_read_unlock();
        smp_rmb();
        if (task->mm)
                dumpable = get_dumpable(task->mm);
        rcu_read_lock();
        if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
                rcu_read_unlock();
                return -EPERM;
        }
        rcu_read_unlock();

        return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;
        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}
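
/*
 * Usage sketch (userspace, not part of this file): the checks above are
 * what an unprivileged tracer trips over when it targets a task with
 * different credentials - the attach fails with EPERM.  A minimal,
 * illustrative probe, assuming a valid pid owned by another user:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *        #include <stdio.h>
 *
 *        static int try_attach(pid_t pid)
 *        {
 *                if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
 *                        perror("PTRACE_ATTACH");  // typically EPERM here
 *                        return -1;
 *                }
 *                return 0;
 *        }
 */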

static int ptrace_attach(struct task_struct *task, long request,
                         unsigned long addr,
                         unsigned long flags)
{
        bool seize = (request == PTRACE_SEIZE);
        int retval;

        retval = -EIO;
        if (seize) {
                if (addr != 0)
                        goto out;
                if (flags & ~(unsigned long)PTRACE_O_MASK)
                        goto out;
                flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
        } else {
                flags = PT_PTRACED;
        }

        audit_ptrace(task);

        retval = -EPERM;
        if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;

        /*
         * Protect exec's credential calculations against our interference;
         * SUID, SGID and LSM creds get determined differently
         * under ptrace.
         */
        retval = -ERESTARTNOINTR;
        if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;

        task_lock(task);
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
        task_unlock(task);
        if (retval)
                goto unlock_creds;

        write_lock_irq(&tasklist_lock);
        retval = -EPERM;
        if (unlikely(task->exit_state))
                goto unlock_tasklist;
        if (task->ptrace)
                goto unlock_tasklist;

        if (seize)
                flags |= PT_SEIZED;
        rcu_read_lock();
        if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
                flags |= PT_PTRACE_CAP;
        rcu_read_unlock();
        task->ptrace = flags;

        __ptrace_link(task, current);

        /* SEIZE doesn't trap tracee on attach */
        if (!seize)
                send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

        spin_lock(&task->sighand->siglock);

        /*
         * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
         * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
         * will be cleared if the child completes the transition or any
         * event which clears the group stop states happens.  We'll wait
         * for the transition to complete before returning from this
         * function.
         *
         * This hides STOPPED -> RUNNING -> TRACED transition from the
         * attaching thread but a different thread in the same group can
         * still observe the transient RUNNING state.  IOW, if another
         * thread's WNOHANG wait(2) on the stopped tracee races against
         * ATTACH, the wait(2) may fail due to the transient RUNNING.
         *
         * The following task_is_stopped() test is safe as both transitions
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task) &&
            task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
                signal_wake_up_state(task, __TASK_STOPPED);

        spin_unlock(&task->sighand->siglock);

        retval = 0;
unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
out:
        if (!retval) {
                wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
                            ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
                proc_ptrace_connector(task, PTRACE_ATTACH);
        }

        return retval;
}
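
/*
 * Usage sketch (userspace, not part of this file): what ptrace_attach()
 * looks like from the tracer's side.  PTRACE_ATTACH sends SIGSTOP, so the
 * tracer must reap the resulting stop with waitpid() before issuing
 * further requests; PTRACE_SEIZE attaches without trapping the tracee.
 * Minimal and illustrative, assuming a valid pid the caller may trace:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *        #include <sys/wait.h>
 *        #include <stdio.h>
 *
 *        static int attach_and_wait(pid_t pid)
 *        {
 *                int status;
 *
 *                if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
 *                        perror("PTRACE_ATTACH");
 *                        return -1;
 *                }
 *                if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
 *                        return -1;  // tracee died instead of stopping
 *                return 0;           // tracee is now quiescent in the stop
 *        }
 */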

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
        int ret = -EPERM;

        write_lock_irq(&tasklist_lock);
        /* Are we already being traced? */
        if (!current->ptrace) {
                ret = security_ptrace_traceme(current->parent);
                /*
                 * Check PF_EXITING to ensure ->real_parent has not passed
                 * exit_ptrace(). Otherwise we don't report the error but
                 * pretend ->real_parent untraces us right after return.
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
                        __ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);

        return ret;
}
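
/*
 * Usage sketch (userspace, not part of this file): PTRACE_TRACEME is the
 * tracee-side attach, typically issued between fork() and exec() so that
 * the real parent becomes the tracer and sees a SIGTRAP stop at exec:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *        #include <unistd.h>
 *
 *        static pid_t spawn_traced(char **argv)
 *        {
 *                pid_t pid = fork();
 *
 *                if (pid == 0) {
 *                        ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *                        execvp(argv[0], argv);  // parent sees the exec stop
 *                        _exit(127);             // only reached if exec failed
 *                }
 *                return pid;  // parent: waitpid() to observe the stop
 *        }
 */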

/*
 * Called with irqs disabled; returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
        int ret;
        spin_lock(&sigh->siglock);
        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
        spin_unlock(&sigh->siglock);
        return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now; in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
        bool dead;

        __ptrace_unlink(p);

        if (p->exit_state != EXIT_ZOMBIE)
                return false;

        dead = !thread_group_leader(p);

        if (!dead && thread_group_empty(p)) {
                if (!same_thread_group(p->real_parent, tracer))
                        dead = do_notify_parent(p, p->exit_signal);
                else if (ignoring_children(tracer->sighand)) {
                        __wake_up_parent(p, tracer);
                        dead = true;
                }
        }
        /* Mark it as in the process of being reaped. */
        if (dead)
                p->exit_state = EXIT_DEAD;
        return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
        bool dead = false;

        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        write_lock_irq(&tasklist_lock);
        /*
         * This child can be already killed. Make sure de_thread() or
         * our sub-thread doing do_wait() didn't do release_task() yet.
         */
        if (child->ptrace) {
                child->exit_code = data;
                dead = __ptrace_detach(current, child);
        }
        write_unlock_irq(&tasklist_lock);

        proc_ptrace_connector(child, PTRACE_DETACH);
        if (unlikely(dead))
                release_task(child);

        return 0;
}
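
/*
 * Usage sketch (userspace, not part of this file): the data argument of
 * PTRACE_DETACH is a signal number, validated by valid_signal() above,
 * that the tracee receives as it resumes (0 for none).  Assuming a tracee
 * currently stopped for the caller:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *        #include <signal.h>
 *
 *        static int detach_with_signal(pid_t pid, int sig)
 *        {
 *                return ptrace(PTRACE_DETACH, pid, NULL,
 *                              (void *)(long)sig) == -1 ? -1 : 0;
 *        }
 */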

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);

        if (likely(list_empty(&tracer->ptraced)))
                return;

        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (unlikely(p->ptrace & PT_EXITKILL))
                        send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, &ptrace_dead);
        }

        write_unlock_irq(&tasklist_lock);
        BUG_ON(!list_empty(&tracer->ptraced));

        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }

        write_lock_irq(&tasklist_lock);
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}
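
/*
 * Usage sketch (userspace, not part of this file): userspace reaches the
 * chunked copies above one word at a time via PTRACE_PEEKDATA, where -1
 * is a legal return value and errno disambiguates.  A hypothetical helper
 * reading len bytes from a stopped tracee:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *        #include <string.h>
 *        #include <errno.h>
 *
 *        static int peek_buf(pid_t pid, unsigned long src, char *dst, size_t len)
 *        {
 *                size_t off, n;
 *                long word;
 *
 *                for (off = 0; off < len; off += sizeof(long)) {
 *                        errno = 0;
 *                        word = ptrace(PTRACE_PEEKDATA, pid,
 *                                      (void *)(src + off), NULL);
 *                        if (word == -1 && errno)
 *                                return -1;
 *                        n = len - off < sizeof(long) ? len - off : sizeof(long);
 *                        memcpy(dst + off, &word, n);
 *                }
 *                return 0;
 *        }
 */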

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
        unsigned flags;

        if (data & ~(unsigned long)PTRACE_O_MASK)
                return -EINVAL;

        /* Avoid intermediate state when all opts are cleared */
        flags = child->ptrace;
        flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
        flags |= (data << PT_OPT_FLAG_SHIFT);
        child->ptrace = flags;

        return 0;
}
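
/*
 * Usage sketch (userspace, not part of this file): the PTRACE_O_* bits
 * shifted into ->ptrace above are set on a stopped tracee with
 * PTRACE_SETOPTIONS, e.g. to get PTRACE_EVENT stops for fork and exec:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *
 *        static int trace_fork_and_exec(pid_t pid)
 *        {
 *                long opts = PTRACE_O_TRACEFORK | PTRACE_O_TRACEEXEC;
 *
 *                return ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *                              (void *)opts) == -1 ? -1 : 0;
 *        }
 */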

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *info = *child->last_siginfo;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = *info;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}
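
/*
 * Usage sketch (userspace, not part of this file): at a signal-delivery
 * stop the tracer can read (or, with PTRACE_SETSIGINFO, rewrite) the
 * siginfo recorded in ->last_siginfo above:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *        #include <signal.h>
 *        #include <stdio.h>
 *
 *        static void show_stop_siginfo(pid_t pid)
 *        {
 *                siginfo_t si;
 *
 *                if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == 0)
 *                        printf("si_signo=%d si_code=%d\n",
 *                               si.si_signo, si.si_code);
 *        }
 */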


#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)          0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)         0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)   0
#endif

static int ptrace_resume(struct task_struct *child, long request,
                         unsigned long data)
{
        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else {
                user_disable_single_step(child);
        }

        child->exit_code = data;
        wake_up_state(child, __TASK_TRACED);

        return 0;
}
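
/*
 * Usage sketch (userspace, not part of this file): PTRACE_SYSCALL sets
 * TIF_SYSCALL_TRACE as above, so the resumed tracee traps at every
 * syscall entry and exit.  A minimal strace-style loop, assuming an
 * attached tracee that is currently stopped:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *        #include <sys/wait.h>
 *
 *        static void syscall_loop(pid_t pid)
 *        {
 *                int status;
 *
 *                for (;;) {
 *                        if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
 *                                break;
 *                        if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
 *                                break;
 *                        // stopped at entry or exit; inspect registers here
 *                }
 *        }
 */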

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
        const struct user_regset *regset;
        int n;

        for (n = 0; n < view->n; ++n) {
                regset = view->regsets + n;
                if (regset->core_note_type == type)
                        return regset;
        }

        return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
                         struct iovec *kiov)
{
        const struct user_regset_view *view = task_user_regset_view(task);
        const struct user_regset *regset = find_regset(view, type);
        int regset_no;

        if (!regset || (kiov->iov_len % regset->size) != 0)
                return -EINVAL;

        regset_no = regset - view->regsets;
        kiov->iov_len = min(kiov->iov_len,
                            (__kernel_size_t) (regset->n * regset->size));

        if (req == PTRACE_GETREGSET)
                return copy_regset_to_user(task, view, regset_no, 0,
                                           kiov->iov_len, kiov->iov_base);
        else
                return copy_regset_from_user(task, view, regset_no, 0,
                                             kiov->iov_len, kiov->iov_base);
}
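
/*
 * Usage sketch (userspace, not part of this file): the iovec-based
 * interface above is driven with PTRACE_GETREGSET, where the addr
 * argument carries the NT_* regset type and iov_len is truncated to what
 * was actually copied.  Assuming an x86-style user_regs_struct:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *        #include <sys/uio.h>
 *        #include <sys/user.h>
 *        #include <elf.h>
 *
 *        static int get_gp_regs(pid_t pid, struct user_regs_struct *regs)
 *        {
 *                struct iovec iov = { regs, sizeof(*regs) };
 *
 *                return ptrace(PTRACE_GETREGSET, pid,
 *                              NT_PRSTATUS, &iov) == -1 ? -1 : 0;
 *        }
 */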

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif

int ptrace_request(struct task_struct *child, long request,
                   unsigned long addr, unsigned long data)
{
        bool seized = child->ptrace & PT_SEIZED;
        int ret = -EIO;
        siginfo_t siginfo, *si;
        void __user *datavp = (void __user *) data;
        unsigned long __user *datalp = datavp;
        unsigned long flags;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, datalp);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user(datavp, &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                if (copy_from_user(&siginfo, datavp, sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_INTERRUPT:
                /*
                 * Stop tracee without any side-effect on signal or job
                 * control.  At least one trap is guaranteed to happen
                 * after this request.  If @child is already trapped, the
                 * current trap is not disturbed and another trap will
                 * happen after the current trap is ended with PTRACE_CONT.
                 *
                 * The actual trap might not be PTRACE_EVENT_STOP trap but
                 * the pending condition is cleared regardless.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                /*
                 * INTERRUPT doesn't disturb existing trap sans one
                 * exception.  If ptracer issued LISTEN for the current
                 * STOP, this INTERRUPT should clear LISTEN and re-trap
                 * tracee into STOP.
                 */
                if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
                        ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

                unlock_task_sighand(child, &flags);
                ret = 0;
                break;

        case PTRACE_LISTEN:
                /*
                 * Listen for events.  Tracee must be in STOP.  It's not
                 * resumed per se but is not considered to be in TRACED by
                 * wait(2) or ptrace(2).  If an async event (e.g. group
                 * stop state change) happens, tracee will enter STOP trap
                 * again.  Alternatively, ptracer can issue INTERRUPT to
                 * finish listening and re-trap tracee into STOP.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                si = child->last_siginfo;
                if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
                        child->jobctl |= JOBCTL_LISTENING;
                        /*
                         * If NOTIFY is set, it means event happened between
                         * start of this trap and now.  Trigger re-trap.
                         */
                        if (child->jobctl & JOBCTL_TRAP_NOTIFY)
                                ptrace_signal_wake_up(child, true);
                        ret = 0;
                }
                unlock_task_sighand(child, &flags);
                break;

        case PTRACE_DETACH:      /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
        case PTRACE_GETFDPIC: {
                struct mm_struct *mm = get_task_mm(child);
                unsigned long tmp = 0;

                ret = -ESRCH;
                if (!mm)
                        break;

                switch (addr) {
                case PTRACE_GETFDPIC_EXEC:
                        tmp = mm->context.exec_fdpic_loadmap;
                        break;
                case PTRACE_GETFDPIC_INTERP:
                        tmp = mm->context.interp_fdpic_loadmap;
                        break;
                default:
                        break;
                }
                mmput(mm);

                ret = put_user(tmp, datalp);
                break;
        }
#endif

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state)  /* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct iovec __user *uiov = datavp;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(kiov.iov_base, &uiov->iov_base) ||
                    __get_user(kiov.iov_len, &uiov->iov_len))
                        return -EFAULT;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif
        default:
                break;
        }

        return ret;
}
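
/*
 * Usage sketch (userspace, not part of this file): PTRACE_INTERRUPT and
 * PTRACE_LISTEN above are only valid on a tracee attached with
 * PTRACE_SEIZE.  Stopping a seized tracee without any signal side effect,
 * assuming a libc that exposes the SEIZE-era requests:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *        #include <sys/wait.h>
 *
 *        static int seize_and_stop(pid_t pid)
 *        {
 *                int status;
 *
 *                // no trap on attach; the tracee keeps running
 *                if (ptrace(PTRACE_SEIZE, pid, NULL, NULL) == -1)
 *                        return -1;
 *                if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) == -1)
 *                        return -1;
 *                // reap the PTRACE_EVENT_STOP trap
 *                return waitpid(pid, &status, 0) == -1 ? -1 : 0;
 *        }
 */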

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        rcu_read_lock();
        child = find_task_by_vpid(pid);
        if (child)
                get_task_struct(child);
        rcu_read_unlock();

        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)       do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                unsigned long, data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);
        if (ret || request != PTRACE_DETACH)
                ptrace_unfreeze_traced(child);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        unsigned long tmp;
        int copied;

        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        int copied;

        copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
        return (copied == sizeof(data)) ? 0 : -EIO;
}
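
/*
 * Usage sketch (userspace, not part of this file): the word-sized helpers
 * above back PTRACE_PEEKDATA and PTRACE_POKEDATA.  Patching a single word
 * in a stopped tracee:
 *
 *        #include <sys/ptrace.h>
 *        #include <sys/types.h>
 *
 *        static int poke_word(pid_t pid, unsigned long addr, long value)
 *        {
 *                return ptrace(PTRACE_POKEDATA, pid, (void *)addr,
 *                              (void *)value) == -1 ? -1 : 0;
 *        }
 */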

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = access_process_vm(child, addr, &word, sizeof(word), 0);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = access_process_vm(child, addr, &data, sizeof(data), 1);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                memset(&siginfo, 0, sizeof siginfo);
                if (copy_siginfo_from_user32(
                            &siginfo, (struct compat_siginfo __user *) datap))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct compat_iovec __user *uiov =
                        (struct compat_iovec __user *) datap;
                compat_uptr_t ptr;
                compat_size_t len;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(ptr, &uiov->iov_base) ||
                    __get_user(len, &uiov->iov_len))
                        return -EFAULT;

                kiov.iov_base = compat_ptr(ptr);
                kiov.iov_len = len;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (!ret) {
                ret = compat_arch_ptrace(child, request, addr, data);
                if (ret || request != PTRACE_DETACH)
                        ptrace_unfreeze_traced(child);
        }

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}
#endif  /* CONFIG_COMPAT */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ptrace_get_breakpoints(struct task_struct *tsk)
{
        if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
                return 0;

        return -1;
}

void ptrace_put_breakpoints(struct task_struct *tsk)
{
        if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
                flush_ptrace_hw_breakpoint(tsk);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
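
/*
 * Usage sketch (in-kernel, illustrative only): the refcount pair above is
 * meant to bracket any walk of the tracee's hardware breakpoint state,
 * roughly:
 *
 *        if (ptrace_get_breakpoints(tsk) < 0)
 *                return -ESRCH;
 *        // ... read or modify tsk's breakpoints here ...
 *        ptrace_put_breakpoints(tsk);
 */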