/* linux/kernel/ptrace.c */
   1/*
   2 * linux/kernel/ptrace.c
   3 *
   4 * (C) Copyright 1999 Linus Torvalds
   5 *
   6 * Common interfaces for "ptrace()" which we do not want
   7 * to continually duplicate across every architecture.
   8 */
   9
  10#include <linux/capability.h>
  11#include <linux/module.h>
  12#include <linux/sched.h>
  13#include <linux/errno.h>
  14#include <linux/mm.h>
  15#include <linux/highmem.h>
  16#include <linux/pagemap.h>
  17#include <linux/smp_lock.h>
  18#include <linux/ptrace.h>
  19#include <linux/security.h>
  20#include <linux/signal.h>
  21#include <linux/audit.h>
  22#include <linux/pid_namespace.h>
  23#include <linux/syscalls.h>
  24
  25#include <asm/pgtable.h>
  26#include <asm/uaccess.h>
  27
  28/*
  29 * ptrace a task: make the debugger its new parent and
  30 * move it to the ptrace list.
  31 *
  32 * Must be called with the tasklist lock write-held.
  33 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        /* A task may sit on at most one tracer's ->ptraced list at a time. */
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        /* The tracer becomes the wait()-able parent; ->real_parent is untouched. */
        child->parent = new_parent;
}
  40 
  41/*
  42 * Turn a tracing stop into a normal stop now, since with no tracer there
  43 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
  44 * signal sent that would resume the child, but didn't because it was in
  45 * TASK_TRACED, resume it now.
  46 * Requires that irqs be disabled.
  47 */
static void ptrace_untrace(struct task_struct *child)
{
        /* siglock serializes against signal delivery changing child state. */
        spin_lock(&child->sighand->siglock);
        if (task_is_traced(child)) {
                if (child->signal->flags & SIGNAL_STOP_STOPPED) {
                        /*
                         * Group stop is still in effect: demote the trace
                         * stop to an ordinary stop so SIGCONT works.
                         */
                        __set_task_state(child, TASK_STOPPED);
                } else {
                        /* No stop pending: wake it (resume=1 breaks TASK_TRACED). */
                        signal_wake_up(child, 1);
                }
        }
        spin_unlock(&child->sighand->siglock);
}
  60
  61/*
  62 * unptrace a task: move it back to its original parent and
  63 * remove it from the ptrace list.
  64 *
  65 * Must be called with the tasklist lock write-held.
  66 */
void __ptrace_unlink(struct task_struct *child)
{
        /* Caller guarantees the task is actually traced. */
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        /* Re-parent back to the real parent and drop off the tracer's list. */
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);

        /* With no tracer left, a TASK_TRACED child could never be resumed. */
        if (task_is_traced(child))
                ptrace_untrace(child);
}
  78
  79/*
  80 * Check that we have indeed attached to the thing..
  81 */
/*
 * Verify that @child is traced by the current task and, unless @kill,
 * that it is (or can be made) quiescent in TASK_TRACED.
 * Returns 0 on success, -ESRCH otherwise.
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks.  After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
                ret = 0;
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                spin_lock_irq(&child->sighand->siglock);
                /* Promote an ordinary group stop into a trace stop. */
                if (task_is_stopped(child))
                        child->state = TASK_TRACED;
                else if (!task_is_traced(child) && !kill)
                        ret = -ESRCH;   /* running child; only PTRACE_KILL may proceed */
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        /* Wait until the child has actually scheduled out in TASK_TRACED. */
        if (!ret && !kill)
                ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

        /* All systems go.. */
        return ret;
}
 115
/*
 * Core permission check for inspecting @task in @mode
 * (PTRACE_MODE_ATTACH or PTRACE_MODE_READ).
 * Returns 0 if access is allowed, -EPERM otherwise (or the error from
 * the security hook).  Caller must hold task_lock(task).
 */
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
        int dumpable = 0;
        /* Don't let security modules deny introspection */
        if (task == current)
                return 0;
        /* All of the target's uids/gids must match ours, or we need CAP_SYS_PTRACE. */
        if (((current->uid != task->euid) ||
             (current->uid != task->suid) ||
             (current->uid != task->uid) ||
             (current->gid != task->egid) ||
             (current->gid != task->sgid) ||
             (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
                return -EPERM;
        /* NOTE(review): presumably pairs with a barrier in the exec path — confirm. */
        smp_rmb();
        if (task->mm)
                dumpable = get_dumpable(task->mm);
        /* Non-dumpable tasks (e.g. after suid exec) need CAP_SYS_PTRACE. */
        if (!dumpable && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        /* Finally give security modules a veto. */
        return security_ptrace_may_access(task, mode);
}
 145
 146bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 147{
 148        int err;
 149        task_lock(task);
 150        err = __ptrace_may_access(task, mode);
 151        task_unlock(task);
 152        return (!err ? true : false);
 153}
 154
/*
 * Attach the current task as tracer of @task (PTRACE_ATTACH).
 * Returns 0 on success or a negative errno (-EPERM for permission /
 * self-attach / already-traced failures, or the __ptrace_may_access()
 * error).
 */
int ptrace_attach(struct task_struct *task)
{
        int retval;
        unsigned long flags;

        audit_ptrace(task);

        retval = -EPERM;
        /* A task cannot attach to a member of its own thread group. */
        if (same_thread_group(task, current))
                goto out;

repeat:
        /*
         * Nasty, nasty.
         *
         * We want to hold both the task-lock and the
         * tasklist_lock for writing at the same time.
         * But that's against the rules (tasklist_lock
         * is taken for reading by interrupts on other
         * cpu's that may have task_lock).
         */
        task_lock(task);
        if (!write_trylock_irqsave(&tasklist_lock, flags)) {
                task_unlock(task);
                /* Spin lockless until the write lock looks takeable, then retry. */
                do {
                        cpu_relax();
                } while (!write_can_lock(&tasklist_lock));
                goto repeat;
        }

        /* No mm — typically an exiting task or kernel thread; cannot attach. */
        if (!task->mm)
                goto bad;
        /* the same process cannot be attached many times */
        if (task->ptrace & PT_PTRACED)
                goto bad;
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
        if (retval)
                goto bad;

        /* Go */
        task->ptrace |= PT_PTRACED;
        /* Remember attach-time capability for later permission shortcuts. */
        if (capable(CAP_SYS_PTRACE))
                task->ptrace |= PT_PTRACE_CAP;

        __ptrace_link(task, current);

        /* Stop the fresh tracee so the tracer finds it quiescent. */
        send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
        write_unlock_irqrestore(&tasklist_lock, flags);
        task_unlock(task);
out:
        return retval;
}
 208
 209static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
 210{
 211        child->exit_code = data;
 212        /* .. re-parent .. */
 213        __ptrace_unlink(child);
 214        /* .. and wake it up. */
 215        if (child->exit_state != EXIT_ZOMBIE)
 216                wake_up_process(child);
 217}
 218
/*
 * Implement PTRACE_DETACH: undo the attach and optionally deliver
 * signal @data to the child on resume.  Returns 0 or -EIO for an
 * invalid signal number.
 */
int ptrace_detach(struct task_struct *child, unsigned int data)
{
        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        write_lock_irq(&tasklist_lock);
        /* protect against de_thread()->release_task() */
        if (child->ptrace)
                __ptrace_detach(child, data);
        write_unlock_irq(&tasklist_lock);

        return 0;
}
 236
/*
 * Copy @len bytes from the tracee's address space at @src to the
 * tracer's user buffer @dst, in bounce-buffer sized chunks.
 * Returns the number of bytes copied, -EIO if nothing was readable,
 * or -EFAULT if the tracer's buffer is bad.
 */
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];          /* kernel bounce buffer between the two address spaces */
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;  /* partial success: report what we got */
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}
 261
 262int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
 263{
 264        int copied = 0;
 265
 266        while (len > 0) {
 267                char buf[128];
 268                int this_len, retval;
 269
 270                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 271                if (copy_from_user(buf, src, this_len))
 272                        return -EFAULT;
 273                retval = access_process_vm(tsk, dst, buf, this_len, 1);
 274                if (!retval) {
 275                        if (copied)
 276                                break;
 277                        return -EIO;
 278                }
 279                copied += retval;
 280                src += retval;
 281                dst += retval;
 282                len -= retval;                  
 283        }
 284        return copied;
 285}
 286
 287static int ptrace_setoptions(struct task_struct *child, long data)
 288{
 289        child->ptrace &= ~PT_TRACE_MASK;
 290
 291        if (data & PTRACE_O_TRACESYSGOOD)
 292                child->ptrace |= PT_TRACESYSGOOD;
 293
 294        if (data & PTRACE_O_TRACEFORK)
 295                child->ptrace |= PT_TRACE_FORK;
 296
 297        if (data & PTRACE_O_TRACEVFORK)
 298                child->ptrace |= PT_TRACE_VFORK;
 299
 300        if (data & PTRACE_O_TRACECLONE)
 301                child->ptrace |= PT_TRACE_CLONE;
 302
 303        if (data & PTRACE_O_TRACEEXEC)
 304                child->ptrace |= PT_TRACE_EXEC;
 305
 306        if (data & PTRACE_O_TRACEVFORKDONE)
 307                child->ptrace |= PT_TRACE_VFORK_DONE;
 308
 309        if (data & PTRACE_O_TRACEEXIT)
 310                child->ptrace |= PT_TRACE_EXIT;
 311
 312        return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
 313}
 314
/*
 * Copy the siginfo of the signal the child is currently stopped on
 * into @info.  Returns 0, -ESRCH if the child is already released,
 * or -EINVAL if it is not stopped at a signal.
 */
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
        int error = -ESRCH;

        /* tasklist_lock keeps child->sighand from being released under us. */
        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                /* last_siginfo is only set while stopped in signal delivery. */
                if (likely(child->last_siginfo != NULL)) {
                        *info = *child->last_siginfo;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
        return error;
}
 332
/*
 * Replace the siginfo of the signal the child is currently stopped on
 * with @info.  Same locking and return values as ptrace_getsiginfo().
 */
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
        int error = -ESRCH;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        /* Overwrite in place; delivery resumes with the new info. */
                        *child->last_siginfo = *info;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
        return error;
}
 350
 351
/*
 * Optional-request predicates: architectures that support the request
 * define the corresponding PTRACE_* constant; otherwise the predicate
 * collapses to a compile-time 0 and dependent code is optimized away.
 * NOTE(review): the SYSEMU guard tests PTRACE_SYSEMU but the predicate
 * compares PTRACE_SYSEMU_SINGLESTEP — the two are defined together on
 * the arches that have them, presumably intentional.
 */
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)          0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)         0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)   0
#endif
 369
 370static int ptrace_resume(struct task_struct *child, long request, long data)
 371{
 372        if (!valid_signal(data))
 373                return -EIO;
 374
 375        if (request == PTRACE_SYSCALL)
 376                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 377        else
 378                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 379
 380#ifdef TIF_SYSCALL_EMU
 381        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
 382                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 383        else
 384                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 385#endif
 386
 387        if (is_singleblock(request)) {
 388                if (unlikely(!arch_has_block_step()))
 389                        return -EIO;
 390                user_enable_block_step(child);
 391        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
 392                if (unlikely(!arch_has_single_step()))
 393                        return -EIO;
 394                user_enable_single_step(child);
 395        }
 396        else
 397                user_disable_single_step(child);
 398
 399        child->exit_code = data;
 400        wake_up_process(child);
 401
 402        return 0;
 403}
 404
/*
 * Architecture-independent dispatch for ptrace requests; called from
 * arch_ptrace() for the requests it does not handle itself.
 * Returns 0/positive on success or a negative errno (-EIO for
 * unrecognized requests).
 */
int ptrace_request(struct task_struct *child, long request,
                   long addr, long data)
{
        int ret = -EIO;
        siginfo_t siginfo;

        switch (request) {
        /* Peek/poke words in the tracee's memory. */
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                /* data is a user pointer receiving the event message. */
                ret = put_user(child->ptrace_message, (unsigned long __user *) data);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user((siginfo_t __user *) data,
                                                   &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                if (copy_from_user(&siginfo, (siginfo_t __user *) data,
                                   sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_DETACH:      /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

        /* All resuming requests share ptrace_resume(). */
#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state)  /* already dead */
                        return 0;
                /* Resume with SIGKILL pending; the signal number is forced. */
                return ptrace_resume(child, request, SIGKILL);

        default:
                break;
        }

        return ret;
}
 473
 474/**
 475 * ptrace_traceme  --  helper for PTRACE_TRACEME
 476 *
 477 * Performs checks and sets PT_PTRACED.
 478 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 479 */
int ptrace_traceme(void)
{
        int ret = -EPERM;

        /*
         * Are we already being traced?
         */
repeat:
        task_lock(current);
        if (!(current->ptrace & PT_PTRACED)) {
                /*
                 * See ptrace_attach() comments about the locking here.
                 */
                unsigned long flags;
                if (!write_trylock_irqsave(&tasklist_lock, flags)) {
                        task_unlock(current);
                        /* Spin lockless, then retry the whole lock sequence. */
                        do {
                                cpu_relax();
                        } while (!write_can_lock(&tasklist_lock));
                        goto repeat;
                }

                /* The (current, real) parent becomes the tracer — let LSM veto. */
                ret = security_ptrace_traceme(current->parent);

                /*
                 * Set the ptrace bit in the process ptrace flags.
                 * Then link us on our parent's ptraced list.
                 */
                if (!ret) {
                        current->ptrace |= PT_PTRACED;
                        __ptrace_link(current, current->real_parent);
                }

                write_unlock_irqrestore(&tasklist_lock, flags);
        }
        task_unlock(current);
        /* -EPERM if already traced or denied by security; 0 on success. */
        return ret;
}
 518
 519/**
 520 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 521 * @pid:       process id to grab a task_struct reference of
 522 *
 523 * This function is a helper for ptrace implementations.  It checks
 524 * permissions and then grabs a task struct for use of the actual
 525 * ptrace implementation.
 526 *
 527 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 528 */
 529struct task_struct *ptrace_get_task_struct(pid_t pid)
 530{
 531        struct task_struct *child;
 532
 533        read_lock(&tasklist_lock);
 534        child = find_task_by_vpid(pid);
 535        if (child)
 536                get_task_struct(child);
 537
 538        read_unlock(&tasklist_lock);
 539        if (!child)
 540                return ERR_PTR(-ESRCH);
 541        return child;
 542}
 543
/*
 * Architectures may hook extra book-keeping after a successful attach;
 * default to a no-op when no such hook is provided.
 */
#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)       do { } while (0)
#endif
 547
/*
 * The ptrace(2) system call entry point: handles TRACEME and ATTACH
 * here, verifies the attach for everything else, then defers to the
 * architecture's arch_ptrace().
 */
SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
{
        struct task_struct *child;
        long ret;

        /*
         * This lock_kernel fixes a subtle race with suid exec
         */
        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        /* All remaining requests need the child stopped (except KILL). */
        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);
        if (ret < 0)
                goto out_put_task_struct;       /* no-op: falls through anyway */

 out_put_task_struct:
        put_task_struct(child);
 out:
        unlock_kernel();
        return ret;
}
 595
 596int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
 597{
 598        unsigned long tmp;
 599        int copied;
 600
 601        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
 602        if (copied != sizeof(tmp))
 603                return -EIO;
 604        return put_user(tmp, (unsigned long __user *)data);
 605}
 606
 607int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
 608{
 609        int copied;
 610
 611        copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
 612        return (copied == sizeof(data)) ? 0 : -EIO;
 613}
 614
 615#if defined CONFIG_COMPAT
 616#include <linux/compat.h>
 617
/*
 * 32-bit compat counterpart of ptrace_request(): handles the requests
 * whose data layout differs in a compat task (word size, siginfo),
 * and falls back to ptrace_request() for everything else.
 */
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                /* Compat-sized word read. */
                ret = access_process_vm(child, addr, &word, sizeof(word), 0);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                /* Compat-sized word write. */
                ret = access_process_vm(child, addr, &data, sizeof(data), 1);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                /* Zero first: the 32-bit layout may not fill every field. */
                memset(&siginfo, 0, sizeof siginfo);
                if (copy_siginfo_from_user32(
                            &siginfo, (struct compat_siginfo __user *) datap))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}
 669
/*
 * Compat ptrace(2) entry point; mirrors sys_ptrace() but dispatches to
 * compat_arch_ptrace() for the architecture-specific requests.
 */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
{
        struct task_struct *child;
        long ret;

        /*
         * This lock_kernel fixes a subtle race with suid exec
         */
        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        /* Tracee must be stopped (or request is KILL) before proceeding. */
        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (!ret)
                ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        unlock_kernel();
        return ret;
}
 712#endif  /* CONFIG_COMPAT */
 713