linux/include/linux/ptrace.h
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

#include <linux/compiler.h>             /* For unlikely.  */
#include <linux/sched.h>                /* For struct task_struct.  */
#include <linux/err.h>                  /* for IS_ERR_VALUE */
#include <linux/bug.h>                  /* For BUG_ON.  */
#include <uapi/linux/ptrace.h>

/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace
 * flags, are simple.  When a task is running it owns its task->ptrace
 * flags.  When a task is stopped the ptracer owns task->ptrace.
 */

#define PT_SEIZED       0x00010000      /* SEIZE used, enable new behavior */
#define PT_PTRACED      0x00000001
#define PT_DTRACE       0x00000002      /* delayed trace (used on m68k, i386) */
#define PT_PTRACE_CAP   0x00000004      /* ptracer can follow suid-exec */

#define PT_OPT_FLAG_SHIFT       3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event)    (1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD         PT_EVENT_FLAG(0)
#define PT_TRACE_FORK           PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK          PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE          PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC           PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE     PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT           PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP        PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
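
/*
 * Illustrative note (not in the original header): the PT_TRACE_* bits above
 * mirror the userspace PTRACE_O_* option bits, shifted left by
 * PT_OPT_FLAG_SHIFT.  Assuming the usual uapi values PTRACE_EVENT_FORK == 1
 * and PTRACE_O_TRACEFORK == (1 << PTRACE_EVENT_FORK):
 *
 *      PT_TRACE_FORK == PT_EVENT_FLAG(PTRACE_EVENT_FORK)
 *                    == 1 << (PT_OPT_FLAG_SHIFT + 1)
 *                    == PTRACE_O_TRACEFORK << PT_OPT_FLAG_SHIFT
 *
 * so a tracer's PTRACE_SETOPTIONS mask maps onto these flags with a single
 * shift.
 */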

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT       31
#define PT_SINGLESTEP           (1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT        30
#define PT_BLOCKSTEP            (1<<PT_BLOCKSTEP_BIT)

extern long arch_ptrace(struct task_struct *child, long request,
                        unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
extern int ptrace_request(struct task_struct *child, long request,
                          unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
                          struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ        0x01
#define PTRACE_MODE_ATTACH      0x02
#define PTRACE_MODE_NOAUDIT     0x04
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
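
/*
 * Usage sketch (illustrative, not taken from a specific caller): code that
 * exposes another task's state, such as a /proc handler, typically gates
 * the access on ptrace_may_access() with the appropriate mode:
 *
 *      if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *              return -EPERM;
 *
 * PTRACE_MODE_ATTACH is the stricter mode checked before actually
 * attaching to a task.
 */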

static inline int ptrace_reparented(struct task_struct *child)
{
        return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
        if (unlikely(child->ptrace))
                __ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be kept
 * alive only by RCU.  During exec, this may be called with task_lock() held
 * on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
        if (unlikely(task->ptrace))
                return rcu_dereference(task->parent);
        return NULL;
}
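
/*
 * Usage sketch (illustrative): because the returned pointer may be kept
 * alive only by RCU, a caller that only needs the tracer's pid would do
 * all the work inside the read-side critical section:
 *
 *      rcu_read_lock();
 *      tracer = ptrace_parent(task);
 *      tracer_pid = tracer ? task_pid_nr(tracer) : 0;
 *      rcu_read_unlock();
 *
 * The pointer must not be dereferenced after rcu_read_unlock() unless a
 * reference was taken while the lock was held.
 */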

/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
        return task->ptrace & PT_EVENT_FLAG(event);
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event:      %PTRACE_EVENT_* value to report
 * @message:    value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
        if (unlikely(ptrace_event_enabled(current, event))) {
                current->ptrace_message = message;
                ptrace_notify((event << 8) | SIGTRAP);
        } else if (event == PTRACE_EVENT_EXEC) {
                /* legacy EXEC report via SIGTRAP */
                if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
                        send_sig(SIGTRAP, current, 0);
        }
}
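
/*
 * Userspace view (illustrative): an event reported through
 * ptrace_notify((event << 8) | SIGTRAP) is seen by the tracer in the
 * waitpid() status word, as described in ptrace(2):
 *
 *      ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACEEXEC);
 *      waitpid(pid, &status, 0);
 *      if (WIFSTOPPED(status) &&
 *          status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8)))
 *              ptrace(PTRACE_GETEVENTMSG, pid, 0, &msg);
 *
 * PTRACE_GETEVENTMSG then retrieves the @message value stored above.
 */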

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child:              new child task
 * @ptrace:             true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
        INIT_LIST_HEAD(&child->ptrace_entry);
        INIT_LIST_HEAD(&child->ptraced);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        atomic_set(&child->ptrace_bp_refcnt, 1);
#endif
        child->jobctl = 0;
        child->ptrace = 0;
        child->parent = child->real_parent;

        if (unlikely(ptrace) && current->ptrace) {
                child->ptrace = current->ptrace;
                __ptrace_link(child, current->parent);

                if (child->ptrace & PT_SEIZED)
                        task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
                else
                        sigaddset(&child->pending.signal, SIGSTOP);

                set_tsk_thread_flag(child, TIF_SIGPENDING);
        }
}
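
/*
 * Illustrative consequence: when a traced parent forks with PT_TRACE_FORK
 * set, the new child is attached to the parent's tracer here, before it
 * ever runs; a PT_SEIZED tracer gets a JOBCTL_TRAP_STOP trap, a legacy
 * tracer gets the queued SIGSTOP.  From the tracer's side (sketch):
 *
 *      // after a PTRACE_EVENT_FORK stop on the parent
 *      ptrace(PTRACE_GETEVENTMSG, parent_pid, 0, &child_pid);
 *      waitpid(child_pid, &status, 0);     // new child is already traced
 */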

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task:       task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
        BUG_ON(!list_empty(&task->ptraced));
        ptrace_unlink(task);
        BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
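
/*
 * Usage sketch (hypothetical handler, illustrative only): a handler whose
 * successful result can legitimately look like an error value would clear
 * the arch error flag just before returning it:
 *
 *      err = do_lookup_cookie(arg, &cookie);   // hypothetical helper
 *      if (err)
 *              return err;
 *      force_successful_syscall_return();      // cookie may be "negative"
 *      return cookie;
 *
 * On architectures without a separate error flag this expands to nothing,
 * as defined above.
 */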

#ifndef is_syscall_success
/*
 * On most systems we can tell whether a syscall succeeded by checking whether
 * its return value lies in the error range.  Some systems, such as ia64 and
 * powerpc, use a different indicator of success/failure and must define
 * their own version.
 */
#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif
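
/*
 * Illustrative note: with the generic definition a return value in the
 * error range [-MAX_ERRNO, -1] (e.g. -ENOENT) makes IS_ERR_VALUE() true,
 * so the syscall counts as failed, while a large "negative looking" value
 * such as an mmap() address outside that range still counts as success.
 */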

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()          (0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
        BUG();                  /* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif  /* arch_has_single_step */
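
/*
 * Usage sketch (illustrative; the real logic lives in ptrace_resume() in
 * kernel/ptrace.c): generic PTRACE_SINGLESTEP handling only reaches these
 * helpers when the arch advertises support:
 *
 *      if (request == PTRACE_SINGLESTEP) {
 *              if (!arch_has_single_step())
 *                      return -EIO;
 *              user_enable_single_step(child);
 *      } else {
 *              user_disable_single_step(child);
 *      }
 */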

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()           (0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
        BUG();                  /* This can never be called.  */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif  /* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
                                struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
                                struct pt_regs *regs, siginfo_t *info)
{
        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code:       current->exit_code value ptrace will stop with
 * @info:       siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 */
#define arch_ptrace_stop_needed(code, info)     (0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code:       current->exit_code value ptrace will stop with
 * @info:       siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)            do { } while (0)
#endif
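
/*
 * Calling pattern sketch (illustrative; see ptrace_stop() in
 * kernel/signal.c): the siglock is dropped only when the arch reports that
 * the extra work is actually needed:
 *
 *      if (unlikely(arch_ptrace_stop_needed(exit_code, info))) {
 *              spin_unlock_irq(&current->sighand->siglock);
 *              arch_ptrace_stop(exit_code, info);
 *              spin_lock_irq(&current->sighand->siglock);
 *      }
 */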

#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
                                unsigned long args[6], unsigned int maxargs,
                                unsigned long *sp, unsigned long *pc);
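
/*
 * Usage sketch (illustrative; /proc/<pid>/syscall is the in-tree user):
 * the target must be stopped or blocked, otherwise no coherent snapshot
 * can be taken:
 *
 *      long nr;
 *      unsigned long args[6], sp, pc;
 *
 *      if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
 *              return -EAGAIN;         // target was running
 *      if (nr == -1L)
 *              ...                     // not in a syscall; sp/pc still valid
 */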

#ifdef CONFIG_HAVE_HW_BREAKPOINT
extern int ptrace_get_breakpoints(struct task_struct *tsk);
extern void ptrace_put_breakpoints(struct task_struct *tsk);
#else
static inline void ptrace_put_breakpoints(struct task_struct *tsk) { }
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

#endif