linux/arch/arm64/kernel/stacktrace.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/pointer_auth.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static inline void unwind_init_from_regs(struct unwind_state *state,
                                         struct pt_regs *regs)
{
        unwind_init_common(state, current);

        state->fp = regs->regs[29];
        state->pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
        unwind_init_common(state, current);

        state->fp = (unsigned long)__builtin_frame_address(1);
        state->pc = (unsigned long)__builtin_return_address(0);
}
  50
  51/*
  52 * Start an unwind from a blocked task.
  53 *
  54 * The unwind will begin at the blocked tasks saved PC (i.e. the caller of
  55 * cpu_switch_to()).
  56 *
  57 * The caller should ensure the task is blocked in cpu_switch_to() for the
  58 * duration of the unwind, or the unwind will be bogus. It is never valid to
  59 * call this for the current task.
  60 */
  61static inline void unwind_init_from_task(struct unwind_state *state,
  62                                         struct task_struct *task)
  63{
  64        unwind_init_common(state, task);
  65
  66        state->fp = thread_saved_fp(task);
  67        state->pc = thread_saved_pc(task);
  68}

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
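 *
 * Returns true when [sp, sp + size) lies entirely on a stack that @tsk may
 * legitimately be using, filling in @info with that stack's type and bounds
 * when it does.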
 */
static bool on_accessible_stack(const struct task_struct *tsk,
                                unsigned long sp, unsigned long size,
                                struct stack_info *info)
{
        if (info)
                info->type = STACK_TYPE_UNKNOWN;

        if (on_task_stack(tsk, sp, size, info))
                return true;
        if (tsk != current || preemptible())
                return false;
        if (on_irq_stack(sp, size, info))
                return true;
        if (on_overflow_stack(sp, size, info))
                return true;
        if (on_sdei_stack(sp, size, info))
                return true;

        return false;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static int notrace unwind_next(struct unwind_state *state)
{
        struct task_struct *tsk = state->task;
        unsigned long fp = state->fp;
        struct stack_info info;
        int err;

        /* Final frame; nothing to unwind */
        if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
                return -ENOENT;

        err = unwind_next_common(state, &info, on_accessible_stack, NULL);
        if (err)
                return err;

        state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (tsk->ret_stack &&
                (state->pc == (unsigned long)return_to_handler)) {
                unsigned long orig_pc;
                /*
                 * This is a case where the function graph tracer has
                 * modified a return address (LR) in a stack frame
                 * to hook a function return.
                 * So replace it with the original value.
                 */
                orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
                                                (void *)state->fp);
                if (WARN_ON_ONCE(state->pc == orig_pc))
                        return -EINVAL;
                state->pc = orig_pc;
        }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
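        /*
         * Likewise, a kretprobe trampoline may have replaced the saved return
         * address; look up and restore the original return address for this
         * frame.
         */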
        if (is_kretprobe_trampoline(state->pc))
                state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif

        return 0;
}
NOKPROBE_SYMBOL(unwind_next);

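/*
 * Walk the frame records starting from the initialized @state, passing each
 * unwound PC to @consume_entry until it returns false or unwind_next()
 * reports an error (including -ENOENT at the final frame).
 */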
static void notrace unwind(struct unwind_state *state,
                           stack_trace_consume_fn consume_entry, void *cookie)
{
        while (1) {
                int ret;

                if (!consume_entry(cookie, state->pc))
                        break;
                ret = unwind_next(state);
                if (ret < 0)
                        break;
        }
}
NOKPROBE_SYMBOL(unwind);

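/*
 * stack_trace_consume_fn used by dump_backtrace(): print one entry at the
 * log level passed through the cookie, and always return true so the whole
 * trace is printed.
 */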
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
        char *loglvl = arg;
        printk("%s %pSb\n", loglvl, (void *)where);
        return true;
}

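/*
 * Print the call trace for @tsk (or current when NULL) at log level @loglvl.
 * User-mode regs are not unwound. The task's stack is pinned with
 * try_get_task_stack() for the duration of the walk.
 */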
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                    const char *loglvl)
{
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (regs && user_mode(regs))
                return;

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        printk("%sCall trace:\n", loglvl);
        arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

        put_task_stack(tsk);
}

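/*
 * Arch implementation of the generic show_stack() interface; the trailing
 * barrier() keeps the compiler from turning the dump_backtrace() call into a
 * tail call.
 */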
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
        dump_backtrace(NULL, tsk, loglvl);
        barrier();
}

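/*
 * Arch backend for the generic stacktrace API: pick a starting point (the
 * exception regs, the immediate caller for current, or a blocked task's
 * saved context) and walk the frames from there. Unwinding another task
 * from regs is not supported.
 */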
noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
                              void *cookie, struct task_struct *task,
                              struct pt_regs *regs)
{
        struct unwind_state state;

        if (regs) {
                if (task != current)
                        return;
                unwind_init_from_regs(&state, regs);
        } else if (task == current) {
                unwind_init_from_caller(&state);
        } else {
                unwind_init_from_task(&state, task);
        }

        unwind(&state, consume_entry, cookie);
}