// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is first checked
 * against 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}
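
/*
 * Note: aarch64_insn_patch_text_nosync() writes the instruction and performs
 * the required cache maintenance, but does not synchronize with other CPUs
 * (e.g. via stop_machine()). Roughly speaking, this is safe here because the
 * architecture permits concurrent modification and execution of NOP/B/BL
 * instructions, which are the only things we ever swap at a patch site.
 */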

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)function_nocfi(ftrace_call);
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

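	/*
	 * No validation here: the instruction at ftrace_call may be the
	 * original NOP or a BL to whichever tracer function was installed
	 * last, so there is no single 'old' value to check against.
	 */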
	return ftrace_modify_code(pc, 0, new, false);
}

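/*
 * Roughly, an ftrace PLT entry is a short trampoline (ADRP/ADD/BR via x16,
 * the AAPCS intra-procedure-call scratch register) emitted into the module
 * at load time (see module-plts.c), providing an in-range branch target
 * that then jumps on to the real ftrace entry point.
 */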
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];
#endif
	return NULL;
}

/*
 * Turn on the call to ftrace_caller() in an instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	long offset = (long)pc - (long)addr;

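	/*
	 * A BL encodes a signed 26-bit immediate in units of four bytes, so
	 * it can only reach +/-128MiB from the branch site; anything further
	 * away must go through a trampoline.
	 */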
	if (offset < -SZ_128M || offset >= SZ_128M) {
		struct module *mod;
		struct plt_entry *plt;

		if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
			return -EINVAL;

		/*
		 * On kernels that support module PLTs, the offset between the
		 * branch instruction and its target may legally exceed the
		 * range of an ordinary relative 'bl' opcode. In this case, we
		 * need to branch via a trampoline in the module.
		 *
		 * NOTE: __module_text_address() must be called with preemption
		 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
		 * retains its validity throughout the remainder of this code.
		 */
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();

		if (WARN_ON(!mod))
			return -EINVAL;

		plt = get_ftrace_plt(mod, addr);
		if (!plt) {
			pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
			return -EINVAL;
		}

		addr = (unsigned long)plt;
	}

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	old = aarch64_insn_gen_branch_imm(pc, old_addr,
					  AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_regs_entry, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;

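	/*
	 * MOV X9, X30 is an alias of ORR X9, XZR, X30, which is what
	 * aarch64_insn_gen_move_reg() emits here.
	 */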
	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
#endif

/*
 * Turn off the call to ftrace_caller() in an instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	bool validate = true;
	u32 old = 0, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		u32 replaced;

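		/*
		 * As in ftrace_make_call(): the target is out of immediate
		 * BL range, so the existing call must have been routed via
		 * a module PLT entry.
		 */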
		if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
			return -EINVAL;

		/*
		 * 'mod' is only set at module load time, but if we end up
		 * dealing with an out-of-range condition, we can assume it
		 * is due to a module being loaded far away from the kernel.
		 */
		if (!mod) {
			preempt_disable();
			mod = __module_text_address(pc);
			preempt_enable();

			if (WARN_ON(!mod))
				return -EINVAL;
		}

		/*
		 * The instruction we are about to patch may be a branch and
		 * link instruction that was redirected via a PLT entry. In
		 * this case, the normal validation will fail, but we can at
		 * least check that we are dealing with a branch and link
		 * instruction that points into the right module.
		 */
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (!aarch64_insn_is_bl(replaced) ||
		    !within_module(pc + aarch64_get_branch_offset(replaced),
				   mod))
			return -EINVAL;

		validate = false;
	} else {
		old = aarch64_insn_gen_branch_imm(pc, addr,
						  AARCH64_INSN_BRANCH_LINK);
	}

	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, validate);
}

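/*
 * arm64 patches call sites one at a time with
 * aarch64_insn_patch_text_nosync() rather than under stop_machine(), so
 * FTRACE_MAY_SLEEP lets the core ftrace code reschedule between updates.
 */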
void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to the parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent)
 * on the call stack with return_to_handler.
 *
 * Note that @frame_pointer is used only for a sanity check later.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller(),
 * depending on @enable. ftrace_graph_call labels a NOP placeholder in the
 * ftrace_caller() assembly that is patched here.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */