linux/arch/powerpc/kernel/trace/ftrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/asm-prototypes.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>


#ifdef CONFIG_DYNAMIC_FTRACE

/*
 * We generally only have a single long_branch tramp and at most 2 or 3 plt
 * tramps generated. But, we don't use the plt tramps currently. We also allot
 * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
 * tramps in total. Set aside 8 just to be sure.
 */
#define NUM_FTRACE_TRAMPS       8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

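/*
 * Build the instruction to patch in at @ip: a relative 'b' (or 'bl' when
 * @link is set) to the entry point of @addr as resolved by
 * ppc_function_entry().
 */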
static struct ppc_inst
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
        struct ppc_inst op;

        addr = ppc_function_entry((void *)addr);

        /* if (link) set op to 'bl' else 'b' */
        create_branch(&op, (u32 *)ip, addr, link ? 1 : 0);

        return op;
}

static int
ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new)
{
        struct ppc_inst replaced;

        /*
         * Note:
         * We are paranoid about modifying text, as if a bug were to happen, it
         * could cause us to read or write to someplace that could cause harm.
         * Carefully read and modify the code with copy_inst_from_kernel_nofault()
         * and patch_instruction(), and make sure what we read is what we
         * expected it to be before modifying it.
         */

        /* read the text we want to modify */
        if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (!ppc_inst_equal(replaced, old)) {
                pr_err("%p: replaced (%s) != old (%s)",
                (void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old));
                return -EINVAL;
        }

        /* replace the text with the new text */
        if (patch_instruction((u32 *)ip, new))
                return -EPERM;

        return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
        struct ppc_inst op;
        addr = ppc_function_entry((void *)addr);

        /* use create_branch() to verify that this offset can be reached by a branch */
        return create_branch(&op, (u32 *)ip, addr, 0) == 0;
}

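/*
 * PowerPC I-form branches ('b'/'bl') use primary opcode 18: bits 0-5 hold
 * the opcode (0x48000000), the AA bit (0x2) selects an absolute target and
 * the LK bit (0x1) selects branch-and-link. Masking with 0xfc000003 keeps
 * exactly those fields, so 0x48000001 is a relative 'bl' and 0x48000000 a
 * relative 'b'. The 24-bit LI field (mask 0x03fffffc) is a signed,
 * word-aligned offset from the branch itself, which find_bl_target()
 * sign-extends and adds to @ip to recover the target address.
 */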
static int is_bl_op(struct ppc_inst op)
{
        return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
}

static int is_b_op(struct ppc_inst op)
{
        return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
}

static unsigned long find_bl_target(unsigned long ip, struct ppc_inst op)
{
        int offset;

        offset = (ppc_inst_val(op) & 0x03fffffc);
        /* make it signed */
        if (offset & 0x02000000)
                offset |= 0xfe000000;

        return ip + (long)offset;
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long entry, ptr, tramp;
        unsigned long ip = rec->ip;
        struct ppc_inst op, pop;

        /* read where this goes */
        if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
                pr_err("Fetching opcode failed.\n");
                return -EFAULT;
        }

        /* Make sure that this is still a 24-bit jump */
        if (!is_bl_op(op)) {
                pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
                return -EINVAL;
        }

        /* let's find where the pointer goes */
        tramp = find_bl_target(ip, op);

        pr_devel("ip:%lx jumps to %lx", ip, tramp);

        if (module_trampoline_target(mod, tramp, &ptr)) {
                pr_err("Failed to get trampoline target\n");
                return -EFAULT;
        }

        pr_devel("trampoline target %lx", ptr);

        entry = ppc_global_function_entry((void *)addr);
        /* This should match what was called */
        if (ptr != entry) {
                pr_err("addr %lx does not match expected %lx\n", ptr, entry);
                return -EINVAL;
        }

#ifdef CONFIG_MPROFILE_KERNEL
        /* When using -mprofile-kernel there is no load to jump over */
        pop = ppc_inst(PPC_RAW_NOP());

        if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
                pr_err("Fetching instruction at %lx failed.\n", ip - 4);
                return -EFAULT;
        }

        /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
        if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
            !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
                pr_err("Unexpected instruction %s around bl _mcount\n",
                       ppc_inst_as_str(op));
                return -EINVAL;
        }
#else
        /*
         * Our original call site looks like:
         *
         * bl <tramp>
         * ld r2,XX(r1)
         *
         * Milton Miller pointed out that we cannot simply nop the branch:
         * if a task was preempted when calling a trace function, the nops
         * would remove the way to restore the TOC in r2 and the r2 TOC
         * would get corrupted.
         *
         * Use a b +8 to jump over the load.
         */

        pop = ppc_inst(PPC_INST_BRANCH | 8);    /* b +8 */

        /*
         * Check what is in the next instruction. We can see ld r2,40(r1), but
         * on first pass after boot we will see mflr r0.
         */
        if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
                pr_err("Fetching op failed.\n");
                return -EFAULT;
        }

        if (!ppc_inst_equal(op,  ppc_inst(PPC_INST_LD_TOC))) {
                pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
                return -EINVAL;
        }
#endif /* CONFIG_MPROFILE_KERNEL */

        if (patch_instruction((u32 *)ip, pop)) {
                pr_err("Patching NOP failed.\n");
                return -EPERM;
        }

        return 0;
}

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
{
        struct ppc_inst op;
        unsigned int jmp[4];
        unsigned long ip = rec->ip;
        unsigned long tramp;

        if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure that this is still a 24-bit jump */
        if (!is_bl_op(op)) {
                pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
                return -EINVAL;
        }

        /* let's find where the pointer goes */
        tramp = find_bl_target(ip, op);

        /*
         * On PPC32 the trampoline looks like:
         *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
         *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
         *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
         *  0x4e, 0x80, 0x04, 0x20  bctr
         */

        pr_devel("ip:%lx jumps to %lx", ip, tramp);

        /* Find where the trampoline jumps to */
        if (copy_from_kernel_nofault(jmp, (void *)tramp, sizeof(jmp))) {
                pr_err("Failed to read %lx\n", tramp);
                return -EFAULT;
        }

        pr_devel(" %08x %08x ", jmp[0], jmp[1]);

        /* verify that this is what we expect it to be */
        if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
            ((jmp[1] & 0xffff0000) != 0x398c0000) ||
            (jmp[2] != 0x7d8903a6) ||
            (jmp[3] != 0x4e800420)) {
                pr_err("Not a trampoline\n");
                return -EINVAL;
        }

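        /*
         * Recover sym from the lis/addi pair: the @ha immediate in jmp[0]
         * supplies the high 16 bits and the @l immediate in jmp[1] the low
         * 16 bits. Because addi sign-extends its operand, @ha was rounded
         * up whenever @l has bit 15 set, so subtract 0x10000 in that case
         * to undo the adjustment.
         */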
        tramp = (jmp[1] & 0xffff) |
                ((jmp[0] & 0xffff) << 16);
        if (tramp & 0x8000)
                tramp -= 0x10000;

        pr_devel(" %lx ", tramp);

        if (tramp != addr) {
                pr_err("Trampoline location %08lx does not match addr\n",
                       tramp);
                return -EINVAL;
        }

        op = ppc_inst(PPC_RAW_NOP());

        if (patch_instruction((u32 *)ip, op))
                return -EPERM;

        return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */

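/*
 * Return a registered ftrace trampoline that is reachable from @ip with a
 * plain 24-bit relative branch, or 0 if none is in range. The array is
 * walked from the highest index down so that the compiler-generated
 * long_branch tramps, which are registered after the stubs set up at boot,
 * are preferred.
 */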
static unsigned long find_ftrace_tramp(unsigned long ip)
{
        int i;
        struct ppc_inst instr;

        /*
         * We have the compiler-generated long_branch tramps at the end,
         * and we prefer those.
         */
        for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
                if (!ftrace_tramps[i])
                        continue;
                else if (create_branch(&instr, (void *)ip,
                                       ftrace_tramps[i], 0) == 0)
                        return ftrace_tramps[i];

        return 0;
}

static int add_ftrace_tramp(unsigned long tramp)
{
        int i;

        for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
                if (!ftrace_tramps[i]) {
                        ftrace_tramps[i] = tramp;
                        return 0;
                }

        return -1;
}

/*
 * If this is a compiler-generated long_branch trampoline (essentially, a
 * trampoline that has a branch to _mcount()), we rewrite the branch to
 * instead go to ftrace_[regs_]caller() and note down the location of this
 * trampoline.
 */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
        int i;
        struct ppc_inst op;
        unsigned long ptr;
        struct ppc_inst instr;
        static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];

        /* Is this a known long jump tramp? */
        for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
                if (!ftrace_tramps[i])
                        break;
                else if (ftrace_tramps[i] == tramp)
                        return 0;

        /* Is this a known plt tramp? */
        for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
                if (!ftrace_plt_tramps[i])
                        break;
                else if (ftrace_plt_tramps[i] == tramp)
                        return -1;

        /* New trampoline -- read where this goes */
        if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
                pr_debug("Fetching opcode failed.\n");
                return -1;
        }

        /* Is this a 24-bit branch? */
        if (!is_b_op(op)) {
                pr_debug("Trampoline is not a long branch tramp.\n");
                return -1;
        }

        /* let's find where the pointer goes */
        ptr = find_bl_target(tramp, op);

        if (ptr != ppc_global_function_entry((void *)_mcount)) {
                pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
                return -1;
        }

        /* Let's rewrite the tramp to go to ftrace_[regs_]caller */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
        ptr = ppc_global_function_entry((void *)ftrace_caller);
#endif
        if (create_branch(&instr, (void *)tramp, ptr, 0)) {
                pr_debug("%ps is not reachable from existing mcount tramp\n",
                                (void *)ptr);
                return -1;
        }

        if (patch_branch((u32 *)tramp, ptr, 0)) {
                pr_debug("REL24 out of range!\n");
                return -1;
        }

        if (add_ftrace_tramp(tramp)) {
                pr_debug("No tramp locations left\n");
                return -1;
        }

        return 0;
}

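/*
 * Nop out a call site that lives in core kernel text. The site currently
 * branches to a compiler-generated long_branch trampoline; try to convert
 * that trampoline to target ftrace_[regs_]caller() so it can be reused
 * later by __ftrace_make_call_kernel(). If the conversion fails, it is only
 * fatal when no other registered trampoline is reachable from this site.
 * Finally the call itself is replaced with a nop.
 */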
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long tramp, ip = rec->ip;
        struct ppc_inst op;

        /* Read where this goes */
        if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
                pr_err("Fetching opcode failed.\n");
                return -EFAULT;
        }

        /* Make sure that this is still a 24-bit jump */
        if (!is_bl_op(op)) {
                pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
                return -EINVAL;
        }

        /* Let's find where the pointer goes */
        tramp = find_bl_target(ip, op);

        pr_devel("ip:%lx jumps to %lx", ip, tramp);

        if (setup_mcount_compiler_tramp(tramp)) {
                /* Are other trampolines reachable? */
                if (!find_ftrace_tramp(ip)) {
                        pr_err("No ftrace trampolines reachable from %ps\n",
                                        (void *)ip);
                        return -EINVAL;
                }
        }

        if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
                pr_err("Patching NOP failed.\n");
                return -EPERM;
        }

        return 0;
}

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        struct ppc_inst old, new;

        /*
         * If the calling address is more than 24 bits away,
         * then we had to use a trampoline to make the call.
         * Otherwise just update the call site.
         */
        if (test_24bit_addr(ip, addr)) {
                /* within range */
                old = ftrace_call_replace(ip, addr, 1);
                new = ppc_inst(PPC_RAW_NOP());
                return ftrace_modify_code(ip, old, new);
        } else if (core_kernel_text(ip))
                return __ftrace_make_nop_kernel(rec, addr);

#ifdef CONFIG_MODULES
        /*
         * Out of range jumps are called from modules.
         * We should either already have a pointer to the module
         * or it has been passed in.
         */
        if (!rec->arch.mod) {
                if (!mod) {
                        pr_err("No module loaded addr=%lx\n", addr);
                        return -EFAULT;
                }
                rec->arch.mod = mod;
        } else if (mod) {
                if (mod != rec->arch.mod) {
                        pr_err("Record mod %p not equal to passed in mod %p\n",
                               rec->arch.mod, mod);
                        return -EINVAL;
                }
                /* nothing to do if mod == rec->arch.mod */
        } else
                mod = rec->arch.mod;

        return __ftrace_make_nop(mod, rec, addr);
#else
        /* We should not get here without modules */
        return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
/*
 * Examine the existing instructions for __ftrace_make_call.
 * They should effectively be a NOP, and follow formal constraints,
 * depending on the ABI. Return false if they don't.
 */
#ifndef CONFIG_MPROFILE_KERNEL
static int
expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
{
        /*
         * We expect to see:
         *
         * b +8
         * ld r2,XX(r1)
         *
         * The load offset is different depending on the ABI. For simplicity
         * just mask it out when doing the compare.
         */
        if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) ||
            (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000)
                return 0;
        return 1;
}
#else
static int
expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
{
        /* look for patched "NOP" on ppc64 with -mprofile-kernel */
        if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
                return 0;
        return 1;
}
#endif

static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        struct ppc_inst op[2];
        struct ppc_inst instr;
        void *ip = (void *)rec->ip;
        unsigned long entry, ptr, tramp;
        struct module *mod = rec->arch.mod;

        /* read where this goes */
        if (copy_inst_from_kernel_nofault(op, ip))
                return -EFAULT;

        if (copy_inst_from_kernel_nofault(op + 1, ip + 4))
                return -EFAULT;

        if (!expected_nop_sequence(ip, op[0], op[1])) {
                pr_err("Unexpected call sequence at %p: %s %s\n",
                ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1]));
                return -EINVAL;
        }

        /* If we never set up ftrace trampoline(s), then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
        if (!mod->arch.tramp) {
#endif
                pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        if (rec->flags & FTRACE_FL_REGS)
                tramp = mod->arch.tramp_regs;
        else
#endif
                tramp = mod->arch.tramp;

        if (module_trampoline_target(mod, tramp, &ptr)) {
                pr_err("Failed to get trampoline target\n");
                return -EFAULT;
        }

        pr_devel("trampoline target %lx", ptr);

        entry = ppc_global_function_entry((void *)addr);
        /* This should match what was called */
        if (ptr != entry) {
                pr_err("addr %lx does not match expected %lx\n", ptr, entry);
                return -EINVAL;
        }

        /* Ensure branch is within 24 bits */
        if (create_branch(&instr, ip, tramp, BRANCH_SET_LINK)) {
                pr_err("Branch out of range\n");
                return -EINVAL;
        }

        if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
                pr_err("REL24 out of range!\n");
                return -EINVAL;
        }

        return 0;
}

#else  /* !CONFIG_PPC64 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        int err;
        struct ppc_inst op;
        u32 *ip = (u32 *)rec->ip;

        /* read where this goes */
        if (copy_inst_from_kernel_nofault(&op, ip))
                return -EFAULT;

        /* It should be pointing to a nop */
        if (!ppc_inst_equal(op,  ppc_inst(PPC_RAW_NOP()))) {
                pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
                return -EINVAL;
        }

        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
                pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }

        /* create the branch to the trampoline */
        err = create_branch(&op, ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
        if (err) {
                pr_err("REL24 out of range!\n");
                return -EINVAL;
        }

        pr_devel("write to %lx\n", rec->ip);

        if (patch_instruction(ip, op))
                return -EPERM;

        return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

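/*
 * Turn a nop'ed site in core kernel text back into a call. @addr must be
 * ftrace_caller() (or ftrace_regs_caller() when DYNAMIC_FTRACE_WITH_REGS is
 * enabled); since those may be out of range of a 24-bit branch, the site is
 * instead pointed at a reachable trampoline registered in ftrace_tramps[],
 * via a 'bl' patched in by patch_branch().
 */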
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
        struct ppc_inst op;
        void *ip = (void *)rec->ip;
        unsigned long tramp, entry, ptr;

        /* Make sure we're being asked to patch branch to a known ftrace addr */
        entry = ppc_global_function_entry((void *)ftrace_caller);
        ptr = ppc_global_function_entry((void *)addr);

        if (ptr != entry) {
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
                entry = ppc_global_function_entry((void *)ftrace_regs_caller);
                if (ptr != entry) {
#endif
                        pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
                        return -EINVAL;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
                }
#endif
        }

        /* Make sure we have a nop */
        if (copy_inst_from_kernel_nofault(&op, ip)) {
                pr_err("Unable to read ftrace location %p\n", ip);
                return -EFAULT;
        }

        if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
                pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
                return -EINVAL;
        }

        tramp = find_ftrace_tramp((unsigned long)ip);
        if (!tramp) {
                pr_err("No ftrace trampolines reachable from %ps\n", ip);
                return -EINVAL;
        }

        if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
                pr_err("Error patching branch to ftrace tramp!\n");
                return -EINVAL;
        }

        return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        struct ppc_inst old, new;

        /*
         * If the calling address is more than 24 bits away,
         * then we had to use a trampoline to make the call.
         * Otherwise just update the call site.
         */
        if (test_24bit_addr(ip, addr)) {
                /* within range */
                old = ppc_inst(PPC_RAW_NOP());
                new = ftrace_call_replace(ip, addr, 1);
                return ftrace_modify_code(ip, old, new);
        } else if (core_kernel_text(ip))
                return __ftrace_make_call_kernel(rec, addr);

#ifdef CONFIG_MODULES
        /*
         * Out of range jumps are called from modules.
         * Since we are converting from a nop, the module had better
         * already be defined.
         */
        if (!rec->arch.mod) {
                pr_err("No module loaded\n");
                return -EINVAL;
        }

        return __ftrace_make_call(rec, addr);
#else
        /* We should not get here without modules */
        return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#ifdef CONFIG_MODULES
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                        unsigned long addr)
{
        struct ppc_inst op;
        unsigned long ip = rec->ip;
        unsigned long entry, ptr, tramp;
        struct module *mod = rec->arch.mod;

        /* If we never set up ftrace trampolines, then bail */
        if (!mod->arch.tramp || !mod->arch.tramp_regs) {
                pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }

        /* read where this goes */
        if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
                pr_err("Fetching opcode failed.\n");
                return -EFAULT;
        }

        /* Make sure that this is still a 24-bit jump */
        if (!is_bl_op(op)) {
                pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
                return -EINVAL;
        }

        /* let's find where the pointer goes */
        tramp = find_bl_target(ip, op);
        entry = ppc_global_function_entry((void *)old_addr);

        pr_devel("ip:%lx jumps to %lx", ip, tramp);

        if (tramp != entry) {
                /* old_addr is not within range, so we must have used a trampoline */
                if (module_trampoline_target(mod, tramp, &ptr)) {
                        pr_err("Failed to get trampoline target\n");
                        return -EFAULT;
                }

                pr_devel("trampoline target %lx", ptr);

                /* This should match what was called */
                if (ptr != entry) {
                        pr_err("addr %lx does not match expected %lx\n", ptr, entry);
                        return -EINVAL;
                }
        }

        /* The new target may be within range */
        if (test_24bit_addr(ip, addr)) {
                /* within range */
                if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
                        pr_err("REL24 out of range!\n");
                        return -EINVAL;
                }

                return 0;
        }

        if (rec->flags & FTRACE_FL_REGS)
                tramp = mod->arch.tramp_regs;
        else
                tramp = mod->arch.tramp;

        if (module_trampoline_target(mod, tramp, &ptr)) {
                pr_err("Failed to get trampoline target\n");
                return -EFAULT;
        }

        pr_devel("trampoline target %lx", ptr);

        entry = ppc_global_function_entry((void *)addr);
        /* This should match what was called */
        if (ptr != entry) {
                pr_err("addr %lx does not match expected %lx\n", ptr, entry);
                return -EINVAL;
        }

        /* Ensure branch is within 24 bits */
        if (create_branch(&op, (u32 *)ip, tramp, BRANCH_SET_LINK)) {
                pr_err("Branch out of range\n");
                return -EINVAL;
        }

        if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
                pr_err("REL24 out of range!\n");
                return -EINVAL;
        }

        return 0;
}
#endif

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                        unsigned long addr)
{
        unsigned long ip = rec->ip;
        struct ppc_inst old, new;

        /*
         * If the calling address is more than 24 bits away,
         * then we had to use a trampoline to make the call.
         * Otherwise just update the call site.
         */
        if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
                /* within range */
                old = ftrace_call_replace(ip, old_addr, 1);
                new = ftrace_call_replace(ip, addr, 1);
                return ftrace_modify_code(ip, old, new);
        } else if (core_kernel_text(ip)) {
                /*
                 * We always patch out of range locations to go to the regs
                 * variant, so there is nothing to do here
                 */
                return 0;
        }

#ifdef CONFIG_MODULES
        /*
         * Out of range jumps are called from modules.
         */
        if (!rec->arch.mod) {
                pr_err("No module loaded\n");
                return -EINVAL;
        }

        return __ftrace_modify_call(rec, old_addr, addr);
#else
        /* We should not get here without modules */
        return -EINVAL;
#endif /* CONFIG_MODULES */
}
#endif

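/*
 * Redirect the tracer callback: rewrite the call instruction at the
 * ftrace_call site inside ftrace_caller() (and, with
 * DYNAMIC_FTRACE_WITH_REGS, at ftrace_regs_call inside ftrace_regs_caller())
 * so that it branches to @func.
 */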
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        struct ppc_inst old, new;
        int ret;

        old = ppc_inst_read((u32 *)&ftrace_call);
        new = ftrace_call_replace(ip, (unsigned long)func, 1);
        ret = ftrace_modify_code(ip, old, new);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /* Also update the regs callback function */
        if (!ret) {
                ip = (unsigned long)(&ftrace_regs_call);
                old = ppc_inst_read((u32 *)&ftrace_regs_call);
                new = ftrace_call_replace(ip, (unsigned long)func, 1);
                ret = ftrace_modify_code(ip, old, new);
        }
#endif

        return ret;
}

/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
void arch_ftrace_update_code(int command)
{
        ftrace_modify_all_code(command);
}

#ifdef CONFIG_PPC64
#define PACATOC offsetof(struct paca_struct, kernel_toc)

extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];

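/*
 * Populate the two trampoline stubs placed after .text (ftrace_tramp_text)
 * and .init.text (ftrace_tramp_init). Each stub loads the kernel TOC from
 * the paca, materialises the address of ftrace_[regs_]caller() as
 * kernel_toc + reladdr by patching the addis/addi immediates with
 * PPC_HA()/PPC_LO() of the offset, and branches there via mtctr/bctr. The
 * offset must fit in 32 bits; both stubs are then registered in
 * ftrace_tramps[].
 */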
int __init ftrace_dyn_arch_init(void)
{
        int i;
        unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
        u32 stub_insns[] = {
                0xe98d0000 | PACATOC,   /* ld      r12,PACATOC(r13)     */
                0x3d8c0000,             /* addis   r12,r12,<high>       */
                0x398c0000,             /* addi    r12,r12,<low>        */
                0x7d8903a6,             /* mtctr   r12                  */
                0x4e800420,             /* bctr                         */
        };
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
        unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
#endif
        long reladdr = addr - kernel_toc_addr();

        if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
                pr_err("Address of %ps out of range of kernel_toc.\n",
                                (void *)addr);
                return -1;
        }

        for (i = 0; i < 2; i++) {
                memcpy(tramp[i], stub_insns, sizeof(stub_insns));
                tramp[i][1] |= PPC_HA(reladdr);
                tramp[i][2] |= PPC_LO(reladdr);
                add_ftrace_tramp((unsigned long)tramp[i]);
        }

        return 0;
}
#else
int __init ftrace_dyn_arch_init(void)
{
        return 0;
}
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

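/*
 * The function graph tracer is switched on and off by retargeting the
 * branch at the ftrace_graph_call site: point it at ftrace_graph_caller()
 * to enable graph tracing, or back at the fall-through ftrace_graph_stub to
 * disable it. Both use ftrace_call_replace() with link=0, i.e. a plain 'b',
 * and ftrace_modify_code() verifies the old branch before patching.
 */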
int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        unsigned long addr = (unsigned long)(&ftrace_graph_caller);
        unsigned long stub = (unsigned long)(&ftrace_graph_stub);
        struct ppc_inst old, new;

        old = ftrace_call_replace(ip, stub, 0);
        new = ftrace_call_replace(ip, addr, 0);

        return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        unsigned long addr = (unsigned long)(&ftrace_graph_caller);
        unsigned long stub = (unsigned long)(&ftrace_graph_stub);
        struct ppc_inst old, new;

        old = ftrace_call_replace(ip, addr, 0);
        new = ftrace_call_replace(ip, stub, 0);

        return ftrace_modify_code(ip, old, new);
}

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info. Return the address we want to divert to.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
                                                unsigned long sp)
{
        unsigned long return_hooker;

        if (unlikely(ftrace_graph_is_dead()))
                goto out;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;

        return_hooker = ppc_function_entry(return_to_handler);

        if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
                parent = return_hooker;
out:
        return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef PPC64_ELF_ABI_v1
 980#ifdef PPC64_ELF_ABI_v1
 981char *arch_ftrace_match_adjust(char *str, const char *search)
 982{
 983        if (str[0] == '.' && search[0] != '.')
 984                return str + 1;
 985        else
 986                return str;
 987}
 988#endif /* PPC64_ELF_ABI_v1 */
 989