linux/arch/arm/kernel/traps.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/arch/arm/kernel/traps.c
   4 *
   5 *  Copyright (C) 1995-2009 Russell King
   6 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
   7 *
   8 *  'traps.c' handles hardware exceptions after we have saved some state
   9 *  in 'linux/arch/arm/kernel/entry-armv.S'.  Mostly a debugging aid, but
  10 *  will probably kill the offending process.
  11 */
  12#include <linux/signal.h>
  13#include <linux/personality.h>
  14#include <linux/kallsyms.h>
  15#include <linux/spinlock.h>
  16#include <linux/uaccess.h>
  17#include <linux/hardirq.h>
  18#include <linux/kdebug.h>
  19#include <linux/kprobes.h>
  20#include <linux/module.h>
  21#include <linux/kexec.h>
  22#include <linux/bug.h>
  23#include <linux/delay.h>
  24#include <linux/init.h>
  25#include <linux/sched/signal.h>
  26#include <linux/sched/debug.h>
  27#include <linux/sched/task_stack.h>
  28#include <linux/irq.h>
  29
  30#include <linux/atomic.h>
  31#include <asm/cacheflush.h>
  32#include <asm/exception.h>
  33#include <asm/unistd.h>
  34#include <asm/traps.h>
  35#include <asm/ptrace.h>
  36#include <asm/unwind.h>
  37#include <asm/tls.h>
  38#include <asm/system_misc.h>
  39#include <asm/opcodes.h>
  40
  41
  42static const char *handler[] = {
  43        "prefetch abort",
  44        "data abort",
  45        "address exception",
  46        "interrupt",
  47        "undefined instruction",
  48};
  49
  50void *vectors_page;
  51
  52#ifdef CONFIG_DEBUG_USER
  53unsigned int user_debug;
  54
  55static int __init user_debug_setup(char *str)
  56{
  57        get_option(&str, &user_debug);
  58        return 1;
  59}
  60__setup("user_debug=", user_debug_setup);
  61#endif
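
/*
 * A minimal sketch of how this is used: user_debug= takes a bitmask of the
 * UDBG_* values from <asm/system_misc.h> on the kernel command line
 * (assuming the five bits UDBG_UNDEFINED, UDBG_SYSCALL, UDBG_BADABORT,
 * UDBG_SEGV and UDBG_BUS):
 *
 *	user_debug=1	report undefined instructions from user space
 *	user_debug=31	enable all user debug reports
 */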
  62
  63static void dump_mem(const char *, const char *, unsigned long, unsigned long);
  64
  65void dump_backtrace_entry(unsigned long where, unsigned long from,
  66                          unsigned long frame, const char *loglvl)
  67{
  68        unsigned long end = frame + 4 + sizeof(struct pt_regs);
  69
  70#ifdef CONFIG_KALLSYMS
  71        printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
  72                loglvl, where, (void *)where, from, (void *)from);
  73#else
  74        printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
  75                loglvl, where, from);
  76#endif
  77
  78        if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
  79                dump_mem(loglvl, "Exception stack", frame + 4, end);
  80}
  81
  82void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
  83{
  84        char str[80], *p;
  85        unsigned int x;
  86        int reg;
  87
  88        for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
  89                if (instruction & BIT(reg)) {
  90                        p += sprintf(p, " r%d:%08x", reg, *stack--);
  91                        if (++x == 6) {
  92                                x = 0;
  93                                p = str;
  94                                printk("%s%s\n", loglvl, str);
  95                        }
  96                }
  97        }
  98        if (p != str)
  99                printk("%s%s\n", loglvl, str);
 100}
 101
 102#ifndef CONFIG_ARM_UNWIND
 103/*
 104 * Stack pointers should always be within the kernel's view of
 105 * physical memory.  If it is not, then we can't dump out any
 106 * information relating to the stack.
 107 */
 108static int verify_stack(unsigned long sp)
 109{
 110        if (sp < PAGE_OFFSET ||
 111            (sp > (unsigned long)high_memory && high_memory != NULL))
 112                return -EFAULT;
 113
 114        return 0;
 115}
 116#endif
 117
 118/*
 119 * Dump out the contents of some memory nicely...
 120 */
 121static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 122                     unsigned long top)
 123{
 124        unsigned long first;
 125        mm_segment_t fs;
 126        int i;
 127
 128        /*
 129         * We need to switch the addressing limit to KERNEL_DS so that
 130         * __get_user can safely read from kernel space.  Note that we
 131         * now dump the code first, just in case the backtrace kills us.
 132         */
 133        fs = get_fs();
 134        set_fs(KERNEL_DS);
 135
 136        printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
 137
 138        for (first = bottom & ~31; first < top; first += 32) {
 139                unsigned long p;
 140                char str[sizeof(" 12345678") * 8 + 1];
 141
 142                memset(str, ' ', sizeof(str));
 143                str[sizeof(str) - 1] = '\0';
 144
 145                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
 146                        if (p >= bottom && p < top) {
 147                                unsigned long val;
 148                                if (__get_user(val, (unsigned long *)p) == 0)
 149                                        sprintf(str + i * 9, " %08lx", val);
 150                                else
 151                                        sprintf(str + i * 9, " ????????");
 152                        }
 153                }
 154                printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
 155        }
 156
 157        set_fs(fs);
 158}
 159
 160static void __dump_instr(const char *lvl, struct pt_regs *regs)
 161{
 162        unsigned long addr = instruction_pointer(regs);
 163        const int thumb = thumb_mode(regs);
 164        const int width = thumb ? 4 : 8;
 165        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
 166        int i;
 167
 168        /*
 169         * Note that we now dump the code first, just in case the backtrace
 170         * kills us.
 171         */
 172
 173        for (i = -4; i < 1 + !!thumb; i++) {
 174                unsigned int val, bad;
 175
 176                if (thumb)
 177                        bad = get_user(val, &((u16 *)addr)[i]);
 178                else
 179                        bad = get_user(val, &((u32 *)addr)[i]);
 180
 181                if (!bad)
 182                        p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
 183                                        width, val);
 184                else {
 185                        p += sprintf(p, "bad PC value");
 186                        break;
 187                }
 188        }
 189        printk("%sCode: %s\n", lvl, str);
 190}
 191
 192static void dump_instr(const char *lvl, struct pt_regs *regs)
 193{
 194        mm_segment_t fs;
 195
 196        if (!user_mode(regs)) {
 197                fs = get_fs();
 198                set_fs(KERNEL_DS);
 199                __dump_instr(lvl, regs);
 200                set_fs(fs);
 201        } else {
 202                __dump_instr(lvl, regs);
 203        }
 204}
 205
 206#ifdef CONFIG_ARM_UNWIND
 207static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 208                                  const char *loglvl)
 209{
 210        unwind_backtrace(regs, tsk, loglvl);
 211}
 212#else
 213static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 214                           const char *loglvl)
 215{
 216        unsigned int fp, mode;
 217        int ok = 1;
 218
 219        printk("%sBacktrace: ", loglvl);
 220
 221        if (!tsk)
 222                tsk = current;
 223
 224        if (regs) {
 225                fp = frame_pointer(regs);
 226                mode = processor_mode(regs);
 227        } else if (tsk != current) {
 228                fp = thread_saved_fp(tsk);
 229                mode = 0x10;
 230        } else {
 231                asm("mov %0, fp" : "=r" (fp) : : "cc");
 232                mode = 0x10;
 233        }
 234
 235        if (!fp) {
 236                pr_cont("no frame pointer");
 237                ok = 0;
 238        } else if (verify_stack(fp)) {
 239                pr_cont("invalid frame pointer 0x%08x", fp);
 240                ok = 0;
 241        } else if (fp < (unsigned long)end_of_stack(tsk))
 242                pr_cont("frame pointer underflow");
 243        pr_cont("\n");
 244
 245        if (ok)
 246                c_backtrace(fp, mode, loglvl);
 247}
 248#endif
 249
 250void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 251{
 252        dump_backtrace(NULL, tsk, loglvl);
 253        barrier();
 254}
 255
 256#ifdef CONFIG_PREEMPT
 257#define S_PREEMPT " PREEMPT"
 258#elif defined(CONFIG_PREEMPT_RT)
 259#define S_PREEMPT " PREEMPT_RT"
 260#else
 261#define S_PREEMPT ""
 262#endif
 263#ifdef CONFIG_SMP
 264#define S_SMP " SMP"
 265#else
 266#define S_SMP ""
 267#endif
 268#ifdef CONFIG_THUMB2_KERNEL
 269#define S_ISA " THUMB2"
 270#else
 271#define S_ISA " ARM"
 272#endif
 273
 274static int __die(const char *str, int err, struct pt_regs *regs)
 275{
 276        struct task_struct *tsk = current;
 277        static int die_counter;
 278        int ret;
 279
 280        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
 281                 str, err, ++die_counter);
 282
 283        /* trap and error numbers are mostly meaningless on ARM */
 284        ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
 285        if (ret == NOTIFY_STOP)
 286                return 1;
 287
 288        print_modules();
 289        __show_regs(regs);
 290        __show_regs_alloc_free(regs);
 291        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
 292                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
 293
 294        if (!user_mode(regs) || in_interrupt()) {
 295                dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
 296                         THREAD_SIZE + (unsigned long)task_stack_page(tsk));
 297                dump_backtrace(regs, tsk, KERN_EMERG);
 298                dump_instr(KERN_EMERG, regs);
 299        }
 300
 301        return 0;
 302}
 303
 304static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 305static int die_owner = -1;
 306static unsigned int die_nest_count;
 307
 308static unsigned long oops_begin(void)
 309{
 310        int cpu;
 311        unsigned long flags;
 312
 313        oops_enter();
 314
 315        /* racy, but better than risking deadlock. */
 316        raw_local_irq_save(flags);
 317        cpu = smp_processor_id();
 318        if (!arch_spin_trylock(&die_lock)) {
 319                if (cpu == die_owner)
 320                        /* nested oops. should stop eventually */;
 321                else
 322                        arch_spin_lock(&die_lock);
 323        }
 324        die_nest_count++;
 325        die_owner = cpu;
 326        console_verbose();
 327        bust_spinlocks(1);
 328        return flags;
 329}
 330
 331static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 332{
 333        if (regs && kexec_should_crash(current))
 334                crash_kexec(regs);
 335
 336        bust_spinlocks(0);
 337        die_owner = -1;
 338        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 339        die_nest_count--;
 340        if (!die_nest_count)
 341                /* Nest count has reached zero; release the lock. */
 342                arch_spin_unlock(&die_lock);
 343        raw_local_irq_restore(flags);
 344        oops_exit();
 345
 346        if (in_interrupt())
 347                panic("Fatal exception in interrupt");
 348        if (panic_on_oops)
 349                panic("Fatal exception");
 350        if (signr)
 351                do_exit(signr);
 352}
 353
 354/*
 355 * This function is protected against re-entrancy.
 356 */
 357void die(const char *str, struct pt_regs *regs, int err)
 358{
 359        enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
 360        unsigned long flags = oops_begin();
 361        int sig = SIGSEGV;
 362
 363        if (!user_mode(regs))
 364                bug_type = report_bug(regs->ARM_pc, regs);
 365        if (bug_type != BUG_TRAP_TYPE_NONE)
 366                str = "Oops - BUG";
 367
 368        if (__die(str, err, regs))
 369                sig = 0;
 370
 371        oops_end(flags, regs, sig);
 372}
 373
 374void arm_notify_die(const char *str, struct pt_regs *regs,
 375                int signo, int si_code, void __user *addr,
 376                unsigned long err, unsigned long trap)
 377{
 378        if (user_mode(regs)) {
 379                current->thread.error_code = err;
 380                current->thread.trap_no = trap;
 381
 382                force_sig_fault(signo, si_code, addr);
 383        } else {
 384                die(str, regs, err);
 385        }
 386}
 387
 388#ifdef CONFIG_GENERIC_BUG
 389
 390int is_valid_bugaddr(unsigned long pc)
 391{
 392#ifdef CONFIG_THUMB2_KERNEL
 393        u16 bkpt;
 394        u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
 395#else
 396        u32 bkpt;
 397        u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
 398#endif
 399
 400        if (get_kernel_nofault(bkpt, (void *)pc))
 401                return 0;
 402
 403        return bkpt == insn;
 404}
 405
 406#endif
 407
 408static LIST_HEAD(undef_hook);
 409static DEFINE_RAW_SPINLOCK(undef_lock);
 410
 411void register_undef_hook(struct undef_hook *hook)
 412{
 413        unsigned long flags;
 414
 415        raw_spin_lock_irqsave(&undef_lock, flags);
 416        list_add(&hook->node, &undef_hook);
 417        raw_spin_unlock_irqrestore(&undef_lock, flags);
 418}
 419
 420void unregister_undef_hook(struct undef_hook *hook)
 421{
 422        unsigned long flags;
 423
 424        raw_spin_lock_irqsave(&undef_lock, flags);
 425        list_del(&hook->node);
 426        raw_spin_unlock_irqrestore(&undef_lock, flags);
 427}
 428
 429static nokprobe_inline
 430int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 431{
 432        struct undef_hook *hook;
 433        unsigned long flags;
 434        int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
 435
 436        raw_spin_lock_irqsave(&undef_lock, flags);
 437        list_for_each_entry(hook, &undef_hook, node)
 438                if ((instr & hook->instr_mask) == hook->instr_val &&
 439                    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
 440                        fn = hook->fn;
 441        raw_spin_unlock_irqrestore(&undef_lock, flags);
 442
 443        return fn ? fn(regs, instr) : 1;
 444}
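
/*
 * A minimal sketch of a client of this interface (arm_mrc_hook further down
 * is a real in-tree example): a hook fires when (instr & instr_mask) ==
 * instr_val and (regs->ARM_cpsr & cpsr_mask) == cpsr_val, and its fn must
 * return 0 once the instruction has been handled:
 *
 *	static int my_handler(struct pt_regs *regs, unsigned int instr);
 *
 *	static struct undef_hook my_hook = {	// names/values illustrative
 *		.instr_mask	= 0x0fffffff,	// ignore the condition field
 *		.instr_val	= 0x07f001f8,	// some undefined encoding
 *		.cpsr_mask	= PSR_T_BIT,	// ARM state only
 *		.cpsr_val	= 0,
 *		.fn		= my_handler,
 *	};
 *
 *	register_undef_hook(&my_hook);
 */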
 445
 446asmlinkage void do_undefinstr(struct pt_regs *regs)
 447{
 448        unsigned int instr;
 449        void __user *pc;
 450
 451        pc = (void __user *)instruction_pointer(regs);
 452
 453        if (processor_mode(regs) == SVC_MODE) {
 454#ifdef CONFIG_THUMB2_KERNEL
 455                if (thumb_mode(regs)) {
 456                        instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
 457                        if (is_wide_instruction(instr)) {
 458                                u16 inst2;
 459                                inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
 460                                instr = __opcode_thumb32_compose(instr, inst2);
 461                        }
 462                } else
 463#endif
 464                        instr = __mem_to_opcode_arm(*(u32 *) pc);
 465        } else if (thumb_mode(regs)) {
 466                if (get_user(instr, (u16 __user *)pc))
 467                        goto die_sig;
 468                instr = __mem_to_opcode_thumb16(instr);
 469                if (is_wide_instruction(instr)) {
 470                        unsigned int instr2;
 471                        if (get_user(instr2, (u16 __user *)pc+1))
 472                                goto die_sig;
 473                        instr2 = __mem_to_opcode_thumb16(instr2);
 474                        instr = __opcode_thumb32_compose(instr, instr2);
 475                }
 476        } else {
 477                if (get_user(instr, (u32 __user *)pc))
 478                        goto die_sig;
 479                instr = __mem_to_opcode_arm(instr);
 480        }
 481
 482        if (call_undef_hook(regs, instr) == 0)
 483                return;
 484
 485die_sig:
 486#ifdef CONFIG_DEBUG_USER
 487        if (user_debug & UDBG_UNDEFINED) {
 488                pr_info("%s (%d): undefined instruction: pc=%p\n",
 489                        current->comm, task_pid_nr(current), pc);
 490                __show_regs(regs);
 491                dump_instr(KERN_INFO, regs);
 492        }
 493#endif
 494        arm_notify_die("Oops - undefined instruction", regs,
 495                       SIGILL, ILL_ILLOPC, pc, 0, 6);
 496}
 497NOKPROBE_SYMBOL(do_undefinstr)
 498
 499/*
 500 * Handle FIQ similarly to NMI on x86 systems.
 501 *
 502 * The runtime environment for NMIs is extremely restrictive
 503 * (NMIs can preempt critical sections, meaning almost all locking is
 504 * forbidden), so this default FIQ handling must only be used in
 505 * circumstances where non-maskability improves robustness, such as
 506 * watchdog or debug logic.
 507 *
 508 * This handler is not appropriate for general purpose use in driver or
 509 * platform code and can be overridden using set_fiq_handler.
 510 */
 511asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
 512{
 513        struct pt_regs *old_regs = set_irq_regs(regs);
 514
 515        nmi_enter();
 516
 517        /* nop. FIQ handlers for special arch/arm features can be added here. */
 518
 519        nmi_exit();
 520
 521        set_irq_regs(old_regs);
 522}
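
/*
 * A minimal sketch of how platform code would install its own FIQ handler
 * in place of this default (the symbol names are illustrative only; the API
 * is the claim_fiq()/set_fiq_handler()/enable_fiq() interface from
 * <asm/fiq.h>):
 *
 *	static struct fiq_handler my_fh = { .name = "my-device" };
 *	extern unsigned char my_fiq_start, my_fiq_end;	// hand-written FIQ asm
 *
 *	if (!claim_fiq(&my_fh)) {
 *		set_fiq_handler(&my_fiq_start, &my_fiq_end - &my_fiq_start);
 *		enable_fiq(my_fiq_number);
 *	}
 */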
 523
 524/*
 525 * bad_mode handles the impossible case in the vectors.  If you see one of
 526 * these, then it's extremely serious, and could mean you have buggy hardware.
 527 * It never returns, and never tries to sync.  We hope that we can at least
 528 * dump out some state information...
 529 */
 530asmlinkage void bad_mode(struct pt_regs *regs, int reason)
 531{
 532        console_verbose();
 533
 534        pr_crit("Bad mode in %s handler detected\n", handler[reason]);
 535
 536        die("Oops - bad mode", regs, 0);
 537        local_irq_disable();
 538        panic("bad mode");
 539}
 540
 541static int bad_syscall(int n, struct pt_regs *regs)
 542{
 543        if ((current->personality & PER_MASK) != PER_LINUX) {
 544                send_sig(SIGSEGV, current, 1);
 545                return regs->ARM_r0;
 546        }
 547
 548#ifdef CONFIG_DEBUG_USER
 549        if (user_debug & UDBG_SYSCALL) {
 550                pr_err("[%d] %s: obsolete system call %08x.\n",
 551                        task_pid_nr(current), current->comm, n);
 552                dump_instr(KERN_ERR, regs);
 553        }
 554#endif
 555
 556        arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
 557                       (void __user *)instruction_pointer(regs) -
 558                         (thumb_mode(regs) ? 2 : 4),
 559                       n, 0);
 560
 561        return regs->ARM_r0;
 562}
 563
 564static inline int
 565__do_cache_op(unsigned long start, unsigned long end)
 566{
 567        int ret;
 568
 569        do {
 570                unsigned long chunk = min(PAGE_SIZE, end - start);
 571
 572                if (fatal_signal_pending(current))
 573                        return 0;
 574
 575                ret = flush_icache_user_range(start, start + chunk);
 576                if (ret)
 577                        return ret;
 578
 579                cond_resched();
 580                start += chunk;
 581        } while (start < end);
 582
 583        return 0;
 584}
 585
 586static inline int
 587do_cache_op(unsigned long start, unsigned long end, int flags)
 588{
 589        if (end < start || flags)
 590                return -EINVAL;
 591
 592        if (!access_ok(start, end - start))
 593                return -EFAULT;
 594
 595        return __do_cache_op(start, end);
 596}
 597
 598/*
 599 * Handle all unrecognised system calls.
 600 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 601 */
 602#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
 603asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 604{
 605        if ((no >> 16) != (__ARM_NR_BASE>> 16))
 606                return bad_syscall(no, regs);
 607
 608        switch (no & 0xffff) {
 609        case 0: /* branch through 0 */
 610                arm_notify_die("branch through zero", regs,
 611                               SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
 612                return 0;
 613
 614        case NR(breakpoint): /* SWI BREAK_POINT */
 615                regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
 616                ptrace_break(regs);
 617                return regs->ARM_r0;
 618
 619        /*
 620         * Flush a region from virtual address 'r0' to virtual address 'r1'
 621         * _exclusive_.  There is no alignment requirement on either address;
 622         * user space does not need to know the hardware cache layout.
 623         *
 624         * r2 contains flags.  It should ALWAYS be passed as ZERO until it
 625         * is defined to be something else.  For now we ignore it, but may
 626         * the fires of hell burn in your belly if you break this rule. ;)
 627         *
 628         * (at a later date, we may want to allow this call to not flush
 629         * various aspects of the cache.  Passing '0' will guarantee that
 630         * everything necessary gets flushed to maintain consistency in
 631         * the specified region).
 632         */
 633        case NR(cacheflush):
 634                return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
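
        /*
         * A minimal userspace sketch of this call, e.g. to make freshly
         * written JIT code visible to the instruction stream (assuming an
         * EABI userland, where __ARM_NR_cacheflush from <asm/unistd.h>
         * works out to 0x0f0002):
         *
         *	#include <unistd.h>
         *	#include <sys/syscall.h>
         *	#include <asm/unistd.h>
         *
         *	static void sync_icache(char *start, char *end)
         *	{
         *		syscall(__ARM_NR_cacheflush, start, end, 0);
         *	}
         */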
 635
 636        case NR(usr26):
 637                if (!(elf_hwcap & HWCAP_26BIT))
 638                        break;
 639                regs->ARM_cpsr &= ~MODE32_BIT;
 640                return regs->ARM_r0;
 641
 642        case NR(usr32):
 643                if (!(elf_hwcap & HWCAP_26BIT))
 644                        break;
 645                regs->ARM_cpsr |= MODE32_BIT;
 646                return regs->ARM_r0;
 647
 648        case NR(set_tls):
 649                set_tls(regs->ARM_r0);
 650                return 0;
 651
 652        case NR(get_tls):
 653                return current_thread_info()->tp_value[0];
 654
 655        default:
 656                /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 657                   if not implemented, rather than raising SIGILL.  This
 658                   way the calling program can gracefully determine whether
 659                   a feature is supported.  */
 660                if ((no & 0xffff) <= 0x7ff)
 661                        return -ENOSYS;
 662                break;
 663        }
 664#ifdef CONFIG_DEBUG_USER
 665        /*
 666         * Experience shows that these seem to indicate that
 667         * something catastrophic has happened.
 668         */
 669        if (user_debug & UDBG_SYSCALL) {
 670                pr_err("[%d] %s: arm syscall %d\n",
 671                       task_pid_nr(current), current->comm, no);
 672                dump_instr(KERN_ERR, regs);
 673                if (user_mode(regs)) {
 674                        __show_regs(regs);
 675                        c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR);
 676                }
 677        }
 678#endif
 679        arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
 680                       (void __user *)instruction_pointer(regs) -
 681                         (thumb_mode(regs) ? 2 : 4),
 682                       no, 0);
 683        return 0;
 684}
 685
 686#ifdef CONFIG_TLS_REG_EMUL
 687
 688/*
 689 * We might be running on an ARMv6+ processor which should have the TLS
 690 * register but for some reason we can't use it, or maybe an SMP system
 691 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 692 * that in existence) and therefore access to that register must be
 693 * emulated.
 694 */
 695
 696static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
 697{
 698        int reg = (instr >> 12) & 15;
 699        if (reg == 15)
 700                return 1;
 701        regs->uregs[reg] = current_thread_info()->tp_value[0];
 702        regs->ARM_pc += 4;
 703        return 0;
 704}
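
/*
 * A minimal sketch of the userspace sequence being emulated here: a
 * TPIDRURO read that traps as undefined on CPUs without a usable TLS
 * register and is fixed up by get_tp_trap() above:
 *
 *	static inline unsigned long read_tp(void)
 *	{
 *		unsigned long tp;
 *
 *		asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
 *		return tp;
 *	}
 */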
 705
 706static struct undef_hook arm_mrc_hook = {
 707        .instr_mask     = 0x0fff0fff,
 708        .instr_val      = 0x0e1d0f70,
 709        .cpsr_mask      = PSR_T_BIT,
 710        .cpsr_val       = 0,
 711        .fn             = get_tp_trap,
 712};
 713
 714static int __init arm_mrc_hook_init(void)
 715{
 716        register_undef_hook(&arm_mrc_hook);
 717        return 0;
 718}
 719
 720late_initcall(arm_mrc_hook_init);
 721
 722#endif
 723
 724/*
 725 * A data abort trap was taken, but we did not handle the instruction.
 726 * Try to abort the user program, or panic if it was the kernel.
 727 */
 728asmlinkage void
 729baddataabort(int code, unsigned long instr, struct pt_regs *regs)
 730{
 731        unsigned long addr = instruction_pointer(regs);
 732
 733#ifdef CONFIG_DEBUG_USER
 734        if (user_debug & UDBG_BADABORT) {
 735                pr_err("8<--- cut here ---\n");
 736                pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
 737                       task_pid_nr(current), current->comm, code, instr);
 738                dump_instr(KERN_ERR, regs);
 739                show_pte(KERN_ERR, current->mm, addr);
 740        }
 741#endif
 742
 743        arm_notify_die("unknown data abort code", regs,
 744                       SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
 745}
 746
 747void __readwrite_bug(const char *fn)
 748{
 749        pr_err("%s called, but not implemented\n", fn);
 750        BUG();
 751}
 752EXPORT_SYMBOL(__readwrite_bug);
 753
 754void __pte_error(const char *file, int line, pte_t pte)
 755{
 756        pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
 757}
 758
 759void __pmd_error(const char *file, int line, pmd_t pmd)
 760{
 761        pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
 762}
 763
 764void __pgd_error(const char *file, int line, pgd_t pgd)
 765{
 766        pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
 767}
 768
 769asmlinkage void __div0(void)
 770{
 771        pr_err("Division by zero in kernel.\n");
 772        dump_stack();
 773}
 774EXPORT_SYMBOL(__div0);
 775
 776void abort(void)
 777{
 778        BUG();
 779
 780        /* if that doesn't kill us, halt */
 781        panic("Oops failed to kill thread");
 782}
 783
 784void __init trap_init(void)
 785{
 786        return;
 787}
 788
 789#ifdef CONFIG_KUSER_HELPERS
 790static void __init kuser_init(void *vectors)
 791{
 792        extern char __kuser_helper_start[], __kuser_helper_end[];
 793        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
 794
 795        memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
 796
 797        /*
 798         * vectors + 0xfe0 = __kuser_get_tls
 799         * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
 800         */
 801        if (tls_emu || has_tls_reg)
 802                memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
 803}
 804#else
 805static inline void __init kuser_init(void *vectors)
 806{
 807}
 808#endif
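
/*
 * A minimal userspace sketch of the resulting kuser helper ABI (assuming
 * CONFIG_KUSER_HELPERS is enabled, so the vectors page is mapped at
 * 0xffff0000 and __kuser_get_tls lives at 0xffff0fe0):
 *
 *	typedef void *(__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = __kuser_get_tls();
 */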
 809
 810void __init early_trap_init(void *vectors_base)
 811{
 812#ifndef CONFIG_CPU_V7M
 813        unsigned long vectors = (unsigned long)vectors_base;
 814        extern char __stubs_start[], __stubs_end[];
 815        extern char __vectors_start[], __vectors_end[];
 816        unsigned i;
 817
 818        vectors_page = vectors_base;
 819
 820        /*
 821         * Poison the vectors page with an undefined instruction.  This
 822         * instruction is chosen to be undefined for both ARM and Thumb
 823         * ISAs.  The Thumb version is an undefined instruction with a
 824         * branch back to the undefined instruction.
 825         */
 826        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
 827                ((u32 *)vectors_base)[i] = 0xe7fddef1;
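
        /*
         * (For reference, assuming a little-endian instruction stream:
         * 0xe7fddef1 is architecturally undefined when decoded as ARM, and
         * as Thumb it reads as 0xdef1, a permanently undefined UDF, followed
         * by 0xe7fd, a branch back to that UDF.)
         */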
 828
 829        /*
 830         * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
 831         * into the vector page, mapped at 0xffff0000, and ensure these
 832         * are visible to the instruction stream.
 833         */
 834        memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
 835        memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
 836
 837        kuser_init(vectors_base);
 838
 839        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
 840#else /* ifndef CONFIG_CPU_V7M */
 841        /*
 842         * On V7-M there is no need to copy the vector table to a dedicated
 843         * memory area: the address is configurable, so a table in the kernel
 844         * image can be used directly.
 845         */
 846#endif
 847}
 848