linux/arch/parisc/kernel/traps.c
/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"       /* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
                          /*  dumped to the console via printk)          */

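/*
 * Lock used by the software dirty/accessed-bit trap handlers in entry.S;
 * only needed when running SMP or with spinlock debugging enabled.
 */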
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs);

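/*
 * Render the low 'nbits' bits of 'x' as an ASCII '0'/'1' string in 'buf',
 * most significant bit first; used for the PSW and FPSR dumps below.
 */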
static int printbinary(char *buf, unsigned long x, int nbits)
{
        unsigned long mask = 1UL << (nbits - 1);
        while (mask != 0) {
                *buf++ = (mask & x ? '1' : '0');
                mask >>= 1;
        }
        *buf = '\0';

        return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"  /* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)        \
        printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",      \
                lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],             \
                (r)[(x)+2], (r)[(x)+3])

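/*
 * Dump the PSW (held in gr[0] of the trap frame) with its bit legend,
 * followed by the 32 general registers.
 */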
static void print_gr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];

        printk("%s\n", level);
        printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
        printbinary(buf, regs->gr[0], 32);
        printk("%sPSW: %s %s\n", level, buf, print_tainted());

        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];
        struct { u32 sw[2]; } s;

        /* FR are 64bit everywhere. Need to use asm to get the content
         * of fpsr/fper1, and we assume that we won't have a FP Identify
         * in our way, otherwise we're screwed.
         * The fldd is used to restore the T-bit if there was one, as the
         * store clears it anyway.
         * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
        asm volatile ("fstd %%fr0,0(%1) \n\t"
                      "fldd 0(%1),%%fr0 \n\t"
                      : "=m" (s) : "r" (&s) : "r0");

        printk("%s\n", level);
        printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
        printbinary(buf, s.sw[0], 32);
        printk("%sFPSR: %s\n", level, buf);
        printk("%sFPER1: %08x\n", level, s.sw[1]);

        /* here we'll print fr0 again, tho it'll be meaningless */
        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

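/*
 * Full register dump: user-mode state is logged at KERN_DEBUG, kernel-mode
 * state at KERN_CRIT; for kernel faults a backtrace is appended.
 */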
void show_regs(struct pt_regs *regs)
{
        int i, user;
        char *level;
        unsigned long cr30, cr31;

        user = user_mode(regs);
        level = user ? KERN_DEBUG : KERN_CRIT;

        print_gr(level, regs);

        for (i = 0; i < 8; i += 4)
                PRINTREGS(level, regs->sr, "sr", RFMT, i);

        if (user)
                print_fr(level, regs);

        cr30 = mfctl(30);
        cr31 = mfctl(31);
        printk("%s\n", level);
        printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
               level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
        printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
               level, regs->iir, regs->isr, regs->ior);
        printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
               level, current_thread_info()->cpu, cr30, cr31);
        printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

        if (user) {
                printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
                printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
                printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
        } else {
                printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
                printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
                printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

                parisc_show_stack(current, NULL, regs);
        }
}


void dump_stack(void)
{
        show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

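/*
 * Print up to 16 backtrace entries, walking frames with the unwinder and
 * skipping any whose address does not resolve to kernel text.
 */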
static void do_show_stack(struct unwind_frame_info *info)
{
        int i = 1;

        printk(KERN_CRIT "Backtrace:\n");
        while (i <= 16) {
                if (unwind_once(info) < 0 || info->ip == 0)
                        break;

                if (__kernel_text_address(info->ip)) {
                        printk(KERN_CRIT " [<" RFMT ">] %pS\n",
                                info->ip, (void *) info->ip);
                        i++;
                }
        }
        printk(KERN_CRIT "\n");
}

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs)
{
        struct unwind_frame_info info;
        struct task_struct *t;

        t = task ? task : current;
        if (regs) {
                unwind_frame_init(&info, t, regs);
                goto show_stack;
        }

        if (t == current) {
                unsigned long sp;

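                /*
                 * There are no saved pt_regs for the running task, so
                 * fabricate a frame from the live stack pointer, this spot
                 * in the code and our return address to give the unwinder
                 * a starting point.
                 */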
HERE:
                asm volatile ("copy %%r30, %0" : "=r"(sp));
                {
                        struct pt_regs r;

                        memset(&r, 0, sizeof(struct pt_regs));
                        r.iaoq[0] = (unsigned long)&&HERE;
                        r.gr[2] = (unsigned long)__builtin_return_address(0);
                        r.gr[30] = sp;

                        unwind_frame_init(&info, current, &r);
                }
        } else {
                unwind_frame_init_from_blocked_task(&info, t);
        }

show_stack:
        do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
        return parisc_show_stack(t, sp, NULL);
}

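/*
 * handle_break() only reports a BUG when the trapping instruction really is
 * the BUG break, so every address is considered a valid BUG address here.
 */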
int is_valid_bugaddr(unsigned long iaoq)
{
        return 1;
}

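/*
 * Report a fault.  For user mode this only logs the fault (when
 * PRINT_USER_FAULTS is enabled) and returns; for kernel mode it prints an
 * oops, taints the kernel and terminates the task, or panics if the fault
 * happened in interrupt context or panic_on_oops is set.
 */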
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
        if (user_mode(regs)) {
                if (err == 0)
                        return; /* STFU */

                printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
                /* XXX for debugging only */
                show_regs(regs);
#endif
                return;
        }

        oops_in_progress = 1;

        oops_enter();

        /* Amuse the user in a SPARC fashion */
        if (err) printk(KERN_CRIT
                        "      _______________________________ \n"
                        "     < Your System ate a SPARC! Gah! >\n"
                        "      ------------------------------- \n"
                        "             \\   ^__^\n"
                        "                 (__)\\       )\\/\\\n"
                        "                  U  ||----w |\n"
                        "                     ||     ||\n");

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* maybe the kernel hasn't booted very far yet and hasn't been able
         * to initialize the serial or STI console. In that case we should
         * re-enable the pdc console, so that the user will be able to
         * identify the problem. */
        if (!console_drivers)
                pdc_console_restart();

        if (err)
                printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
                        current->comm, task_pid_nr(current), str, err);

        /* Wot's wrong wif bein' racy? */
        if (current->thread.flags & PARISC_KERNEL_DEATH) {
                printk(KERN_CRIT "%s() recursion detected.\n", __func__);
                local_irq_enable();
                while (1);
        }
        current->thread.flags |= PARISC_KERNEL_DEATH;

        show_regs(regs);
        dump_stack();
        add_taint(TAINT_DIE);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }

        oops_exit();
        do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
        return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
        struct siginfo si;

        si.si_signo = SIGTRAP;
        si.si_errno = 0;
        si.si_code = wot;
        si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
        force_sig_info(SIGTRAP, &si, current);
}

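/*
 * Break instruction trap (interruption 9).  Kernel-mode BUG()/WARN() breaks
 * are routed through report_bug(); everything else, including gdb's
 * "break 4,8", is turned into a SIGTRAP for the current task.
 */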
static void handle_break(struct pt_regs *regs)
{
        unsigned iir = regs->iir;

        if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
                /* check if a BUG() or WARN() trapped here.  */
                enum bug_trap_type tt;
                tt = report_bug(regs->iaoq[0] & ~3, regs);
                if (tt == BUG_TRAP_TYPE_WARN) {
                        regs->iaoq[0] += 4;
                        regs->iaoq[1] += 4;
                        return; /* return to next instruction when WARN_ON().  */
                }
                die_if_kernel("Unknown kernel breakpoint", regs,
                        (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
        }

#ifdef PRINT_USER_FAULTS
        if (unlikely(iir != GDB_BREAK_INSN)) {
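                /* break im5,im13: im5 lives in bits 0-4 of the iir, im13 in bits 13-25 */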
                printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
                        iir & 31, (iir>>13) & ((1<<13)-1),
                        task_pid_nr(current), current->comm);
                show_regs(regs);
        }
#endif

        /* send standard GDB signal */
        handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
        printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
        show_regs(regs);
}

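/*
 * Hook for low-priority machine check handling; model-specific code may
 * override it, the default merely logs the trap and the registers.
 */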
void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


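/*
 * Fill a pt_regs from the PIM (Processor Internal Memory) image that the
 * HPMC handler saved into hpmc_pim_data, so the usual register dump and
 * unwinder can be used on a machine-check frame.  PA2.0 (pcxu and later)
 * CPUs use the wide PIM layout, older CPUs the narrow one.
 */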
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
    register int i;
    extern unsigned int hpmc_pim_data[];
    struct pdc_hpmc_pim_11 *pim_narrow;
    struct pdc_hpmc_pim_20 *pim_wide;

    if (boot_cpu_data.cpu_type >= pcxu) {

        pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

        /*
         * Note: The following code will probably generate a
         * bunch of truncation error warnings from the compiler.
         * Could be handled with an ifdef, but perhaps there
         * is a better way.
         */

        regs->gr[0] = pim_wide->cr[22];

        for (i = 1; i < 32; i++)
            regs->gr[i] = pim_wide->gr[i];

        for (i = 0; i < 32; i++)
            regs->fr[i] = pim_wide->fr[i];

        for (i = 0; i < 8; i++)
            regs->sr[i] = pim_wide->sr[i];

        regs->iasq[0] = pim_wide->cr[17];
        regs->iasq[1] = pim_wide->iasq_back;
        regs->iaoq[0] = pim_wide->cr[18];
        regs->iaoq[1] = pim_wide->iaoq_back;

        regs->sar  = pim_wide->cr[11];
        regs->iir  = pim_wide->cr[19];
        regs->isr  = pim_wide->cr[20];
        regs->ior  = pim_wide->cr[21];
    }
    else {
        pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

        regs->gr[0] = pim_narrow->cr[22];

        for (i = 1; i < 32; i++)
            regs->gr[i] = pim_narrow->gr[i];

        for (i = 0; i < 32; i++)
            regs->fr[i] = pim_narrow->fr[i];

        for (i = 0; i < 8; i++)
            regs->sr[i] = pim_narrow->sr[i];

        regs->iasq[0] = pim_narrow->cr[17];
        regs->iasq[1] = pim_narrow->iasq_back;
        regs->iaoq[0] = pim_narrow->cr[18];
        regs->iaoq[1] = pim_narrow->iaoq_back;

        regs->sar  = pim_narrow->cr[11];
        regs->iir  = pim_narrow->cr[19];
        regs->isr  = pim_narrow->cr[20];
        regs->ior  = pim_narrow->cr[21];
    }

    /*
     * The following fields only have meaning if we came through
     * another path. So just zero them here.
     */

    regs->ksp = 0;
    regs->kpc = 0;
    regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMCs.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
        static DEFINE_SPINLOCK(terminate_lock);

        oops_in_progress = 1;

        set_eiem(0);
        local_irq_disable();
        spin_lock(&terminate_lock);

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* restart pdc console if necessary */
        if (!console_drivers)
                pdc_console_restart();

        /* Not all paths will gutter the processor... */
        switch(code){

        case 1:
                transfer_pim_to_trap_frame(regs);
                break;

        default:
                /* Fall through */
                break;

        }

        {
                /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
                struct unwind_frame_info info;
                unwind_frame_init(&info, current, regs);
                do_show_stack(&info);
        }

        printk("\n");
        printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
                        msg, code, regs, offset);
        show_regs(regs);

        spin_unlock(&terminate_lock);

        /* put soft power button back under hardware control;
         * if the user had pressed it once at any time, the
         * system will shut down immediately right here. */
        pdc_soft_power_button(0);

        /* Call kernel panic() so reboot timeouts work properly
         * FIXME: This function should be on the list of
         * panic notifiers, and we should call panic
         * directly from the location that we wish.
         * e.g. We should not call panic from
         * parisc_terminate, but rather the other way around.
         * This hack works, prints the panic message twice,
         * and it enables reboot timers!
         */
        panic(msg);
}

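/*
 * Main C entry point for handling interruptions (traps, machine checks and
 * faults).  'code' is the PA-RISC interruption number; anything that is not
 * handled inline falls through to do_page_fault() with the faulting address
 * and space taken from the saved registers.
 */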
void notrace handle_interruption(int code, struct pt_regs *regs)
{
        unsigned long fault_address = 0;
        unsigned long fault_space = 0;
        struct siginfo si;

        if (code == 1)
            pdc_console_restart();  /* switch back to pdc if HPMC */
        else
            local_irq_enable();

        /* Security check:
         * If the priority level is still user, and the
         * faulting space is not equal to the active space
         * then the user is attempting something in a space
         * that does not belong to them. Kill the process.
         *
         * This is normally the situation when the user
         * attempts to jump into the kernel space at the
         * wrong offset, be it at the gateway page or a
         * random location.
         *
         * We cannot normally signal the process because it
         * could *be* on the gateway page, and processes
         * executing on the gateway page can't have signals
         * delivered.
         *
         * We merely readjust the address into the user's
         * space, at a destination address of zero, and
         * allow processing to continue.
         */
        if (((unsigned long)regs->iaoq[0] & 3) &&
            ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
                /* Kill the user process later */
                regs->iaoq[0] = 0 | 3;
                regs->iaoq[1] = regs->iaoq[0] + 4;
                regs->iasq[0] = regs->iasq[1] = regs->sr[7];
                regs->gr[0] &= ~PSW_B;
                return;
        }

#if 0
        printk(KERN_CRIT "Interruption # %d\n", code);
#endif

        switch(code) {

        case  1:
                /* High-priority machine check (HPMC) */

                /* set up a new led state on systems shipped with a LED State panel */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

                parisc_terminate("High Priority Machine Check (HPMC)",
                                regs, code, 0);
                /* NOT REACHED */

        case  2:
                /* Power failure interrupt */
                printk(KERN_CRIT "Power failure interrupt !\n");
                return;

        case  3:
                /* Recovery counter trap */
                regs->gr[0] &= ~PSW_R;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_TRACE);
                /* else this must be the start of a syscall - just let it run */
                return;

        case  5:
                /* Low-priority machine check */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

                flush_cache_all();
                flush_tlb_all();
                cpu_lpmc(5, regs);
                return;

        case  6:
                /* Instruction TLB miss fault/Instruction page fault */
                fault_address = regs->iaoq[0];
                fault_space   = regs->iasq[0];
                break;

        case  8:
                /* Illegal instruction trap */
                die_if_kernel("Illegal instruction", regs, code);
                si.si_code = ILL_ILLOPC;
                goto give_sigill;

        case  9:
                /* Break instruction trap */
                handle_break(regs);
                return;

        case 10:
                /* Privileged operation trap */
                die_if_kernel("Privileged operation", regs, code);
                si.si_code = ILL_PRVOPC;
                goto give_sigill;

        case 11:
                /* Privileged register trap */
                if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

                        /* This is a MFCTL cr26/cr27 to gr instruction.
                         * PCXS traps on this, so we need to emulate it.
                         */

                        if (regs->iir & 0x00200000)
                                regs->gr[regs->iir & 0x1f] = mfctl(27);
                        else
                                regs->gr[regs->iir & 0x1f] = mfctl(26);

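                        /* Step the instruction address queue past the emulated mfctl. */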
                        regs->iaoq[0] = regs->iaoq[1];
                        regs->iaoq[1] += 4;
                        regs->iasq[0] = regs->iasq[1];
                        return;
                }

                die_if_kernel("Privileged register usage", regs, code);
                si.si_code = ILL_PRVREG;
        give_sigill:
                si.si_signo = SIGILL;
                si.si_errno = 0;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGILL, &si, current);
                return;

        case 12:
                /* Overflow Trap, let the userland signal handler do the cleanup */
                si.si_signo = SIGFPE;
                si.si_code = FPE_INTOVF;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGFPE, &si, current);
                return;

        case 13:
                /* Conditional Trap
                   The condition succeeds in an instruction which traps
                   on condition  */
                if(user_mode(regs)){
                        si.si_signo = SIGFPE;
                        /* Set to zero, and let the userspace app figure it out from
                           the insn pointed to by si_addr */
                        si.si_code = 0;
                        si.si_addr = (void __user *) regs->iaoq[0];
                        force_sig_info(SIGFPE, &si, current);
                        return;
                }
                /* The kernel doesn't want to handle condition codes */
                break;

        case 14:
                /* Assist Exception Trap, i.e. floating point exception. */
                die_if_kernel("Floating point exception", regs, 0); /* quiet */
                handle_fpe(regs);
                return;

        case 15:
                /* Data TLB miss fault/Data page fault */
                /* Fall through */
        case 16:
                /* Non-access instruction TLB miss fault */
                /* The instruction TLB entry needed for the target address of the FIC
                   is absent, and hardware can't find it, so we get to clean up */
                /* Fall through */
        case 17:
                /* Non-access data TLB miss fault/Non-access data page fault */
                /* FIXME:
                         Still need to add slow path emulation code here!
                         If the insn used a non-shadow register, then the tlb
                         handlers could not have their side-effect (e.g. probe
                         writing to a target register) emulated since rfir would
                         erase the changes to said register. Instead we have to
                         setup everything, call this function we are in, and emulate
                         by hand. Technically we need to emulate:
                         fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
                */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 18:
                /* PCXS only -- later CPUs split this into types 26, 27 & 28 */
                /* Check for unaligned access */
                if (check_unaligned(regs)) {
                        handle_unaligned(regs);
                        return;
                }
                /* Fall Through */
        case 26:
                /* PCXL: Data memory access rights trap */
                fault_address = regs->ior;
                fault_space   = regs->isr;
                break;

        case 19:
                /* Data memory break trap */
                regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
                /* fall thru */
        case 21:
                /* Page reference trap */
                handle_gdb_break(regs, TRAP_HWBKPT);
                return;

        case 25:
                /* Taken branch trap */
                regs->gr[0] &= ~PSW_T;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_BRANCH);
                /* else this must be the start of a syscall - just let it
                 * run.
                 */
                return;

        case  7:
                /* Instruction access rights */
                /* PCXL: Instruction memory protection trap */

                /*
                 * This could be caused by either: 1) a process attempting
                 * to execute within a vma that does not have execute
                 * permission, or 2) an access rights violation caused by a
                 * flush only translation set up by ptep_get_and_clear().
                 * So we check the vma permissions to differentiate the two.
                 * If the vma indicates we have execute permission, then
                 * the cause is the latter one. In this case, we need to
                 * call do_page_fault() to fix the problem.
                 */

                if (user_mode(regs)) {
                        struct vm_area_struct *vma;

                        down_read(&current->mm->mmap_sem);
                        vma = find_vma(current->mm,regs->iaoq[0]);
                        if (vma && (regs->iaoq[0] >= vma->vm_start)
                                && (vma->vm_flags & VM_EXEC)) {

                                fault_address = regs->iaoq[0];
                                fault_space = regs->iasq[0];

                                up_read(&current->mm->mmap_sem);
                                break; /* call do_page_fault() */
                        }
                        up_read(&current->mm->mmap_sem);
                }
                /* Fall Through */
        case 27:
                /* Data memory protection ID trap */
                if (code == 27 && !user_mode(regs) &&
                        fixup_exception(regs))
                        return;

                die_if_kernel("Protection id trap", regs, code);
                si.si_code = SEGV_MAPERR;
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                if (code == 7)
                    si.si_addr = (void __user *) regs->iaoq[0];
                else
                    si.si_addr = (void __user *) regs->ior;
                force_sig_info(SIGSEGV, &si, current);
                return;

        case 28:
                /* Unaligned data reference trap */
                handle_unaligned(regs);
                return;

        default:
                if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
                        printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
                            task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        /* SIGBUS, for lack of a better one. */
                        si.si_signo = SIGBUS;
                        si.si_code = BUS_OBJERR;
                        si.si_errno = 0;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGBUS, &si, current);
                        return;
                }
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

                parisc_terminate("Unexpected interruption", regs, code, 0);
                /* NOT REACHED */
        }

        if (user_mode(regs)) {
            if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
                if (fault_space == 0)
                        printk(KERN_DEBUG "User Fault on Kernel Space ");
                else
                        printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
                               code);
                printk(KERN_CONT "pid=%d command='%s'\n",
                       task_pid_nr(current), current->comm);
                show_regs(regs);
#endif
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                si.si_code = SEGV_MAPERR;
                si.si_addr = (void __user *) regs->ior;
                force_sig_info(SIGSEGV, &si, current);
                return;
            }
        }
        else {

            /*
             * The kernel should never fault on its own address space.
             */

            if (fault_space == 0)
            {
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                parisc_terminate("Kernel Fault", regs, code, fault_address);

            }
        }

        do_page_fault(regs, code, fault_address);
}


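/*
 * Verify the fault vector image built by the assembly code (it starts with
 * the "cows can fly" magic), clear the reserved first slot, then record the
 * HPMC handler's length and a checksum in the HPMC vector entry so that
 * firmware treats os_hpmc as a valid OS handler for high-priority machine
 * checks.
 */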
int __init check_ivt(void *iva)
{
        extern u32 os_hpmc_size;
        extern const u32 os_hpmc[];

        int i;
        u32 check = 0;
        u32 *ivap;
        u32 *hpmcp;
        u32 length;

        if (strcmp((char *)iva, "cows can fly"))
                return -1;

        ivap = (u32 *)iva;

        for (i = 0; i < 8; i++)
            *ivap++ = 0;

        /* Compute Checksum for HPMC handler */
        length = os_hpmc_size;
        ivap[7] = length;

        hpmcp = (u32 *)os_hpmc;

        for (i=0; i<length/4; i++)
            check += *hpmcp++;

        for (i=0; i<8; i++)
            check += ivap[i];

        ivap[5] = -check;

        return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

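/*
 * Select the fault vector that matches the CPU (PA2.0 vs. PA1.1) and make
 * sure its image passes check_ivt() before the kernel relies on it.
 */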
void __init trap_init(void)
{
        void *iva;

        if (boot_cpu_data.cpu_type >= pcxu)
                iva = (void *) &fault_vector_20;
        else
#ifdef CONFIG_64BIT
                panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
                iva = (void *) &fault_vector_11;
#endif

        if (check_ivt(iva))
                panic("IVT invalid");
}