linux/arch/mn10300/mm/fault.c
/* MN10300 MMU Fault handler
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/vt_kern.h>              /* For unblank_screen() */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/gdb-stub.h>
#include <asm/cpu-regs.h>

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out
 */
void bust_spinlocks(int yes)
{
        if (yes) {
                oops_in_progress = 1;
#ifdef CONFIG_SMP
                /* Many serial drivers do __global_cli() */
                global_irq_lock = 0;
#endif
        } else {
                int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
                unblank_screen();
#endif
                oops_in_progress = 0;
                /*
                 * OK, the message is on the console.  Now we call printk()
                 * without oops_in_progress set so that printk will give klogd
                 * a poke.  Hold onto your hats...
                 */
                console_loglevel = 15;  /* NMI oopser may have shut the console
                                         * up */
                printk(" ");
                console_loglevel = loglevel_save;
        }
}

void do_BUG(const char *file, int line)
{
        bust_spinlocks(1);
        printk(KERN_EMERG "------------[ cut here ]------------\n");
        printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
}

#if 0
static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgdir + __pgd_offset(address);
        printk(KERN_DEBUG "pgd entry %p: %016Lx\n",
               pgd, (long long) pgd_val(*pgd));

        if (!pgd_present(*pgd)) {
                printk(KERN_DEBUG "... pgd not present!\n");
                return;
        }
        pmd = pmd_offset(pgd, address);
        printk(KERN_DEBUG "pmd entry %p: %016Lx\n",
               pmd, (long long)pmd_val(*pmd));

        if (!pmd_present(*pmd)) {
                printk(KERN_DEBUG "... pmd not present!\n");
                return;
        }
        pte = pte_offset(pmd, address);
        printk(KERN_DEBUG "pte entry %p: %016Lx\n",
               pte, (long long) pte_val(*pte));

        if (!pte_present(*pte))
                printk(KERN_DEBUG "... pte not present!\n");
}
#endif

asmlinkage void monitor_signal(struct pt_regs *);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * fault_code:
 * - LSW: either MMUFCR_IFC or MMUFCR_DFC as appropriate
 * - MSW: 0 if data access, 1 if instruction access
 * - bit 0: TLB miss flag
 * - bit 1: initial write
 * - bit 2: page invalid
 * - bit 3: protection violation
 * - bit 4: accessor (0=user 1=kernel)
 * - bit 5: 0=read 1=write
 * - bit 6-8: page protection spec
 * - bit 9: illegal address
 * - bit 16: 0=data 1=ins
 *
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
                              unsigned long address)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct mm_struct *mm;
        unsigned long page;
        siginfo_t info;
        int write, fault;

#ifdef CONFIG_GDBSTUB
        /* handle GDB stub causing a fault */
        if (gdbstub_busy) {
                gdbstub_exception(regs, TBR & TBR_INT_CODE);
                return;
        }
#endif

#if 0
        printk(KERN_DEBUG "--- do_page_fault(%p,%s:%04lx,%08lx)\n",
               regs,
               fault_code & 0x10000 ? "ins" : "data",
               fault_code & 0xffff, address);
#endif

        tsk = current;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * and that the fault was a page not present (invalid) error
         */
        if (address >= VMALLOC_START && address < VMALLOC_END &&
            (fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR &&
            (fault_code & MMUFCR_xFC_PGINVAL) == MMUFCR_xFC_PGINVAL
            )
                goto vmalloc_fault;

        mm = tsk->mm;
        info.si_code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
                /* accessing the stack below the stack pointer is always a
                 * bug (a couple of pages of slack are permitted below the SP
                 * before we object) */
                if ((address & PAGE_MASK) + 2 * PAGE_SIZE < regs->sp) {
#if 0
                        printk(KERN_WARNING
                               "[%d] ### Access below stack @%lx (sp=%lx)\n",
                               current->pid, address, regs->sp);
                        printk(KERN_WARNING
                               "vma [%08x - %08x]\n",
                               vma->vm_start, vma->vm_end);
                        show_registers(regs);
                        printk(KERN_WARNING
                               "[%d] ### Code: [%08lx]"
                               " %02x %02x %02x %02x %02x %02x %02x %02x\n",
                               current->pid,
                               regs->pc,
                               ((u8 *) regs->pc)[0],
                               ((u8 *) regs->pc)[1],
                               ((u8 *) regs->pc)[2],
                               ((u8 *) regs->pc)[3],
                               ((u8 *) regs->pc)[4],
                               ((u8 *) regs->pc)[5],
                               ((u8 *) regs->pc)[6],
                               ((u8 *) regs->pc)[7]
                               );
#endif
                        goto bad_area;
                }
        }

        if (expand_stack(vma, address))
                goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        info.si_code = SEGV_ACCERR;
        write = 0;
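        /* decode the fault: MMUFCR_xFC_TYPE distinguishes reads from writes,
         * and MMUFCR_xFC_PGINVAL distinguishes a fault on an absent page
         * from a protection violation on a present page */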
        switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) {
        default:        /* write to protected page */
        case MMUFCR_xFC_TYPE_WRITE:
#ifdef TEST_VERIFY_AREA
                if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
                        printk(KERN_DEBUG "WP fault at %08lx\n", regs->pc);
#endif
                /* write to absent page */
        case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                write++;
                break;

                /* read from protected page */
        case MMUFCR_xFC_TYPE_READ:
                goto bad_area;

                /* read from absent page */
        case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_READ:
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
                break;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, write);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;

        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);
        monitor_signal(regs);

        /* User mode accesses just cause a SIGSEGV */
        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        monitor_signal(regs);
        /* Are we prepared to handle this kernel fault?  */
        if (fixup_exception(regs))
                return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT
                       "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT
                       "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(" printing pc:\n");
        printk(KERN_ALERT "%08lx\n", regs->pc);

#ifdef CONFIG_GDBSTUB
        gdbstub_intercept(
                regs, fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR);
#endif

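        /* walk the page tables by hand to display the offending entries:
         * PTBR holds the physical address of the page directory, the top ten
         * bits of the faulting address index the pgd entry and, if that entry
         * is present (bit 0), the next ten bits index the pte */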
        page = PTBR;
        page = ((unsigned long *) __va(page))[address >> 22];
        printk(KERN_ALERT "*pde = %08lx\n", page);
        if (page & 1) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = %08lx\n", page);
        }

        die("Oops", regs, fault_code);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        monitor_signal(regs);
        printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
                do_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);
        monitor_signal(regs);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
                goto no_context;
        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int index = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                pgd_k = init_mm.pgd + index;

                if (!pgd_present(*pgd_k))
                        goto no_context;

                pud_k = pud_offset(pgd_k, address);
                if (!pud_present(*pud_k))
                        goto no_context;

                pmd_k = pmd_offset(pud_k, address);
                if (!pmd_present(*pmd_k))
                        goto no_context;

                pgd = (pgd_t *) PTBR + index;
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pmd(pmd, *pmd_k);

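                /* the pmd entry copied above points at the shared kernel pte
                 * page, so the pte itself need not be copied; just make sure
                 * the kernel mapping really is present */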
                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;
                return;
        }
}