linux/arch/s390/mm/fault.c
/*
 *  arch/s390/mm/fault.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/s390_ext.h>
#include <asm/mmu_context.h>
#include "../kernel/entry.h"

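/*
 * Layout notes on the 31-bit vs. 64-bit constants below:
 *  __FAIL_ADDR_MASK  masks the translation-exception code down to the
 *                    page-aligned failing address.
 *  __FIXUP_MASK      masks the PSW address for the exception-table
 *                    lookup; on 31 bit the topmost bit is the
 *                    addressing-mode bit and must be ignored.
 *  __SUBCODE_MASK    expected high byte of the cpu-address subcode
 *                    delivered with the 0x2603 pfault external
 *                    interrupt (see pfault_interrupt below).
 *  __PF_RES_FIELD    value for the reserved doubleword of the pfault
 *                    parameter block (bit 0 set on 64 bit).
 */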
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __FIXUP_MASK 0x7fffffff
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __FIXUP_MASK ~0L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */

#ifdef CONFIG_SYSCTL
extern int sysctl_userprocess_debug;
#endif

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
	return 0;
}
#endif


/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space, 1 for user space and
 * 2 for code execution in user space with noexec=on.
 */
static inline int check_space(struct task_struct *tsk)
{
	/*
	 * The lowest two bits of S390_lowcore.trans_exc_code
	 * indicate which paging table was used.
	 */
	int desc = S390_lowcore.trans_exc_code & 3;

	if (desc == 3)	/* Home Segment Table Descriptor */
		return switch_amode == 0;
	if (desc == 2)	/* Secondary Segment Table Descriptor */
		return tsk->thread.mm_segment.ar4;
#ifdef CONFIG_S390_SWITCH_AMODE
	if (unlikely(desc == 1)) { /* STD determined via access register */
		/* %a0 always indicates primary space. */
		if (S390_lowcore.exc_access_id != 0) {
			save_access_regs(tsk->thread.acrs);
			/*
			 * An alet of 0 indicates primary space.
			 * An alet of 1 indicates secondary space.
			 * Any other alet values generate an
			 * alen-translation exception.
			 */
			if (tsk->thread.acrs[S390_lowcore.exc_access_id])
				return tsk->thread.mm_segment.ar4;
		}
	}
#endif
	/*
	 * Primary Segment Table Descriptor: with switch_amode this is
	 * user space (1), or 2 if noexec is on, since with noexec only
	 * instruction fetches go through the primary space.
	 */
	return switch_amode << s390_noexec;
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
		       int si_code, unsigned long address)
{
	struct siginfo si;

#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
#if defined(CONFIG_SYSCTL)
	if (sysctl_userprocess_debug)
#endif
	{
		printk("User process fault: interruption code 0x%lX\n",
		       error_code);
		printk("failing address: %lX\n", address);
		show_regs(regs);
	}
#endif
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *) address;
	force_sig_info(SIGSEGV, &si, current);
}

static void do_no_context(struct pt_regs *regs, unsigned long error_code,
			  unsigned long address)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
	if (fixup) {
		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (check_space(current) == 0)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %p\n", (void *)address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %p\n", (void *)address);

	die("Oops", regs, error_code);
	do_exit(SIGKILL);
}

static void do_low_address(struct pt_regs *regs, unsigned long error_code)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die("Low-address protection", regs, error_code);
		do_exit(SIGKILL);
	}

	do_no_context(regs, error_code, 0);
}

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code,
			    unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		return 1;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (regs->psw.mask & PSW_MASK_PSTATE)
		do_group_exit(SIGKILL);
	do_no_context(regs, error_code, address);
	return 0;
}

static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.prot_addr = address;
	tsk->thread.trap_no = error_code;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		do_no_context(regs, error_code, address);
}

#ifdef CONFIG_S390_EXEC_PROTECT
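/*
 * With execute protection enabled (noexec=on) the signal trampoline on
 * the user stack is mapped without VM_EXEC, so returning from a signal
 * handler faults here.  Emulate the two trampoline instructions:
 * 0x0a77 is "svc 119" (sigreturn) and 0x0aad is "svc 173"
 * (rt_sigreturn) on s390.  Anything else in a non-executable vma is a
 * genuine fault and gets a SIGSEGV.
 */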
static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
			 unsigned long address, unsigned long error_code)
{
	u16 instruction;
	int rc;
#ifdef CONFIG_COMPAT
	int compat;
#endif

	pagefault_disable();
	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	pagefault_enable();
	if (rc)
		return -EFAULT;

	up_read(&mm->mmap_sem);
	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
#ifdef CONFIG_COMPAT
	compat = test_tsk_thread_flag(current, TIF_31BIT);
	if (compat && instruction == 0x0a77)
		sys32_sigreturn();
	else if (compat && instruction == 0x0aad)
		sys32_rt_sigreturn();
	else
#endif
	if (instruction == 0x0a77)
		sys_sigreturn();
	else if (instruction == 0x0aad)
		sys_rt_sigreturn();
	else {
		current->thread.prot_addr = address;
		current->thread.trap_no = error_code;
		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
	}
	return 0;
}
#endif /* CONFIG_S390_EXEC_PROTECT */

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
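/*
 * The failing address and the address-space indication are taken from
 * the translation-exception code the hardware stores in the lowcore
 * (S390_lowcore.trans_exc_code).  'write' is non-zero only for
 * protection exceptions, where the access is known to be a write.
 */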
static inline void
do_exception(struct pt_regs *regs, unsigned long error_code, int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int space;
	int si_code;
	int fault;

	if (notify_page_fault(regs, error_code))
		return;

	tsk = current;
	mm = tsk->mm;

	/* get the failing address and the affected space */
	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
	space = check_space(tsk);

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	if (unlikely(space == 0 || in_atomic() || !mm))
		goto no_context;

	/*
	 * When we get here, the fault happened in the current
	 * task's user address space, so we can switch on the
	 * interrupts again and then search the VMAs
	 */
	local_irq_enable();

	down_read(&mm->mmap_sem);

	si_code = SEGV_MAPERR;
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

#ifdef CONFIG_S390_EXEC_PROTECT
	if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
		if (!signal_return(mm, regs, address, error_code))
			/*
			 * signal_return() has done an up_read(&mm->mmap_sem)
			 * if it returns 0.
			 */
			return;
#endif

	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (!write) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}

survive:
	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM) {
			if (do_out_of_memory(regs, error_code, address))
				goto survive;
			return;
		} else if (fault & VM_FAULT_SIGBUS) {
			do_sigbus(regs, error_code, address);
			return;
		}
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	/*
	 * The instruction that caused the program check will
	 * be repeated. Don't signal single step via SIGTRAP.
	 */
	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		tsk->thread.prot_addr = address;
		tsk->thread.trap_no = error_code;
		do_sigsegv(regs, error_code, si_code, address);
		return;
	}

no_context:
	do_no_context(regs, error_code, address);
}

void __kprobes do_protection_exception(struct pt_regs *regs,
				       long error_code)
{
	/*
	 * Protection exceptions are suppressing, i.e. the PSW already
	 * points past the faulting instruction.  The low-level entry
	 * code passes the instruction length in the upper half of
	 * error_code; subtract it to get back to the faulting
	 * instruction address.
	 */
	regs->psw.addr -= (error_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
		do_low_address(regs, error_code);
		return;
	}
	do_exception(regs, 4, 1);
}

void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
{
	do_exception(regs, error_code & 0xff, 0);
}

#ifdef CONFIG_64BIT
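/*
 * An ASCE-type translation exception means the faulting address lies
 * outside the range covered by the current address-space-control
 * element.  If a vma exists for the address, the page tables have
 * presumably already been extended (e.g. by another thread) and only
 * the new ASCE needs to be loaded via update_mm(); otherwise this is
 * an ordinary bad access.
 */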
void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int space;

	mm = current->mm;
	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
	space = check_space(current);

	if (unlikely(space == 0 || in_atomic() || !mm))
		goto no_context;

	local_irq_enable();

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	up_read(&mm->mmap_sem);

	if (vma) {
		update_mm(mm, current);
		return;
	}

	/* User mode accesses just cause a SIGSEGV */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		current->thread.prot_addr = address;
		current->thread.trap_no = error_code;
		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
		return;
	}

no_context:
	do_no_context(regs, error_code, address);
}
#endif

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static ext_int_info_t ext_int_pfault;
static int pfault_disable = 0;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

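/*
 * Parameter block for the diagnose 0x258 pfault handshaking with the
 * z/VM host, roughly: refdiagc is the diagnose code (0x258), reffcode
 * selects the function (0 = establish the pfault token, 1 = cancel),
 * refdwlen is the block length in doublewords and refversn the
 * interface version.  refgaddr tells VM where to find the token that
 * is presented with each pfault interrupt; here it is the lowcore
 * slot holding the current task pointer.  refselmk/refcmpmk are the
 * select/compare masks applied to the token.
 */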
typedef struct {
	__u16 refdiagc;
	__u16 reffcode;
	__u16 refdwlen;
	__u16 refversn;
	__u64 refgaddr;
	__u64 refselmk;
	__u64 refcmpmk;
	__u64 reserved;
} __attribute__ ((packed, aligned(8))) pfault_refbk_t;

int pfault_init(void)
{
	pfault_refbk_t refbk =
		{ 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
		  __PF_RES_FIELD };
	int rc;

	if (!MACHINE_IS_VM || pfault_disable)
		return -1;
	/* The exception table entry turns a failing diag into rc = 8. */
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	__ctl_set_bit(0, 9);	/* enable the pfault external interrupt subclass */
	return rc;
}

void pfault_fini(void)
{
	pfault_refbk_t refbk =
	{ 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };

	if (!MACHINE_IS_VM || pfault_disable)
		return;
	__ctl_clear_bit(0, 9);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}

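/*
 * VM raises external interrupt 0x2603 twice per pseudo page fault:
 * once when the page is found to be missing (initial interrupt,
 * signal bit clear) and once when it has been resolved (completion
 * interrupt, signal bit set).  The two can arrive in either order,
 * which is why pfault_wait is updated with xchg() and both orderings
 * are handled below.
 */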
static void pfault_interrupt(__u16 error_code)
{
	struct task_struct *tsk;
	__u16 subcode;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = S390_lowcore.cpu_addr;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;

	/*
	 * Get the token (= address of the task structure of the affected task).
	 */
	tsk = *(struct task_struct **) __LC_PFAULT_INTPARM;

	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			wake_up_process(tsk);
			put_task_struct(tsk);
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		get_task_struct(tsk);
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
			/* Completion interrupt was faster than the initial
			 * interrupt (swapped in a -1 for pfault_wait). Set
			 * pfault_wait back to zero and exit. This can be
			 * done safely because tsk is running in kernel
			 * mode and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			set_task_state(tsk, TASK_RUNNING);
			put_task_struct(tsk);
		} else
			set_tsk_need_resched(tsk);
	}
}

void __init pfault_irq_init(void)
{
	if (!MACHINE_IS_VM)
		return;

	/*
	 * Try to get pfault pseudo page faults going.
	 */
	if (register_early_external_interrupt(0x2603, pfault_interrupt,
					      &ext_int_pfault) != 0)
		panic("Couldn't request external interrupt 0x2603");

	if (pfault_init() == 0)
		return;

	/* Tough luck, no pfault. */
	pfault_disable = 1;
	unregister_early_external_interrupt(0x2603, pfault_interrupt,
					    &ext_int_pfault);
}
#endif