linux/arch/x86/mm/fault.c
   1/*
   2 *  Copyright (C) 1995  Linus Torvalds
   3 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
   4 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
   5 */
   6#include <linux/sched.h>                /* test_thread_flag(), ...      */
   7#include <linux/kdebug.h>               /* oops_begin/end, ...          */
   8#include <linux/extable.h>              /* search_exception_tables      */
   9#include <linux/bootmem.h>              /* max_low_pfn                  */
  10#include <linux/kprobes.h>              /* NOKPROBE_SYMBOL, ...         */
  11#include <linux/mmiotrace.h>            /* kmmio_handler, ...           */
  12#include <linux/perf_event.h>           /* perf_sw_event                */
  13#include <linux/hugetlb.h>              /* hstate_index_to_shift        */
  14#include <linux/prefetch.h>             /* prefetchw                    */
  15#include <linux/context_tracking.h>     /* exception_enter(), ...       */
  16#include <linux/uaccess.h>              /* faulthandler_disabled()      */
  17
  18#include <asm/cpufeature.h>             /* boot_cpu_has, ...            */
  19#include <asm/traps.h>                  /* dotraplinkage, ...           */
  20#include <asm/pgalloc.h>                /* pgd_*(), ...                 */
  21#include <asm/kmemcheck.h>              /* kmemcheck_*(), ...           */
  22#include <asm/fixmap.h>                 /* VSYSCALL_ADDR                */
  23#include <asm/vsyscall.h>               /* emulate_vsyscall             */
  24#include <asm/vm86.h>                   /* struct vm86                  */
  25#include <asm/mmu_context.h>            /* vma_pkey()                   */
  26
  27#define CREATE_TRACE_POINTS
  28#include <asm/trace/exceptions.h>
  29
  30/*
  31 * Page fault error code bits:
  32 *
  33 *   bit 0 ==    0: no page found       1: protection fault
  34 *   bit 1 ==    0: read access         1: write access
  35 *   bit 2 ==    0: kernel-mode access  1: user-mode access
  36 *   bit 3 ==                           1: use of reserved bit detected
  37 *   bit 4 ==                           1: fault was an instruction fetch
  38 *   bit 5 ==                           1: protection keys block access
  39 */
  40enum x86_pf_error_code {
  41
  42        PF_PROT         =               1 << 0,
  43        PF_WRITE        =               1 << 1,
  44        PF_USER         =               1 << 2,
  45        PF_RSVD         =               1 << 3,
  46        PF_INSTR        =               1 << 4,
  47        PF_PK           =               1 << 5,
  48};
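/*
 * A few illustrative decodes of the bits above (example values only,
 * derived directly from the definitions; not tied to any particular
 * fault):
 *
 *   0x004 (PF_USER)                    user-mode read of a not-present page
 *   0x006 (PF_USER|PF_WRITE)           user-mode write to a not-present page
 *   0x007 (PF_PROT|PF_WRITE|PF_USER)   user-mode write to a present but
 *                                      read-only page (e.g. a COW fault)
 *   0x011 (PF_PROT|PF_INSTR)           kernel instruction fetch from a
 *                                      present page that is marked NX
 */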
  49
  50/*
  51 * Returns 0 if mmiotrace is disabled, or if the fault is not
  52 * handled by mmiotrace:
  53 */
  54static nokprobe_inline int
  55kmmio_fault(struct pt_regs *regs, unsigned long addr)
  56{
  57        if (unlikely(is_kmmio_active()))
  58                if (kmmio_handler(regs, addr) == 1)
  59                        return -1;
  60        return 0;
  61}
  62
  63static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
  64{
  65        int ret = 0;
  66
  67        /* kprobe_running() needs smp_processor_id() */
  68        if (kprobes_built_in() && !user_mode(regs)) {
  69                preempt_disable();
  70                if (kprobe_running() && kprobe_fault_handler(regs, 14))
  71                        ret = 1;
  72                preempt_enable();
  73        }
  74
  75        return ret;
  76}
  77
  78/*
  79 * Prefetch quirks:
  80 *
  81 * 32-bit mode:
  82 *
  83 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
  84 *   Check that here and ignore it.
  85 *
  86 * 64-bit mode:
  87 *
  88 *   Sometimes the CPU reports invalid exceptions on prefetch.
  89 *   Check that here and ignore it.
  90 *
  91 * Opcode checker based on code by Richard Brunner.
  92 */
  93static inline int
  94check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
  95                      unsigned char opcode, int *prefetch)
  96{
  97        unsigned char instr_hi = opcode & 0xf0;
  98        unsigned char instr_lo = opcode & 0x0f;
  99
 100        switch (instr_hi) {
 101        case 0x20:
 102        case 0x30:
 103                /*
 104                 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
 105                 * In X86_64 long mode, the CPU will signal invalid
 106                 * opcode if some of these prefixes are present so
 107                 * X86_64 will never get here anyway
 108                 */
 109                return ((instr_lo & 7) == 0x6);
 110#ifdef CONFIG_X86_64
 111        case 0x40:
 112                /*
 113                 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
 114                 * Need to figure out under what instruction mode the
 115                 * instruction was issued. Could check the LDT for lm,
 116                 * but for now it's good enough to assume that long
 117                 * mode only uses well known segments or kernel.
 118                 */
 119                return (!user_mode(regs) || user_64bit_mode(regs));
 120#endif
 121        case 0x60:
 122                /* 0x64 thru 0x67 are valid prefixes in all modes. */
 123                return (instr_lo & 0xC) == 0x4;
 124        case 0xF0:
 125                /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
 126                return !instr_lo || (instr_lo>>1) == 1;
 127        case 0x00:
 128                /* Prefetch instruction is 0x0F0D or 0x0F18 */
 129                if (probe_kernel_address(instr, opcode))
 130                        return 0;
 131
 132                *prefetch = (instr_lo == 0xF) &&
 133                        (opcode == 0x0D || opcode == 0x18);
 134                return 0;
 135        default:
 136                return 0;
 137        }
 138}
 139
 140static int
 141is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 142{
 143        unsigned char *max_instr;
 144        unsigned char *instr;
 145        int prefetch = 0;
 146
 147        /*
  148         * If it was an exec (instruction fetch) fault on an NX page, then
 149         * do not ignore the fault:
 150         */
 151        if (error_code & PF_INSTR)
 152                return 0;
 153
 154        instr = (void *)convert_ip_to_linear(current, regs);
 155        max_instr = instr + 15;
 156
 157        if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
 158                return 0;
 159
 160        while (instr < max_instr) {
 161                unsigned char opcode;
 162
 163                if (probe_kernel_address(instr, opcode))
 164                        break;
 165
 166                instr++;
 167
 168                if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
 169                        break;
 170        }
 171        return prefetch;
 172}
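/*
 * Illustrative walk-through of the scan above (the instruction bytes are
 * an assumed example, not taken from a real fault): a 64-bit user task
 * executing "prefetcht0 (%r8)", encoded as 41 0f 18 08, takes a spurious
 * fault on the prefetched address.  The loop reads 0x41, which
 * check_prefetch_opcode() accepts as a REX prefix (case 0x40), then reads
 * 0x0f, which hits case 0x00, peeks at the following byte (0x18) and sets
 * *prefetch, so the fault is ignored.
 */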
 173
 174/*
 175 * A protection key fault means that the PKRU value did not allow
 176 * access to some PTE.  Userspace can figure out what PKRU was
 177 * from the XSAVE state, and this function fills out a field in
 178 * siginfo so userspace can discover which protection key was set
 179 * on the PTE.
 180 *
 181 * If we get here, we know that the hardware signaled a PF_PK
 182 * fault and that there was a VMA once we got in the fault
 183 * handler.  It does *not* guarantee that the VMA we find here
 184 * was the one that we faulted on.
 185 *
 186 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 187 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 188 * 3. T1   : faults...
 189 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
 190 * 5. T1   : enters fault handler, takes mmap_sem, etc...
 191 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 192 *           faulted on a pte with its pkey=4.
 193 */
 194static void fill_sig_info_pkey(int si_code, siginfo_t *info,
 195                struct vm_area_struct *vma)
 196{
 197        /* This is effectively an #ifdef */
 198        if (!boot_cpu_has(X86_FEATURE_OSPKE))
 199                return;
 200
 201        /* Fault not from Protection Keys: nothing to do */
 202        if (si_code != SEGV_PKUERR)
 203                return;
 204        /*
 205         * force_sig_info_fault() is called from a number of
 206         * contexts, some of which have a VMA and some of which
  207         * do not.  The PF_PK handling happens after we have a
 208         * valid VMA, so we should never reach this without a
 209         * valid VMA.
 210         */
 211        if (!vma) {
 212                WARN_ONCE(1, "PKU fault with no VMA passed in");
 213                info->si_pkey = 0;
 214                return;
 215        }
 216        /*
 217         * si_pkey should be thought of as a strong hint, but not
  218         * absolutely guaranteed to be 100% accurate because of
 219         * the race explained above.
 220         */
 221        info->si_pkey = vma_pkey(vma);
 222}
 223
 224static void
 225force_sig_info_fault(int si_signo, int si_code, unsigned long address,
 226                     struct task_struct *tsk, struct vm_area_struct *vma,
 227                     int fault)
 228{
 229        unsigned lsb = 0;
 230        siginfo_t info;
 231
 232        info.si_signo   = si_signo;
 233        info.si_errno   = 0;
 234        info.si_code    = si_code;
 235        info.si_addr    = (void __user *)address;
 236        if (fault & VM_FAULT_HWPOISON_LARGE)
 237                lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); 
 238        if (fault & VM_FAULT_HWPOISON)
 239                lsb = PAGE_SHIFT;
 240        info.si_addr_lsb = lsb;
 241
 242        fill_sig_info_pkey(si_code, &info, vma);
 243
 244        force_sig_info(si_signo, &info, tsk);
 245}
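/*
 * For reference: si_addr_lsb tells userspace how much of si_addr is
 * meaningful for a hardware-poison SIGBUS, expressed as a bit shift.
 * For example (illustrative), a poisoned 4 KB page reports PAGE_SHIFT
 * (12), while a poisoned 2 MB huge page reports 21.
 */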
 246
 247DEFINE_SPINLOCK(pgd_lock);
 248LIST_HEAD(pgd_list);
 249
 250#ifdef CONFIG_X86_32
 251static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 252{
 253        unsigned index = pgd_index(address);
 254        pgd_t *pgd_k;
 255        pud_t *pud, *pud_k;
 256        pmd_t *pmd, *pmd_k;
 257
 258        pgd += index;
 259        pgd_k = init_mm.pgd + index;
 260
 261        if (!pgd_present(*pgd_k))
 262                return NULL;
 263
 264        /*
 265         * set_pgd(pgd, *pgd_k); here would be useless on PAE
 266         * and redundant with the set_pmd() on non-PAE. As would
 267         * set_pud.
 268         */
 269        pud = pud_offset(pgd, address);
 270        pud_k = pud_offset(pgd_k, address);
 271        if (!pud_present(*pud_k))
 272                return NULL;
 273
 274        pmd = pmd_offset(pud, address);
 275        pmd_k = pmd_offset(pud_k, address);
 276        if (!pmd_present(*pmd_k))
 277                return NULL;
 278
 279        if (!pmd_present(*pmd))
 280                set_pmd(pmd, *pmd_k);
 281        else
 282                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
 283
 284        return pmd_k;
 285}
 286
 287void vmalloc_sync_all(void)
 288{
 289        unsigned long address;
 290
 291        if (SHARED_KERNEL_PMD)
 292                return;
 293
 294        for (address = VMALLOC_START & PMD_MASK;
 295             address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
 296             address += PMD_SIZE) {
 297                struct page *page;
 298
 299                spin_lock(&pgd_lock);
 300                list_for_each_entry(page, &pgd_list, lru) {
 301                        spinlock_t *pgt_lock;
 302                        pmd_t *ret;
 303
 304                        /* the pgt_lock only for Xen */
 305                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 306
 307                        spin_lock(pgt_lock);
 308                        ret = vmalloc_sync_one(page_address(page), address);
 309                        spin_unlock(pgt_lock);
 310
 311                        if (!ret)
 312                                break;
 313                }
 314                spin_unlock(&pgd_lock);
 315        }
 316}
 317
 318/*
 319 * 32-bit:
 320 *
 321 *   Handle a fault on the vmalloc or module mapping area
 322 */
 323static noinline int vmalloc_fault(unsigned long address)
 324{
 325        unsigned long pgd_paddr;
 326        pmd_t *pmd_k;
 327        pte_t *pte_k;
 328
 329        /* Make sure we are in vmalloc area: */
 330        if (!(address >= VMALLOC_START && address < VMALLOC_END))
 331                return -1;
 332
 333        WARN_ON_ONCE(in_nmi());
 334
 335        /*
 336         * Synchronize this task's top level page-table
 337         * with the 'reference' page table.
 338         *
 339         * Do _not_ use "current" here. We might be inside
 340         * an interrupt in the middle of a task switch..
 341         */
 342        pgd_paddr = read_cr3();
 343        pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
 344        if (!pmd_k)
 345                return -1;
 346
 347        if (pmd_huge(*pmd_k))
 348                return 0;
 349
 350        pte_k = pte_offset_kernel(pmd_k, address);
 351        if (!pte_present(*pte_k))
 352                return -1;
 353
 354        return 0;
 355}
 356NOKPROBE_SYMBOL(vmalloc_fault);
 357
 358/*
 359 * Did it hit the DOS screen memory VA from vm86 mode?
 360 */
 361static inline void
 362check_v8086_mode(struct pt_regs *regs, unsigned long address,
 363                 struct task_struct *tsk)
 364{
 365#ifdef CONFIG_VM86
 366        unsigned long bit;
 367
 368        if (!v8086_mode(regs) || !tsk->thread.vm86)
 369                return;
 370
 371        bit = (address - 0xA0000) >> PAGE_SHIFT;
 372        if (bit < 32)
 373                tsk->thread.vm86->screen_bitmap |= 1 << bit;
 374#endif
 375}
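/*
 * Example (illustrative): a vm86 task touching the VGA page at 0xA2000
 * sets bit (0xA2000 - 0xA0000) >> PAGE_SHIFT == 2 in screen_bitmap.
 */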
 376
 377static bool low_pfn(unsigned long pfn)
 378{
 379        return pfn < max_low_pfn;
 380}
 381
 382static void dump_pagetable(unsigned long address)
 383{
 384        pgd_t *base = __va(read_cr3());
 385        pgd_t *pgd = &base[pgd_index(address)];
 386        pmd_t *pmd;
 387        pte_t *pte;
 388
 389#ifdef CONFIG_X86_PAE
 390        printk("*pdpt = %016Lx ", pgd_val(*pgd));
 391        if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
 392                goto out;
 393#endif
 394        pmd = pmd_offset(pud_offset(pgd, address), address);
 395        printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
 396
 397        /*
 398         * We must not directly access the pte in the highpte
 399         * case if the page table is located in highmem.
 400         * And let's rather not kmap-atomic the pte, just in case
 401         * it's allocated already:
 402         */
 403        if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
 404                goto out;
 405
 406        pte = pte_offset_kernel(pmd, address);
 407        printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
 408out:
 409        printk("\n");
 410}
 411
 412#else /* CONFIG_X86_64: */
 413
 414void vmalloc_sync_all(void)
 415{
 416        sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 417}
 418
 419/*
 420 * 64-bit:
 421 *
 422 *   Handle a fault on the vmalloc area
 423 */
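/*
 * In short: every mm has its own pgd page, and its kernel-space entries
 * can fall behind the init_mm reference tables when a new vmalloc area
 * needs a top-level entry that did not exist when the pgd was set up.
 * The first access through such a stale pgd faults here, and the handler
 * below copies the missing entry over from init_mm.
 */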
 424static noinline int vmalloc_fault(unsigned long address)
 425{
 426        pgd_t *pgd, *pgd_ref;
 427        pud_t *pud, *pud_ref;
 428        pmd_t *pmd, *pmd_ref;
 429        pte_t *pte, *pte_ref;
 430
 431        /* Make sure we are in vmalloc area: */
 432        if (!(address >= VMALLOC_START && address < VMALLOC_END))
 433                return -1;
 434
 435        WARN_ON_ONCE(in_nmi());
 436
 437        /*
 438         * Copy kernel mappings over when needed. This can also
  439         * happen due to a race with a page table update. In the latter
  440         * case just flush:
 441         */
 442        pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address);
 443        pgd_ref = pgd_offset_k(address);
 444        if (pgd_none(*pgd_ref))
 445                return -1;
 446
 447        if (pgd_none(*pgd)) {
 448                set_pgd(pgd, *pgd_ref);
 449                arch_flush_lazy_mmu_mode();
 450        } else {
 451                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 452        }
 453
 454        /*
 455         * Below here mismatches are bugs because these lower tables
 456         * are shared:
 457         */
 458
 459        pud = pud_offset(pgd, address);
 460        pud_ref = pud_offset(pgd_ref, address);
 461        if (pud_none(*pud_ref))
 462                return -1;
 463
 464        if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
 465                BUG();
 466
 467        if (pud_huge(*pud))
 468                return 0;
 469
 470        pmd = pmd_offset(pud, address);
 471        pmd_ref = pmd_offset(pud_ref, address);
 472        if (pmd_none(*pmd_ref))
 473                return -1;
 474
 475        if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
 476                BUG();
 477
 478        if (pmd_huge(*pmd))
 479                return 0;
 480
 481        pte_ref = pte_offset_kernel(pmd_ref, address);
 482        if (!pte_present(*pte_ref))
 483                return -1;
 484
 485        pte = pte_offset_kernel(pmd, address);
 486
 487        /*
 488         * Don't use pte_page here, because the mappings can point
 489         * outside mem_map, and the NUMA hash lookup cannot handle
 490         * that:
 491         */
 492        if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
 493                BUG();
 494
 495        return 0;
 496}
 497NOKPROBE_SYMBOL(vmalloc_fault);
 498
 499#ifdef CONFIG_CPU_SUP_AMD
 500static const char errata93_warning[] =
 501KERN_ERR 
 502"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
 503"******* Working around it, but it may cause SEGVs or burn power.\n"
 504"******* Please consider a BIOS update.\n"
 505"******* Disabling USB legacy in the BIOS may also help.\n";
 506#endif
 507
 508/*
 509 * No vm86 mode in 64-bit mode:
 510 */
 511static inline void
 512check_v8086_mode(struct pt_regs *regs, unsigned long address,
 513                 struct task_struct *tsk)
 514{
 515}
 516
 517static int bad_address(void *p)
 518{
 519        unsigned long dummy;
 520
 521        return probe_kernel_address((unsigned long *)p, dummy);
 522}
 523
 524static void dump_pagetable(unsigned long address)
 525{
 526        pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
 527        pgd_t *pgd = base + pgd_index(address);
 528        pud_t *pud;
 529        pmd_t *pmd;
 530        pte_t *pte;
 531
 532        if (bad_address(pgd))
 533                goto bad;
 534
 535        printk("PGD %lx ", pgd_val(*pgd));
 536
 537        if (!pgd_present(*pgd))
 538                goto out;
 539
 540        pud = pud_offset(pgd, address);
 541        if (bad_address(pud))
 542                goto bad;
 543
 544        printk("PUD %lx ", pud_val(*pud));
 545        if (!pud_present(*pud) || pud_large(*pud))
 546                goto out;
 547
 548        pmd = pmd_offset(pud, address);
 549        if (bad_address(pmd))
 550                goto bad;
 551
 552        printk("PMD %lx ", pmd_val(*pmd));
 553        if (!pmd_present(*pmd) || pmd_large(*pmd))
 554                goto out;
 555
 556        pte = pte_offset_kernel(pmd, address);
 557        if (bad_address(pte))
 558                goto bad;
 559
 560        printk("PTE %lx", pte_val(*pte));
 561out:
 562        printk("\n");
 563        return;
 564bad:
 565        printk("BAD\n");
 566}
 567
 568#endif /* CONFIG_X86_64 */
 569
 570/*
 571 * Workaround for K8 erratum #93 & buggy BIOS.
 572 *
  573 * BIOS SMM functions are required to use a specific workaround
  574 * to avoid corruption of the 64-bit RIP register on C stepping K8.
  575 *
  576 * Many BIOSes that weren't tested properly miss this.
  577 *
  578 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
  579 * Try to work around it here.
  580 *
  581 * Note that we only handle kernel-mode faults here.
  582 * Does nothing on 32-bit.
 583 */
 584static int is_errata93(struct pt_regs *regs, unsigned long address)
 585{
 586#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
 587        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
 588            || boot_cpu_data.x86 != 0xf)
 589                return 0;
 590
 591        if (address != regs->ip)
 592                return 0;
 593
 594        if ((address >> 32) != 0)
 595                return 0;
 596
 597        address |= 0xffffffffUL << 32;
 598        if ((address >= (u64)_stext && address <= (u64)_etext) ||
 599            (address >= MODULES_VADDR && address <= MODULES_END)) {
 600                printk_once(errata93_warning);
 601                regs->ip = address;
 602                return 1;
 603        }
 604#endif
 605        return 0;
 606}
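/*
 * Worked example (the addresses are illustrative): the erratum truncates
 * RIP, so a branch to 0xffffffff81012345 faults with CR2 == RIP ==
 * 0x0000000081012345.  The upper 32 bits are zero and the sign-extended
 * value falls inside kernel text, so we restore the missing
 * 0xffffffff00000000 bits and resume at the corrected address.
 */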
 607
 608/*
  609 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 610 * to illegal addresses >4GB.
 611 *
 612 * We catch this in the page fault handler because these addresses
 613 * are not reachable. Just detect this case and return.  Any code
 614 * segment in LDT is compatibility mode.
 615 */
 616static int is_errata100(struct pt_regs *regs, unsigned long address)
 617{
 618#ifdef CONFIG_X86_64
 619        if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
 620                return 1;
 621#endif
 622        return 0;
 623}
 624
 625static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 626{
 627#ifdef CONFIG_X86_F00F_BUG
 628        unsigned long nr;
 629
 630        /*
 631         * Pentium F0 0F C7 C8 bug workaround:
 632         */
 633        if (boot_cpu_has_bug(X86_BUG_F00F)) {
 634                nr = (address - idt_descr.address) >> 3;
 635
 636                if (nr == 6) {
 637                        do_invalid_op(regs, 0);
 638                        return 1;
 639                }
 640        }
 641#endif
 642        return 0;
 643}
 644
 645static const char nx_warning[] = KERN_CRIT
 646"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
 647static const char smep_warning[] = KERN_CRIT
 648"unable to execute userspace code (SMEP?) (uid: %d)\n";
 649
 650static void
 651show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 652                unsigned long address)
 653{
 654        if (!oops_may_print())
 655                return;
 656
 657        if (error_code & PF_INSTR) {
 658                unsigned int level;
 659                pgd_t *pgd;
 660                pte_t *pte;
 661
 662                pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
 663                pgd += pgd_index(address);
 664
 665                pte = lookup_address_in_pgd(pgd, address, &level);
 666
 667                if (pte && pte_present(*pte) && !pte_exec(*pte))
 668                        printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
 669                if (pte && pte_present(*pte) && pte_exec(*pte) &&
 670                                (pgd_flags(*pgd) & _PAGE_USER) &&
 671                                (__read_cr4() & X86_CR4_SMEP))
 672                        printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
 673        }
 674
 675        printk(KERN_ALERT "BUG: unable to handle kernel ");
 676        if (address < PAGE_SIZE)
 677                printk(KERN_CONT "NULL pointer dereference");
 678        else
 679                printk(KERN_CONT "paging request");
 680
 681        printk(KERN_CONT " at %p\n", (void *) address);
 682        printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip);
 683
 684        dump_pagetable(address);
 685}
 686
 687static noinline void
 688pgtable_bad(struct pt_regs *regs, unsigned long error_code,
 689            unsigned long address)
 690{
 691        struct task_struct *tsk;
 692        unsigned long flags;
 693        int sig;
 694
 695        flags = oops_begin();
 696        tsk = current;
 697        sig = SIGKILL;
 698
 699        printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
 700               tsk->comm, address);
 701        dump_pagetable(address);
 702
 703        tsk->thread.cr2         = address;
 704        tsk->thread.trap_nr     = X86_TRAP_PF;
 705        tsk->thread.error_code  = error_code;
 706
 707        if (__die("Bad pagetable", regs, error_code))
 708                sig = 0;
 709
 710        oops_end(flags, regs, sig);
 711}
 712
 713static noinline void
 714no_context(struct pt_regs *regs, unsigned long error_code,
 715           unsigned long address, int signal, int si_code)
 716{
 717        struct task_struct *tsk = current;
 718        unsigned long flags;
 719        int sig;
 720        /* No context means no VMA to pass down */
 721        struct vm_area_struct *vma = NULL;
 722
 723        /* Are we prepared to handle this kernel fault? */
 724        if (fixup_exception(regs, X86_TRAP_PF)) {
 725                /*
 726                 * Any interrupt that takes a fault gets the fixup. This makes
  727                 * the below recursive fault logic only apply to faults from
 728                 * task context.
 729                 */
 730                if (in_interrupt())
 731                        return;
 732
 733                /*
 734                 * Per the above we're !in_interrupt(), aka. task context.
 735                 *
 736                 * In this case we need to make sure we're not recursively
 737                 * faulting through the emulate_vsyscall() logic.
 738                 */
 739                if (current->thread.sig_on_uaccess_err && signal) {
 740                        tsk->thread.trap_nr = X86_TRAP_PF;
 741                        tsk->thread.error_code = error_code | PF_USER;
 742                        tsk->thread.cr2 = address;
 743
 744                        /* XXX: hwpoison faults will set the wrong code. */
 745                        force_sig_info_fault(signal, si_code, address,
 746                                             tsk, vma, 0);
 747                }
 748
 749                /*
 750                 * Barring that, we can do the fixup and be happy.
 751                 */
 752                return;
 753        }
 754
 755#ifdef CONFIG_VMAP_STACK
 756        /*
 757         * Stack overflow?  During boot, we can fault near the initial
 758         * stack in the direct map, but that's not an overflow -- check
 759         * that we're in vmalloc space to avoid this.
 760         */
 761        if (is_vmalloc_addr((void *)address) &&
 762            (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
 763             address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
 764                register void *__sp asm("rsp");
 765                unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
 766                /*
 767                 * We're likely to be running with very little stack space
 768                 * left.  It's plausible that we'd hit this condition but
 769                 * double-fault even before we get this far, in which case
 770                 * we're fine: the double-fault handler will deal with it.
 771                 *
 772                 * We don't want to make it all the way into the oops code
 773                 * and then double-fault, though, because we're likely to
 774                 * break the console driver and lose most of the stack dump.
 775                 */
 776                asm volatile ("movq %[stack], %%rsp\n\t"
 777                              "call handle_stack_overflow\n\t"
 778                              "1: jmp 1b"
 779                              : "+r" (__sp)
 780                              : "D" ("kernel stack overflow (page fault)"),
 781                                "S" (regs), "d" (address),
 782                                [stack] "rm" (stack));
 783                unreachable();
 784        }
 785#endif
 786
 787        /*
 788         * 32-bit:
 789         *
 790         *   Valid to do another page fault here, because if this fault
 791         *   had been triggered by is_prefetch fixup_exception would have
 792         *   handled it.
 793         *
 794         * 64-bit:
 795         *
 796         *   Hall of shame of CPU/BIOS bugs.
 797         */
 798        if (is_prefetch(regs, error_code, address))
 799                return;
 800
 801        if (is_errata93(regs, address))
 802                return;
 803
 804        /*
 805         * Oops. The kernel tried to access some bad page. We'll have to
 806         * terminate things with extreme prejudice:
 807         */
 808        flags = oops_begin();
 809
 810        show_fault_oops(regs, error_code, address);
 811
 812        if (task_stack_end_corrupted(tsk))
 813                printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 814
 815        tsk->thread.cr2         = address;
 816        tsk->thread.trap_nr     = X86_TRAP_PF;
 817        tsk->thread.error_code  = error_code;
 818
 819        sig = SIGKILL;
 820        if (__die("Oops", regs, error_code))
 821                sig = 0;
 822
 823        /* Executive summary in case the body of the oops scrolled away */
 824        printk(KERN_DEFAULT "CR2: %016lx\n", address);
 825
 826        oops_end(flags, regs, sig);
 827}
 828
 829/*
 830 * Print out info about fatal segfaults, if the show_unhandled_signals
 831 * sysctl is set:
 832 */
 833static inline void
 834show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 835                unsigned long address, struct task_struct *tsk)
 836{
 837        if (!unhandled_signal(tsk, SIGSEGV))
 838                return;
 839
 840        if (!printk_ratelimit())
 841                return;
 842
 843        printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
 844                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 845                tsk->comm, task_pid_nr(tsk), address,
 846                (void *)regs->ip, (void *)regs->sp, error_code);
 847
 848        print_vma_addr(KERN_CONT " in ", regs->ip);
 849
 850        printk(KERN_CONT "\n");
 851}
 852
 853static void
 854__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 855                       unsigned long address, struct vm_area_struct *vma,
 856                       int si_code)
 857{
 858        struct task_struct *tsk = current;
 859
 860        /* User mode accesses just cause a SIGSEGV */
 861        if (error_code & PF_USER) {
 862                /*
 863                 * It's possible to have interrupts off here:
 864                 */
 865                local_irq_enable();
 866
 867                /*
 868                 * Valid to do another page fault here because this one came
 869                 * from user space:
 870                 */
 871                if (is_prefetch(regs, error_code, address))
 872                        return;
 873
 874                if (is_errata100(regs, address))
 875                        return;
 876
 877#ifdef CONFIG_X86_64
 878                /*
 879                 * Instruction fetch faults in the vsyscall page might need
 880                 * emulation.
 881                 */
 882                if (unlikely((error_code & PF_INSTR) &&
 883                             ((address & ~0xfff) == VSYSCALL_ADDR))) {
 884                        if (emulate_vsyscall(regs, address))
 885                                return;
 886                }
 887#endif
 888
 889                /*
 890                 * To avoid leaking information about the kernel page table
 891                 * layout, pretend that user-mode accesses to kernel addresses
 892                 * are always protection faults.
 893                 */
 894                if (address >= TASK_SIZE_MAX)
 895                        error_code |= PF_PROT;
 896
 897                if (likely(show_unhandled_signals))
 898                        show_signal_msg(regs, error_code, address, tsk);
 899
 900                tsk->thread.cr2         = address;
 901                tsk->thread.error_code  = error_code;
 902                tsk->thread.trap_nr     = X86_TRAP_PF;
 903
 904                force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
 905
 906                return;
 907        }
 908
 909        if (is_f00f_bug(regs, address))
 910                return;
 911
 912        no_context(regs, error_code, address, SIGSEGV, si_code);
 913}
 914
 915static noinline void
 916bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 917                     unsigned long address, struct vm_area_struct *vma)
 918{
 919        __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
 920}
 921
 922static void
 923__bad_area(struct pt_regs *regs, unsigned long error_code,
 924           unsigned long address,  struct vm_area_struct *vma, int si_code)
 925{
 926        struct mm_struct *mm = current->mm;
 927
 928        /*
 929         * Something tried to access memory that isn't in our memory map..
 930         * Fix it, but check if it's kernel or user first..
 931         */
 932        up_read(&mm->mmap_sem);
 933
 934        __bad_area_nosemaphore(regs, error_code, address, vma, si_code);
 935}
 936
 937static noinline void
 938bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 939{
 940        __bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
 941}
 942
 943static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 944                struct vm_area_struct *vma)
 945{
 946        /* This code is always called on the current mm */
 947        bool foreign = false;
 948
 949        if (!boot_cpu_has(X86_FEATURE_OSPKE))
 950                return false;
 951        if (error_code & PF_PK)
 952                return true;
 953        /* this checks permission keys on the VMA: */
 954        if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
 955                                (error_code & PF_INSTR), foreign))
 956                return true;
 957        return false;
 958}
 959
 960static noinline void
 961bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 962                      unsigned long address, struct vm_area_struct *vma)
 963{
 964        /*
 965         * This OSPKE check is not strictly necessary at runtime.
 966         * But, doing it this way allows compiler optimizations
 967         * if pkeys are compiled out.
 968         */
 969        if (bad_area_access_from_pkeys(error_code, vma))
 970                __bad_area(regs, error_code, address, vma, SEGV_PKUERR);
 971        else
 972                __bad_area(regs, error_code, address, vma, SEGV_ACCERR);
 973}
 974
 975static void
 976do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 977          struct vm_area_struct *vma, unsigned int fault)
 978{
 979        struct task_struct *tsk = current;
 980        int code = BUS_ADRERR;
 981
 982        /* Kernel mode? Handle exceptions or die: */
 983        if (!(error_code & PF_USER)) {
 984                no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
 985                return;
 986        }
 987
 988        /* User-space => ok to do another page fault: */
 989        if (is_prefetch(regs, error_code, address))
 990                return;
 991
 992        tsk->thread.cr2         = address;
 993        tsk->thread.error_code  = error_code;
 994        tsk->thread.trap_nr     = X86_TRAP_PF;
 995
 996#ifdef CONFIG_MEMORY_FAILURE
 997        if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
 998                printk(KERN_ERR
 999        "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
1000                        tsk->comm, tsk->pid, address);
1001                code = BUS_MCEERR_AR;
1002        }
1003#endif
1004        force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
1005}
1006
1007static noinline void
1008mm_fault_error(struct pt_regs *regs, unsigned long error_code,
1009               unsigned long address, struct vm_area_struct *vma,
1010               unsigned int fault)
1011{
1012        if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
1013                no_context(regs, error_code, address, 0, 0);
1014                return;
1015        }
1016
1017        if (fault & VM_FAULT_OOM) {
1018                /* Kernel mode? Handle exceptions or die: */
1019                if (!(error_code & PF_USER)) {
1020                        no_context(regs, error_code, address,
1021                                   SIGSEGV, SEGV_MAPERR);
1022                        return;
1023                }
1024
1025                /*
 1026                 * We ran out of memory, call the OOM killer, and return to
1027                 * userspace (which will retry the fault, or kill us if we got
1028                 * oom-killed):
1029                 */
1030                pagefault_out_of_memory();
1031        } else {
1032                if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
1033                             VM_FAULT_HWPOISON_LARGE))
1034                        do_sigbus(regs, error_code, address, vma, fault);
1035                else if (fault & VM_FAULT_SIGSEGV)
1036                        bad_area_nosemaphore(regs, error_code, address, vma);
1037                else
1038                        BUG();
1039        }
1040}
1041
1042static int spurious_fault_check(unsigned long error_code, pte_t *pte)
1043{
1044        if ((error_code & PF_WRITE) && !pte_write(*pte))
1045                return 0;
1046
1047        if ((error_code & PF_INSTR) && !pte_exec(*pte))
1048                return 0;
1049        /*
1050         * Note: We do not do lazy flushing on protection key
1051         * changes, so no spurious fault will ever set PF_PK.
1052         */
1053        if ((error_code & PF_PK))
1054                return 1;
1055
1056        return 1;
1057}
1058
1059/*
1060 * Handle a spurious fault caused by a stale TLB entry.
1061 *
1062 * This allows us to lazily refresh the TLB when increasing the
1063 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
1064 * eagerly is very expensive since that implies doing a full
1065 * cross-processor TLB flush, even if no stale TLB entries exist
1066 * on other processors.
1067 *
1068 * Spurious faults may only occur if the TLB contains an entry with
 1069 * fewer permissions than the page table entry.  Non-present (P = 0)
1070 * and reserved bit (R = 1) faults are never spurious.
1071 *
1072 * There are no security implications to leaving a stale TLB when
1073 * increasing the permissions on a page.
1074 *
1075 * Returns non-zero if a spurious fault was handled, zero otherwise.
1076 *
1077 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
1078 * (Optional Invalidation).
1079 */
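/*
 * Typical scenario (illustrative): a kernel mapping's permissions are
 * raised (say RO -> RW) and the cross-CPU TLB flush is skipped, since a
 * stale entry can only be more restrictive than the new one.  A later
 * write through that stale read-only TLB entry faults with
 * PF_PROT|PF_WRITE; the walk below finds that the current PTE already
 * permits the write, so the fault is reported as spurious and the access
 * is simply retried.
 */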
1080static noinline int
1081spurious_fault(unsigned long error_code, unsigned long address)
1082{
1083        pgd_t *pgd;
1084        pud_t *pud;
1085        pmd_t *pmd;
1086        pte_t *pte;
1087        int ret;
1088
1089        /*
1090         * Only writes to RO or instruction fetches from NX may cause
1091         * spurious faults.
1092         *
1093         * These could be from user or supervisor accesses but the TLB
1094         * is only lazily flushed after a kernel mapping protection
1095         * change, so user accesses are not expected to cause spurious
1096         * faults.
1097         */
1098        if (error_code != (PF_WRITE | PF_PROT)
1099            && error_code != (PF_INSTR | PF_PROT))
1100                return 0;
1101
1102        pgd = init_mm.pgd + pgd_index(address);
1103        if (!pgd_present(*pgd))
1104                return 0;
1105
1106        pud = pud_offset(pgd, address);
1107        if (!pud_present(*pud))
1108                return 0;
1109
1110        if (pud_large(*pud))
1111                return spurious_fault_check(error_code, (pte_t *) pud);
1112
1113        pmd = pmd_offset(pud, address);
1114        if (!pmd_present(*pmd))
1115                return 0;
1116
1117        if (pmd_large(*pmd))
1118                return spurious_fault_check(error_code, (pte_t *) pmd);
1119
1120        pte = pte_offset_kernel(pmd, address);
1121        if (!pte_present(*pte))
1122                return 0;
1123
1124        ret = spurious_fault_check(error_code, pte);
1125        if (!ret)
1126                return 0;
1127
1128        /*
1129         * Make sure we have permissions in PMD.
1130         * If not, then there's a bug in the page tables:
1131         */
1132        ret = spurious_fault_check(error_code, (pte_t *) pmd);
1133        WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
1134
1135        return ret;
1136}
1137NOKPROBE_SYMBOL(spurious_fault);
1138
1139int show_unhandled_signals = 1;
1140
1141static inline int
1142access_error(unsigned long error_code, struct vm_area_struct *vma)
1143{
1144        /* This is only called for the current mm, so: */
1145        bool foreign = false;
1146
1147        /*
1148         * Read or write was blocked by protection keys.  This is
1149         * always an unconditional error and can never result in
1150         * a follow-up action to resolve the fault, like a COW.
1151         */
1152        if (error_code & PF_PK)
1153                return 1;
1154
1155        /*
1156         * Make sure to check the VMA so that we do not perform
1157         * faults just to hit a PF_PK as soon as we fill in a
1158         * page.
1159         */
1160        if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
1161                                (error_code & PF_INSTR), foreign))
1162                return 1;
1163
1164        if (error_code & PF_WRITE) {
1165                /* write, present and write, not present: */
1166                if (unlikely(!(vma->vm_flags & VM_WRITE)))
1167                        return 1;
1168                return 0;
1169        }
1170
1171        /* read, present: */
1172        if (unlikely(error_code & PF_PROT))
1173                return 1;
1174
1175        /* read, not present: */
1176        if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
1177                return 1;
1178
1179        return 0;
1180}
1181
1182static int fault_in_kernel_space(unsigned long address)
1183{
1184        return address >= TASK_SIZE_MAX;
1185}
1186
1187static inline bool smap_violation(int error_code, struct pt_regs *regs)
1188{
1189        if (!IS_ENABLED(CONFIG_X86_SMAP))
1190                return false;
1191
1192        if (!static_cpu_has(X86_FEATURE_SMAP))
1193                return false;
1194
1195        if (error_code & PF_USER)
1196                return false;
1197
1198        if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
1199                return false;
1200
1201        return true;
1202}
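/*
 * Example (illustrative): with SMAP enabled, kernel code that
 * dereferences a user pointer outside a stac()/clac() pair (i.e. with
 * EFLAGS.AC clear) faults even on a perfectly valid user address.
 * PF_USER is clear and AC is clear, so this returns true and the access
 * is treated as a kernel bug instead of being resolved through the
 * normal VMA paths.
 */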
1203
1204/*
1205 * This routine handles page faults.  It determines the address,
1206 * and the problem, and then passes it off to one of the appropriate
1207 * routines.
1208 *
 1209 * This function must be marked noinline because both callers,
 1210 * {,trace_}do_page_fault(), are marked notrace. Having this be an actual function
1211 * guarantees there's a function trace entry.
1212 */
1213static noinline void
1214__do_page_fault(struct pt_regs *regs, unsigned long error_code,
1215                unsigned long address)
1216{
1217        struct vm_area_struct *vma;
1218        struct task_struct *tsk;
1219        struct mm_struct *mm;
1220        int fault, major = 0;
1221        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1222
1223        tsk = current;
1224        mm = tsk->mm;
1225
1226        /*
1227         * Detect and handle instructions that would cause a page fault for
1228         * both a tracked kernel page and a userspace page.
1229         */
1230        if (kmemcheck_active(regs))
1231                kmemcheck_hide(regs);
1232        prefetchw(&mm->mmap_sem);
1233
1234        if (unlikely(kmmio_fault(regs, address)))
1235                return;
1236
1237        /*
1238         * We fault-in kernel-space virtual memory on-demand. The
1239         * 'reference' page table is init_mm.pgd.
1240         *
1241         * NOTE! We MUST NOT take any locks for this case. We may
1242         * be in an interrupt or a critical region, and should
1243         * only copy the information from the master page table,
1244         * nothing more.
1245         *
1246         * This verifies that the fault happens in kernel space
 1247         * ((error_code & PF_USER) == 0), and that the fault was not a
 1248         * protection or reserved-bit error ((error_code & (PF_PROT | PF_RSVD)) == 0).
1249         */
1250        if (unlikely(fault_in_kernel_space(address))) {
1251                if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
1252                        if (vmalloc_fault(address) >= 0)
1253                                return;
1254
1255                        if (kmemcheck_fault(regs, address, error_code))
1256                                return;
1257                }
1258
1259                /* Can handle a stale RO->RW TLB: */
1260                if (spurious_fault(error_code, address))
1261                        return;
1262
1263                /* kprobes don't want to hook the spurious faults: */
1264                if (kprobes_fault(regs))
1265                        return;
1266                /*
1267                 * Don't take the mm semaphore here. If we fixup a prefetch
1268                 * fault we could otherwise deadlock:
1269                 */
1270                bad_area_nosemaphore(regs, error_code, address, NULL);
1271
1272                return;
1273        }
1274
1275        /* kprobes don't want to hook the spurious faults: */
1276        if (unlikely(kprobes_fault(regs)))
1277                return;
1278
1279        if (unlikely(error_code & PF_RSVD))
1280                pgtable_bad(regs, error_code, address);
1281
1282        if (unlikely(smap_violation(error_code, regs))) {
1283                bad_area_nosemaphore(regs, error_code, address, NULL);
1284                return;
1285        }
1286
1287        /*
 1288         * If we're in an interrupt, have no user context, or are running
 1289         * in a region with pagefaults disabled, then we must not take the fault.
1290         */
1291        if (unlikely(faulthandler_disabled() || !mm)) {
1292                bad_area_nosemaphore(regs, error_code, address, NULL);
1293                return;
1294        }
1295
1296        /*
1297         * It's safe to allow irq's after cr2 has been saved and the
1298         * vmalloc fault has been handled.
1299         *
1300         * User-mode registers count as a user access even for any
1301         * potential system fault or CPU buglet:
1302         */
1303        if (user_mode(regs)) {
1304                local_irq_enable();
1305                error_code |= PF_USER;
1306                flags |= FAULT_FLAG_USER;
1307        } else {
1308                if (regs->flags & X86_EFLAGS_IF)
1309                        local_irq_enable();
1310        }
1311
1312        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1313
1314        if (error_code & PF_WRITE)
1315                flags |= FAULT_FLAG_WRITE;
1316        if (error_code & PF_INSTR)
1317                flags |= FAULT_FLAG_INSTRUCTION;
1318
1319        /*
1320         * When running in the kernel we expect faults to occur only to
1321         * addresses in user space.  All other faults represent errors in
1322         * the kernel and should generate an OOPS.  Unfortunately, in the
1323         * case of an erroneous fault occurring in a code path which already
1324         * holds mmap_sem we will deadlock attempting to validate the fault
1325         * against the address space.  Luckily the kernel only validly
1326         * references user space from well defined areas of code, which are
1327         * listed in the exceptions table.
1328         *
1329         * As the vast majority of faults will be valid we will only perform
1330         * the source reference check when there is a possibility of a
1331         * deadlock. Attempt to lock the address space, if we cannot we then
1332         * validate the source. If this is invalid we can skip the address
1333         * space check, thus avoiding the deadlock:
1334         */
1335        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
1336                if ((error_code & PF_USER) == 0 &&
1337                    !search_exception_tables(regs->ip)) {
1338                        bad_area_nosemaphore(regs, error_code, address, NULL);
1339                        return;
1340                }
1341retry:
1342                down_read(&mm->mmap_sem);
1343        } else {
1344                /*
1345                 * The above down_read_trylock() might have succeeded in
1346                 * which case we'll have missed the might_sleep() from
1347                 * down_read():
1348                 */
1349                might_sleep();
1350        }
1351
1352        vma = find_vma(mm, address);
1353        if (unlikely(!vma)) {
1354                bad_area(regs, error_code, address);
1355                return;
1356        }
1357        if (likely(vma->vm_start <= address))
1358                goto good_area;
1359        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
1360                bad_area(regs, error_code, address);
1361                return;
1362        }
1363        if (error_code & PF_USER) {
1364                /*
1365                 * Accessing the stack below %sp is always a bug.
1366                 * The large cushion allows instructions like enter
1367                 * and pusha to work. ("enter $65535, $31" pushes
1368                 * 32 pointers and then decrements %sp by 65535.)
1369                 */
1370                if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
1371                        bad_area(regs, error_code, address);
1372                        return;
1373                }
1374        }
1375        if (unlikely(expand_stack(vma, address))) {
1376                bad_area(regs, error_code, address);
1377                return;
1378        }
1379
1380        /*
1381         * Ok, we have a good vm_area for this memory access, so
1382         * we can handle it..
1383         */
1384good_area:
1385        if (unlikely(access_error(error_code, vma))) {
1386                bad_area_access_error(regs, error_code, address, vma);
1387                return;
1388        }
1389
1390        /*
1391         * If for any reason at all we couldn't handle the fault,
1392         * make sure we exit gracefully rather than endlessly redo
1393         * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1394         * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1395         */
1396        fault = handle_mm_fault(vma, address, flags);
1397        major |= fault & VM_FAULT_MAJOR;
1398
1399        /*
 1400         * If we need to retry, the mmap_sem has already been released,
 1401         * and if there is a fatal signal pending, there is no guarantee
1402         * that we made any progress. Handle this case first.
1403         */
1404        if (unlikely(fault & VM_FAULT_RETRY)) {
1405                /* Retry at most once */
1406                if (flags & FAULT_FLAG_ALLOW_RETRY) {
1407                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
1408                        flags |= FAULT_FLAG_TRIED;
1409                        if (!fatal_signal_pending(tsk))
1410                                goto retry;
1411                }
1412
1413                /* User mode? Just return to handle the fatal exception */
1414                if (flags & FAULT_FLAG_USER)
1415                        return;
1416
1417                /* Not returning to user mode? Handle exceptions or die: */
1418                no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
1419                return;
1420        }
1421
1422        up_read(&mm->mmap_sem);
1423        if (unlikely(fault & VM_FAULT_ERROR)) {
1424                mm_fault_error(regs, error_code, address, vma, fault);
1425                return;
1426        }
1427
1428        /*
1429         * Major/minor page fault accounting. If any of the events
1430         * returned VM_FAULT_MAJOR, we account it as a major fault.
1431         */
1432        if (major) {
1433                tsk->maj_flt++;
1434                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1435        } else {
1436                tsk->min_flt++;
1437                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1438        }
1439
1440        check_v8086_mode(regs, address, tsk);
1441}
1442NOKPROBE_SYMBOL(__do_page_fault);
1443
1444dotraplinkage void notrace
1445do_page_fault(struct pt_regs *regs, unsigned long error_code)
1446{
1447        unsigned long address = read_cr2(); /* Get the faulting address */
1448        enum ctx_state prev_state;
1449
1450        /*
 1451         * We must have this function tagged with __kprobes and notrace, and call
 1452         * read_cr2() before calling anything else, to avoid invoking any kind
 1453         * of tracing machinery before we've observed the CR2 value.
1454         *
1455         * exception_{enter,exit}() contain all sorts of tracepoints.
1456         */
1457
1458        prev_state = exception_enter();
1459        __do_page_fault(regs, error_code, address);
1460        exception_exit(prev_state);
1461}
1462NOKPROBE_SYMBOL(do_page_fault);
1463
1464#ifdef CONFIG_TRACING
1465static nokprobe_inline void
1466trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
1467                         unsigned long error_code)
1468{
1469        if (user_mode(regs))
1470                trace_page_fault_user(address, regs, error_code);
1471        else
1472                trace_page_fault_kernel(address, regs, error_code);
1473}
1474
1475dotraplinkage void notrace
1476trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
1477{
1478        /*
1479         * The exception_enter and tracepoint processing could
 1480         * trigger another page fault (user space callchain
1481         * reading) and destroy the original cr2 value, so read
1482         * the faulting address now.
1483         */
1484        unsigned long address = read_cr2();
1485        enum ctx_state prev_state;
1486
1487        prev_state = exception_enter();
1488        trace_page_fault_entries(address, regs, error_code);
1489        __do_page_fault(regs, error_code, address);
1490        exception_exit(prev_state);
1491}
1492NOKPROBE_SYMBOL(trace_do_page_fault);
1493#endif /* CONFIG_TRACING */
1494