linux/arch/x86/kernel/cpu/mce/core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Machine check handler.
   4 *
   5 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
   6 * Rest from unknown author(s).
   7 * 2004 Andi Kleen. Rewrote most of it.
   8 * Copyright 2008 Intel Corporation
   9 * Author: Andi Kleen
  10 */
  11
  12#include <linux/thread_info.h>
  13#include <linux/capability.h>
  14#include <linux/miscdevice.h>
  15#include <linux/ratelimit.h>
  16#include <linux/rcupdate.h>
  17#include <linux/kobject.h>
  18#include <linux/uaccess.h>
  19#include <linux/kdebug.h>
  20#include <linux/kernel.h>
  21#include <linux/percpu.h>
  22#include <linux/string.h>
  23#include <linux/device.h>
  24#include <linux/syscore_ops.h>
  25#include <linux/delay.h>
  26#include <linux/ctype.h>
  27#include <linux/sched.h>
  28#include <linux/sysfs.h>
  29#include <linux/types.h>
  30#include <linux/slab.h>
  31#include <linux/init.h>
  32#include <linux/kmod.h>
  33#include <linux/poll.h>
  34#include <linux/nmi.h>
  35#include <linux/cpu.h>
  36#include <linux/ras.h>
  37#include <linux/smp.h>
  38#include <linux/fs.h>
  39#include <linux/mm.h>
  40#include <linux/debugfs.h>
  41#include <linux/irq_work.h>
  42#include <linux/export.h>
  43#include <linux/set_memory.h>
  44#include <linux/sync_core.h>
  45#include <linux/task_work.h>
  46#include <linux/hardirq.h>
  47
  48#include <asm/intel-family.h>
  49#include <asm/processor.h>
  50#include <asm/traps.h>
  51#include <asm/tlbflush.h>
  52#include <asm/mce.h>
  53#include <asm/msr.h>
  54#include <asm/reboot.h>
  55
  56#include "internal.h"
  57
  58/* sysfs synchronization */
  59static DEFINE_MUTEX(mce_sysfs_mutex);
  60
  61#define CREATE_TRACE_POINTS
  62#include <trace/events/mce.h>
  63
  64#define SPINUNIT                100     /* 100ns */
  65
  66DEFINE_PER_CPU(unsigned, mce_exception_count);
  67
  68DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
  69
  70struct mce_bank {
  71        u64                     ctl;                    /* subevents to enable */
  72        bool                    init;                   /* initialise bank? */
  73};
  74static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
  75
  76#define ATTR_LEN               16
  77/* One object for each MCE bank, shared by all CPUs */
  78struct mce_bank_dev {
  79        struct device_attribute attr;                   /* device attribute */
  80        char                    attrname[ATTR_LEN];     /* attribute name */
  81        u8                      bank;                   /* bank number */
  82};
  83static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS];
  84
  85struct mce_vendor_flags mce_flags __read_mostly;
  86
  87struct mca_config mca_cfg __read_mostly = {
  88        .bootlog  = -1,
  89        /*
  90         * Tolerant levels:
  91         * 0: always panic on uncorrected errors, log corrected errors
  92         * 1: panic or SIGBUS on uncorrected errors, log corrected errors
  93         * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
  94         * 3: never panic or SIGBUS, log all errors (for testing only)
  95         */
  96        .tolerant = 1,
  97        .monarch_timeout = -1
  98};
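/*
 * These defaults can be overridden at boot time via the "mce=" command
 * line parameter (e.g. "mce=bootlog", "mce=no_cmci" or a numeric tolerant
 * level), which is parsed by mcheck_enable() further down in this file.
 */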
  99
 100static DEFINE_PER_CPU(struct mce, mces_seen);
 101static unsigned long mce_need_notify;
 102static int cpu_missing;
 103
 104/*
 105 * MCA banks polled by the periodic polling timer for corrected events.
 106 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 107 */
 108DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
 109        [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
 110};
 111
 112/*
 113 * MCA banks controlled through firmware first for corrected errors.
 114 * This is a global list of banks for which we won't enable CMCI and we
 115 * won't poll. Firmware controls these banks and is responsible for
 116 * reporting corrected errors through GHES. Uncorrected/recoverable
 117 * errors are still notified through a machine check.
 118 */
 119mce_banks_t mce_banks_ce_disabled;
 120
 121static struct work_struct mce_work;
 122static struct irq_work mce_irq_work;
 123
 124static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 125
 126/*
 127 * CPU/chipset specific EDAC code can register a notifier call here to print
 128 * MCE errors in a human-readable form.
 129 */
 130BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
 131
 132/* Do initial initialization of a struct mce */
 133noinstr void mce_setup(struct mce *m)
 134{
 135        memset(m, 0, sizeof(struct mce));
 136        m->cpu = m->extcpu = smp_processor_id();
 137        /* need the internal __ version to avoid deadlocks */
 138        m->time = __ktime_get_real_seconds();
 139        m->cpuvendor = boot_cpu_data.x86_vendor;
 140        m->cpuid = cpuid_eax(1);
 141        m->socketid = cpu_data(m->extcpu).phys_proc_id;
 142        m->apicid = cpu_data(m->extcpu).initial_apicid;
 143        m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
 144
 145        if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
 146                m->ppin = __rdmsr(MSR_PPIN);
 147        else if (this_cpu_has(X86_FEATURE_AMD_PPIN))
 148                m->ppin = __rdmsr(MSR_AMD_PPIN);
 149
 150        m->microcode = boot_cpu_data.microcode;
 151}
 152
 153DEFINE_PER_CPU(struct mce, injectm);
 154EXPORT_PER_CPU_SYMBOL_GPL(injectm);
 155
 156void mce_log(struct mce *m)
 157{
 158        if (!mce_gen_pool_add(m))
 159                irq_work_queue(&mce_irq_work);
 160}
 161EXPORT_SYMBOL_GPL(mce_log);
 162
 163void mce_register_decode_chain(struct notifier_block *nb)
 164{
 165        if (WARN_ON(nb->priority < MCE_PRIO_LOWEST ||
 166                    nb->priority > MCE_PRIO_HIGHEST))
 167                return;
 168
 169        blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
 170}
 171EXPORT_SYMBOL_GPL(mce_register_decode_chain);
 172
 173void mce_unregister_decode_chain(struct notifier_block *nb)
 174{
 175        blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
 176}
 177EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
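/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * decoder such as an EDAC driver would typically hook into the chain
 * roughly like this, picking a priority between MCE_PRIO_LOWEST and
 * MCE_PRIO_HIGHEST and marking the record as handled via m->kflags:
 *
 *	static int my_mce_decode(struct notifier_block *nb, unsigned long val,
 *				 void *data)
 *	{
 *		struct mce *m = (struct mce *)data;
 *
 *		if (!m)
 *			return NOTIFY_DONE;
 *
 *		(decode m->status, m->addr, m->bank, ... and report it)
 *
 *		m->kflags |= MCE_HANDLED_EDAC;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_mce_decode_nb = {
 *		.notifier_call	= my_mce_decode,
 *		.priority	= MCE_PRIO_EDAC,
 *	};
 *
 *	mce_register_decode_chain(&my_mce_decode_nb);
 */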
 178
 179static inline u32 ctl_reg(int bank)
 180{
 181        return MSR_IA32_MCx_CTL(bank);
 182}
 183
 184static inline u32 status_reg(int bank)
 185{
 186        return MSR_IA32_MCx_STATUS(bank);
 187}
 188
 189static inline u32 addr_reg(int bank)
 190{
 191        return MSR_IA32_MCx_ADDR(bank);
 192}
 193
 194static inline u32 misc_reg(int bank)
 195{
 196        return MSR_IA32_MCx_MISC(bank);
 197}
 198
 199static inline u32 smca_ctl_reg(int bank)
 200{
 201        return MSR_AMD64_SMCA_MCx_CTL(bank);
 202}
 203
 204static inline u32 smca_status_reg(int bank)
 205{
 206        return MSR_AMD64_SMCA_MCx_STATUS(bank);
 207}
 208
 209static inline u32 smca_addr_reg(int bank)
 210{
 211        return MSR_AMD64_SMCA_MCx_ADDR(bank);
 212}
 213
 214static inline u32 smca_misc_reg(int bank)
 215{
 216        return MSR_AMD64_SMCA_MCx_MISC(bank);
 217}
 218
 219struct mca_msr_regs msr_ops = {
 220        .ctl    = ctl_reg,
 221        .status = status_reg,
 222        .addr   = addr_reg,
 223        .misc   = misc_reg
 224};
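/*
 * These are the default, architectural MSR accessors. On AMD systems with
 * scalable MCA (SMCA) the function pointers are redirected to the
 * smca_*_reg() variants above during early per-CPU init, so the rest of
 * the code can stay agnostic of the register layout.
 */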
 225
 226static void __print_mce(struct mce *m)
 227{
 228        pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
 229                 m->extcpu,
 230                 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
 231                 m->mcgstatus, m->bank, m->status);
 232
 233        if (m->ip) {
 234                pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
 235                        !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
 236                        m->cs, m->ip);
 237
 238                if (m->cs == __KERNEL_CS)
 239                        pr_cont("{%pS}", (void *)(unsigned long)m->ip);
 240                pr_cont("\n");
 241        }
 242
 243        pr_emerg(HW_ERR "TSC %llx ", m->tsc);
 244        if (m->addr)
 245                pr_cont("ADDR %llx ", m->addr);
 246        if (m->misc)
 247                pr_cont("MISC %llx ", m->misc);
 248        if (m->ppin)
 249                pr_cont("PPIN %llx ", m->ppin);
 250
 251        if (mce_flags.smca) {
 252                if (m->synd)
 253                        pr_cont("SYND %llx ", m->synd);
 254                if (m->ipid)
 255                        pr_cont("IPID %llx ", m->ipid);
 256        }
 257
 258        pr_cont("\n");
 259
 260        /*
 261         * Note this output is parsed by external tools and old fields
 262         * should not be changed.
 263         */
 264        pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 265                m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
 266                m->microcode);
 267}
 268
 269static void print_mce(struct mce *m)
 270{
 271        __print_mce(m);
 272
 273        if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
 274                pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 275}
 276
 277#define PANIC_TIMEOUT 5 /* 5 seconds */
 278
 279static atomic_t mce_panicked;
 280
 281static int fake_panic;
 282static atomic_t mce_fake_panicked;
 283
 284/* Panic in progress. Enable interrupts and wait for final IPI */
 285static void wait_for_panic(void)
 286{
 287        long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
 288
 289        preempt_disable();
 290        local_irq_enable();
 291        while (timeout-- > 0)
 292                udelay(1);
 293        if (panic_timeout == 0)
 294                panic_timeout = mca_cfg.panic_timeout;
 295        panic("Panicking machine check CPU died");
 296}
 297
 298static void mce_panic(const char *msg, struct mce *final, char *exp)
 299{
 300        int apei_err = 0;
 301        struct llist_node *pending;
 302        struct mce_evt_llist *l;
 303
 304        if (!fake_panic) {
 305                /*
 306                 * Make sure only one CPU runs in machine check panic
 307                 */
 308                if (atomic_inc_return(&mce_panicked) > 1)
 309                        wait_for_panic();
 310                barrier();
 311
 312                bust_spinlocks(1);
 313                console_verbose();
 314        } else {
 315                /* Don't log too much for fake panic */
 316                if (atomic_inc_return(&mce_fake_panicked) > 1)
 317                        return;
 318        }
 319        pending = mce_gen_pool_prepare_records();
 320        /* First print corrected ones that are still unlogged */
 321        llist_for_each_entry(l, pending, llnode) {
 322                struct mce *m = &l->mce;
 323                if (!(m->status & MCI_STATUS_UC)) {
 324                        print_mce(m);
 325                        if (!apei_err)
 326                                apei_err = apei_write_mce(m);
 327                }
 328        }
 329        /* Now print uncorrected but with the final one last */
 330        llist_for_each_entry(l, pending, llnode) {
 331                struct mce *m = &l->mce;
 332                if (!(m->status & MCI_STATUS_UC))
 333                        continue;
 334                if (!final || mce_cmp(m, final)) {
 335                        print_mce(m);
 336                        if (!apei_err)
 337                                apei_err = apei_write_mce(m);
 338                }
 339        }
 340        if (final) {
 341                print_mce(final);
 342                if (!apei_err)
 343                        apei_err = apei_write_mce(final);
 344        }
 345        if (cpu_missing)
 346                pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
 347        if (exp)
 348                pr_emerg(HW_ERR "Machine check: %s\n", exp);
 349        if (!fake_panic) {
 350                if (panic_timeout == 0)
 351                        panic_timeout = mca_cfg.panic_timeout;
 352                panic(msg);
 353        } else
 354                pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
 355}
 356
 357/* Support code for software error injection */
 358
 359static int msr_to_offset(u32 msr)
 360{
 361        unsigned bank = __this_cpu_read(injectm.bank);
 362
 363        if (msr == mca_cfg.rip_msr)
 364                return offsetof(struct mce, ip);
 365        if (msr == msr_ops.status(bank))
 366                return offsetof(struct mce, status);
 367        if (msr == msr_ops.addr(bank))
 368                return offsetof(struct mce, addr);
 369        if (msr == msr_ops.misc(bank))
 370                return offsetof(struct mce, misc);
 371        if (msr == MSR_IA32_MCG_STATUS)
 372                return offsetof(struct mce, mcgstatus);
 373        return -1;
 374}
 375
 376__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
 377                                      struct pt_regs *regs, int trapnr,
 378                                      unsigned long error_code,
 379                                      unsigned long fault_addr)
 380{
 381        pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
 382                 (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
 383
 384        show_stack_regs(regs);
 385
 386        panic("MCA architectural violation!\n");
 387
 388        while (true)
 389                cpu_relax();
 390
 391        return true;
 392}
 393
 394/* MSR access wrappers used for error injection */
 395static noinstr u64 mce_rdmsrl(u32 msr)
 396{
 397        DECLARE_ARGS(val, low, high);
 398
 399        if (__this_cpu_read(injectm.finished)) {
 400                int offset;
 401                u64 ret;
 402
 403                instrumentation_begin();
 404
 405                offset = msr_to_offset(msr);
 406                if (offset < 0)
 407                        ret = 0;
 408                else
 409                        ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
 410
 411                instrumentation_end();
 412
 413                return ret;
 414        }
 415
 416        /*
 417         * RDMSR on MCA MSRs should not fault. If they do, this is very much an
 418         * architectural violation and needs to be reported to the hardware
 419         * vendor. Panic the box so that no further progress is made.
 420         */
 421        asm volatile("1: rdmsr\n"
 422                     "2:\n"
 423                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
 424                     : EAX_EDX_RET(val, low, high) : "c" (msr));
 425
 426
 427        return EAX_EDX_VAL(val, low, high);
 428}
 429
 430__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
 431                                      struct pt_regs *regs, int trapnr,
 432                                      unsigned long error_code,
 433                                      unsigned long fault_addr)
 434{
 435        pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
 436                 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
 437                  regs->ip, (void *)regs->ip);
 438
 439        show_stack_regs(regs);
 440
 441        panic("MCA architectural violation!\n");
 442
 443        while (true)
 444                cpu_relax();
 445
 446        return true;
 447}
 448
 449static noinstr void mce_wrmsrl(u32 msr, u64 v)
 450{
 451        u32 low, high;
 452
 453        if (__this_cpu_read(injectm.finished)) {
 454                int offset;
 455
 456                instrumentation_begin();
 457
 458                offset = msr_to_offset(msr);
 459                if (offset >= 0)
 460                        *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
 461
 462                instrumentation_end();
 463
 464                return;
 465        }
 466
 467        low  = (u32)v;
 468        high = (u32)(v >> 32);
 469
 470        /* See comment in mce_rdmsrl() */
 471        asm volatile("1: wrmsr\n"
 472                     "2:\n"
 473                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
 474                     : : "c" (msr), "a"(low), "d" (high) : "memory");
 475}
 476
 477/*
 478 * Collect all global (w.r.t. this processor) status about this machine
 479 * check into our "mce" struct so that we can use it later to assess
 480 * the severity of the problem as we read per-bank specific details.
 481 */
 482static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
 483{
 484        mce_setup(m);
 485
 486        m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
 487        if (regs) {
 488                /*
 489                 * Get the address of the instruction at the time of
 490                 * the machine check error.
 491                 */
 492                if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
 493                        m->ip = regs->ip;
 494                        m->cs = regs->cs;
 495
 496                        /*
 497                         * When in VM86 mode make the cs look like ring 3
 498                         * always. This is a lie, but it's better than passing
 499                         * the additional vm86 bit around everywhere.
 500                         */
 501                        if (v8086_mode(regs))
 502                                m->cs |= 3;
 503                }
 504                /* Use accurate RIP reporting if available. */
 505                if (mca_cfg.rip_msr)
 506                        m->ip = mce_rdmsrl(mca_cfg.rip_msr);
 507        }
 508}
 509
 510int mce_available(struct cpuinfo_x86 *c)
 511{
 512        if (mca_cfg.disabled)
 513                return 0;
 514        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 515}
 516
 517static void mce_schedule_work(void)
 518{
 519        if (!mce_gen_pool_empty())
 520                schedule_work(&mce_work);
 521}
 522
 523static void mce_irq_work_cb(struct irq_work *entry)
 524{
 525        mce_schedule_work();
 526}
 527
 528/*
 529 * Check if the address reported by the CPU is in a format we can parse.
 530 * It would be possible to add code for most other cases, but all would
 531 * be somewhat complicated (e.g. segment offset would require an instruction
 532 * parser). So only support physical addresses up to page granularity for now.
 533 */
 534int mce_usable_address(struct mce *m)
 535{
 536        if (!(m->status & MCI_STATUS_ADDRV))
 537                return 0;
 538
 539        /* Checks after this one are Intel/Zhaoxin-specific: */
 540        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
 541            boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
 542                return 1;
 543
 544        if (!(m->status & MCI_STATUS_MISCV))
 545                return 0;
 546
 547        if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
 548                return 0;
 549
 550        if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
 551                return 0;
 552
 553        return 1;
 554}
 555EXPORT_SYMBOL_GPL(mce_usable_address);
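/*
 * For reference, mce_usable_address() relies on the MCi_MISC layout:
 * bits [5:0] hold the least significant valid address bit
 * (MCI_MISC_ADDR_LSB) and bits [8:6] the address mode
 * (MCI_MISC_ADDR_MODE), where MCI_MISC_ADDR_PHYS denotes a physical
 * address.
 */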
 556
 557bool mce_is_memory_error(struct mce *m)
 558{
 559        switch (m->cpuvendor) {
 560        case X86_VENDOR_AMD:
 561        case X86_VENDOR_HYGON:
 562                return amd_mce_is_memory_error(m);
 563
 564        case X86_VENDOR_INTEL:
 565        case X86_VENDOR_ZHAOXIN:
 566                /*
 567                 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
 568                 *
 569                 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
 570                 * indicating a memory error. Bit 8 is used for indicating a
 571                 * cache hierarchy error. The combination of bit 2 and bit 3
 572                 * is used for indicating a `generic' cache hierarchy error.
 573                 * But we can't just blindly check the above bits, because if
 574                 * bit 11 is set, then it is a bus/interconnect error - and
 575                 * either way the above bits just give more detail on what
 576                 * bus/interconnect error happened. Note that bit 12 can be
 577                 * ignored, as it's the "filter" bit.
 578                 */
 579                return (m->status & 0xef80) == BIT(7) ||
 580                       (m->status & 0xef00) == BIT(8) ||
 581                       (m->status & 0xeffc) == 0xc;
 582
 583        default:
 584                return false;
 585        }
 586}
 587EXPORT_SYMBOL_GPL(mce_is_memory_error);
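/*
 * Illustrative examples for the checks above: an MCACOD of 0x0090
 * (memory controller read error, channel 0) matches the BIT(7) test,
 * while 0x000c is the generic cache hierarchy encoding matched by the
 * last test.
 */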
 588
 589static bool whole_page(struct mce *m)
 590{
 591        if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
 592                return true;
 593
 594        return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
 595}
 596
 597bool mce_is_correctable(struct mce *m)
 598{
 599        if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 600                return false;
 601
 602        if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
 603                return false;
 604
 605        if (m->status & MCI_STATUS_UC)
 606                return false;
 607
 608        return true;
 609}
 610EXPORT_SYMBOL_GPL(mce_is_correctable);
 611
 612static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
 613                              void *data)
 614{
 615        struct mce *m = (struct mce *)data;
 616
 617        if (!m)
 618                return NOTIFY_DONE;
 619
 620        /* Emit the trace record: */
 621        trace_mce_record(m);
 622
 623        set_bit(0, &mce_need_notify);
 624
 625        mce_notify_irq();
 626
 627        return NOTIFY_DONE;
 628}
 629
 630static struct notifier_block early_nb = {
 631        .notifier_call  = mce_early_notifier,
 632        .priority       = MCE_PRIO_EARLY,
 633};
 634
 635static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
 636                              void *data)
 637{
 638        struct mce *mce = (struct mce *)data;
 639        unsigned long pfn;
 640
 641        if (!mce || !mce_usable_address(mce))
 642                return NOTIFY_DONE;
 643
 644        if (mce->severity != MCE_AO_SEVERITY &&
 645            mce->severity != MCE_DEFERRED_SEVERITY)
 646                return NOTIFY_DONE;
 647
 648        pfn = mce->addr >> PAGE_SHIFT;
 649        if (!memory_failure(pfn, 0)) {
 650                set_mce_nospec(pfn, whole_page(mce));
 651                mce->kflags |= MCE_HANDLED_UC;
 652        }
 653
 654        return NOTIFY_OK;
 655}
 656
 657static struct notifier_block mce_uc_nb = {
 658        .notifier_call  = uc_decode_notifier,
 659        .priority       = MCE_PRIO_UC,
 660};
 661
 662static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
 663                                void *data)
 664{
 665        struct mce *m = (struct mce *)data;
 666
 667        if (!m)
 668                return NOTIFY_DONE;
 669
 670        if (mca_cfg.print_all || !m->kflags)
 671                __print_mce(m);
 672
 673        return NOTIFY_DONE;
 674}
 675
 676static struct notifier_block mce_default_nb = {
 677        .notifier_call  = mce_default_notifier,
 678        /* lowest prio, we want it to run last. */
 679        .priority       = MCE_PRIO_LOWEST,
 680};
 681
 682/*
 683 * Read ADDR and MISC registers.
 684 */
 685static void mce_read_aux(struct mce *m, int i)
 686{
 687        if (m->status & MCI_STATUS_MISCV)
 688                m->misc = mce_rdmsrl(msr_ops.misc(i));
 689
 690        if (m->status & MCI_STATUS_ADDRV) {
 691                m->addr = mce_rdmsrl(msr_ops.addr(i));
 692
 693                /*
 694                 * Mask the reported address by the reported granularity.
 695                 */
 696                if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
 697                        u8 shift = MCI_MISC_ADDR_LSB(m->misc);
 698                        m->addr >>= shift;
 699                        m->addr <<= shift;
 700                }
 701
 702                /*
 703                 * Extract [55:<lsb>] where lsb is the least significant
 704                 * *valid* bit of the address bits.
 705                 */
 706                if (mce_flags.smca) {
 707                        u8 lsb = (m->addr >> 56) & 0x3f;
 708
 709                        m->addr &= GENMASK_ULL(55, lsb);
 710                }
 711        }
 712
 713        if (mce_flags.smca) {
 714                m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));
 715
 716                if (m->status & MCI_STATUS_SYNDV)
 717                        m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
 718        }
 719}
 720
 721DEFINE_PER_CPU(unsigned, mce_poll_count);
 722
 723/*
 724 * Poll for corrected events or events that happened before reset.
 725 * Those are just logged through /dev/mcelog.
 726 *
 727 * This is executed in standard interrupt context.
 728 *
 729 * Note: the spec recommends panicking for fatal unsignalled
 730 * errors here. However, this would be quite problematic --
 731 * we would need to reimplement the Monarch handling and
 732 * it would mess up the exclusion between the exception handler
 733 * and the poll handler -- so we skip this for now.
 734 * These cases should not happen anyway, or only when the CPU
 735 * is already totally confused. In that case it's likely it will
 736 * not fully execute the machine check handler either.
 737 */
 738bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 739{
 740        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
 741        bool error_seen = false;
 742        struct mce m;
 743        int i;
 744
 745        this_cpu_inc(mce_poll_count);
 746
 747        mce_gather_info(&m, NULL);
 748
 749        if (flags & MCP_TIMESTAMP)
 750                m.tsc = rdtsc();
 751
 752        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 753                if (!mce_banks[i].ctl || !test_bit(i, *b))
 754                        continue;
 755
 756                m.misc = 0;
 757                m.addr = 0;
 758                m.bank = i;
 759
 760                barrier();
 761                m.status = mce_rdmsrl(msr_ops.status(i));
 762
 763                /* If this entry is not valid, ignore it */
 764                if (!(m.status & MCI_STATUS_VAL))
 765                        continue;
 766
 767                /*
 768                 * If we are logging everything (at CPU online) or this
 769                 * is a corrected error, then we must log it.
 770                 */
 771                if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
 772                        goto log_it;
 773
 774                /*
 775                 * Newer Intel systems that support software error
 776                 * recovery need to make additional checks. Other
 777                 * CPUs should skip over uncorrected errors, but log
 778                 * everything else.
 779                 */
 780                if (!mca_cfg.ser) {
 781                        if (m.status & MCI_STATUS_UC)
 782                                continue;
 783                        goto log_it;
 784                }
 785
 786                /* Log "not enabled" (speculative) errors */
 787                if (!(m.status & MCI_STATUS_EN))
 788                        goto log_it;
 789
 790                /*
 791                 * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
 792                 * UC == 1 && PCC == 0 && S == 0
 793                 */
 794                if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
 795                        goto log_it;
 796
 797                /*
 798                 * Skip anything else. Presumption is that our read of this
 799                 * bank is racing with a machine check. Leave the log alone
 800                 * for do_machine_check() to deal with it.
 801                 */
 802                continue;
 803
 804log_it:
 805                error_seen = true;
 806
 807                if (flags & MCP_DONTLOG)
 808                        goto clear_it;
 809
 810                mce_read_aux(&m, i);
 811                m.severity = mce_severity(&m, NULL, mca_cfg.tolerant, NULL, false);
 812                /*
 813                 * Don't get the IP here because it's unlikely to
 814                 * have anything to do with the actual error location.
 815                 */
 816
 817                if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
 818                        goto clear_it;
 819
 820                mce_log(&m);
 821
 822clear_it:
 823                /*
 824                 * Clear state for this bank.
 825                 */
 826                mce_wrmsrl(msr_ops.status(i), 0);
 827        }
 828
 829        /*
 830         * Don't clear MCG_STATUS here because it's only defined for
 831         * exceptions.
 832         */
 833
 834        sync_core();
 835
 836        return error_seen;
 837}
 838EXPORT_SYMBOL_GPL(machine_check_poll);
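/*
 * machine_check_poll() has a few callers: the periodic timer below
 * (mce_timer_fn()), the Intel CMCI paths in intel.c, and CPU init via
 * __mcheck_cpu_init_generic() to flush out errors left over from before
 * the last reset.
 */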
 839
 840/*
 841 * Do a quick check if any of the events requires a panic.
 842 * This decides if we keep the events around or clear them.
 843 */
 844static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 845                          struct pt_regs *regs)
 846{
 847        char *tmp = *msg;
 848        int i;
 849
 850        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 851                m->status = mce_rdmsrl(msr_ops.status(i));
 852                if (!(m->status & MCI_STATUS_VAL))
 853                        continue;
 854
 855                __set_bit(i, validp);
 856                if (quirk_no_way_out)
 857                        quirk_no_way_out(i, m, regs);
 858
 859                m->bank = i;
 860                if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
 861                        mce_read_aux(m, i);
 862                        *msg = tmp;
 863                        return 1;
 864                }
 865        }
 866        return 0;
 867}
 868
 869/*
 870 * Variable to establish order between CPUs while scanning.
 871 * Each CPU spins initially until executing is equal its number.
 872 */
 873static atomic_t mce_executing;
 874
 875/*
 876 * Defines order of CPUs on entry. First CPU becomes Monarch.
 877 */
 878static atomic_t mce_callin;
 879
 880/*
 881 * Track which CPUs entered the MCA broadcast synchronization and which did
 882 * not, in order to print holdouts.
 883 */
 884static cpumask_t mce_missing_cpus = CPU_MASK_ALL;
 885
 886/*
 887 * Check if a timeout waiting for other CPUs happened.
 888 */
 889static int mce_timed_out(u64 *t, const char *msg)
 890{
 891        /*
 892         * The others already did panic for some reason.
 893         * Bail out like in a timeout.
 894         * rmb() to tell the compiler that system_state
 895         * might have been modified by someone else.
 896         */
 897        rmb();
 898        if (atomic_read(&mce_panicked))
 899                wait_for_panic();
 900        if (!mca_cfg.monarch_timeout)
 901                goto out;
 902        if ((s64)*t < SPINUNIT) {
 903                if (mca_cfg.tolerant <= 1) {
 904                        if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus))
 905                                pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n",
 906                                         cpumask_pr_args(&mce_missing_cpus));
 907                        mce_panic(msg, NULL, NULL);
 908                }
 909                cpu_missing = 1;
 910                return 1;
 911        }
 912        *t -= SPINUNIT;
 913out:
 914        touch_nmi_watchdog();
 915        return 0;
 916}
 917
 918/*
 919 * The Monarch's reign.  The Monarch is the CPU who entered
 920 * the machine check handler first. It waits for the others to
 921 * raise the exception too and then grades them. If any
 922 * error is fatal, it panics. Only then does it let the others continue.
 923 *
 924 * The other CPUs entering the MCE handler will be controlled by the
 925 * Monarch. They are called Subjects.
 926 *
 927 * This way we prevent any potential data corruption in an unrecoverable case
 928 * and also make sure that all CPUs' errors are always examined.
 929 *
 930 * This also detects the case of a machine check event coming from outer
 931 * space (not detected by any CPU). In this case some external agent wants
 932 * us to shut down, so panic too.
 933 *
 934 * The other CPUs might still decide to panic if the handler happens
 935 * in an unrecoverable place, but in this case the system is in a semi-stable
 936 * state and won't corrupt anything by itself. It's ok to let the others
 937 * continue for a bit first.
 938 *
 939 * All the spin loops have timeouts; when a timeout happens a CPU
 940 * typically elects itself to be Monarch.
 941 */
 942static void mce_reign(void)
 943{
 944        int cpu;
 945        struct mce *m = NULL;
 946        int global_worst = 0;
 947        char *msg = NULL;
 948
 949        /*
 950         * This CPU is the Monarch and the other CPUs have run
 951         * through their handlers.
 952         * Grade the severity of the errors of all the CPUs.
 953         */
 954        for_each_possible_cpu(cpu) {
 955                struct mce *mtmp = &per_cpu(mces_seen, cpu);
 956
 957                if (mtmp->severity > global_worst) {
 958                        global_worst = mtmp->severity;
 959                        m = &per_cpu(mces_seen, cpu);
 960                }
 961        }
 962
 963        /*
 964         * Cannot recover? Panic here then.
 965         * This dumps all the mces in the log buffer and stops the
 966         * other CPUs.
 967         */
 968        if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
 969                /* call mce_severity() to get "msg" for panic */
 970                mce_severity(m, NULL, mca_cfg.tolerant, &msg, true);
 971                mce_panic("Fatal machine check", m, msg);
 972        }
 973
 974        /*
 975         * For UC somewhere we let the CPU that detected it handle it.
 976         * We also must let the others continue, otherwise the handling
 977         * CPU could deadlock on a lock.
 978         */
 979
 980        /*
 981         * No machine check event found. Must be some external
 982         * source or one CPU is hung. Panic.
 983         */
 984        if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
 985                mce_panic("Fatal machine check from unknown source", NULL, NULL);
 986
 987        /*
 988         * Now clear all the mces_seen so that they don't reappear on
 989         * the next mce.
 990         */
 991        for_each_possible_cpu(cpu)
 992                memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
 993}
 994
 995static atomic_t global_nwo;
 996
 997/*
 998 * Start of Monarch synchronization. This waits until all CPUs have
 999 * entered the exception handler and then determines if any of them
1000 * saw a fatal event that requires a panic. The CPUs then run their
1001 * scans one at a time, in the entry order.
1002 * TBD double check parallel CPU hotunplug
1003 */
1004static int mce_start(int *no_way_out)
1005{
1006        int order;
1007        int cpus = num_online_cpus();
1008        u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1009
1010        if (!timeout)
1011                return -1;
1012
1013        atomic_add(*no_way_out, &global_nwo);
1014        /*
1015         * Rely on the implied barrier below, such that global_nwo
1016         * is updated before mce_callin.
1017         */
1018        order = atomic_inc_return(&mce_callin);
1019        cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
1020
1021        /*
1022         * Wait for everyone.
1023         */
1024        while (atomic_read(&mce_callin) != cpus) {
1025                if (mce_timed_out(&timeout,
1026                                  "Timeout: Not all CPUs entered broadcast exception handler")) {
1027                        atomic_set(&global_nwo, 0);
1028                        return -1;
1029                }
1030                ndelay(SPINUNIT);
1031        }
1032
1033        /*
1034         * mce_callin should be read before global_nwo
1035         */
1036        smp_rmb();
1037
1038        if (order == 1) {
1039                /*
1040                 * Monarch: Starts executing now, the others wait.
1041                 */
1042                atomic_set(&mce_executing, 1);
1043        } else {
1044                /*
1045                 * Subject: Now start the scanning loop one by one in
1046                 * the original callin order.
1047                 * This way, when there are any shared banks, each is
1048                 * seen by only one CPU before being cleared, avoiding duplicates.
1049                 */
1050                while (atomic_read(&mce_executing) < order) {
1051                        if (mce_timed_out(&timeout,
1052                                          "Timeout: Subject CPUs unable to finish machine check processing")) {
1053                                atomic_set(&global_nwo, 0);
1054                                return -1;
1055                        }
1056                        ndelay(SPINUNIT);
1057                }
1058        }
1059
1060        /*
1061         * Cache the global no_way_out state.
1062         */
1063        *no_way_out = atomic_read(&global_nwo);
1064
1065        return order;
1066}
1067
1068/*
1069 * Synchronize between CPUs after main scanning loop.
1070 * This invokes the bulk of the Monarch processing.
1071 */
1072static int mce_end(int order)
1073{
1074        int ret = -1;
1075        u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1076
1077        if (!timeout)
1078                goto reset;
1079        if (order < 0)
1080                goto reset;
1081
1082        /*
1083         * Allow others to run.
1084         */
1085        atomic_inc(&mce_executing);
1086
1087        if (order == 1) {
1088                /* CHECKME: Can this race with a parallel hotplug? */
1089                int cpus = num_online_cpus();
1090
1091                /*
1092                 * Monarch: Wait for everyone to go through their scanning
1093                 * loops.
1094                 */
1095                while (atomic_read(&mce_executing) <= cpus) {
1096                        if (mce_timed_out(&timeout,
1097                                          "Timeout: Monarch CPU unable to finish machine check processing"))
1098                                goto reset;
1099                        ndelay(SPINUNIT);
1100                }
1101
1102                mce_reign();
1103                barrier();
1104                ret = 0;
1105        } else {
1106                /*
1107                 * Subject: Wait for Monarch to finish.
1108                 */
1109                while (atomic_read(&mce_executing) != 0) {
1110                        if (mce_timed_out(&timeout,
1111                                          "Timeout: Monarch CPU did not finish machine check processing"))
1112                                goto reset;
1113                        ndelay(SPINUNIT);
1114                }
1115
1116                /*
1117                 * Don't reset anything. That's done by the Monarch.
1118                 */
1119                return 0;
1120        }
1121
1122        /*
1123         * Reset all global state.
1124         */
1125reset:
1126        atomic_set(&global_nwo, 0);
1127        atomic_set(&mce_callin, 0);
1128        cpumask_setall(&mce_missing_cpus);
1129        barrier();
1130
1131        /*
1132         * Let others run again.
1133         */
1134        atomic_set(&mce_executing, 0);
1135        return ret;
1136}
1137
1138static void mce_clear_state(unsigned long *toclear)
1139{
1140        int i;
1141
1142        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1143                if (test_bit(i, toclear))
1144                        mce_wrmsrl(msr_ops.status(i), 0);
1145        }
1146}
1147
1148/*
1149 * Cases where we avoid rendezvous handler timeout:
1150 * 1) If this CPU is offline.
1151 *
1152 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
1153 *  skip those CPUs which remain looping in the 1st kernel - see
1154 *  crash_nmi_callback().
1155 *
1156 * Note: there still is a small window between kexec-ing and the new,
1157 * kdump kernel establishing a new #MC handler where a broadcasted MCE
1158 * might not get handled properly.
1159 */
1160static noinstr bool mce_check_crashing_cpu(void)
1161{
1162        unsigned int cpu = smp_processor_id();
1163
1164        if (arch_cpu_is_offline(cpu) ||
1165            (crashing_cpu != -1 && crashing_cpu != cpu)) {
1166                u64 mcgstatus;
1167
1168                mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
1169
1170                if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
1171                        if (mcgstatus & MCG_STATUS_LMCES)
1172                                return false;
1173                }
1174
1175                if (mcgstatus & MCG_STATUS_RIPV) {
1176                        __wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
1177                        return true;
1178                }
1179        }
1180        return false;
1181}
1182
1183static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final,
1184                            unsigned long *toclear, unsigned long *valid_banks,
1185                            int no_way_out, int *worst)
1186{
1187        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1188        struct mca_config *cfg = &mca_cfg;
1189        int severity, i;
1190
1191        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1192                __clear_bit(i, toclear);
1193                if (!test_bit(i, valid_banks))
1194                        continue;
1195
1196                if (!mce_banks[i].ctl)
1197                        continue;
1198
1199                m->misc = 0;
1200                m->addr = 0;
1201                m->bank = i;
1202
1203                m->status = mce_rdmsrl(msr_ops.status(i));
1204                if (!(m->status & MCI_STATUS_VAL))
1205                        continue;
1206
1207                /*
1208                 * Corrected or non-signaled errors are handled by
1209                 * machine_check_poll(). Leave them alone, unless this panics.
1210                 */
1211                if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1212                        !no_way_out)
1213                        continue;
1214
1215                /* Set taint even when machine check was not enabled. */
1216                add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1217
1218                severity = mce_severity(m, regs, cfg->tolerant, NULL, true);
1219
1220                /*
1221                 * When the machine check is for a corrected/deferred error, leave
1222                 * it to the corresponding handler; don't touch it unless we're panicking.
1223                 */
1224                if ((severity == MCE_KEEP_SEVERITY ||
1225                     severity == MCE_UCNA_SEVERITY) && !no_way_out)
1226                        continue;
1227
1228                __set_bit(i, toclear);
1229
1230                /* Machine check event was not enabled. Clear, but ignore. */
1231                if (severity == MCE_NO_SEVERITY)
1232                        continue;
1233
1234                mce_read_aux(m, i);
1235
1236                /* assuming valid severity level != 0 */
1237                m->severity = severity;
1238
1239                mce_log(m);
1240
1241                if (severity > *worst) {
1242                        *final = *m;
1243                        *worst = severity;
1244                }
1245        }
1246
1247        /* mce_clear_state will clear *final, save locally for use later */
1248        *m = *final;
1249}
1250
1251static void kill_me_now(struct callback_head *ch)
1252{
1253        force_sig(SIGBUS);
1254}
1255
1256static void kill_me_maybe(struct callback_head *cb)
1257{
1258        struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
1259        int flags = MF_ACTION_REQUIRED;
1260        int ret;
1261
1262        pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
1263
1264        if (!p->mce_ripv)
1265                flags |= MF_MUST_KILL;
1266
1267        ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags);
1268        if (!ret && !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
1269                set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
1270                sync_core();
1271                return;
1272        }
1273
1274        /*
1275         * -EHWPOISON from memory_failure() means that it already sent SIGBUS
1276         * to the current process with the proper error info, so no need to
1277         * send SIGBUS here again.
1278         */
1279        if (ret == -EHWPOISON)
1280                return;
1281
1282        if (p->mce_vaddr != (void __user *)-1l) {
1283                force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
1284        } else {
1285                pr_err("Memory error not recovered");
1286                kill_me_now(cb);
1287        }
1288}
1289
1290static void queue_task_work(struct mce *m, int kill_current_task)
1291{
1292        current->mce_addr = m->addr;
1293        current->mce_kflags = m->kflags;
1294        current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
1295        current->mce_whole_page = whole_page(m);
1296
1297        if (kill_current_task)
1298                current->mce_kill_me.func = kill_me_now;
1299        else
1300                current->mce_kill_me.func = kill_me_maybe;
1301
1302        task_work_add(current, &current->mce_kill_me, TWA_RESUME);
1303}
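/*
 * The callback queued here (kill_me_now() or kill_me_maybe()) runs via
 * task_work once the task is about to return to user space, i.e. outside
 * the #MC context, where memory_failure() and signal delivery are safe.
 */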
1304
1305/*
1306 * The actual machine check handler. This only handles real
1307 * exceptions when something got corrupted coming in through int 18.
1308 *
1309 * This is executed in NMI context not subject to normal locking rules. This
1310 * implies that most kernel services cannot be safely used. Don't even
1311 * think about putting a printk in there!
1312 *
1313 * On Intel systems this is entered on all CPUs in parallel through
1314 * MCE broadcast. However some CPUs might be broken beyond repair,
1315 * so always be careful when synchronizing with others.
1316 *
1317 * Tracing and kprobes are disabled: if we interrupted a kernel context
1318 * with IF=1, we need to minimize stack usage.  There are also recursion
1319 * issues: if the machine check was due to a failure of the memory
1320 * backing the user stack, tracing that reads the user stack will cause
1321 * potentially infinite recursion.
1322 */
1323noinstr void do_machine_check(struct pt_regs *regs)
1324{
1325        DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1326        DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1327        struct mca_config *cfg = &mca_cfg;
1328        struct mce m, *final;
1329        char *msg = NULL;
1330        int worst = 0;
1331
1332        /*
1333         * Establish sequential order between the CPUs entering the machine
1334         * check handler.
1335         */
1336        int order = -1;
1337
1338        /*
1339         * If no_way_out gets set, there is no safe way to recover from this
1340         * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
1341         */
1342        int no_way_out = 0;
1343
1344        /*
1345         * If kill_current_task is not set, there might be a way to recover from this
1346         * error.
1347         */
1348        int kill_current_task = 0;
1349
1350        /*
1351         * MCEs are always local on AMD. On Intel and Zhaoxin this is
1352         * determined by MCG_STATUS_LMCES.
1353         */
1354        int lmce = 1;
1355
1356        this_cpu_inc(mce_exception_count);
1357
1358        mce_gather_info(&m, regs);
1359        m.tsc = rdtsc();
1360
1361        final = this_cpu_ptr(&mces_seen);
1362        *final = m;
1363
1364        memset(valid_banks, 0, sizeof(valid_banks));
1365        no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1366
1367        barrier();
1368
1369        /*
1370         * When there is no restart IP we might need to kill the task or panic.
1371         * Assume the worst for now, but if we find the
1372         * severity is MCE_AR_SEVERITY we have other options.
1373         */
1374        if (!(m.mcgstatus & MCG_STATUS_RIPV))
1375                kill_current_task = (cfg->tolerant == 3) ? 0 : 1;
1376        /*
1377         * Check if this MCE is signaled to only this logical processor,
1378         * on Intel, Zhaoxin only.
1379         */
1380        if (m.cpuvendor == X86_VENDOR_INTEL ||
1381            m.cpuvendor == X86_VENDOR_ZHAOXIN)
1382                lmce = m.mcgstatus & MCG_STATUS_LMCES;
1383
1384        /*
1385         * Local machine check may already know that we have to panic.
1386         * Broadcast machine check begins rendezvous in mce_start()
1387         * Go through all banks in exclusion of the other CPUs. This way we
1388         * don't report duplicated events on shared banks because the first one
1389         * to see it will clear it.
1390         */
1391        if (lmce) {
1392                if (no_way_out && cfg->tolerant < 3)
1393                        mce_panic("Fatal local machine check", &m, msg);
1394        } else {
1395                order = mce_start(&no_way_out);
1396        }
1397
1398        __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst);
1399
1400        if (!no_way_out)
1401                mce_clear_state(toclear);
1402
1403        /*
1404         * Do most of the synchronization with other CPUs.
1405         * When there's any problem use only local no_way_out state.
1406         */
1407        if (!lmce) {
1408                if (mce_end(order) < 0) {
1409                        if (!no_way_out)
1410                                no_way_out = worst >= MCE_PANIC_SEVERITY;
1411
1412                        if (no_way_out && cfg->tolerant < 3)
1413                                mce_panic("Fatal machine check on current CPU", &m, msg);
1414                }
1415        } else {
1416                /*
1417                 * If there was a fatal machine check we should have
1418                 * already called mce_panic earlier in this function.
1419                 * Since we re-read the banks, we might have found
1420                 * something new. Check again to see if we found a
1421                 * fatal error. We call "mce_severity()" again to
1422                 * make sure we have the right "msg".
1423                 */
1424                if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
1425                        mce_severity(&m, regs, cfg->tolerant, &msg, true);
1426                        mce_panic("Local fatal machine check!", &m, msg);
1427                }
1428        }
1429
1430        if (worst != MCE_AR_SEVERITY && !kill_current_task)
1431                goto out;
1432
1433        /* Fault was in user mode and we need to take some action */
1434        if ((m.cs & 3) == 3) {
1435                /* If this triggers there is no way to recover. Die hard. */
1436                BUG_ON(!on_thread_stack() || !user_mode(regs));
1437
1438                queue_task_work(&m, kill_current_task);
1439
1440        } else {
1441                /*
1442                 * Handle an MCE which has happened in kernel space but from
1443                 * which the kernel can recover: ex_has_fault_handler() has
1444                 * already verified that the rIP at which the error happened is
1445                 * a rIP from which the kernel can recover (by jumping to
1446                 * recovery code specified in _ASM_EXTABLE_FAULT()) and the
1447                 * corresponding exception handler which would do that is the
1448                 * proper one.
1449                 */
1450                if (m.kflags & MCE_IN_KERNEL_RECOV) {
1451                        if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
1452                                mce_panic("Failed kernel mode recovery", &m, msg);
1453                }
1454
1455                if (m.kflags & MCE_IN_KERNEL_COPYIN)
1456                        queue_task_work(&m, kill_current_task);
1457        }
1458out:
1459        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1460}
1461EXPORT_SYMBOL_GPL(do_machine_check);
1462
1463#ifndef CONFIG_MEMORY_FAILURE
1464int memory_failure(unsigned long pfn, int flags)
1465{
1466        /* mce_severity() should not hand us an ACTION_REQUIRED error */
1467        BUG_ON(flags & MF_ACTION_REQUIRED);
1468        pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1469               "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1470               pfn);
1471
1472        return 0;
1473}
1474#endif
1475
1476/*
1477 * Periodic polling timer for "silent" machine check errors.  If the
1478 * poller finds an MCE, poll 2x faster.  When the poller finds no more
1479 * errors, poll 2x slower (up to check_interval seconds).
1480 */
1481static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1482
1483static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1484static DEFINE_PER_CPU(struct timer_list, mce_timer);
1485
1486static unsigned long mce_adjust_timer_default(unsigned long interval)
1487{
1488        return interval;
1489}
1490
1491static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1492
1493static void __start_timer(struct timer_list *t, unsigned long interval)
1494{
1495        unsigned long when = jiffies + interval;
1496        unsigned long flags;
1497
1498        local_irq_save(flags);
1499
1500        if (!timer_pending(t) || time_before(when, t->expires))
1501                mod_timer(t, round_jiffies(when));
1502
1503        local_irq_restore(flags);
1504}
1505
1506static void mce_timer_fn(struct timer_list *t)
1507{
1508        struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
1509        unsigned long iv;
1510
1511        WARN_ON(cpu_t != t);
1512
1513        iv = __this_cpu_read(mce_next_interval);
1514
1515        if (mce_available(this_cpu_ptr(&cpu_info))) {
1516                machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
1517
1518                if (mce_intel_cmci_poll()) {
1519                        iv = mce_adjust_timer(iv);
1520                        goto done;
1521                }
1522        }
1523
1524        /*
1525         * Alert userspace if needed. If we logged an MCE, reduce the polling
1526         * interval, otherwise increase the polling interval.
1527         */
1528        if (mce_notify_irq())
1529                iv = max(iv / 2, (unsigned long) HZ/100);
1530        else
1531                iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1532
1533done:
1534        __this_cpu_write(mce_next_interval, iv);
1535        __start_timer(t, iv);
1536}
1537
1538/*
1539 * Ensure that the timer is firing in @interval from now.
1540 */
1541void mce_timer_kick(unsigned long interval)
1542{
1543        struct timer_list *t = this_cpu_ptr(&mce_timer);
1544        unsigned long iv = __this_cpu_read(mce_next_interval);
1545
1546        __start_timer(t, interval);
1547
1548        if (interval < iv)
1549                __this_cpu_write(mce_next_interval, interval);
1550}
1551
1552/* Must not be called in IRQ context where del_timer_sync() can deadlock */
1553static void mce_timer_delete_all(void)
1554{
1555        int cpu;
1556
1557        for_each_online_cpu(cpu)
1558                del_timer_sync(&per_cpu(mce_timer, cpu));
1559}
1560
1561/*
1562 * Notify the user(s) about new machine check events.
1563 * Can be called from interrupt context, but not from machine check/NMI
1564 * context.
1565 */
1566int mce_notify_irq(void)
1567{
1568        /* Not more than two messages every minute */
1569        static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1570
1571        if (test_and_clear_bit(0, &mce_need_notify)) {
1572                mce_work_trigger();
1573
1574                if (__ratelimit(&ratelimit))
1575                        pr_info(HW_ERR "Machine check events logged\n");
1576
1577                return 1;
1578        }
1579        return 0;
1580}
1581EXPORT_SYMBOL_GPL(mce_notify_irq);
1582
1583static void __mcheck_cpu_mce_banks_init(void)
1584{
1585        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1586        u8 n_banks = this_cpu_read(mce_num_banks);
1587        int i;
1588
1589        for (i = 0; i < n_banks; i++) {
1590                struct mce_bank *b = &mce_banks[i];
1591
1592                /*
1593                 * Init them all, __mcheck_cpu_apply_quirks() is going to apply
1594                 * the required vendor quirks before
1595                 * __mcheck_cpu_init_clear_banks() does the final bank setup.
1596                 */
1597                b->ctl = -1ULL;
1598                b->init = true;
1599        }
1600}
1601
1602/*
1603 * Initialize Machine Checks for a CPU.
1604 */
1605static void __mcheck_cpu_cap_init(void)
1606{
1607        u64 cap;
1608        u8 b;
1609
1610        rdmsrl(MSR_IA32_MCG_CAP, cap);
1611
1612        b = cap & MCG_BANKCNT_MASK;
1613
1614        if (b > MAX_NR_BANKS) {
1615                pr_warn("CPU%d: Using only %u machine check banks out of %u\n",
1616                        smp_processor_id(), MAX_NR_BANKS, b);
1617                b = MAX_NR_BANKS;
1618        }
1619
1620        this_cpu_write(mce_num_banks, b);
1621
1622        __mcheck_cpu_mce_banks_init();
1623
1624        /* Use accurate RIP reporting if available. */
1625        if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1626                mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1627
1628        if (cap & MCG_SER_P)
1629                mca_cfg.ser = 1;
1630}
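/*
 * For illustration, the MCG_CAP fields consumed above: the low bits
 * (MCG_BANKCNT_MASK) give the number of banks, clamped to MAX_NR_BANKS;
 * MCG_EXT_P together with an extended-register count of at least 9 means
 * MSR_IA32_MCG_EIP is available and is used for accurate RIP reporting;
 * MCG_SER_P advertises software error recovery and sets mca_cfg.ser.
 */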
1631
1632static void __mcheck_cpu_init_generic(void)
1633{
1634        enum mcp_flags m_fl = 0;
1635        mce_banks_t all_banks;
1636        u64 cap;
1637
1638        if (!mca_cfg.bootlog)
1639                m_fl = MCP_DONTLOG;
1640
1641        /*
1642         * Log the machine checks left over from the previous reset.
1643         */
1644        bitmap_fill(all_banks, MAX_NR_BANKS);
1645        machine_check_poll(MCP_UC | m_fl, &all_banks);
1646
1647        cr4_set_bits(X86_CR4_MCE);
1648
1649        rdmsrl(MSR_IA32_MCG_CAP, cap);
1650        if (cap & MCG_CTL_P)
1651                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1652}
1653
1654static void __mcheck_cpu_init_clear_banks(void)
1655{
1656        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1657        int i;
1658
1659        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1660                struct mce_bank *b = &mce_banks[i];
1661
1662                if (!b->init)
1663                        continue;
1664                wrmsrl(msr_ops.ctl(i), b->ctl);
1665                wrmsrl(msr_ops.status(i), 0);
1666        }
1667}
1668
1669/*
1670 * Do a final check to see if there are any unused/RAZ banks.
1671 *
1672 * This must be done after the banks have been initialized and any quirks have
1673 * been applied.
1674 *
1675 * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs.
1676 * Otherwise, a user who disables a bank will not be able to re-enable it
1677 * without a system reboot.
1678 */
1679static void __mcheck_cpu_check_banks(void)
1680{
1681        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1682        u64 msrval;
1683        int i;
1684
1685        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1686                struct mce_bank *b = &mce_banks[i];
1687
1688                if (!b->init)
1689                        continue;
1690
1691                rdmsrl(msr_ops.ctl(i), msrval);
1692                b->init = !!msrval;
1693        }
1694}
1695
1696/*
1697 * During IFU recovery, Sandy Bridge-EP 4S processors set the RIPV and
1698 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1699 * Vol 3B Table 15-20). But this confuses both the code that determines
1700 * whether the machine check occurred in kernel or user mode, and also
1701 * the severity assessment code. Pretend that EIPV was set, and take the
1702 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1703 */
1704static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1705{
1706        if (bank != 0)
1707                return;
1708        if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1709                return;
1710        if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1711                          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1712                          MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1713                          MCACOD)) !=
1714                         (MCI_STATUS_UC|MCI_STATUS_EN|
1715                          MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1716                          MCI_STATUS_AR|MCACOD_INSTR))
1717                return;
1718
1719        m->mcgstatus |= MCG_STATUS_EIPV;
1720        m->ip = regs->ip;
1721        m->cs = regs->cs;
1722}
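/*
 * Note: this quirk is wired up as quirk_no_way_out for Intel family 6
 * model 45 (Sandy Bridge-EP) in __mcheck_cpu_apply_quirks() below, so a
 * bank 0 instruction-fetch error there borrows ip/cs from pt_regs.
 */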
1723
1724/* Add per CPU specific workarounds here */
1725static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1726{
1727        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1728        struct mca_config *cfg = &mca_cfg;
1729
1730        if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1731                pr_info("unknown CPU type - not enabling MCE support\n");
1732                return -EOPNOTSUPP;
1733        }
1734
1735        /* This should be disabled by the BIOS, but isn't always */
1736        if (c->x86_vendor == X86_VENDOR_AMD) {
1737                if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) {
1738                        /*
1739                         * disable GART TBL walk error reporting, which
1740                         * trips off incorrectly with the IOMMU & 3ware
1741                         * & Cerberus:
1742                         */
1743                        clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1744                }
1745                if (c->x86 < 0x11 && cfg->bootlog < 0) {
1746                        /*
1747                         * Lots of broken BIOSes around that don't clear them
1748                         * by default and leave crap in there. Don't log:
1749                         */
1750                        cfg->bootlog = 0;
1751                }
1752                /*
1753                 * Various K7s with broken bank 0 around. Always disable
1754                 * by default.
1755                 */
1756                if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0)
1757                        mce_banks[0].ctl = 0;
1758
1759                /*
1760                 * overflow_recov is supported for F15h Models 00h-0fh
1761                 * even though we don't have a CPUID bit for it.
1762                 */
1763                if (c->x86 == 0x15 && c->x86_model <= 0xf)
1764                        mce_flags.overflow_recov = 1;
1765
1766        }
1767
1768        if (c->x86_vendor == X86_VENDOR_INTEL) {
1769                /*
1770                 * The SDM documents that on family 6, bank 0 should not be
1771                 * written because it aliases to another special BIOS-controlled
1772                 * register. But it is not aliased anymore on model 0x1a and
1773                 * later.
1774                 * Don't ignore bank 0 completely because there could be a
1775                 * valid event later; merely don't write CTL0.
1776                 */
1777
1778                if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0)
1779                        mce_banks[0].init = false;
1780
1781                /*
1782                 * All newer Intel systems support MCE broadcasting. Enable
1783                 * synchronization with a one second timeout.
1784                 */
1785                if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1786                        cfg->monarch_timeout < 0)
1787                        cfg->monarch_timeout = USEC_PER_SEC;
1788
1789                /*
1790                 * There are also broken BIOSes on some Pentium M and
1791                 * earlier systems:
1792                 */
1793                if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1794                        cfg->bootlog = 0;
1795
1796                if (c->x86 == 6 && c->x86_model == 45)
1797                        quirk_no_way_out = quirk_sandybridge_ifu;
1798        }
1799
1800        if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
1801                /*
1802                 * All newer Zhaoxin CPUs support MCE broadcasting. Enable
1803                 * synchronization with a one second timeout.
1804                 */
1805                if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1806                        if (cfg->monarch_timeout < 0)
1807                                cfg->monarch_timeout = USEC_PER_SEC;
1808                }
1809        }
1810
1811        if (cfg->monarch_timeout < 0)
1812                cfg->monarch_timeout = 0;
1813        if (cfg->bootlog != 0)
1814                cfg->panic_timeout = 30;
1815
1816        return 0;
1817}
1818
1819static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1820{
1821        if (c->x86 != 5)
1822                return 0;
1823
1824        switch (c->x86_vendor) {
1825        case X86_VENDOR_INTEL:
1826                intel_p5_mcheck_init(c);
1827                return 1;
1828        case X86_VENDOR_CENTAUR:
1829                winchip_mcheck_init(c);
1830                return 1;
1831        default:
1832                return 0;
1833        }
1836}
1837
1838/*
1839 * Init basic CPU features needed for early decoding of MCEs.
1840 */
1841static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
1842{
1843        if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
1844                mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
1845                mce_flags.succor         = !!cpu_has(c, X86_FEATURE_SUCCOR);
1846                mce_flags.smca           = !!cpu_has(c, X86_FEATURE_SMCA);
1847                mce_flags.amd_threshold  = 1;
1848
1849                if (mce_flags.smca) {
1850                        msr_ops.ctl     = smca_ctl_reg;
1851                        msr_ops.status  = smca_status_reg;
1852                        msr_ops.addr    = smca_addr_reg;
1853                        msr_ops.misc    = smca_misc_reg;
1854                }
1855        }
1856}
1857
1858static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
1859{
1860        struct mca_config *cfg = &mca_cfg;
1861
1862        /*
1863         * All newer Centaur CPUs support MCE broadcasting. Enable
1864         * synchronization with a one second timeout.
1865         */
1866        if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
1867             c->x86 > 6) {
1868                if (cfg->monarch_timeout < 0)
1869                        cfg->monarch_timeout = USEC_PER_SEC;
1870        }
1871}
1872
1873static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
1874{
1875        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1876
1877        /*
1878         * These CPUs have MCA bank 8 which reports only one error type called
1879         * SVAD (System View Address Decoder). The reporting of that error is
1880         * controlled by IA32_MC8.CTL.0.
1881         *
1882         * If enabled, prefetching on these CPUs will cause an SVAD MCE when
1883         * virtual machines start and result in a system panic. Always disable
1884         * bank 8 SVAD error by default.
1885         */
1886        if ((c->x86 == 7 && c->x86_model == 0x1b) ||
1887            (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1888                if (this_cpu_read(mce_num_banks) > 8)
1889                        mce_banks[8].ctl = 0;
1890        }
1891
1892        intel_init_cmci();
1893        intel_init_lmce();
1894        mce_adjust_timer = cmci_intel_adjust_timer;
1895}
1896
1897static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
1898{
1899        intel_clear_lmce();
1900}
1901
1902static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1903{
1904        switch (c->x86_vendor) {
1905        case X86_VENDOR_INTEL:
1906                mce_intel_feature_init(c);
1907                mce_adjust_timer = cmci_intel_adjust_timer;
1908                break;
1909
1910        case X86_VENDOR_AMD:
1911                mce_amd_feature_init(c);
1912                break;
1914
1915        case X86_VENDOR_HYGON:
1916                mce_hygon_feature_init(c);
1917                break;
1918
1919        case X86_VENDOR_CENTAUR:
1920                mce_centaur_feature_init(c);
1921                break;
1922
1923        case X86_VENDOR_ZHAOXIN:
1924                mce_zhaoxin_feature_init(c);
1925                break;
1926
1927        default:
1928                break;
1929        }
1930}
1931
1932static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1933{
1934        switch (c->x86_vendor) {
1935        case X86_VENDOR_INTEL:
1936                mce_intel_feature_clear(c);
1937                break;
1938
1939        case X86_VENDOR_ZHAOXIN:
1940                mce_zhaoxin_feature_clear(c);
1941                break;
1942
1943        default:
1944                break;
1945        }
1946}
1947
1948static void mce_start_timer(struct timer_list *t)
1949{
1950        unsigned long iv = check_interval * HZ;
1951
1952        if (mca_cfg.ignore_ce || !iv)
1953                return;
1954
1955        this_cpu_write(mce_next_interval, iv);
1956        __start_timer(t, iv);
1957}
1958
1959static void __mcheck_cpu_setup_timer(void)
1960{
1961        struct timer_list *t = this_cpu_ptr(&mce_timer);
1962
1963        timer_setup(t, mce_timer_fn, TIMER_PINNED);
1964}
1965
1966static void __mcheck_cpu_init_timer(void)
1967{
1968        struct timer_list *t = this_cpu_ptr(&mce_timer);
1969
1970        timer_setup(t, mce_timer_fn, TIMER_PINNED);
1971        mce_start_timer(t);
1972}
1973
1974bool filter_mce(struct mce *m)
1975{
1976        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1977                return amd_filter_mce(m);
1978        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1979                return intel_filter_mce(m);
1980
1981        return false;
1982}
1983
1984/* Handle unconfigured int18 (should never happen) */
1985static noinstr void unexpected_machine_check(struct pt_regs *regs)
1986{
1987        instrumentation_begin();
1988        pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1989               smp_processor_id());
1990        instrumentation_end();
1991}
1992
1993/* Call the installed machine check handler for this CPU setup. */
1994void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
1995
1996static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
1997{
1998        irqentry_state_t irq_state;
1999
2000        WARN_ON_ONCE(user_mode(regs));
2001
2002        /*
2003         * Only required when from kernel mode. See
2004         * mce_check_crashing_cpu() for details.
2005         */
2006        if (machine_check_vector == do_machine_check &&
2007            mce_check_crashing_cpu())
2008                return;
2009
2010        irq_state = irqentry_nmi_enter(regs);
2011        /*
2012         * The call targets are marked noinstr, but objtool can't figure
2013         * that out because it's an indirect call. Annotate it.
2014         */
2015        instrumentation_begin();
2016
2017        machine_check_vector(regs);
2018
2019        instrumentation_end();
2020        irqentry_nmi_exit(regs, irq_state);
2021}
2022
2023static __always_inline void exc_machine_check_user(struct pt_regs *regs)
2024{
2025        irqentry_enter_from_user_mode(regs);
2026        instrumentation_begin();
2027
2028        machine_check_vector(regs);
2029
2030        instrumentation_end();
2031        irqentry_exit_to_user_mode(regs);
2032}
2033
2034#ifdef CONFIG_X86_64
2035/* MCE hit kernel mode */
2036DEFINE_IDTENTRY_MCE(exc_machine_check)
2037{
2038        unsigned long dr7;
2039
2040        dr7 = local_db_save();
2041        exc_machine_check_kernel(regs);
2042        local_db_restore(dr7);
2043}
2044
2045/* The user mode variant. */
2046DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
2047{
2048        unsigned long dr7;
2049
2050        dr7 = local_db_save();
2051        exc_machine_check_user(regs);
2052        local_db_restore(dr7);
2053}
2054#else
2055/* 32-bit unified entry point */
2056DEFINE_IDTENTRY_RAW(exc_machine_check)
2057{
2058        unsigned long dr7;
2059
2060        dr7 = local_db_save();
2061        if (user_mode(regs))
2062                exc_machine_check_user(regs);
2063        else
2064                exc_machine_check_kernel(regs);
2065        local_db_restore(dr7);
2066}
2067#endif
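/*
 * All of the entry points above save and clear DR7 via local_db_save()
 * before invoking the handler and restore it afterwards, so that hardware
 * breakpoints cannot raise a #DB while the machine check is being handled.
 */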
2068
2069/*
2070 * Called for each booted CPU to set up machine checks.
2071 * Must be called with preempt off:
2072 */
2073void mcheck_cpu_init(struct cpuinfo_x86 *c)
2074{
2075        if (mca_cfg.disabled)
2076                return;
2077
2078        if (__mcheck_cpu_ancient_init(c))
2079                return;
2080
2081        if (!mce_available(c))
2082                return;
2083
2084        __mcheck_cpu_cap_init();
2085
2086        if (__mcheck_cpu_apply_quirks(c) < 0) {
2087                mca_cfg.disabled = 1;
2088                return;
2089        }
2090
2091        if (mce_gen_pool_init()) {
2092                mca_cfg.disabled = 1;
2093                pr_emerg("Couldn't allocate MCE records pool!\n");
2094                return;
2095        }
2096
2097        machine_check_vector = do_machine_check;
2098
2099        __mcheck_cpu_init_early(c);
2100        __mcheck_cpu_init_generic();
2101        __mcheck_cpu_init_vendor(c);
2102        __mcheck_cpu_init_clear_banks();
2103        __mcheck_cpu_check_banks();
2104        __mcheck_cpu_setup_timer();
2105}
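/*
 * The ordering above matters: __mcheck_cpu_cap_init() sizes the banks,
 * __mcheck_cpu_apply_quirks() adjusts their ctl/init defaults, and only
 * then does __mcheck_cpu_init_clear_banks() write the CTL/STATUS MSRs;
 * __mcheck_cpu_check_banks() finally marks as unused any bank whose CTL
 * reads back as zero. The polling timer is only set up here; it is started
 * later, e.g. from mce_cpu_online().
 */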
2106
2107/*
2108 * Called for each booted CPU to clear some machine check opt-ins.
2109 */
2110void mcheck_cpu_clear(struct cpuinfo_x86 *c)
2111{
2112        if (mca_cfg.disabled)
2113                return;
2114
2115        if (!mce_available(c))
2116                return;
2117
2118        /*
2119         * This is where generic, x86-wide settings could be cleared, e.g.
2120         * __mcheck_cpu_clear_generic(c);
2121         */
2122        __mcheck_cpu_clear_vendor(c);
2123
2124}
2125
2126static void __mce_disable_bank(void *arg)
2127{
2128        int bank = *((int *)arg);
2129        __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
2130        cmci_disable_bank(bank);
2131}
2132
2133void mce_disable_bank(int bank)
2134{
2135        if (bank >= this_cpu_read(mce_num_banks)) {
2136                pr_warn(FW_BUG
2137                        "Ignoring request to disable invalid MCA bank %d.\n",
2138                        bank);
2139                return;
2140        }
2141        set_bit(bank, mce_banks_ce_disabled);
2142        on_each_cpu(__mce_disable_bank, &bank, 1);
2143}
2144
2145/*
2146 * mce=off Disables machine check
2147 * mce=no_cmci Disables CMCI
2148 * mce=no_lmce Disables LMCE
2149 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
2150 * mce=print_all Print all machine check logs to console
2151 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
2152 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
2153 *      monarchtimeout is how long to wait for other CPUs on machine
2154 *      check, or 0 to not wait
2155 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h
2156 *      and older.
2157 * mce=nobootlog Don't log MCEs from before booting.
2158 * mce=bios_cmci_threshold Don't program the CMCI threshold
2159 * mce=recovery force enable copy_mc_fragile()
2160 */
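/*
 * A few illustrative command lines, matching the parser below (not an
 * exhaustive or authoritative list):
 *
 *   mce=off           completely disable machine check support
 *   mce=dont_log_ce   clear corrected errors but create no log entries
 *   mce=2,500000      tolerant level 2, monarch_timeout of 500000 us
 */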
2161static int __init mcheck_enable(char *str)
2162{
2163        struct mca_config *cfg = &mca_cfg;
2164
2165        if (*str == 0) {
2166                enable_p5_mce();
2167                return 1;
2168        }
2169        if (*str == '=')
2170                str++;
2171        if (!strcmp(str, "off"))
2172                cfg->disabled = 1;
2173        else if (!strcmp(str, "no_cmci"))
2174                cfg->cmci_disabled = true;
2175        else if (!strcmp(str, "no_lmce"))
2176                cfg->lmce_disabled = 1;
2177        else if (!strcmp(str, "dont_log_ce"))
2178                cfg->dont_log_ce = true;
2179        else if (!strcmp(str, "print_all"))
2180                cfg->print_all = true;
2181        else if (!strcmp(str, "ignore_ce"))
2182                cfg->ignore_ce = true;
2183        else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2184                cfg->bootlog = (str[0] == 'b');
2185        else if (!strcmp(str, "bios_cmci_threshold"))
2186                cfg->bios_cmci_threshold = 1;
2187        else if (!strcmp(str, "recovery"))
2188                cfg->recovery = 1;
2189        else if (isdigit(str[0])) {
2190                if (get_option(&str, &cfg->tolerant) == 2)
2191                        get_option(&str, &(cfg->monarch_timeout));
2192        } else {
2193                pr_info("mce argument %s ignored. Please use /sys\n", str);
2194                return 0;
2195        }
2196        return 1;
2197}
2198__setup("mce", mcheck_enable);
2199
2200int __init mcheck_init(void)
2201{
2202        mce_register_decode_chain(&early_nb);
2203        mce_register_decode_chain(&mce_uc_nb);
2204        mce_register_decode_chain(&mce_default_nb);
2205        mcheck_vendor_init_severity();
2206
2207        INIT_WORK(&mce_work, mce_gen_pool_process);
2208        init_irq_work(&mce_irq_work, mce_irq_work_cb);
2209
2210        return 0;
2211}
2212
2213/*
2214 * mce_syscore: PM support
2215 */
2216
2217/*
2218 * Disable machine checks on suspend and shutdown. We can't really handle
2219 * them later.
2220 */
2221static void mce_disable_error_reporting(void)
2222{
2223        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2224        int i;
2225
2226        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2227                struct mce_bank *b = &mce_banks[i];
2228
2229                if (b->init)
2230                        wrmsrl(msr_ops.ctl(i), 0);
2231        }
2233}
2234
2235static void vendor_disable_error_reporting(void)
2236{
2237        /*
2238         * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these
2239         * MSRs are socket-wide. Disabling them for just a single offlined CPU
2240         * is bad, since it will inhibit reporting for all shared resources on
2241         * the socket like the last level cache (LLC), the integrated memory
2242         * controller (iMC), etc.
2243         */
2244        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
2245            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
2246            boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2247            boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
2248                return;
2249
2250        mce_disable_error_reporting();
2251}
2252
2253static int mce_syscore_suspend(void)
2254{
2255        vendor_disable_error_reporting();
2256        return 0;
2257}
2258
2259static void mce_syscore_shutdown(void)
2260{
2261        vendor_disable_error_reporting();
2262}
2263
2264/*
2265 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2266 * Only one CPU is active at this time, the others get re-added later using
2267 * CPU hotplug:
2268 */
2269static void mce_syscore_resume(void)
2270{
2271        __mcheck_cpu_init_generic();
2272        __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2273        __mcheck_cpu_init_clear_banks();
2274}
2275
2276static struct syscore_ops mce_syscore_ops = {
2277        .suspend        = mce_syscore_suspend,
2278        .shutdown       = mce_syscore_shutdown,
2279        .resume         = mce_syscore_resume,
2280};
2281
2282/*
2283 * mce_device: Sysfs support
2284 */
2285
2286static void mce_cpu_restart(void *data)
2287{
2288        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2289                return;
2290        __mcheck_cpu_init_generic();
2291        __mcheck_cpu_init_clear_banks();
2292        __mcheck_cpu_init_timer();
2293}
2294
2295/* Reinit MCEs after user configuration changes */
2296static void mce_restart(void)
2297{
2298        mce_timer_delete_all();
2299        on_each_cpu(mce_cpu_restart, NULL, 1);
2300}
2301
2302/* Toggle features for corrected errors */
2303static void mce_disable_cmci(void *data)
2304{
2305        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2306                return;
2307        cmci_clear();
2308}
2309
2310static void mce_enable_ce(void *all)
2311{
2312        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2313                return;
2314        cmci_reenable();
2315        cmci_recheck();
2316        if (all)
2317                __mcheck_cpu_init_timer();
2318}
2319
2320static struct bus_type mce_subsys = {
2321        .name           = "machinecheck",
2322        .dev_name       = "machinecheck",
2323};
2324
2325DEFINE_PER_CPU(struct device *, mce_device);
2326
2327static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr)
2328{
2329        return container_of(attr, struct mce_bank_dev, attr);
2330}
2331
2332static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2333                         char *buf)
2334{
2335        u8 bank = attr_to_bank(attr)->bank;
2336        struct mce_bank *b;
2337
2338        if (bank >= per_cpu(mce_num_banks, s->id))
2339                return -EINVAL;
2340
2341        b = &per_cpu(mce_banks_array, s->id)[bank];
2342
2343        if (!b->init)
2344                return -ENODEV;
2345
2346        return sprintf(buf, "%llx\n", b->ctl);
2347}
2348
2349static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2350                        const char *buf, size_t size)
2351{
2352        u8 bank = attr_to_bank(attr)->bank;
2353        struct mce_bank *b;
2354        u64 new;
2355
2356        if (kstrtou64(buf, 0, &new) < 0)
2357                return -EINVAL;
2358
2359        if (bank >= per_cpu(mce_num_banks, s->id))
2360                return -EINVAL;
2361
2362        b = &per_cpu(mce_banks_array, s->id)[bank];
2363
2364        if (!b->init)
2365                return -ENODEV;
2366
2367        b->ctl = new;
2368        mce_restart();
2369
2370        return size;
2371}
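/*
 * Usage sketch (the path assumes the usual sysfs layout for the
 * "machinecheck" subsystem registered below): each bank%d file holds the
 * MCi_CTL mask for that bank, read via show_bank() and written via
 * set_bank(), with a write triggering mce_restart(), e.g.:
 *
 *   cat /sys/devices/system/machinecheck/machinecheck0/bank4
 *   echo 0xffffffffffffffff > /sys/devices/system/machinecheck/machinecheck0/bank4
 */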
2372
2373static ssize_t set_ignore_ce(struct device *s,
2374                             struct device_attribute *attr,
2375                             const char *buf, size_t size)
2376{
2377        u64 new;
2378
2379        if (kstrtou64(buf, 0, &new) < 0)
2380                return -EINVAL;
2381
2382        mutex_lock(&mce_sysfs_mutex);
2383        if (mca_cfg.ignore_ce ^ !!new) {
2384                if (new) {
2385                        /* disable ce features */
2386                        mce_timer_delete_all();
2387                        on_each_cpu(mce_disable_cmci, NULL, 1);
2388                        mca_cfg.ignore_ce = true;
2389                } else {
2390                        /* enable ce features */
2391                        mca_cfg.ignore_ce = false;
2392                        on_each_cpu(mce_enable_ce, (void *)1, 1);
2393                }
2394        }
2395        mutex_unlock(&mce_sysfs_mutex);
2396
2397        return size;
2398}
2399
2400static ssize_t set_cmci_disabled(struct device *s,
2401                                 struct device_attribute *attr,
2402                                 const char *buf, size_t size)
2403{
2404        u64 new;
2405
2406        if (kstrtou64(buf, 0, &new) < 0)
2407                return -EINVAL;
2408
2409        mutex_lock(&mce_sysfs_mutex);
2410        if (mca_cfg.cmci_disabled ^ !!new) {
2411                if (new) {
2412                        /* disable cmci */
2413                        on_each_cpu(mce_disable_cmci, NULL, 1);
2414                        mca_cfg.cmci_disabled = true;
2415                } else {
2416                        /* enable cmci */
2417                        mca_cfg.cmci_disabled = false;
2418                        on_each_cpu(mce_enable_ce, NULL, 1);
2419                }
2420        }
2421        mutex_unlock(&mce_sysfs_mutex);
2422
2423        return size;
2424}
2425
2426static ssize_t store_int_with_restart(struct device *s,
2427                                      struct device_attribute *attr,
2428                                      const char *buf, size_t size)
2429{
2430        unsigned long old_check_interval = check_interval;
2431        ssize_t ret = device_store_ulong(s, attr, buf, size);
2432
2433        if (check_interval == old_check_interval)
2434                return ret;
2435
2436        mutex_lock(&mce_sysfs_mutex);
2437        mce_restart();
2438        mutex_unlock(&mce_sysfs_mutex);
2439
2440        return ret;
2441}
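/*
 * check_interval is in seconds; mce_start_timer() converts it to jiffies.
 * Writing a new value through this attribute funnels into mce_restart(),
 * which deletes every pending polling timer and re-arms it on each online
 * CPU. A value of 0 (or booting with mce=ignore_ce) disables polling.
 */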
2442
2443static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2444static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2445static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2446static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all);
2447
2448static struct dev_ext_attribute dev_attr_check_interval = {
2449        __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2450        &check_interval
2451};
2452
2453static struct dev_ext_attribute dev_attr_ignore_ce = {
2454        __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2455        &mca_cfg.ignore_ce
2456};
2457
2458static struct dev_ext_attribute dev_attr_cmci_disabled = {
2459        __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2460        &mca_cfg.cmci_disabled
2461};
2462
2463static struct device_attribute *mce_device_attrs[] = {
2464        &dev_attr_tolerant.attr,
2465        &dev_attr_check_interval.attr,
2466#ifdef CONFIG_X86_MCELOG_LEGACY
2467        &dev_attr_trigger,
2468#endif
2469        &dev_attr_monarch_timeout.attr,
2470        &dev_attr_dont_log_ce.attr,
2471        &dev_attr_print_all.attr,
2472        &dev_attr_ignore_ce.attr,
2473        &dev_attr_cmci_disabled.attr,
2474        NULL
2475};
2476
2477static cpumask_var_t mce_device_initialized;
2478
2479static void mce_device_release(struct device *dev)
2480{
2481        kfree(dev);
2482}
2483
2484/* Per CPU device init. All of the CPUs still share the same bank device: */
2485static int mce_device_create(unsigned int cpu)
2486{
2487        struct device *dev;
2488        int err;
2489        int i, j;
2490
2491        if (!mce_available(&boot_cpu_data))
2492                return -EIO;
2493
2494        dev = per_cpu(mce_device, cpu);
2495        if (dev)
2496                return 0;
2497
2498        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2499        if (!dev)
2500                return -ENOMEM;
2501        dev->id  = cpu;
2502        dev->bus = &mce_subsys;
2503        dev->release = &mce_device_release;
2504
2505        err = device_register(dev);
2506        if (err) {
2507                put_device(dev);
2508                return err;
2509        }
2510
2511        for (i = 0; mce_device_attrs[i]; i++) {
2512                err = device_create_file(dev, mce_device_attrs[i]);
2513                if (err)
2514                        goto error;
2515        }
2516        for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
2517                err = device_create_file(dev, &mce_bank_devs[j].attr);
2518                if (err)
2519                        goto error2;
2520        }
2521        cpumask_set_cpu(cpu, mce_device_initialized);
2522        per_cpu(mce_device, cpu) = dev;
2523
2524        return 0;
2525error2:
2526        while (--j >= 0)
2527                device_remove_file(dev, &mce_bank_devs[j].attr);
2528error:
2529        while (--i >= 0)
2530                device_remove_file(dev, mce_device_attrs[i]);
2531
2532        device_unregister(dev);
2533
2534        return err;
2535}
2536
2537static void mce_device_remove(unsigned int cpu)
2538{
2539        struct device *dev = per_cpu(mce_device, cpu);
2540        int i;
2541
2542        if (!cpumask_test_cpu(cpu, mce_device_initialized))
2543                return;
2544
2545        for (i = 0; mce_device_attrs[i]; i++)
2546                device_remove_file(dev, mce_device_attrs[i]);
2547
2548        for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
2549                device_remove_file(dev, &mce_bank_devs[i].attr);
2550
2551        device_unregister(dev);
2552        cpumask_clear_cpu(cpu, mce_device_initialized);
2553        per_cpu(mce_device, cpu) = NULL;
2554}
2555
2556/* Make sure there are no machine checks on offlined CPUs. */
2557static void mce_disable_cpu(void)
2558{
2559        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2560                return;
2561
2562        if (!cpuhp_tasks_frozen)
2563                cmci_clear();
2564
2565        vendor_disable_error_reporting();
2566}
2567
2568static void mce_reenable_cpu(void)
2569{
2570        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2571        int i;
2572
2573        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2574                return;
2575
2576        if (!cpuhp_tasks_frozen)
2577                cmci_reenable();
2578        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2579                struct mce_bank *b = &mce_banks[i];
2580
2581                if (b->init)
2582                        wrmsrl(msr_ops.ctl(i), b->ctl);
2583        }
2584}
2585
2586static int mce_cpu_dead(unsigned int cpu)
2587{
2588        mce_intel_hcpu_update(cpu);
2589
2590        /* intentionally ignoring frozen here */
2591        if (!cpuhp_tasks_frozen)
2592                cmci_rediscover();
2593        return 0;
2594}
2595
2596static int mce_cpu_online(unsigned int cpu)
2597{
2598        struct timer_list *t = this_cpu_ptr(&mce_timer);
2599        int ret;
2600
2601        mce_device_create(cpu);
2602
2603        ret = mce_threshold_create_device(cpu);
2604        if (ret) {
2605                mce_device_remove(cpu);
2606                return ret;
2607        }
2608        mce_reenable_cpu();
2609        mce_start_timer(t);
2610        return 0;
2611}
2612
2613static int mce_cpu_pre_down(unsigned int cpu)
2614{
2615        struct timer_list *t = this_cpu_ptr(&mce_timer);
2616
2617        mce_disable_cpu();
2618        del_timer_sync(t);
2619        mce_threshold_remove_device(cpu);
2620        mce_device_remove(cpu);
2621        return 0;
2622}
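/*
 * mce_cpu_online() and mce_cpu_pre_down() form the startup/teardown pair
 * for the dynamic "x86/mce:online" hotplug state registered in
 * mcheck_init_device() below: onlining a CPU recreates its sysfs device
 * and threshold devices, reprograms the bank CTL MSRs and re-arms the
 * polling timer; offlining undoes this, essentially in reverse order.
 */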
2623
2624static __init void mce_init_banks(void)
2625{
2626        int i;
2627
2628        for (i = 0; i < MAX_NR_BANKS; i++) {
2629                struct mce_bank_dev *b = &mce_bank_devs[i];
2630                struct device_attribute *a = &b->attr;
2631
2632                b->bank = i;
2633
2634                sysfs_attr_init(&a->attr);
2635                a->attr.name    = b->attrname;
2636                snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2637
2638                a->attr.mode    = 0644;
2639                a->show         = show_bank;
2640                a->store        = set_bank;
2641        }
2642}
2643
2644/*
2645 * When running on XEN, this initcall is ordered against the XEN mcelog
2646 * initcall:
2647 *
2648 *   device_initcall(xen_late_init_mcelog);
2649 *   device_initcall_sync(mcheck_init_device);
2650 */
2651static __init int mcheck_init_device(void)
2652{
2653        int err;
2654
2655        /*
2656         * Check if we have a spare virtual bit. This will only become
2657         * a problem if/when we move beyond 5-level page tables.
2658         */
2659        MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);
2660
2661        if (!mce_available(&boot_cpu_data)) {
2662                err = -EIO;
2663                goto err_out;
2664        }
2665
2666        if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2667                err = -ENOMEM;
2668                goto err_out;
2669        }
2670
2671        mce_init_banks();
2672
2673        err = subsys_system_register(&mce_subsys, NULL);
2674        if (err)
2675                goto err_out_mem;
2676
2677        err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
2678                                mce_cpu_dead);
2679        if (err)
2680                goto err_out_mem;
2681
2682        /*
2683         * Invokes mce_cpu_online() on all CPUs which are online when
2684         * the state is installed.
2685         */
2686        err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
2687                                mce_cpu_online, mce_cpu_pre_down);
2688        if (err < 0)
2689                goto err_out_online;
2690
2691        register_syscore_ops(&mce_syscore_ops);
2692
2693        return 0;
2694
2695err_out_online:
2696        cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
2697
2698err_out_mem:
2699        free_cpumask_var(mce_device_initialized);
2700
2701err_out:
2702        pr_err("Unable to init MCE device (rc: %d)\n", err);
2703
2704        return err;
2705}
2706device_initcall_sync(mcheck_init_device);
2707
2708/*
2709 * Old style boot options parsing. Only for compatibility.
2710 */
2711static int __init mcheck_disable(char *str)
2712{
2713        mca_cfg.disabled = 1;
2714        return 1;
2715}
2716__setup("nomce", mcheck_disable);
2717
2718#ifdef CONFIG_DEBUG_FS
2719struct dentry *mce_get_debugfs_dir(void)
2720{
2721        static struct dentry *dmce;
2722
2723        if (!dmce)
2724                dmce = debugfs_create_dir("mce", NULL);
2725
2726        return dmce;
2727}
2728
2729static void mce_reset(void)
2730{
2731        cpu_missing = 0;
2732        atomic_set(&mce_fake_panicked, 0);
2733        atomic_set(&mce_executing, 0);
2734        atomic_set(&mce_callin, 0);
2735        atomic_set(&global_nwo, 0);
2736        cpumask_setall(&mce_missing_cpus);
2737}
2738
2739static int fake_panic_get(void *data, u64 *val)
2740{
2741        *val = fake_panic;
2742        return 0;
2743}
2744
2745static int fake_panic_set(void *data, u64 val)
2746{
2747        mce_reset();
2748        fake_panic = val;
2749        return 0;
2750}
2751
2752DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set,
2753                         "%llu\n");
2754
2755static void __init mcheck_debugfs_init(void)
2756{
2757        struct dentry *dmce;
2758
2759        dmce = mce_get_debugfs_dir();
2760        debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL,
2761                                   &fake_panic_fops);
2762}
2763#else
2764static void __init mcheck_debugfs_init(void) { }
2765#endif
2766
2767static int __init mcheck_late_init(void)
2768{
2769        if (mca_cfg.recovery)
2770                enable_copy_mc_fragile();
2771
2772        mcheck_debugfs_init();
2773
2774        /*
2775         * Flush out everything that has been logged during early boot, now that
2776         * everything has been initialized (workqueues, decoders, ...).
2777         */
2778        mce_schedule_work();
2779
2780        return 0;
2781}
2782late_initcall(mcheck_late_init);
2783