TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/cpu/mce/core.c


  1 /*
  2  * Machine check handler.
  3  *
  4  * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
  5  * Rest from unknown author(s).
  6  * 2004 Andi Kleen. Rewrote most of it.
  7  * Copyright 2008 Intel Corporation
  8  * Author: Andi Kleen
  9  */
 10 
 11 #include <linux/thread_info.h>
 12 #include <linux/capability.h>
 13 #include <linux/miscdevice.h>
 14 #include <linux/ratelimit.h>
 15 #include <linux/rcupdate.h>
 16 #include <linux/kobject.h>
 17 #include <linux/uaccess.h>
 18 #include <linux/kdebug.h>
 19 #include <linux/kernel.h>
 20 #include <linux/percpu.h>
 21 #include <linux/string.h>
 22 #include <linux/device.h>
 23 #include <linux/syscore_ops.h>
 24 #include <linux/delay.h>
 25 #include <linux/ctype.h>
 26 #include <linux/sched.h>
 27 #include <linux/sysfs.h>
 28 #include <linux/types.h>
 29 #include <linux/slab.h>
 30 #include <linux/init.h>
 31 #include <linux/kmod.h>
 32 #include <linux/poll.h>
 33 #include <linux/nmi.h>
 34 #include <linux/cpu.h>
 35 #include <linux/ras.h>
 36 #include <linux/smp.h>
 37 #include <linux/fs.h>
 38 #include <linux/mm.h>
 39 #include <linux/debugfs.h>
 40 #include <linux/irq_work.h>
 41 #include <linux/export.h>
 42 #include <linux/jump_label.h>
 43 #include <linux/set_memory.h>
 44 
 45 #include <asm/intel-family.h>
 46 #include <asm/processor.h>
 47 #include <asm/traps.h>
 48 #include <asm/tlbflush.h>
 49 #include <asm/mce.h>
 50 #include <asm/msr.h>
 51 #include <asm/reboot.h>
 52 
 53 #include "internal.h"
 54 
 55 static DEFINE_MUTEX(mce_log_mutex);
 56 
 57 /* sysfs synchronization */
 58 static DEFINE_MUTEX(mce_sysfs_mutex);
 59 
 60 #define CREATE_TRACE_POINTS
 61 #include <trace/events/mce.h>
 62 
 63 #define SPINUNIT                100     /* 100ns */
 64 
 65 DEFINE_PER_CPU(unsigned, mce_exception_count);
 66 
 67 struct mce_bank *mce_banks __read_mostly;
 68 struct mce_vendor_flags mce_flags __read_mostly;
 69 
 70 struct mca_config mca_cfg __read_mostly = {
 71         .bootlog  = -1,
 72         /*
 73          * Tolerant levels:
 74          * 0: always panic on uncorrected errors, log corrected errors
 75          * 1: panic or SIGBUS on uncorrected errors, log corrected errors
 76          * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
 77          * 3: never panic or SIGBUS, log all errors (for testing only)
 78          */
 79         .tolerant = 1,
 80         .monarch_timeout = -1
 81 };
 82 
 83 static DEFINE_PER_CPU(struct mce, mces_seen);
 84 static unsigned long mce_need_notify;
 85 static int cpu_missing;
 86 
 87 /*
 88  * MCA banks polled by the period polling timer for corrected events.
 89  * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 90  */
 91 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
 92         [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
 93 };
 94 
 95 /*
 96  * MCA banks controlled through firmware first for corrected errors.
 97  * This is a global list of banks for which we won't enable CMCI and we
 98  * won't poll. Firmware controls these banks and is responsible for
 99  * reporting corrected errors through GHES. Uncorrected/recoverable
100  * errors are still notified through a machine check.
101  */
102 mce_banks_t mce_banks_ce_disabled;
103 
104 static struct work_struct mce_work;
105 static struct irq_work mce_irq_work;
106 
107 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
108 
109 /*
110  * CPU/chipset specific EDAC code can register a notifier call here to print
111  * MCE errors in a human-readable form.
112  */
113 BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
114 
 115 /* Do the initial setup of a struct mce */
116 void mce_setup(struct mce *m)
117 {
118         memset(m, 0, sizeof(struct mce));
119         m->cpu = m->extcpu = smp_processor_id();
120         /* need the internal __ version to avoid deadlocks */
121         m->time = __ktime_get_real_seconds();
122         m->cpuvendor = boot_cpu_data.x86_vendor;
123         m->cpuid = cpuid_eax(1);
124         m->socketid = cpu_data(m->extcpu).phys_proc_id;
125         m->apicid = cpu_data(m->extcpu).initial_apicid;
126         rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
127 
128         if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
129                 rdmsrl(MSR_PPIN, m->ppin);
130 
131         m->microcode = boot_cpu_data.microcode;
132 }
133 
134 DEFINE_PER_CPU(struct mce, injectm);
135 EXPORT_PER_CPU_SYMBOL_GPL(injectm);
136 
137 void mce_log(struct mce *m)
138 {
139         if (!mce_gen_pool_add(m))
140                 irq_work_queue(&mce_irq_work);
141 }
142 
143 void mce_inject_log(struct mce *m)
144 {
145         mutex_lock(&mce_log_mutex);
146         mce_log(m);
147         mutex_unlock(&mce_log_mutex);
148 }
149 EXPORT_SYMBOL_GPL(mce_inject_log);
150 
151 static struct notifier_block mce_srao_nb;
152 
153 /*
 154  * We run the default notifier only if the SRAO, the first and the default
 155  * notifiers are the only ones registered, i.e. only the mandatory
 156  * NUM_DEFAULT_NOTIFIERS notifiers are on the chain.
157  */
158 #define NUM_DEFAULT_NOTIFIERS   3
159 static atomic_t num_notifiers;
160 
161 void mce_register_decode_chain(struct notifier_block *nb)
162 {
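             /*
              * Reject priorities that fall in the gap between
              * MCE_PRIO_MCELOG and MCE_PRIO_EDAC.
              */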
163         if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC))
164                 return;
165 
166         atomic_inc(&num_notifiers);
167 
168         blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
169 }
170 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
171 
172 void mce_unregister_decode_chain(struct notifier_block *nb)
173 {
174         atomic_dec(&num_notifiers);
175 
176         blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
177 }
178 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
179 
180 static inline u32 ctl_reg(int bank)
181 {
182         return MSR_IA32_MCx_CTL(bank);
183 }
184 
185 static inline u32 status_reg(int bank)
186 {
187         return MSR_IA32_MCx_STATUS(bank);
188 }
189 
190 static inline u32 addr_reg(int bank)
191 {
192         return MSR_IA32_MCx_ADDR(bank);
193 }
194 
195 static inline u32 misc_reg(int bank)
196 {
197         return MSR_IA32_MCx_MISC(bank);
198 }
199 
200 static inline u32 smca_ctl_reg(int bank)
201 {
202         return MSR_AMD64_SMCA_MCx_CTL(bank);
203 }
204 
205 static inline u32 smca_status_reg(int bank)
206 {
207         return MSR_AMD64_SMCA_MCx_STATUS(bank);
208 }
209 
210 static inline u32 smca_addr_reg(int bank)
211 {
212         return MSR_AMD64_SMCA_MCx_ADDR(bank);
213 }
214 
215 static inline u32 smca_misc_reg(int bank)
216 {
217         return MSR_AMD64_SMCA_MCx_MISC(bank);
218 }
219 
220 struct mca_msr_regs msr_ops = {
221         .ctl    = ctl_reg,
222         .status = status_reg,
223         .addr   = addr_reg,
224         .misc   = misc_reg
225 };
226 
227 static void __print_mce(struct mce *m)
228 {
229         pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
230                  m->extcpu,
231                  (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
232                  m->mcgstatus, m->bank, m->status);
233 
234         if (m->ip) {
235                 pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
236                         !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
237                         m->cs, m->ip);
238 
239                 if (m->cs == __KERNEL_CS)
240                         pr_cont("{%pS}", (void *)(unsigned long)m->ip);
241                 pr_cont("\n");
242         }
243 
244         pr_emerg(HW_ERR "TSC %llx ", m->tsc);
245         if (m->addr)
246                 pr_cont("ADDR %llx ", m->addr);
247         if (m->misc)
248                 pr_cont("MISC %llx ", m->misc);
249 
250         if (mce_flags.smca) {
251                 if (m->synd)
252                         pr_cont("SYND %llx ", m->synd);
253                 if (m->ipid)
254                         pr_cont("IPID %llx ", m->ipid);
255         }
256 
257         pr_cont("\n");
258         /*
259          * Note this output is parsed by external tools and old fields
260          * should not be changed.
261          */
262         pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
263                 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
264                 m->microcode);
265 }
266 
267 static void print_mce(struct mce *m)
268 {
269         __print_mce(m);
270 
271         if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
272                 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
273 }
274 
275 #define PANIC_TIMEOUT 5 /* 5 seconds */
276 
277 static atomic_t mce_panicked;
278 
279 static int fake_panic;
280 static atomic_t mce_fake_panicked;
281 
282 /* Panic in progress. Enable interrupts and wait for final IPI */
283 static void wait_for_panic(void)
284 {
285         long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
286 
287         preempt_disable();
288         local_irq_enable();
289         while (timeout-- > 0)
290                 udelay(1);
291         if (panic_timeout == 0)
292                 panic_timeout = mca_cfg.panic_timeout;
 293         panic("Panicking machine check CPU died");
294 }
295 
296 static void mce_panic(const char *msg, struct mce *final, char *exp)
297 {
298         int apei_err = 0;
299         struct llist_node *pending;
300         struct mce_evt_llist *l;
301 
302         if (!fake_panic) {
303                 /*
304                  * Make sure only one CPU runs in machine check panic
305                  */
306                 if (atomic_inc_return(&mce_panicked) > 1)
307                         wait_for_panic();
308                 barrier();
309 
310                 bust_spinlocks(1);
311                 console_verbose();
312         } else {
313                 /* Don't log too much for fake panic */
314                 if (atomic_inc_return(&mce_fake_panicked) > 1)
315                         return;
316         }
317         pending = mce_gen_pool_prepare_records();
318         /* First print corrected ones that are still unlogged */
319         llist_for_each_entry(l, pending, llnode) {
320                 struct mce *m = &l->mce;
321                 if (!(m->status & MCI_STATUS_UC)) {
322                         print_mce(m);
323                         if (!apei_err)
324                                 apei_err = apei_write_mce(m);
325                 }
326         }
327         /* Now print uncorrected but with the final one last */
328         llist_for_each_entry(l, pending, llnode) {
329                 struct mce *m = &l->mce;
330                 if (!(m->status & MCI_STATUS_UC))
331                         continue;
332                 if (!final || mce_cmp(m, final)) {
333                         print_mce(m);
334                         if (!apei_err)
335                                 apei_err = apei_write_mce(m);
336                 }
337         }
338         if (final) {
339                 print_mce(final);
340                 if (!apei_err)
341                         apei_err = apei_write_mce(final);
342         }
343         if (cpu_missing)
344                 pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
345         if (exp)
346                 pr_emerg(HW_ERR "Machine check: %s\n", exp);
347         if (!fake_panic) {
348                 if (panic_timeout == 0)
349                         panic_timeout = mca_cfg.panic_timeout;
350                 panic(msg);
351         } else
352                 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
353 }
354 
355 /* Support code for software error injection */
356 
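     /*
      * Map an MSR number to the offset of the corresponding field in
      * struct mce, so that injected values can be read and written
      * instead of the real MSR.
      */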
357 static int msr_to_offset(u32 msr)
358 {
359         unsigned bank = __this_cpu_read(injectm.bank);
360 
361         if (msr == mca_cfg.rip_msr)
362                 return offsetof(struct mce, ip);
363         if (msr == msr_ops.status(bank))
364                 return offsetof(struct mce, status);
365         if (msr == msr_ops.addr(bank))
366                 return offsetof(struct mce, addr);
367         if (msr == msr_ops.misc(bank))
368                 return offsetof(struct mce, misc);
369         if (msr == MSR_IA32_MCG_STATUS)
370                 return offsetof(struct mce, mcgstatus);
371         return -1;
372 }
373 
374 /* MSR access wrappers used for error injection */
375 static u64 mce_rdmsrl(u32 msr)
376 {
377         u64 v;
378 
379         if (__this_cpu_read(injectm.finished)) {
380                 int offset = msr_to_offset(msr);
381 
382                 if (offset < 0)
383                         return 0;
384                 return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
385         }
386 
387         if (rdmsrl_safe(msr, &v)) {
388                 WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
389                 /*
390                  * Return zero in case the access faulted. This should
391                  * not happen normally but can happen if the CPU does
392                  * something weird, or if the code is buggy.
393                  */
394                 v = 0;
395         }
396 
397         return v;
398 }
399 
400 static void mce_wrmsrl(u32 msr, u64 v)
401 {
402         if (__this_cpu_read(injectm.finished)) {
403                 int offset = msr_to_offset(msr);
404 
405                 if (offset >= 0)
406                         *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
407                 return;
408         }
409         wrmsrl(msr, v);
410 }
411 
412 /*
413  * Collect all global (w.r.t. this processor) status about this machine
414  * check into our "mce" struct so that we can use it later to assess
415  * the severity of the problem as we read per-bank specific details.
416  */
417 static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
418 {
419         mce_setup(m);
420 
421         m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
422         if (regs) {
423                 /*
424                  * Get the address of the instruction at the time of
425                  * the machine check error.
426                  */
427                 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
428                         m->ip = regs->ip;
429                         m->cs = regs->cs;
430 
431                         /*
432                          * When in VM86 mode make the cs look like ring 3
433                          * always. This is a lie, but it's better than passing
434                          * the additional vm86 bit around everywhere.
435                          */
436                         if (v8086_mode(regs))
437                                 m->cs |= 3;
438                 }
439                 /* Use accurate RIP reporting if available. */
440                 if (mca_cfg.rip_msr)
441                         m->ip = mce_rdmsrl(mca_cfg.rip_msr);
442         }
443 }
444 
445 int mce_available(struct cpuinfo_x86 *c)
446 {
447         if (mca_cfg.disabled)
448                 return 0;
449         return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
450 }
451 
452 static void mce_schedule_work(void)
453 {
454         if (!mce_gen_pool_empty())
455                 schedule_work(&mce_work);
456 }
457 
458 static void mce_irq_work_cb(struct irq_work *entry)
459 {
460         mce_schedule_work();
461 }
462 
463 static void mce_report_event(struct pt_regs *regs)
464 {
465         if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
466                 mce_notify_irq();
467                 /*
468                  * Triggering the work queue here is just an insurance
469                  * policy in case the syscall exit notify handler
470                  * doesn't run soon enough or ends up running on the
471                  * wrong CPU (can happen when audit sleeps)
472                  */
473                 mce_schedule_work();
474                 return;
475         }
476 
477         irq_work_queue(&mce_irq_work);
478 }
479 
480 /*
481  * Check if the address reported by the CPU is in a format we can parse.
482  * It would be possible to add code for most other cases, but all would
483  * be somewhat complicated (e.g. segment offset would require an instruction
 484  * parser). So only support physical addresses up to page granularity for now.
485  */
486 int mce_usable_address(struct mce *m)
487 {
488         if (!(m->status & MCI_STATUS_ADDRV))
489                 return 0;
490 
491         /* Checks after this one are Intel-specific: */
492         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
493                 return 1;
494 
495         if (!(m->status & MCI_STATUS_MISCV))
496                 return 0;
497 
498         if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
499                 return 0;
500 
501         if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
502                 return 0;
503 
504         return 1;
505 }
506 EXPORT_SYMBOL_GPL(mce_usable_address);
507 
508 bool mce_is_memory_error(struct mce *m)
509 {
510         if (m->cpuvendor == X86_VENDOR_AMD ||
511             m->cpuvendor == X86_VENDOR_HYGON) {
512                 return amd_mce_is_memory_error(m);
513         } else if (m->cpuvendor == X86_VENDOR_INTEL) {
514                 /*
515                  * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
516                  *
517                  * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
518                  * indicating a memory error. Bit 8 is used for indicating a
519                  * cache hierarchy error. The combination of bit 2 and bit 3
 520                  * is used for indicating a `generic' cache hierarchy error.
 521                  * But we can't just blindly check the above bits, because if
 522                  * bit 11 is set, then it is a bus/interconnect error - and
 523                  * either way the above bits just give more detail on what
524                  * bus/interconnect error happened. Note that bit 12 can be
525                  * ignored, as it's the "filter" bit.
526                  */
527                 return (m->status & 0xef80) == BIT(7) ||
528                        (m->status & 0xef00) == BIT(8) ||
529                        (m->status & 0xeffc) == 0xc;
530         }
531 
532         return false;
533 }
534 EXPORT_SYMBOL_GPL(mce_is_memory_error);
535 
536 bool mce_is_correctable(struct mce *m)
537 {
538         if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
539                 return false;
540 
541         if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
542                 return false;
543 
544         if (m->status & MCI_STATUS_UC)
545                 return false;
546 
547         return true;
548 }
549 EXPORT_SYMBOL_GPL(mce_is_correctable);
550 
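     /*
      * Feed correctable memory errors with a usable address to the
      * Correctable Errors Collector (CEC). Returns true if the CEC
      * absorbed the error.
      */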
551 static bool cec_add_mce(struct mce *m)
552 {
553         if (!m)
554                 return false;
555 
556         /* We eat only correctable DRAM errors with usable addresses. */
557         if (mce_is_memory_error(m) &&
558             mce_is_correctable(m)  &&
559             mce_usable_address(m))
560                 if (!cec_add_elem(m->addr >> PAGE_SHIFT))
561                         return true;
562 
563         return false;
564 }
565 
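     /*
      * First notifier on the decode chain: correctable DRAM errors that the
      * CEC absorbs stop here; everything else is traced and userspace is
      * notified.
      */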
566 static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
567                               void *data)
568 {
569         struct mce *m = (struct mce *)data;
570 
571         if (!m)
572                 return NOTIFY_DONE;
573 
574         if (cec_add_mce(m))
575                 return NOTIFY_STOP;
576 
577         /* Emit the trace record: */
578         trace_mce_record(m);
579 
580         set_bit(0, &mce_need_notify);
581 
582         mce_notify_irq();
583 
584         return NOTIFY_DONE;
585 }
586 
587 static struct notifier_block first_nb = {
588         .notifier_call  = mce_first_notifier,
589         .priority       = MCE_PRIO_FIRST,
590 };
591 
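     /*
      * SRAO (Software Recoverable Action Optional) errors: try to poison and
      * offline the reported page before anything consumes it.
      */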
592 static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
593                                 void *data)
594 {
595         struct mce *mce = (struct mce *)data;
596         unsigned long pfn;
597 
598         if (!mce)
599                 return NOTIFY_DONE;
600 
601         if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
602                 pfn = mce->addr >> PAGE_SHIFT;
603                 if (!memory_failure(pfn, 0))
604                         set_mce_nospec(pfn);
605         }
606 
607         return NOTIFY_OK;
608 }
609 static struct notifier_block mce_srao_nb = {
610         .notifier_call  = srao_decode_notifier,
611         .priority       = MCE_PRIO_SRAO,
612 };
613 
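     /*
      * Fallback notifier: if only the mandatory notifiers are registered,
      * dump the raw MCE record to the console.
      */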
614 static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
615                                 void *data)
616 {
617         struct mce *m = (struct mce *)data;
618 
619         if (!m)
620                 return NOTIFY_DONE;
621 
622         if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS)
623                 return NOTIFY_DONE;
624 
625         __print_mce(m);
626 
627         return NOTIFY_DONE;
628 }
629 
630 static struct notifier_block mce_default_nb = {
631         .notifier_call  = mce_default_notifier,
632         /* lowest prio, we want it to run last. */
633         .priority       = MCE_PRIO_LOWEST,
634 };
635 
636 /*
637  * Read ADDR and MISC registers.
638  */
639 static void mce_read_aux(struct mce *m, int i)
640 {
641         if (m->status & MCI_STATUS_MISCV)
642                 m->misc = mce_rdmsrl(msr_ops.misc(i));
643 
644         if (m->status & MCI_STATUS_ADDRV) {
645                 m->addr = mce_rdmsrl(msr_ops.addr(i));
646 
647                 /*
648                  * Mask the reported address by the reported granularity.
649                  */
650                 if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
651                         u8 shift = MCI_MISC_ADDR_LSB(m->misc);
652                         m->addr >>= shift;
653                         m->addr <<= shift;
654                 }
655 
656                 /*
657                  * Extract [55:<lsb>] where lsb is the least significant
658                  * *valid* bit of the address bits.
659                  */
660                 if (mce_flags.smca) {
661                         u8 lsb = (m->addr >> 56) & 0x3f;
662 
663                         m->addr &= GENMASK_ULL(55, lsb);
664                 }
665         }
666 
667         if (mce_flags.smca) {
668                 m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));
669 
670                 if (m->status & MCI_STATUS_SYNDV)
671                         m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
672         }
673 }
674 
675 DEFINE_PER_CPU(unsigned, mce_poll_count);
676 
677 /*
678  * Poll for corrected events or events that happened before reset.
679  * Those are just logged through /dev/mcelog.
680  *
681  * This is executed in standard interrupt context.
682  *
 683  * Note: the spec recommends panicking for fatal unsignalled
 684  * errors here. However, this would be quite problematic --
 685  * we would need to reimplement the Monarch handling and
 686  * it would mess up the exclusion between the exception handler
 687  * and the poll handler -- so we skip this for now.
 688  * These cases should not happen anyway, or only when the CPU
 689  * is already totally confused. In that case it's likely it will
 690  * not fully execute the machine check handler either.
691  */
692 bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
693 {
694         bool error_seen = false;
695         struct mce m;
696         int i;
697 
698         this_cpu_inc(mce_poll_count);
699 
700         mce_gather_info(&m, NULL);
701 
702         if (flags & MCP_TIMESTAMP)
703                 m.tsc = rdtsc();
704 
705         for (i = 0; i < mca_cfg.banks; i++) {
706                 if (!mce_banks[i].ctl || !test_bit(i, *b))
707                         continue;
708 
709                 m.misc = 0;
710                 m.addr = 0;
711                 m.bank = i;
712 
713                 barrier();
714                 m.status = mce_rdmsrl(msr_ops.status(i));
715 
716                 /* If this entry is not valid, ignore it */
717                 if (!(m.status & MCI_STATUS_VAL))
718                         continue;
719 
720                 /*
721                  * If we are logging everything (at CPU online) or this
722                  * is a corrected error, then we must log it.
723                  */
724                 if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
725                         goto log_it;
726 
727                 /*
728                  * Newer Intel systems that support software error
729                  * recovery need to make additional checks. Other
730                  * CPUs should skip over uncorrected errors, but log
731                  * everything else.
732                  */
733                 if (!mca_cfg.ser) {
734                         if (m.status & MCI_STATUS_UC)
735                                 continue;
736                         goto log_it;
737                 }
738 
739                 /* Log "not enabled" (speculative) errors */
740                 if (!(m.status & MCI_STATUS_EN))
741                         goto log_it;
742 
743                 /*
744                  * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
745                  * UC == 1 && PCC == 0 && S == 0
746                  */
747                 if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
748                         goto log_it;
749 
750                 /*
751                  * Skip anything else. Presumption is that our read of this
752                  * bank is racing with a machine check. Leave the log alone
753                  * for do_machine_check() to deal with it.
754                  */
755                 continue;
756 
757 log_it:
758                 error_seen = true;
759 
760                 mce_read_aux(&m, i);
761 
762                 m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
763 
764                 /*
765                  * Don't get the IP here because it's unlikely to
766                  * have anything to do with the actual error location.
767                  */
768                 if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
769                         mce_log(&m);
770                 else if (mce_usable_address(&m)) {
771                         /*
772                          * Although we skipped logging this, we still want
773                          * to take action. Add to the pool so the registered
774                          * notifiers will see it.
775                          */
776                         if (!mce_gen_pool_add(&m))
777                                 mce_schedule_work();
778                 }
779 
780                 /*
781                  * Clear state for this bank.
782                  */
783                 mce_wrmsrl(msr_ops.status(i), 0);
784         }
785 
786         /*
787          * Don't clear MCG_STATUS here because it's only defined for
788          * exceptions.
789          */
790 
791         sync_core();
792 
793         return error_seen;
794 }
795 EXPORT_SYMBOL_GPL(machine_check_poll);
796 
797 /*
798  * Do a quick check if any of the events requires a panic.
799  * This decides if we keep the events around or clear them.
800  */
801 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
802                           struct pt_regs *regs)
803 {
804         char *tmp;
805         int i;
806 
807         for (i = 0; i < mca_cfg.banks; i++) {
808                 m->status = mce_rdmsrl(msr_ops.status(i));
809                 if (!(m->status & MCI_STATUS_VAL))
810                         continue;
811 
812                 __set_bit(i, validp);
813                 if (quirk_no_way_out)
814                         quirk_no_way_out(i, m, regs);
815 
816                 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
817                         m->bank = i;
818                         mce_read_aux(m, i);
819                         *msg = tmp;
820                         return 1;
821                 }
822         }
823         return 0;
824 }
825 
826 /*
827  * Variable to establish order between CPUs while scanning.
 828  * Each CPU spins initially until executing equals its number.
829  */
830 static atomic_t mce_executing;
831 
832 /*
833  * Defines order of CPUs on entry. First CPU becomes Monarch.
834  */
835 static atomic_t mce_callin;
836 
837 /*
838  * Check if a timeout waiting for other CPUs happened.
839  */
840 static int mce_timed_out(u64 *t, const char *msg)
841 {
842         /*
843          * The others already did panic for some reason.
844          * Bail out like in a timeout.
845          * rmb() to tell the compiler that system_state
846          * might have been modified by someone else.
847          */
848         rmb();
849         if (atomic_read(&mce_panicked))
850                 wait_for_panic();
851         if (!mca_cfg.monarch_timeout)
852                 goto out;
853         if ((s64)*t < SPINUNIT) {
854                 if (mca_cfg.tolerant <= 1)
855                         mce_panic(msg, NULL, NULL);
856                 cpu_missing = 1;
857                 return 1;
858         }
859         *t -= SPINUNIT;
860 out:
861         touch_nmi_watchdog();
862         return 0;
863 }
864 
865 /*
 866  * The Monarch's reign.  The Monarch is the CPU who entered
 867  * the machine check handler first. It waits for the others to
 868  * raise the exception too and then grades them. If any
 869  * error is fatal, it panics. Only then does it let the others continue.
 870  *
 871  * The other CPUs entering the MCE handler will be controlled by the
 872  * Monarch. They are called Subjects.
 873  *
 874  * This way we prevent any potential data corruption in an unrecoverable case
 875  * and also make sure that all CPUs' errors are always examined.
 876  *
 877  * This also detects the case of a machine check event coming from outer
 878  * space (not detected by any CPU). In this case some external agent wants
 879  * us to shut down, so panic too.
 880  *
 881  * The other CPUs might still decide to panic if the handler happens
 882  * in an unrecoverable place, but in this case the system is in a semi-stable
 883  * state and won't corrupt anything by itself. It's ok to let the others
884  * continue for a bit first.
885  *
886  * All the spin loops have timeouts; when a timeout happens a CPU
887  * typically elects itself to be Monarch.
888  */
889 static void mce_reign(void)
890 {
891         int cpu;
892         struct mce *m = NULL;
893         int global_worst = 0;
894         char *msg = NULL;
895         char *nmsg = NULL;
896 
897         /*
898          * This CPU is the Monarch and the other CPUs have run
899          * through their handlers.
900          * Grade the severity of the errors of all the CPUs.
901          */
902         for_each_possible_cpu(cpu) {
903                 int severity = mce_severity(&per_cpu(mces_seen, cpu),
904                                             mca_cfg.tolerant,
905                                             &nmsg, true);
906                 if (severity > global_worst) {
907                         msg = nmsg;
908                         global_worst = severity;
909                         m = &per_cpu(mces_seen, cpu);
910                 }
911         }
912 
913         /*
914          * Cannot recover? Panic here then.
915          * This dumps all the mces in the log buffer and stops the
916          * other CPUs.
917          */
918         if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
919                 mce_panic("Fatal machine check", m, msg);
920 
921         /*
 922          * For a UC error somewhere we let the CPU that detects it handle it.
 923          * We must also let the others continue, otherwise the handling
 924          * CPU could deadlock on a lock.
925          */
926 
927         /*
928          * No machine check event found. Must be some external
929          * source or one CPU is hung. Panic.
930          */
931         if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
932                 mce_panic("Fatal machine check from unknown source", NULL, NULL);
933 
934         /*
935          * Now clear all the mces_seen so that they don't reappear on
936          * the next mce.
937          */
938         for_each_possible_cpu(cpu)
939                 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
940 }
941 
942 static atomic_t global_nwo;
943 
944 /*
945  * Start of Monarch synchronization. This waits until all CPUs have
946  * entered the exception handler and then determines if any of them
947  * saw a fatal event that requires panic. Then it executes them
948  * in the entry order.
949  * TBD double check parallel CPU hotunplug
950  */
951 static int mce_start(int *no_way_out)
952 {
953         int order;
954         int cpus = num_online_cpus();
955         u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
956 
957         if (!timeout)
958                 return -1;
959 
960         atomic_add(*no_way_out, &global_nwo);
961         /*
962          * Rely on the implied barrier below, such that global_nwo
963          * is updated before mce_callin.
964          */
965         order = atomic_inc_return(&mce_callin);
966 
967         /*
968          * Wait for everyone.
969          */
970         while (atomic_read(&mce_callin) != cpus) {
971                 if (mce_timed_out(&timeout,
972                                   "Timeout: Not all CPUs entered broadcast exception handler")) {
973                         atomic_set(&global_nwo, 0);
974                         return -1;
975                 }
976                 ndelay(SPINUNIT);
977         }
978 
979         /*
980          * mce_callin should be read before global_nwo
981          */
982         smp_rmb();
983 
984         if (order == 1) {
985                 /*
986                  * Monarch: Starts executing now, the others wait.
987                  */
988                 atomic_set(&mce_executing, 1);
989         } else {
990                 /*
991                  * Subject: Now start the scanning loop one by one in
992                  * the original callin order.
 993                  * This way, when there are any shared banks, they will be
 994                  * seen by only one CPU before being cleared, avoiding duplicates.
995                  */
996                 while (atomic_read(&mce_executing) < order) {
997                         if (mce_timed_out(&timeout,
998                                           "Timeout: Subject CPUs unable to finish machine check processing")) {
999                                 atomic_set(&global_nwo, 0);
1000                                 return -1;
1001                         }
1002                         ndelay(SPINUNIT);
1003                 }
1004         }
1005 
1006         /*
1007          * Cache the global no_way_out state.
1008          */
1009         *no_way_out = atomic_read(&global_nwo);
1010 
1011         return order;
1012 }
1013 
1014 /*
1015  * Synchronize between CPUs after main scanning loop.
1016  * This invokes the bulk of the Monarch processing.
1017  */
1018 static int mce_end(int order)
1019 {
1020         int ret = -1;
1021         u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1022 
1023         if (!timeout)
1024                 goto reset;
1025         if (order < 0)
1026                 goto reset;
1027 
1028         /*
1029          * Allow others to run.
1030          */
1031         atomic_inc(&mce_executing);
1032 
1033         if (order == 1) {
1034                 /* CHECKME: Can this race with a parallel hotplug? */
1035                 int cpus = num_online_cpus();
1036 
1037                 /*
1038                  * Monarch: Wait for everyone to go through their scanning
1039                  * loops.
1040                  */
1041                 while (atomic_read(&mce_executing) <= cpus) {
1042                         if (mce_timed_out(&timeout,
1043                                           "Timeout: Monarch CPU unable to finish machine check processing"))
1044                                 goto reset;
1045                         ndelay(SPINUNIT);
1046                 }
1047 
1048                 mce_reign();
1049                 barrier();
1050                 ret = 0;
1051         } else {
1052                 /*
1053                  * Subject: Wait for Monarch to finish.
1054                  */
1055                 while (atomic_read(&mce_executing) != 0) {
1056                         if (mce_timed_out(&timeout,
1057                                           "Timeout: Monarch CPU did not finish machine check processing"))
1058                                 goto reset;
1059                         ndelay(SPINUNIT);
1060                 }
1061 
1062                 /*
1063                  * Don't reset anything. That's done by the Monarch.
1064                  */
1065                 return 0;
1066         }
1067 
1068         /*
1069          * Reset all global state.
1070          */
1071 reset:
1072         atomic_set(&global_nwo, 0);
1073         atomic_set(&mce_callin, 0);
1074         barrier();
1075 
1076         /*
1077          * Let others run again.
1078          */
1079         atomic_set(&mce_executing, 0);
1080         return ret;
1081 }
1082 
1083 static void mce_clear_state(unsigned long *toclear)
1084 {
1085         int i;
1086 
1087         for (i = 0; i < mca_cfg.banks; i++) {
1088                 if (test_bit(i, toclear))
1089                         mce_wrmsrl(msr_ops.status(i), 0);
1090         }
1091 }
1092 
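     /*
      * Try to recover from an uncorrected memory error by offlining the
      * affected page. Returns non-zero if recovery failed.
      */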
1093 static int do_memory_failure(struct mce *m)
1094 {
1095         int flags = MF_ACTION_REQUIRED;
1096         int ret;
1097 
1098         pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
1099         if (!(m->mcgstatus & MCG_STATUS_RIPV))
1100                 flags |= MF_MUST_KILL;
1101         ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
1102         if (ret)
1103                 pr_err("Memory error not recovered");
1104         else
1105                 set_mce_nospec(m->addr >> PAGE_SHIFT);
1106         return ret;
1107 }
1108 
1109 
1110 /*
1111  * Cases where we avoid rendezvous handler timeout:
1112  * 1) If this CPU is offline.
1113  *
1114  * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
1115  *  skip those CPUs which remain looping in the 1st kernel - see
1116  *  crash_nmi_callback().
1117  *
1118  * Note: there still is a small window between kexec-ing and the new,
1119  * kdump kernel establishing a new #MC handler where a broadcasted MCE
1120  * might not get handled properly.
1121  */
1122 static bool __mc_check_crashing_cpu(int cpu)
1123 {
1124         if (cpu_is_offline(cpu) ||
1125             (crashing_cpu != -1 && crashing_cpu != cpu)) {
1126                 u64 mcgstatus;
1127 
1128                 mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
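                     /*
                      * RIPV set: it is safe to return from the exception, so
                      * just clear MCG_STATUS and tell the caller to bail out.
                      */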
1129                 if (mcgstatus & MCG_STATUS_RIPV) {
1130                         mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1131                         return true;
1132                 }
1133         }
1134         return false;
1135 }
1136 
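     /*
      * Walk all MCA banks: grade each valid error, log it, mark the bank
      * for clearing in *toclear and record the worst error seen in
      * *worst and *final.
      */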
1137 static void __mc_scan_banks(struct mce *m, struct mce *final,
1138                             unsigned long *toclear, unsigned long *valid_banks,
1139                             int no_way_out, int *worst)
1140 {
1141         struct mca_config *cfg = &mca_cfg;
1142         int severity, i;
1143 
1144         for (i = 0; i < cfg->banks; i++) {
1145                 __clear_bit(i, toclear);
1146                 if (!test_bit(i, valid_banks))
1147                         continue;
1148 
1149                 if (!mce_banks[i].ctl)
1150                         continue;
1151 
1152                 m->misc = 0;
1153                 m->addr = 0;
1154                 m->bank = i;
1155 
1156                 m->status = mce_rdmsrl(msr_ops.status(i));
1157                 if (!(m->status & MCI_STATUS_VAL))
1158                         continue;
1159 
1160                 /*
1161                  * Corrected or non-signaled errors are handled by
1162                  * machine_check_poll(). Leave them alone, unless this panics.
1163                  */
1164                 if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1165                         !no_way_out)
1166                         continue;
1167 
1168                 /* Set taint even when machine check was not enabled. */
1169                 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1170 
1171                 severity = mce_severity(m, cfg->tolerant, NULL, true);
1172 
1173                 /*
1174                  * If the machine check was for a corrected or deferred error,
1175                  * leave it to the respective handler, unless we're panicking.
1176                  */
1177                 if ((severity == MCE_KEEP_SEVERITY ||
1178                      severity == MCE_UCNA_SEVERITY) && !no_way_out)
1179                         continue;
1180 
1181                 __set_bit(i, toclear);
1182 
1183                 /* Machine check event was not enabled. Clear, but ignore. */
1184                 if (severity == MCE_NO_SEVERITY)
1185                         continue;
1186 
1187                 mce_read_aux(m, i);
1188 
1189                 /* assuming valid severity level != 0 */
1190                 m->severity = severity;
1191 
1192                 mce_log(m);
1193 
1194                 if (severity > *worst) {
1195                         *final = *m;
1196                         *worst = severity;
1197                 }
1198         }
1199 
1200         /* mce_clear_state will clear *final, save locally for use later */
1201         *m = *final;
1202 }
1203 
1204 /*
1205  * The actual machine check handler. This only handles real
1206  * exceptions when something got corrupted coming in through int 18.
1207  *
1208  * This is executed in NMI context not subject to normal locking rules. This
1209  * implies that most kernel services cannot be safely used. Don't even
1210  * think about putting a printk in there!
1211  *
1212  * On Intel systems this is entered on all CPUs in parallel through
1213  * MCE broadcast. However some CPUs might be broken beyond repair,
1214  * so always be careful when synchronizing with others.
1215  */
1216 void do_machine_check(struct pt_regs *regs, long error_code)
1217 {
1218         DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1219         DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1220         struct mca_config *cfg = &mca_cfg;
1221         int cpu = smp_processor_id();
1222         char *msg = "Unknown";
1223         struct mce m, *final;
1224         int worst = 0;
1225 
1226         /*
1227          * Establish sequential order between the CPUs entering the machine
1228          * check handler.
1229          */
1230         int order = -1;
1231 
1232         /*
1233          * If no_way_out gets set, there is no safe way to recover from this
1234          * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
1235          */
1236         int no_way_out = 0;
1237 
1238         /*
1239          * If kill_it gets set, there might be a way to recover from this
1240          * error.
1241          */
1242         int kill_it = 0;
1243 
1244         /*
1245          * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
1246          * on Intel.
1247          */
1248         int lmce = 1;
1249 
1250         if (__mc_check_crashing_cpu(cpu))
1251                 return;
1252 
1253         ist_enter(regs);
1254 
1255         this_cpu_inc(mce_exception_count);
1256 
1257         mce_gather_info(&m, regs);
1258         m.tsc = rdtsc();
1259 
1260         final = this_cpu_ptr(&mces_seen);
1261         *final = m;
1262 
1263         memset(valid_banks, 0, sizeof(valid_banks));
1264         no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1265 
1266         barrier();
1267 
1268         /*
1269          * When there is no restart IP we might need to kill or panic.
1270          * Assume the worst for now, but if we find the
1271          * severity is MCE_AR_SEVERITY we have other options.
1272          */
1273         if (!(m.mcgstatus & MCG_STATUS_RIPV))
1274                 kill_it = 1;
1275 
1276         /*
1277          * Check if this MCE is signaled to only this logical processor,
1278          * on Intel only.
1279          */
1280         if (m.cpuvendor == X86_VENDOR_INTEL)
1281                 lmce = m.mcgstatus & MCG_STATUS_LMCES;
1282 
1283         /*
1284          * Local machine check may already know that we have to panic.
1285          * Broadcast machine check begins rendezvous in mce_start().
1286          * Go through all banks in exclusion of the other CPUs. This way we
1287          * don't report duplicated events on shared banks because the first one
1288          * to see it will clear it.
1289          */
1290         if (lmce) {
1291                 if (no_way_out)
1292                         mce_panic("Fatal local machine check", &m, msg);
1293         } else {
1294                 order = mce_start(&no_way_out);
1295         }
1296 
1297         __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
1298 
1299         if (!no_way_out)
1300                 mce_clear_state(toclear);
1301 
1302         /*
1303          * Do most of the synchronization with other CPUs.
1304          * When there's any problem use only local no_way_out state.
1305          */
1306         if (!lmce) {
1307                 if (mce_end(order) < 0)
1308                         no_way_out = worst >= MCE_PANIC_SEVERITY;
1309         } else {
1310                 /*
1311                  * If there was a fatal machine check we should have
1312                  * already called mce_panic earlier in this function.
1313                  * Since we re-read the banks, we might have found
1314                  * something new. Check again to see if we found a
1315                  * fatal error. We call "mce_severity()" again to
1316                  * make sure we have the right "msg".
1317                  */
1318                 if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
1319                         mce_severity(&m, cfg->tolerant, &msg, true);
1320                         mce_panic("Local fatal machine check!", &m, msg);
1321                 }
1322         }
1323 
1324         /*
1325          * If tolerant is at an insane level we drop requests to kill
1326          * processes and continue even when there is no way out.
1327          */
1328         if (cfg->tolerant == 3)
1329                 kill_it = 0;
1330         else if (no_way_out)
1331                 mce_panic("Fatal machine check on current CPU", &m, msg);
1332 
1333         if (worst > 0)
1334                 mce_report_event(regs);
1335         mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1336 
1337         sync_core();
1338 
1339         if (worst != MCE_AR_SEVERITY && !kill_it)
1340                 goto out_ist;
1341 
1342         /* Fault was in user mode and we need to take some action */
1343         if ((m.cs & 3) == 3) {
1344                 ist_begin_non_atomic(regs);
1345                 local_irq_enable();
1346 
1347                 if (kill_it || do_memory_failure(&m))
1348                         force_sig(SIGBUS, current);
1349                 local_irq_disable();
1350                 ist_end_non_atomic();
1351         } else {
1352                 if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
1353                         mce_panic("Failed kernel mode recovery", &m, NULL);
1354         }
1355 
1356 out_ist:
1357         ist_exit(regs);
1358 }
1359 EXPORT_SYMBOL_GPL(do_machine_check);
1360 
1361 #ifndef CONFIG_MEMORY_FAILURE
1362 int memory_failure(unsigned long pfn, int flags)
1363 {
1364         /* mce_severity() should not hand us an ACTION_REQUIRED error */
1365         BUG_ON(flags & MF_ACTION_REQUIRED);
1366         pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1367                "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1368                pfn);
1369 
1370         return 0;
1371 }
1372 #endif
1373 
1374 /*
1375  * Periodic polling timer for "silent" machine check errors.  If the
1376  * poller finds an MCE, poll 2x faster.  When the poller finds no more
1377  * errors, poll 2x slower (up to check_interval seconds).
1378  */
1379 static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1380 
1381 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1382 static DEFINE_PER_CPU(struct timer_list, mce_timer);
1383 
1384 static unsigned long mce_adjust_timer_default(unsigned long interval)
1385 {
1386         return interval;
1387 }
1388 
1389 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1390 
1391 static void __start_timer(struct timer_list *t, unsigned long interval)
1392 {
1393         unsigned long when = jiffies + interval;
1394         unsigned long flags;
1395 
1396         local_irq_save(flags);
1397 
1398         if (!timer_pending(t) || time_before(when, t->expires))
1399                 mod_timer(t, round_jiffies(when));
1400 
1401         local_irq_restore(flags);
1402 }
1403 
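     /*
      * Per-CPU polling timer callback: poll the banks for corrected errors
      * and adapt the polling interval depending on whether anything was
      * found.
      */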
1404 static void mce_timer_fn(struct timer_list *t)
1405 {
1406         struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
1407         unsigned long iv;
1408 
1409         WARN_ON(cpu_t != t);
1410 
1411         iv = __this_cpu_read(mce_next_interval);
1412 
1413         if (mce_available(this_cpu_ptr(&cpu_info))) {
1414                 machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
1415 
1416                 if (mce_intel_cmci_poll()) {
1417                         iv = mce_adjust_timer(iv);
1418                         goto done;
1419                 }
1420         }
1421 
1422         /*
1423          * Alert userspace if needed. If we logged an MCE, reduce the polling
1424          * interval, otherwise increase the polling interval.
1425          */
1426         if (mce_notify_irq())
1427                 iv = max(iv / 2, (unsigned long) HZ/100);
1428         else
1429                 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1430 
1431 done:
1432         __this_cpu_write(mce_next_interval, iv);
1433         __start_timer(t, iv);
1434 }
1435 
1436 /*
1437  * Ensure that the timer is firing in @interval from now.
1438  */
1439 void mce_timer_kick(unsigned long interval)
1440 {
1441         struct timer_list *t = this_cpu_ptr(&mce_timer);
1442         unsigned long iv = __this_cpu_read(mce_next_interval);
1443 
1444         __start_timer(t, interval);
1445 
1446         if (interval < iv)
1447                 __this_cpu_write(mce_next_interval, interval);
1448 }
1449 
1450 /* Must not be called in IRQ context where del_timer_sync() can deadlock */
1451 static void mce_timer_delete_all(void)
1452 {
1453         int cpu;
1454 
1455         for_each_online_cpu(cpu)
1456                 del_timer_sync(&per_cpu(mce_timer, cpu));
1457 }
1458 
1459 /*
1460  * Notify the user(s) about new machine check events.
1461  * Can be called from interrupt context, but not from machine check/NMI
1462  * context.
1463  */
1464 int mce_notify_irq(void)
1465 {
1466         /* Not more than two messages every minute */
1467         static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1468 
1469         if (test_and_clear_bit(0, &mce_need_notify)) {
1470                 mce_work_trigger();
1471 
1472                 if (__ratelimit(&ratelimit))
1473                         pr_info(HW_ERR "Machine check events logged\n");
1474 
1475                 return 1;
1476         }
1477         return 0;
1478 }
1479 EXPORT_SYMBOL_GPL(mce_notify_irq);
1480 
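     /*
      * Allocate the mce_banks[] array and default every bank to fully
      * enabled (all control bits set).
      */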
1481 static int __mcheck_cpu_mce_banks_init(void)
1482 {
1483         int i;
1484 
1485         mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL);
1486         if (!mce_banks)
1487                 return -ENOMEM;
1488 
1489         for (i = 0; i < MAX_NR_BANKS; i++) {
1490                 struct mce_bank *b = &mce_banks[i];
1491 
1492                 b->ctl = -1ULL;
1493                 b->init = 1;
1494         }
1495         return 0;
1496 }
1497 
1498 /*
1499  * Initialize Machine Checks for a CPU.
1500  */
1501 static int __mcheck_cpu_cap_init(void)
1502 {
1503         u64 cap;
1504         u8 b;
1505 
1506         rdmsrl(MSR_IA32_MCG_CAP, cap);
1507 
1508         b = cap & MCG_BANKCNT_MASK;
1509         if (WARN_ON_ONCE(b > MAX_NR_BANKS))
1510                 b = MAX_NR_BANKS;
1511 
1512         mca_cfg.banks = max(mca_cfg.banks, b);
1513 
1514         if (!mce_banks) {
1515                 int err = __mcheck_cpu_mce_banks_init();
1516                 if (err)
1517                         return err;
1518         }
1519 
1520         /* Use accurate RIP reporting if available. */
1521         if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1522                 mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1523 
1524         if (cap & MCG_SER_P)
1525                 mca_cfg.ser = 1;
1526 
1527         return 0;
1528 }
1529 
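     /*
      * Log errors left over from before the last reset, then enable machine
      * checks via CR4.MCE and MCG_CTL.
      */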
1530 static void __mcheck_cpu_init_generic(void)
1531 {
1532         enum mcp_flags m_fl = 0;
1533         mce_banks_t all_banks;
1534         u64 cap;
1535 
1536         if (!mca_cfg.bootlog)
1537                 m_fl = MCP_DONTLOG;
1538 
1539         /*
1540          * Log the machine checks left over from the previous reset.
1541          */
1542         bitmap_fill(all_banks, MAX_NR_BANKS);
1543         machine_check_poll(MCP_UC | m_fl, &all_banks);
1544 
1545         cr4_set_bits(X86_CR4_MCE);
1546 
1547         rdmsrl(MSR_IA32_MCG_CAP, cap);
1548         if (cap & MCG_CTL_P)
1549                 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1550 }
1551 
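     /*
      * Program each bank's control register and clear any stale status left
      * in it.
      */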
1552 static void __mcheck_cpu_init_clear_banks(void)
1553 {
1554         int i;
1555 
1556         for (i = 0; i < mca_cfg.banks; i++) {
1557                 struct mce_bank *b = &mce_banks[i];
1558 
1559                 if (!b->init)
1560                         continue;
1561                 wrmsrl(msr_ops.ctl(i), b->ctl);
1562                 wrmsrl(msr_ops.status(i), 0);
1563         }
1564 }
1565 
1566 /*
1567  * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1568  * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1569  * Vol 3B Table 15-20). But this confuses both the code that determines
1570  * whether the machine check occurred in kernel or user mode, and also
1571  * the severity assessment code. Pretend that EIPV was set, and take the
1572  * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1573  */
1574 static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1575 {
1576         if (bank != 0)
1577                 return;
1578         if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1579                 return;
1580         if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1581                           MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1582                           MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1583                           MCACOD)) !=
1584                          (MCI_STATUS_UC|MCI_STATUS_EN|
1585                           MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1586                           MCI_STATUS_AR|MCACOD_INSTR))
1587                 return;
1588 
1589         m->mcgstatus |= MCG_STATUS_EIPV;
1590         m->ip = regs->ip;
1591         m->cs = regs->cs;
1592 }
1593 
1594 /* Add per CPU specific workarounds here */
1595 static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1596 {
1597         struct mca_config *cfg = &mca_cfg;
1598 
1599         if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1600                 pr_info("unknown CPU type - not enabling MCE support\n");
1601                 return -EOPNOTSUPP;
1602         }
1603 
1604         /* This should be disabled by the BIOS, but isn't always */
1605         if (c->x86_vendor == X86_VENDOR_AMD) {
1606                 if (c->x86 == 15 && cfg->banks > 4) {
1607                         /*
1608                          * disable GART TBL walk error reporting, which
1609                          * trips off incorrectly with the IOMMU & 3ware
1610                          * & Cerberus:
1611                          */
1612                         clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1613                 }
1614                 if (c->x86 < 0x11 && cfg->bootlog < 0) {
1615                         /*
1616                          * Lots of broken BIOSes around that don't clear them
1617                          * by default and leave crap in there. Don't log:
1618                          */
1619                         cfg->bootlog = 0;
1620                 }
1621                 /*
1622                  * Various K7s with broken bank 0 around. Always disable
1623                  * by default.
1624                  */
1625                 if (c->x86 == 6 && cfg->banks > 0)
1626                         mce_banks[0].ctl = 0;
1627 
1628                 /*
1629                  * overflow_recov is supported for F15h Models 00h-0fh
1630                  * even though we don't have a CPUID bit for it.
1631                  */
1632                 if (c->x86 == 0x15 && c->x86_model <= 0xf)
1633                         mce_flags.overflow_recov = 1;
1634 
1635         }
1636 
1637         if (c->x86_vendor == X86_VENDOR_INTEL) {
1638                 /*
1639                  * SDM documents that on family 6 bank 0 should not be written
1640                  * because it aliases to another special BIOS controlled
1641                  * register.
1642                  * But it's not aliased anymore on model 0x1a+
1643                  * Don't ignore bank 0 completely because there could be a
1644                  * valid event later, merely don't write CTL0.
1645                  */
1646 
1647                 if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
1648                         mce_banks[0].init = 0;
1649 
1650                 /*
1651                  * All newer Intel systems support MCE broadcasting. Enable
1652                  * synchronization with a one second timeout.
1653                  */
1654                 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1655                         cfg->monarch_timeout < 0)
1656                         cfg->monarch_timeout = USEC_PER_SEC;
1657 
1658                 /*
1659                  * There are also broken BIOSes on some Pentium M and
1660                  * earlier systems:
1661                  */
1662                 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1663                         cfg->bootlog = 0;
1664 
1665                 if (c->x86 == 6 && c->x86_model == 45)
1666                         quirk_no_way_out = quirk_sandybridge_ifu;
1667         }
1668         if (cfg->monarch_timeout < 0)
1669                 cfg->monarch_timeout = 0;
1670         if (cfg->bootlog != 0)
1671                 cfg->panic_timeout = 30;
1672 
1673         return 0;
1674 }
1675 
1676 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1677 {
1678         if (c->x86 != 5)
1679                 return 0;
1680 
1681         switch (c->x86_vendor) {
1682         case X86_VENDOR_INTEL:
1683                 intel_p5_mcheck_init(c);
1684                 return 1;
1686         case X86_VENDOR_CENTAUR:
1687                 winchip_mcheck_init(c);
1688                 return 1;
1690         default:
1691                 return 0;
1692         }
1693 
1694         return 0;
1695 }
1696 
1697 /*
1698  * Init basic CPU features needed for early decoding of MCEs.
1699  */
1700 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
1701 {
1702         if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
1703                 mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
1704                 mce_flags.succor         = !!cpu_has(c, X86_FEATURE_SUCCOR);
1705                 mce_flags.smca           = !!cpu_has(c, X86_FEATURE_SMCA);
1706 
1707                 if (mce_flags.smca) {
1708                         msr_ops.ctl     = smca_ctl_reg;
1709                         msr_ops.status  = smca_status_reg;
1710                         msr_ops.addr    = smca_addr_reg;
1711                         msr_ops.misc    = smca_misc_reg;
1712                 }
1713         }
1714 }
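/*
 * Illustrative sketch of why msr_ops is switched for SMCA parts: legacy
 * MCA banks sit at the architectural 0x400 base with four MSRs per bank,
 * while SMCA banks live in AMD's 0xC0002000 range with a 0x10 stride.
 * The base/stride constants below are taken from msr-index.h as a
 * best-effort annotation; treat them as assumptions rather than as
 * normative documentation of this file.
 */
#include <stdio.h>

#define DEMO_LEGACY_MC0_CTL     0x400u          /* MSR_IA32_MC0_CTL       */
#define DEMO_SMCA_MC0_CTL       0xc0002000u     /* MSR_AMD64_SMCA_MC0_CTL */

static unsigned int demo_legacy_ctl(int bank) { return DEMO_LEGACY_MC0_CTL + 4 * bank; }
static unsigned int demo_smca_ctl(int bank)   { return DEMO_SMCA_MC0_CTL + 0x10 * bank; }

int main(void)
{
        int bank = 5;                           /* arbitrary example bank */

        printf("legacy MC%d_CTL : 0x%x\n", bank, demo_legacy_ctl(bank));
        printf("SMCA   MC%d_CTL : 0x%x\n", bank, demo_smca_ctl(bank));
        return 0;
}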
1715 
1716 static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
1717 {
1718         struct mca_config *cfg = &mca_cfg;
1719 
1720          /*
1721           * All newer Centaur CPUs support MCE broadcasting. Enable
1722           * synchronization with a one second timeout.
1723           */
1724         if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
1725              c->x86 > 6) {
1726                 if (cfg->monarch_timeout < 0)
1727                         cfg->monarch_timeout = USEC_PER_SEC;
1728         }
1729 }
1730 
1731 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1732 {
1733         switch (c->x86_vendor) {
1734         case X86_VENDOR_INTEL:
1735                 mce_intel_feature_init(c);
1736                 mce_adjust_timer = cmci_intel_adjust_timer;
1737                 break;
1738 
1739         case X86_VENDOR_AMD:
1740                 mce_amd_feature_init(c);
1741                 break;
1743 
1744         case X86_VENDOR_HYGON:
1745                 mce_hygon_feature_init(c);
1746                 break;
1747 
1748         case X86_VENDOR_CENTAUR:
1749                 mce_centaur_feature_init(c);
1750                 break;
1751 
1752         default:
1753                 break;
1754         }
1755 }
1756 
1757 static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1758 {
1759         switch (c->x86_vendor) {
1760         case X86_VENDOR_INTEL:
1761                 mce_intel_feature_clear(c);
1762                 break;
1763         default:
1764                 break;
1765         }
1766 }
1767 
1768 static void mce_start_timer(struct timer_list *t)
1769 {
1770         unsigned long iv = check_interval * HZ;
1771 
1772         if (mca_cfg.ignore_ce || !iv)
1773                 return;
1774 
1775         this_cpu_write(mce_next_interval, iv);
1776         __start_timer(t, iv);
1777 }
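/*
 * check_interval is the corrected-error polling period in seconds; it is
 * defined earlier in this file (historically 5 * 60, i.e. five minutes by
 * default). Setting it to 0, or booting with mce=ignore_ce, leaves the
 * poll timer disarmed here.
 */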
1778 
1779 static void __mcheck_cpu_setup_timer(void)
1780 {
1781         struct timer_list *t = this_cpu_ptr(&mce_timer);
1782 
1783         timer_setup(t, mce_timer_fn, TIMER_PINNED);
1784 }
1785 
1786 static void __mcheck_cpu_init_timer(void)
1787 {
1788         struct timer_list *t = this_cpu_ptr(&mce_timer);
1789 
1790         timer_setup(t, mce_timer_fn, TIMER_PINNED);
1791         mce_start_timer(t);
1792 }
1793 
1794 bool filter_mce(struct mce *m)
1795 {
1796         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1797                 return amd_filter_mce(m);
1798 
1799         return false;
1800 }
1801 
1802 /* Handle unconfigured int18 (should never happen) */
1803 static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1804 {
1805         pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1806                smp_processor_id());
1807 }
1808 
1809 /* Call the installed machine check handler for this CPU setup. */
1810 void (*machine_check_vector)(struct pt_regs *, long error_code) =
1811                                                 unexpected_machine_check;
1812 
1813 dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
1814 {
1815         machine_check_vector(regs, error_code);
1816 }
1817 
1818 /*
1819  * Called for each booted CPU to set up machine checks.
1820  * Must be called with preempt off:
1821  */
1822 void mcheck_cpu_init(struct cpuinfo_x86 *c)
1823 {
1824         if (mca_cfg.disabled)
1825                 return;
1826 
1827         if (__mcheck_cpu_ancient_init(c))
1828                 return;
1829 
1830         if (!mce_available(c))
1831                 return;
1832 
1833         if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
1834                 mca_cfg.disabled = 1;
1835                 return;
1836         }
1837 
1838         if (mce_gen_pool_init()) {
1839                 mca_cfg.disabled = 1;
1840                 pr_emerg("Couldn't allocate MCE records pool!\n");
1841                 return;
1842         }
1843 
1844         machine_check_vector = do_machine_check;
1845 
1846         __mcheck_cpu_init_early(c);
1847         __mcheck_cpu_init_generic();
1848         __mcheck_cpu_init_vendor(c);
1849         __mcheck_cpu_init_clear_banks();
1850         __mcheck_cpu_setup_timer();
1851 }
1852 
1853 /*
1854  * Called for each booted CPU to clear some machine check opt-ins.
1855  */
1856 void mcheck_cpu_clear(struct cpuinfo_x86 *c)
1857 {
1858         if (mca_cfg.disabled)
1859                 return;
1860 
1861         if (!mce_available(c))
1862                 return;
1863 
1864         /*
1865          * A generic x86-wide clear (e.g. __mcheck_cpu_clear_generic(c))
1866          * could be hooked in here if it is ever needed.
1867          */
1868         __mcheck_cpu_clear_vendor(c);
1869 
1870 }
1871 
1872 static void __mce_disable_bank(void *arg)
1873 {
1874         int bank = *((int *)arg);
1875         __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
1876         cmci_disable_bank(bank);
1877 }
1878 
1879 void mce_disable_bank(int bank)
1880 {
1881         if (bank >= mca_cfg.banks) {
1882                 pr_warn(FW_BUG
1883                         "Ignoring request to disable invalid MCA bank %d.\n",
1884                         bank);
1885                 return;
1886         }
1887         set_bit(bank, mce_banks_ce_disabled);
1888         on_each_cpu(__mce_disable_bank, &bank, 1);
1889 }
1890 
1891 /*
1892  * mce=off Disables machine check
1893  * mce=no_cmci Disables CMCI
1894  * mce=no_lmce Disables LMCE
1895  * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
1896  * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
1897  * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
1898  *      monarchtimeout is how long to wait for other CPUs on machine
1899  *      check, or 0 to not wait
1900  * mce=bootlog Log MCEs from before booting. Disabled by default on AMD
1901  *      Fam10h and older.
1902  * mce=nobootlog Don't log MCEs from before booting.
1903  * mce=bios_cmci_threshold Don't program the CMCI threshold
1904  * mce=recovery Force-enable memcpy_mcsafe()
1905  */
1906 static int __init mcheck_enable(char *str)
1907 {
1908         struct mca_config *cfg = &mca_cfg;
1909 
1910         if (*str == 0) {
1911                 enable_p5_mce();
1912                 return 1;
1913         }
1914         if (*str == '=')
1915                 str++;
1916         if (!strcmp(str, "off"))
1917                 cfg->disabled = 1;
1918         else if (!strcmp(str, "no_cmci"))
1919                 cfg->cmci_disabled = true;
1920         else if (!strcmp(str, "no_lmce"))
1921                 cfg->lmce_disabled = 1;
1922         else if (!strcmp(str, "dont_log_ce"))
1923                 cfg->dont_log_ce = true;
1924         else if (!strcmp(str, "ignore_ce"))
1925                 cfg->ignore_ce = true;
1926         else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
1927                 cfg->bootlog = (str[0] == 'b');
1928         else if (!strcmp(str, "bios_cmci_threshold"))
1929                 cfg->bios_cmci_threshold = 1;
1930         else if (!strcmp(str, "recovery"))
1931                 cfg->recovery = 1;
1932         else if (isdigit(str[0])) {
1933                 if (get_option(&str, &cfg->tolerant) == 2)
1934                         get_option(&str, &(cfg->monarch_timeout));
1935         } else {
1936                 pr_info("mce argument %s ignored. Please use /sys\n", str);
1937                 return 0;
1938         }
1939         return 1;
1940 }
1941 __setup("mce", mcheck_enable);
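/*
 * Illustrative command lines accepted by the parser above (the numeric
 * values are examples only):
 *
 *   mce=off            disable machine check support completely
 *   mce=no_cmci        keep MCE, but do not use CMCI for corrected errors
 *   mce=dont_log_ce    clear corrected errors silently, without logging
 *   mce=nobootlog      skip errors left over from before this boot
 *   mce=2,500000       tolerant=2, monarch_timeout=500000 us (0.5 s)
 */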
1942 
1943 int __init mcheck_init(void)
1944 {
1945         mcheck_intel_therm_init();
1946         mce_register_decode_chain(&first_nb);
1947         mce_register_decode_chain(&mce_srao_nb);
1948         mce_register_decode_chain(&mce_default_nb);
1949         mcheck_vendor_init_severity();
1950 
1951         INIT_WORK(&mce_work, mce_gen_pool_process);
1952         init_irq_work(&mce_irq_work, mce_irq_work_cb);
1953 
1954         return 0;
1955 }
1956 
1957 /*
1958  * mce_syscore: PM support
1959  */
1960 
1961 /*
1962  * Disable machine checks on suspend and shutdown. We can't really handle
1963  * them later.
1964  */
1965 static void mce_disable_error_reporting(void)
1966 {
1967         int i;
1968 
1969         for (i = 0; i < mca_cfg.banks; i++) {
1970                 struct mce_bank *b = &mce_banks[i];
1971 
1972                 if (b->init)
1973                         wrmsrl(msr_ops.ctl(i), 0);
1974         }
1976 }
1977 
1978 static void vendor_disable_error_reporting(void)
1979 {
1980         /*
1981          * Don't clear on Intel, AMD, or Hygon CPUs. Some of these MSRs
1982          * are socket-wide.
1983          * Disabling them for just a single offlined CPU is bad, since it will
1984          * inhibit reporting for all shared resources on the socket like the
1985          * last level cache (LLC), the integrated memory controller (iMC), etc.
1986          */
1987         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
1988             boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
1989             boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1990                 return;
1991 
1992         mce_disable_error_reporting();
1993 }
1994 
1995 static int mce_syscore_suspend(void)
1996 {
1997         vendor_disable_error_reporting();
1998         return 0;
1999 }
2000 
2001 static void mce_syscore_shutdown(void)
2002 {
2003         vendor_disable_error_reporting();
2004 }
2005 
2006 /*
2007  * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2008  * Only one CPU is active at this time, the others get re-added later using
2009  * CPU hotplug:
2010  */
2011 static void mce_syscore_resume(void)
2012 {
2013         __mcheck_cpu_init_generic();
2014         __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2015         __mcheck_cpu_init_clear_banks();
2016 }
2017 
2018 static struct syscore_ops mce_syscore_ops = {
2019         .suspend        = mce_syscore_suspend,
2020         .shutdown       = mce_syscore_shutdown,
2021         .resume         = mce_syscore_resume,
2022 };
2023 
2024 /*
2025  * mce_device: Sysfs support
2026  */
2027 
2028 static void mce_cpu_restart(void *data)
2029 {
2030         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2031                 return;
2032         __mcheck_cpu_init_generic();
2033         __mcheck_cpu_init_clear_banks();
2034         __mcheck_cpu_init_timer();
2035 }
2036 
2037 /* Reinit MCEs after user configuration changes */
2038 static void mce_restart(void)
2039 {
2040         mce_timer_delete_all();
2041         on_each_cpu(mce_cpu_restart, NULL, 1);
2042 }
2043 
2044 /* Toggle features for corrected errors */
2045 static void mce_disable_cmci(void *data)
2046 {
2047         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2048                 return;
2049         cmci_clear();
2050 }
2051 
2052 static void mce_enable_ce(void *all)
2053 {
2054         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2055                 return;
2056         cmci_reenable();
2057         cmci_recheck();
2058         if (all)
2059                 __mcheck_cpu_init_timer();
2060 }
2061 
2062 static struct bus_type mce_subsys = {
2063         .name           = "machinecheck",
2064         .dev_name       = "machinecheck",
2065 };
2066 
2067 DEFINE_PER_CPU(struct device *, mce_device);
2068 
2069 static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
2070 {
2071         return container_of(attr, struct mce_bank, attr);
2072 }
2073 
2074 static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2075                          char *buf)
2076 {
2077         return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
2078 }
2079 
2080 static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2081                         const char *buf, size_t size)
2082 {
2083         u64 new;
2084 
2085         if (kstrtou64(buf, 0, &new) < 0)
2086                 return -EINVAL;
2087 
2088         attr_to_bank(attr)->ctl = new;
2089         mce_restart();
2090 
2091         return size;
2092 }
2093 
2094 static ssize_t set_ignore_ce(struct device *s,
2095                              struct device_attribute *attr,
2096                              const char *buf, size_t size)
2097 {
2098         u64 new;
2099 
2100         if (kstrtou64(buf, 0, &new) < 0)
2101                 return -EINVAL;
2102 
2103         mutex_lock(&mce_sysfs_mutex);
2104         if (mca_cfg.ignore_ce ^ !!new) {
2105                 if (new) {
2106                         /* disable ce features */
2107                         mce_timer_delete_all();
2108                         on_each_cpu(mce_disable_cmci, NULL, 1);
2109                         mca_cfg.ignore_ce = true;
2110                 } else {
2111                         /* enable ce features */
2112                         mca_cfg.ignore_ce = false;
2113                         on_each_cpu(mce_enable_ce, (void *)1, 1);
2114                 }
2115         }
2116         mutex_unlock(&mce_sysfs_mutex);
2117 
2118         return size;
2119 }
2120 
2121 static ssize_t set_cmci_disabled(struct device *s,
2122                                  struct device_attribute *attr,
2123                                  const char *buf, size_t size)
2124 {
2125         u64 new;
2126 
2127         if (kstrtou64(buf, 0, &new) < 0)
2128                 return -EINVAL;
2129 
2130         mutex_lock(&mce_sysfs_mutex);
2131         if (mca_cfg.cmci_disabled ^ !!new) {
2132                 if (new) {
2133                         /* disable cmci */
2134                         on_each_cpu(mce_disable_cmci, NULL, 1);
2135                         mca_cfg.cmci_disabled = true;
2136                 } else {
2137                         /* enable cmci */
2138                         mca_cfg.cmci_disabled = false;
2139                         on_each_cpu(mce_enable_ce, NULL, 1);
2140                 }
2141         }
2142         mutex_unlock(&mce_sysfs_mutex);
2143 
2144         return size;
2145 }
2146 
2147 static ssize_t store_int_with_restart(struct device *s,
2148                                       struct device_attribute *attr,
2149                                       const char *buf, size_t size)
2150 {
2151         unsigned long old_check_interval = check_interval;
2152         ssize_t ret = device_store_ulong(s, attr, buf, size);
2153 
2154         if (check_interval == old_check_interval)
2155                 return ret;
2156 
2157         mutex_lock(&mce_sysfs_mutex);
2158         mce_restart();
2159         mutex_unlock(&mce_sysfs_mutex);
2160 
2161         return ret;
2162 }
2163 
2164 static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2165 static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2166 static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2167 
2168 static struct dev_ext_attribute dev_attr_check_interval = {
2169         __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2170         &check_interval
2171 };
2172 
2173 static struct dev_ext_attribute dev_attr_ignore_ce = {
2174         __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2175         &mca_cfg.ignore_ce
2176 };
2177 
2178 static struct dev_ext_attribute dev_attr_cmci_disabled = {
2179         __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2180         &mca_cfg.cmci_disabled
2181 };
2182 
2183 static struct device_attribute *mce_device_attrs[] = {
2184         &dev_attr_tolerant.attr,
2185         &dev_attr_check_interval.attr,
2186 #ifdef CONFIG_X86_MCELOG_LEGACY
2187         &dev_attr_trigger,
2188 #endif
2189         &dev_attr_monarch_timeout.attr,
2190         &dev_attr_dont_log_ce.attr,
2191         &dev_attr_ignore_ce.attr,
2192         &dev_attr_cmci_disabled.attr,
2193         NULL
2194 };
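/*
 * A minimal user-space sketch of how the attributes above are typically
 * reached. With mce_subsys registered as the "machinecheck" system
 * subsystem, the per-CPU directories are normally exposed as
 * /sys/devices/system/machinecheck/machinecheck<N>/ (path assumed from
 * the usual sysfs layout, not stated in this file). Writes require root
 * and go through the store handlers above.
 */
#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/devices/system/machinecheck/machinecheck0/check_interval";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("CE poll interval (seconds): %s", buf);
        fclose(f);
        return 0;
}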
2195 
2196 static cpumask_var_t mce_device_initialized;
2197 
2198 static void mce_device_release(struct device *dev)
2199 {
2200         kfree(dev);
2201 }
2202 
2203 /* Per cpu device init. All of the cpus still share the same ctrl bank: */
2204 static int mce_device_create(unsigned int cpu)
2205 {
2206         struct device *dev;
2207         int err;
2208         int i, j;
2209 
2210         if (!mce_available(&boot_cpu_data))
2211                 return -EIO;
2212 
2213         dev = per_cpu(mce_device, cpu);
2214         if (dev)
2215                 return 0;
2216 
2217         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2218         if (!dev)
2219                 return -ENOMEM;
2220         dev->id  = cpu;
2221         dev->bus = &mce_subsys;
2222         dev->release = &mce_device_release;
2223 
2224         err = device_register(dev);
2225         if (err) {
2226                 put_device(dev);
2227                 return err;
2228         }
2229 
2230         for (i = 0; mce_device_attrs[i]; i++) {
2231                 err = device_create_file(dev, mce_device_attrs[i]);
2232                 if (err)
2233                         goto error;
2234         }
2235         for (j = 0; j < mca_cfg.banks; j++) {
2236                 err = device_create_file(dev, &mce_banks[j].attr);
2237                 if (err)
2238                         goto error2;
2239         }
2240         cpumask_set_cpu(cpu, mce_device_initialized);
2241         per_cpu(mce_device, cpu) = dev;
2242 
2243         return 0;
2244 error2:
2245         while (--j >= 0)
2246                 device_remove_file(dev, &mce_banks[j].attr);
2247 error:
2248         while (--i >= 0)
2249                 device_remove_file(dev, mce_device_attrs[i]);
2250 
2251         device_unregister(dev);
2252 
2253         return err;
2254 }
2255 
2256 static void mce_device_remove(unsigned int cpu)
2257 {
2258         struct device *dev = per_cpu(mce_device, cpu);
2259         int i;
2260 
2261         if (!cpumask_test_cpu(cpu, mce_device_initialized))
2262                 return;
2263 
2264         for (i = 0; mce_device_attrs[i]; i++)
2265                 device_remove_file(dev, mce_device_attrs[i]);
2266 
2267         for (i = 0; i < mca_cfg.banks; i++)
2268                 device_remove_file(dev, &mce_banks[i].attr);
2269 
2270         device_unregister(dev);
2271         cpumask_clear_cpu(cpu, mce_device_initialized);
2272         per_cpu(mce_device, cpu) = NULL;
2273 }
2274 
2275 /* Make sure there are no machine checks on offlined CPUs. */
2276 static void mce_disable_cpu(void)
2277 {
2278         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2279                 return;
2280 
2281         if (!cpuhp_tasks_frozen)
2282                 cmci_clear();
2283 
2284         vendor_disable_error_reporting();
2285 }
2286 
2287 static void mce_reenable_cpu(void)
2288 {
2289         int i;
2290 
2291         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2292                 return;
2293 
2294         if (!cpuhp_tasks_frozen)
2295                 cmci_reenable();
2296         for (i = 0; i < mca_cfg.banks; i++) {
2297                 struct mce_bank *b = &mce_banks[i];
2298 
2299                 if (b->init)
2300                         wrmsrl(msr_ops.ctl(i), b->ctl);
2301         }
2302 }
2303 
2304 static int mce_cpu_dead(unsigned int cpu)
2305 {
2306         mce_intel_hcpu_update(cpu);
2307 
2308         /* intentionally ignoring frozen here */
2309         if (!cpuhp_tasks_frozen)
2310                 cmci_rediscover();
2311         return 0;
2312 }
2313 
2314 static int mce_cpu_online(unsigned int cpu)
2315 {
2316         struct timer_list *t = this_cpu_ptr(&mce_timer);
2317         int ret;
2318 
2319         mce_device_create(cpu);
2320 
2321         ret = mce_threshold_create_device(cpu);
2322         if (ret) {
2323                 mce_device_remove(cpu);
2324                 return ret;
2325         }
2326         mce_reenable_cpu();
2327         mce_start_timer(t);
2328         return 0;
2329 }
2330 
2331 static int mce_cpu_pre_down(unsigned int cpu)
2332 {
2333         struct timer_list *t = this_cpu_ptr(&mce_timer);
2334 
2335         mce_disable_cpu();
2336         del_timer_sync(t);
2337         mce_threshold_remove_device(cpu);
2338         mce_device_remove(cpu);
2339         return 0;
2340 }
2341 
2342 static __init void mce_init_banks(void)
2343 {
2344         int i;
2345 
2346         for (i = 0; i < mca_cfg.banks; i++) {
2347                 struct mce_bank *b = &mce_banks[i];
2348                 struct device_attribute *a = &b->attr;
2349 
2350                 sysfs_attr_init(&a->attr);
2351                 a->attr.name    = b->attrname;
2352                 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2353 
2354                 a->attr.mode    = 0644;
2355                 a->show         = show_bank;
2356                 a->store        = set_bank;
2357         }
2358 }
2359 
2360 static __init int mcheck_init_device(void)
2361 {
2362         int err;
2363 
2364         /*
2365          * Check if we have a spare virtual bit. This will only become
2366          * a problem if/when we move beyond 5-level page tables.
2367          */
2368         MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);
2369 
2370         if (!mce_available(&boot_cpu_data)) {
2371                 err = -EIO;
2372                 goto err_out;
2373         }
2374 
2375         if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2376                 err = -ENOMEM;
2377                 goto err_out;
2378         }
2379 
2380         mce_init_banks();
2381 
2382         err = subsys_system_register(&mce_subsys, NULL);
2383         if (err)
2384                 goto err_out_mem;
2385 
2386         err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
2387                                 mce_cpu_dead);
2388         if (err)
2389                 goto err_out_mem;
2390 
2391         err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
2392                                 mce_cpu_online, mce_cpu_pre_down);
2393         if (err < 0)
2394                 goto err_out_online;
2395 
2396         register_syscore_ops(&mce_syscore_ops);
2397 
2398         return 0;
2399 
2400 err_out_online:
2401         cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
2402 
2403 err_out_mem:
2404         free_cpumask_var(mce_device_initialized);
2405 
2406 err_out:
2407         pr_err("Unable to init MCE device (rc: %d)\n", err);
2408 
2409         return err;
2410 }
2411 device_initcall_sync(mcheck_init_device);
2412 
2413 /*
2414  * Old style boot options parsing. Only for compatibility.
2415  */
2416 static int __init mcheck_disable(char *str)
2417 {
2418         mca_cfg.disabled = 1;
2419         return 1;
2420 }
2421 __setup("nomce", mcheck_disable);
2422 
2423 #ifdef CONFIG_DEBUG_FS
2424 struct dentry *mce_get_debugfs_dir(void)
2425 {
2426         static struct dentry *dmce;
2427 
2428         if (!dmce)
2429                 dmce = debugfs_create_dir("mce", NULL);
2430 
2431         return dmce;
2432 }
2433 
2434 static void mce_reset(void)
2435 {
2436         cpu_missing = 0;
2437         atomic_set(&mce_fake_panicked, 0);
2438         atomic_set(&mce_executing, 0);
2439         atomic_set(&mce_callin, 0);
2440         atomic_set(&global_nwo, 0);
2441 }
2442 
2443 static int fake_panic_get(void *data, u64 *val)
2444 {
2445         *val = fake_panic;
2446         return 0;
2447 }
2448 
2449 static int fake_panic_set(void *data, u64 val)
2450 {
2451         mce_reset();
2452         fake_panic = val;
2453         return 0;
2454 }
2455 
2456 DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2457                         fake_panic_set, "%llu\n");
2458 
2459 static int __init mcheck_debugfs_init(void)
2460 {
2461         struct dentry *dmce, *ffake_panic;
2462 
2463         dmce = mce_get_debugfs_dir();
2464         if (!dmce)
2465                 return -ENOMEM;
2466         ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2467                                           &fake_panic_fops);
2468         if (!ffake_panic)
2469                 return -ENOMEM;
2470 
2471         return 0;
2472 }
2473 #else
2474 static int __init mcheck_debugfs_init(void) { return -EINVAL; }
2475 #endif
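/*
 * With CONFIG_DEBUG_FS enabled and debugfs mounted in its usual place,
 * the file created above is normally reachable as
 * /sys/kernel/debug/mce/fake_panic (mount point assumed).
 */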
2476 
2477 DEFINE_STATIC_KEY_FALSE(mcsafe_key);
2478 EXPORT_SYMBOL_GPL(mcsafe_key);
2479 
2480 static int __init mcheck_late_init(void)
2481 {
2482         pr_info("Using %d MCE banks\n", mca_cfg.banks);
2483 
2484         if (mca_cfg.recovery)
2485                 static_branch_inc(&mcsafe_key);
2486 
2487         mcheck_debugfs_init();
2488         cec_init();
2489 
2490         /*
2491          * Flush out everything that has been logged during early boot, now that
2492          * everything has been initialized (workqueues, decoders, ...).
2493          */
2494         mce_schedule_work();
2495 
2496         return 0;
2497 }
2498 late_initcall(mcheck_late_init);
2499 
