~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/cpu/mcheck/mce.c

Version: ~ [ linux-5.11-rc3 ] ~ [ linux-5.10.7 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.89 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.167 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.215 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.251 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.251 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.85 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * Machine check handler.
  3  *
  4  * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
  5  * Rest from unknown author(s).
  6  * 2004 Andi Kleen. Rewrote most of it.
  7  * Copyright 2008 Intel Corporation
  8  * Author: Andi Kleen
  9  */
 10 
 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12 
 13 #include <linux/thread_info.h>
 14 #include <linux/capability.h>
 15 #include <linux/miscdevice.h>
 16 #include <linux/ratelimit.h>
 17 #include <linux/kallsyms.h>
 18 #include <linux/rcupdate.h>
 19 #include <linux/kobject.h>
 20 #include <linux/uaccess.h>
 21 #include <linux/kdebug.h>
 22 #include <linux/kernel.h>
 23 #include <linux/percpu.h>
 24 #include <linux/string.h>
 25 #include <linux/device.h>
 26 #include <linux/syscore_ops.h>
 27 #include <linux/delay.h>
 28 #include <linux/ctype.h>
 29 #include <linux/sched.h>
 30 #include <linux/sysfs.h>
 31 #include <linux/types.h>
 32 #include <linux/slab.h>
 33 #include <linux/init.h>
 34 #include <linux/kmod.h>
 35 #include <linux/poll.h>
 36 #include <linux/nmi.h>
 37 #include <linux/cpu.h>
 38 #include <linux/smp.h>
 39 #include <linux/fs.h>
 40 #include <linux/mm.h>
 41 #include <linux/debugfs.h>
 42 #include <linux/irq_work.h>
 43 #include <linux/export.h>
 44 
 45 #include <asm/processor.h>
 46 #include <asm/traps.h>
 47 #include <asm/tlbflush.h>
 48 #include <asm/mce.h>
 49 #include <asm/msr.h>
 50 
 51 #include "mce-internal.h"
 52 
 53 static DEFINE_MUTEX(mce_chrdev_read_mutex);
 54 
/*
 * Read a mcelog ring index with acquire ordering, warning (under lockdep)
 * unless the caller is in one of the two contexts allowed to observe it:
 * holding mce_chrdev_read_mutex or inside an RCU-sched read-side section.
 */
#define mce_log_get_idx_check(p) \
({ \
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
			 !lockdep_is_held(&mce_chrdev_read_mutex), \
			 "suspicious mce_log_get_idx_check() usage"); \
	smp_load_acquire(&(p)); \
})
 62 
 63 #define CREATE_TRACE_POINTS
 64 #include <trace/events/mce.h>
 65 
 66 #define SPINUNIT                100     /* 100ns */
 67 
 68 DEFINE_PER_CPU(unsigned, mce_exception_count);
 69 
 70 struct mce_bank *mce_banks __read_mostly;
 71 struct mce_vendor_flags mce_flags __read_mostly;
 72 
 73 struct mca_config mca_cfg __read_mostly = {
 74         .bootlog  = -1,
 75         /*
 76          * Tolerant levels:
 77          * 0: always panic on uncorrected errors, log corrected errors
 78          * 1: panic or SIGBUS on uncorrected errors, log corrected errors
 79          * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
 80          * 3: never panic or SIGBUS, log all errors (for testing only)
 81          */
 82         .tolerant = 1,
 83         .monarch_timeout = -1
 84 };
 85 
 86 /* User mode helper program triggered by machine check event */
 87 static unsigned long            mce_need_notify;
 88 static char                     mce_helper[128];
 89 static char                     *mce_helper_argv[2] = { mce_helper, NULL };
 90 
 91 static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
 92 
 93 static DEFINE_PER_CPU(struct mce, mces_seen);
 94 static int                      cpu_missing;
 95 
 96 /*
 97  * MCA banks polled by the period polling timer for corrected events.
 98  * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 99  */
100 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
101         [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
102 };
103 
104 /*
105  * MCA banks controlled through firmware first for corrected errors.
106  * This is a global list of banks for which we won't enable CMCI and we
107  * won't poll. Firmware controls these banks and is responsible for
108  * reporting corrected errors through GHES. Uncorrected/recoverable
109  * errors are still notified through a machine check.
110  */
111 mce_banks_t mce_banks_ce_disabled;
112 
113 static struct work_struct mce_work;
114 static struct irq_work mce_irq_work;
115 
116 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
117 
118 /*
119  * CPU/chipset specific EDAC code can register a notifier call here to print
120  * MCE errors in a human-readable form.
121  */
122 ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
123 
/*
 * Do initial initialization of a struct mce: zero it, then record the
 * identity of the CPU we are running on (logical and physical ids),
 * a timestamp, and the processor's MCG_CAP.  May be called from
 * machine-check (NMI-like) context, so everything here must be lockless.
 */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	m->tsc = rdtsc();
	/*
	 * We hope get_seconds stays lockless.
	 * NOTE(review): get_seconds() is a 32-bit-unsafe legacy API
	 * (y2038); a lockless ktime-based replacement would be needed
	 * here because of the NMI-like calling context -- TODO confirm
	 * what this kernel version offers before changing it.
	 */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}
138 
139 DEFINE_PER_CPU(struct mce, injectm);
140 EXPORT_PER_CPU_SYMBOL_GPL(injectm);
141 
142 /*
143  * Lockless MCE logging infrastructure.
144  * This avoids deadlocks on printk locks without having to break locks. Also
145  * separate MCEs from kernel messages to avoid bogus bug reports.
146  */
147 
148 static struct mce_log mcelog = {
149         .signature      = MCE_LOG_SIGNATURE,
150         .len            = MCE_LOG_LEN,
151         .recordlen      = sizeof(struct mce),
152 };
153 
/*
 * Log one MCE record: emit the tracepoint, feed the genpool-backed
 * notifier machinery, and append to the legacy /dev/mcelog ring buffer.
 * Lockless so it is safe from machine-check context; concurrent writers
 * race for a slot with cmpxchg on mcelog.next.
 */
void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	/*
	 * Hand the record to the genpool and, when that reports 0
	 * (presumably success -- see mce-genpool.c), kick irq_work so
	 * the decoder/notifier chain runs in a safe context.
	 */
	if (!mce_gen_pool_add(mce))
		irq_work_queue(&mce_irq_work);

	wmb();
	for (;;) {
		entry = mce_log_get_idx_check(mcelog.next);
		for (;;) {

			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		/* Claim slot 'entry'; on a lost race, rescan from the top. */
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	/* Publish the slot: readers only consume once ->finished is set. */
	mcelog.entry[entry].finished = 1;
	wmb();

	/* Flag that userspace notification work is pending. */
	set_bit(0, &mce_need_notify);
}
198 
/*
 * Log an injected (software-generated) MCE.  Serialized against
 * /dev/mcelog readers via mce_chrdev_read_mutex, which also satisfies
 * the lockdep check in mce_log_get_idx_check().
 */
void mce_inject_log(struct mce *m)
{
	mutex_lock(&mce_chrdev_read_mutex);
	mce_log(m);
	mutex_unlock(&mce_chrdev_read_mutex);
}
205 EXPORT_SYMBOL_GPL(mce_inject_log);
206 
207 static struct notifier_block mce_srao_nb;
208 
/*
 * Register a notifier on the MCE decode chain (used by EDAC drivers
 * etc.).  Any other registrant claiming INT_MAX is demoted by one so
 * the SRAO notifier alone keeps the highest priority.
 */
void mce_register_decode_chain(struct notifier_block *nb)
{
	/* Ensure SRAO notifier has the highest priority in the decode chain. */
	if (nb != &mce_srao_nb && nb->priority == INT_MAX)
		nb->priority -= 1;

	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
217 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
218 
/* Remove a previously registered notifier from the MCE decode chain. */
void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
223 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
224 
/* MSR address of the legacy IA32_MCi_CTL register for @bank. */
static inline u32 ctl_reg(int bank)
{
	return MSR_IA32_MCx_CTL(bank);
}
229 
/* MSR address of the legacy IA32_MCi_STATUS register for @bank. */
static inline u32 status_reg(int bank)
{
	return MSR_IA32_MCx_STATUS(bank);
}
234 
/* MSR address of the legacy IA32_MCi_ADDR register for @bank. */
static inline u32 addr_reg(int bank)
{
	return MSR_IA32_MCx_ADDR(bank);
}
239 
/* MSR address of the legacy IA32_MCi_MISC register for @bank. */
static inline u32 misc_reg(int bank)
{
	return MSR_IA32_MCx_MISC(bank);
}
244 
/* MSR address of the AMD SMCA MCx_CTL register for @bank. */
static inline u32 smca_ctl_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_CTL(bank);
}
249 
/* MSR address of the AMD SMCA MCx_STATUS register for @bank. */
static inline u32 smca_status_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_STATUS(bank);
}
254 
/* MSR address of the AMD SMCA MCx_ADDR register for @bank. */
static inline u32 smca_addr_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_ADDR(bank);
}
259 
/* MSR address of the AMD SMCA MCx_MISC register for @bank. */
static inline u32 smca_misc_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_MISC(bank);
}
264 
/*
 * Indirection table for the per-bank MSR addresses.  Defaults to the
 * legacy MCA layout; presumably vendor init code swaps in the smca_*
 * accessors on AMD SMCA parts -- TODO confirm, that code is not in
 * this file.
 */
struct mca_msr_regs msr_ops = {
	.ctl	= ctl_reg,
	.status	= status_reg,
	.addr	= addr_reg,
	.misc	= misc_reg
};
271 
/*
 * Emit one MCE record to the console in the fixed format consumed by
 * external tools (mcelog and friends), then give the registered decoder
 * chain a chance to print a human-readable interpretation.  The field
 * layout below is ABI for those parsers -- do not change old fields.
 */
static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		/* !INEXACT! flags an IP that is not precise (no EIPV). */
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
				m->cs, m->ip);

		/* Only kernel addresses can be meaningfully symbolized. */
		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		cpu_data(m->extcpu).microcode);

	/*
	 * Print out human-readable details about the MCE error,
	 * (if the CPU has an implementation for that)
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}
314 
315 #define PANIC_TIMEOUT 5 /* 5 seconds */
316 
317 static atomic_t mce_panicked;
318 
319 static int fake_panic;
320 static atomic_t mce_fake_panicked;
321 
/*
 * Panic in progress on another CPU.  Enable interrupts and busy-wait
 * (up to PANIC_TIMEOUT) for the final IPI from the panicking CPU; if it
 * never arrives, panic here ourselves.  Never returns normally.
 */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	/* Stay on this CPU; interrupts enabled so the stop-IPI can land. */
	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicing machine check CPU died");
}
335 
/*
 * Panic (or fake-panic, for testing) after dumping all pending MCE
 * records.  Output order is deliberate: corrected errors first, then
 * uncorrected ones, with @final -- the record that triggered the panic
 * -- printed last.  Records are also persisted via APEI when possible.
 * @exp, if set, is a severity explanation string from mce_severity().
 */
static void mce_panic(const char *msg, struct mce *final, char *exp)
{
	int apei_err = 0;
	struct llist_node *pending;
	struct mce_evt_llist *l;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			return;
	}
	pending = mce_gen_pool_prepare_records();
	/* First print corrected ones that are still unlogged */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			/* Persist only the first record via APEI. */
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		/* Skip @final here; it gets printed last, below. */
		if (!final || mce_cmp(m, final)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}
394 
395 /* Support code for software error injection */
396 
/*
 * Map an MSR number to the offset of the corresponding field in
 * struct mce, for the software error-injection path (injectm).  The
 * bank comes from this CPU's pending injection record.  Returns -1 for
 * MSRs that have no struct mce field.  Comparison order is kept as-is;
 * it decides the winner should any register numbers ever alias.
 */
static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == msr_ops.status(bank))
		return offsetof(struct mce, status);
	if (msr == msr_ops.addr(bank))
		return offsetof(struct mce, addr);
	if (msr == msr_ops.misc(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}
413 
/* MSR access wrappers used for error injection */

/*
 * Read an MSR, honoring a pending software injection: if this CPU has a
 * finished injectm record, the value is taken from the matching struct
 * mce field instead of the hardware.  Faulting hardware reads return 0
 * (with a one-time warning) rather than crashing the handler.
 */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}
439 
/*
 * Write an MSR, honoring a pending software injection: with a finished
 * injectm record the write is redirected into the matching struct mce
 * field (and unknown MSRs are silently dropped) instead of touching
 * hardware.
 */
static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}
451 
/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 * @regs may be NULL (polling path); then no IP/CS is recorded.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error, but only when the hardware
		 * says RIP/EIP is valid (RIPV or EIPV set).
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}
484 
485 int mce_available(struct cpuinfo_x86 *c)
486 {
487         if (mca_cfg.disabled)
488                 return 0;
489         return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
490 }
491 
/*
 * Kick the deferred MCE work item, but only when there are pending
 * genpool records and the workqueue subsystem is up (early boot guard).
 */
static void mce_schedule_work(void)
{
	if (!mce_gen_pool_empty() && keventd_up())
		schedule_work(&mce_work);
}
497 
/*
 * irq_work callback: runs in a safe (IRQ) context after an MCE, to
 * notify userspace and schedule further processing of pending records.
 */
static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}
503 
504 static void mce_report_event(struct pt_regs *regs)
505 {
506         if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
507                 mce_notify_irq();
508                 /*
509                  * Triggering the work queue here is just an insurance
510                  * policy in case the syscall exit notify handler
511                  * doesn't run soon enough or ends up running on the
512                  * wrong CPU (can happen when audit sleeps)
513                  */
514                 mce_schedule_work();
515                 return;
516         }
517 
518         irq_work_queue(&mce_irq_work);
519 }
520 
521 /*
522  * Check if the address reported by the CPU is in a format we can parse.
523  * It would be possible to add code for most other cases, but all would
524  * be somewhat complicated (e.g. segment offset would require an instruction
525  * parser). So only support physical addresses up to page granuality for now.
526  */
527 static int mce_usable_address(struct mce *m)
528 {
529         if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
530                 return 0;
531 
532         /* Checks after this one are Intel-specific: */
533         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
534                 return 1;
535 
536         if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
537                 return 0;
538         if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
539                 return 0;
540         return 1;
541 }
542 
/*
 * Decode-chain notifier for SRAO (Software Recoverable Action Optional)
 * events: when the record carries a usable physical address, offline the
 * affected page via memory_failure().  Registered with top priority so
 * it runs before other decoders.
 */
static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned long pfn;

	if (!mce)
		return NOTIFY_DONE;

	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
		pfn = mce->addr >> PAGE_SHIFT;
		memory_failure(pfn, MCE_VECTOR, 0);
	}

	return NOTIFY_OK;
}
559 static struct notifier_block mce_srao_nb = {
560         .notifier_call  = srao_decode_notifier,
561         .priority = INT_MAX,
562 };
563 
/*
 * Read ADDR and MISC registers for bank @i into @m, but only those the
 * STATUS valid bits say are populated.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(msr_ops.misc(i));
	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(msr_ops.addr(i));

		/*
		 * Mask the reported address by the reported granularity:
		 * MISC's LSB field gives the number of least-significant
		 * address bits that are not meaningful (SER-capable
		 * parts only).
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}
	}
}
584 
585 static bool memory_error(struct mce *m)
586 {
587         struct cpuinfo_x86 *c = &boot_cpu_data;
588 
589         if (c->x86_vendor == X86_VENDOR_AMD) {
590                 /* ErrCodeExt[20:16] */
591                 u8 xec = (m->status >> 16) & 0x1f;
592 
593                 return (xec == 0x0 || xec == 0x8);
594         } else if (c->x86_vendor == X86_VENDOR_INTEL) {
595                 /*
596                  * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
597                  *
598                  * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
599                  * indicating a memory error. Bit 8 is used for indicating a
600                  * cache hierarchy error. The combination of bit 2 and bit 3
601                  * is used for indicating a `generic' cache hierarchy error
602                  * But we can't just blindly check the above bits, because if
603                  * bit 11 is set, then it is a bus/interconnect error - and
604                  * either way the above bits just gives more detail on what
605                  * bus/interconnect error happened. Note that bit 12 can be
606                  * ignored, as it's the "filter" bit.
607                  */
608                 return (m->status & 0xef80) == BIT(7) ||
609                        (m->status & 0xef00) == BIT(8) ||
610                        (m->status & 0xeffc) == 0xc;
611         }
612 
613         return false;
614 }
615 
616 DEFINE_PER_CPU(unsigned, mce_poll_count);
617 
/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: spec recommends to panic for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
 * and poll handler -- so we skip this for now.
 * These cases should not happen anyways, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 *
 * Returns true if at least one event was found in the polled banks.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	bool error_seen = false;
	struct mce m;
	int severity;
	int i;

	this_cpu_inc(mce_poll_count);

	/* No pt_regs: IP/CS are meaningless for polled events. */
	mce_gather_info(&m, NULL);

	for (i = 0; i < mca_cfg.banks; i++) {
		/* Only banks enabled and selected by the caller's mask. */
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(msr_ops.status(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;


		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		error_seen = true;

		mce_read_aux(&m, i);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;

		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);

		/*
		 * Record deferred-severity memory errors so the SRAO
		 * notifier can act on the (valid) address later.
		 */
		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
			if (m.status & MCI_STATUS_ADDRV)
				m.severity = severity;

		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
			mce_log(&m);
		else if (mce_usable_address(&m)) {
			/*
			 * Although we skipped logging this, we still want
			 * to take action. Add to the pool so the registered
			 * notifiers will see it.
			 */
			if (!mce_gen_pool_add(&m))
				mce_schedule_work();
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(msr_ops.status(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();

	return error_seen;
}
713 EXPORT_SYMBOL_GPL(machine_check_poll);
714 
/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 * Sets bits in @validp for banks with a valid status, applies the
 * per-platform no-way-out quirk, and returns 1 (with *msg set to the
 * severity explanation) when any bank reaches panic severity.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	int i, ret = 0;
	char *tmp;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(msr_ops.status(i));
		if (m->status & MCI_STATUS_VAL) {
			__set_bit(i, validp);
			if (quirk_no_way_out)
				quirk_no_way_out(i, m, regs);
		}

		/* Severity is graded for every bank; keep the last message. */
		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
			*msg = tmp;
			ret = 1;
		}
	}
	return ret;
}
740 
741 /*
742  * Variable to establish order between CPUs while scanning.
743  * Each CPU spins initially until executing is equal its number.
744  */
745 static atomic_t mce_executing;
746 
747 /*
748  * Defines order of CPUs on entry. First CPU becomes Monarch.
749  */
750 static atomic_t mce_callin;
751 
/*
 * Check if a timeout waiting for other CPUs happened.
 * @t is the remaining budget in ns, decremented by SPINUNIT per call.
 * Returns 1 once the budget is exhausted (panicking first unless
 * tolerant > 1); returns 0 while waiting should continue.
 */
static int mce_timed_out(u64 *t, const char *msg)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	/* monarch_timeout == 0 means: wait forever, never time out. */
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (mca_cfg.tolerant <= 1)
			mce_panic(msg, NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	/* Spinning with interrupts off; keep the NMI watchdog quiet. */
	touch_nmi_watchdog();
	return 0;
}
779 
/*
 * The Monarch's reign.  The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable
 * case and also makes sure always all CPU's errors are examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPUs) In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg, true);
		/* Track the single worst record and its explanation. */
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * Also must let continue the others, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}
856 
857 static atomic_t global_nwo;
858 
859 /*
860  * Start of Monarch synchronization. This waits until all CPUs have
861  * entered the exception handler and then determines if any of them
862  * saw a fatal event that requires panic. Then it executes them
863  * in the entry order.
864  * TBD double check parallel CPU hotunplug
865  */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	/* monarch_timeout == 0 means the rendezvous is disabled. */
	if (!timeout)
		return -1;

	/* Publish this CPU's no_way_out vote before announcing arrival. */
	atomic_add(*no_way_out, &global_nwo);
	/*
	 * Rely on the implied barrier below, such that global_nwo
	 * is updated before mce_callin.
	 */
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout,
				  "Timeout: Not all CPUs entered broadcast exception handler")) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout,
					  "Timeout: Subject CPUs unable to finish machine check processing")) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	/* Returns this CPU's callin order (1 == Monarch), or -1 on failure. */
	return order;
}
928 
929 /*
930  * Synchronize between CPUs after main scanning loop.
931  * This invokes the bulk of the Monarch processing.
932  */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	/* Rendezvous disabled, or mce_start() already failed: just reset. */
	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU unable to finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU did not finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}
997 
998 static void mce_clear_state(unsigned long *toclear)
999 {
1000         int i;
1001 
1002         for (i = 0; i < mca_cfg.banks; i++) {
1003                 if (test_bit(i, toclear))
1004                         mce_wrmsrl(msr_ops.status(i), 0);
1005         }
1006 }
1007 
1008 static int do_memory_failure(struct mce *m)
1009 {
1010         int flags = MF_ACTION_REQUIRED;
1011         int ret;
1012 
1013         pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
1014         if (!(m->mcgstatus & MCG_STATUS_RIPV))
1015                 flags |= MF_MUST_KILL;
1016         ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags);
1017         if (ret)
1018                 pr_err("Memory error not recovered");
1019         return ret;
1020 }
1021 
1022 /*
1023  * The actual machine check handler. This only handles real
1024  * exceptions when something got corrupted coming in through int 18.
1025  *
1026  * This is executed in NMI context not subject to normal locking rules. This
1027  * implies that most kernel services cannot be safely used. Don't even
1028  * think about putting a printk in there!
1029  *
1030  * On Intel systems this is entered on all CPUs in parallel through
1031  * MCE broadcast. However some CPUs might be broken beyond repair,
1032  * so be always careful when synchronizing with others.
1033  */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mca_config *cfg = &mca_cfg;
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;

	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order = -1;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	char *msg = "Unknown";

	/*
	 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
	 * on Intel.
	 */
	int lmce = 1;

	/* If this CPU is offline, just bail out. */
	if (cpu_is_offline(smp_processor_id())) {
		u64 mcgstatus;

		mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
		if (mcgstatus & MCG_STATUS_RIPV) {
			mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
			return;
		}
	}

	ist_enter(regs);

	this_cpu_inc(mce_exception_count);

	/* No banks configured: nothing to scan, just account and leave. */
	if (!cfg->banks)
		goto out;

	mce_gather_info(&m, regs);

	/* Record this CPU's event so mce_reign() can pick the worst one. */
	final = this_cpu_ptr(&mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

	/*
	 * When no restart IP might need to kill or panic.
	 * Assume the worst for now, but if we find the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Check if this MCE is signaled to only this logical processor,
	 * on Intel only.
	 */
	if (m.cpuvendor == X86_VENDOR_INTEL)
		lmce = m.mcgstatus & MCG_STATUS_LMCES;

	/*
	 * Go through all banks in exclusion of the other CPUs. This way we
	 * don't report duplicated events on shared banks because the first one
	 * to see it will clear it. If this is a Local MCE, then no need to
	 * perform rendezvous.
	 */
	if (!lmce)
		order = mce_start(&no_way_out);

	for (i = 0; i < cfg->banks; i++) {
		__clear_bit(i, toclear);
		if (!test_bit(i, valid_banks))
			continue;
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(msr_ops.status(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Non uncorrected or non signaled errors are handled by
		 * machine_check_poll. Leave them alone, unless this panics.
		 */
		if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

		severity = mce_severity(&m, cfg->tolerant, NULL, true);

		/*
		 * When machine check was for corrected/deferred handler don't
		 * touch, unless we're panicing.
		 */
		if ((severity == MCE_KEEP_SEVERITY ||
		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		mce_read_aux(&m, i);

		/* assuming valid severity level != 0 */
		m.severity = severity;

		mce_log(&m);

		/* Track the worst event seen on this CPU in mces_seen. */
		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	/* mce_clear_state will clear *final, save locally for use later */
	m = *final;

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (!lmce) {
		if (mce_end(order) < 0)
			no_way_out = worst >= MCE_PANIC_SEVERITY;
	} else {
		/*
		 * Local MCE skipped calling mce_reign()
		 * If we found a fatal error, we need to panic here.
		 */
		 if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
			mce_panic("Machine check from unknown source",
				NULL, NULL);
	}

	/*
	 * If tolerant is at an insane level we drop requests to kill
	 * processes and continue even when there is no way out.
	 */
	if (cfg->tolerant == 3)
		kill_it = 0;
	else if (no_way_out)
		mce_panic("Fatal machine check on current CPU", &m, msg);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	sync_core();

	/* Only action-required or kill-worthy events need process handling. */
	if (worst != MCE_AR_SEVERITY && !kill_it)
		goto out_ist;

	/* Fault was in user mode and we need to take some action */
	if ((m.cs & 3) == 3) {
		/* Drop out of NMI-like context so memory_failure() may sleep. */
		ist_begin_non_atomic(regs);
		local_irq_enable();

		if (kill_it || do_memory_failure(&m))
			force_sig(SIGBUS, current);
		local_irq_disable();
		ist_end_non_atomic();
	} else {
		if (!fixup_exception(regs, X86_TRAP_MC))
			mce_panic("Failed kernel mode recovery", &m, NULL);
	}

out_ist:
	ist_exit(regs);
}
1236 EXPORT_SYMBOL_GPL(do_machine_check);
1237 
#ifndef CONFIG_MEMORY_FAILURE
/*
 * Fallback when hwpoison support is compiled out: log the error and
 * ignore it. Returning 0 makes do_memory_failure() report success, so
 * the affected task is not forcibly killed on that path.
 */
int memory_failure(unsigned long pfn, int vector, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif
1250 
1251 /*
1252  * Action optional processing happens here (picking up
1253  * from the list of faulting pages that do_machine_check()
1254  * placed into the genpool).
1255  */
/* Work-queue callback: drain the MCE genpool outside of NMI context. */
static void mce_process_work(struct work_struct *dummy)
{
	/* @dummy is unused; this is a plain deferred-work entry point. */
	mce_gen_pool_process();
}
1260 
1261 #ifdef CONFIG_X86_MCE_INTEL
1262 /***
1263  * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
1264  * @cpu: The CPU on which the event occurred.
1265  * @status: Event status information
1266  *
1267  * This function should be called by the thermal interrupt after the
1268  * event has been processed and the decision was made to log the event
1269  * further.
1270  *
1271  * The status parameter will be saved to the 'status' field of 'struct mce'
1272  * and historically has been the register value of the
1273  * MSR_IA32_THERMAL_STATUS (Intel) msr.
1274  */
1275 void mce_log_therm_throt_event(__u64 status)
1276 {
1277         struct mce m;
1278 
1279         mce_setup(&m);
1280         m.bank = MCE_THERMAL_BANK;
1281         m.status = status;
1282         mce_log(&m);
1283 }
1284 #endif /* CONFIG_X86_MCE_INTEL */
1285 
1286 /*
1287  * Periodic polling timer for "silent" machine check errors.  If the
1288  * poller finds an MCE, poll 2x faster.  When the poller finds no more
1289  * errors, poll 2x slower (up to check_interval seconds).
1290  */
1291 static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1292 
1293 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1294 static DEFINE_PER_CPU(struct timer_list, mce_timer);
1295 
/* Default interval policy: no vendor-specific adjustment, pass through. */
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
	return interval;
}
1300 
1301 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1302 
1303 static void __restart_timer(struct timer_list *t, unsigned long interval)
1304 {
1305         unsigned long when = jiffies + interval;
1306         unsigned long flags;
1307 
1308         local_irq_save(flags);
1309 
1310         if (timer_pending(t)) {
1311                 if (time_before(when, t->expires))
1312                         mod_timer(t, when);
1313         } else {
1314                 t->expires = round_jiffies(when);
1315                 add_timer_on(t, smp_processor_id());
1316         }
1317 
1318         local_irq_restore(flags);
1319 }
1320 
static void mce_timer_fn(unsigned long data)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	int cpu = smp_processor_id();
	unsigned long iv;

	/* @data is the CPU the timer was pinned to in mce_start_timer(). */
	WARN_ON(cpu != data);

	iv = __this_cpu_read(mce_next_interval);

	if (mce_available(this_cpu_ptr(&cpu_info))) {
		machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));

		/* Vendor hook (CMCI on Intel) decides the next interval. */
		if (mce_intel_cmci_poll()) {
			iv = mce_adjust_timer(iv);
			goto done;
		}
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the polling
	 * interval, otherwise increase the polling interval.
	 */
	if (mce_notify_irq())
		iv = max(iv / 2, (unsigned long) HZ/100);
	else
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));

done:
	__this_cpu_write(mce_next_interval, iv);
	__restart_timer(t, iv);
}
1353 
1354 /*
1355  * Ensure that the timer is firing in @interval from now.
1356  */
1357 void mce_timer_kick(unsigned long interval)
1358 {
1359         struct timer_list *t = this_cpu_ptr(&mce_timer);
1360         unsigned long iv = __this_cpu_read(mce_next_interval);
1361 
1362         __restart_timer(t, interval);
1363 
1364         if (interval < iv)
1365                 __this_cpu_write(mce_next_interval, interval);
1366 }
1367 
1368 /* Must not be called in IRQ context where del_timer_sync() can deadlock */
1369 static void mce_timer_delete_all(void)
1370 {
1371         int cpu;
1372 
1373         for_each_online_cpu(cpu)
1374                 del_timer_sync(&per_cpu(mce_timer, cpu));
1375 }
1376 
/* Work item: run the user-space notification helper, fire and forget. */
static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}
1381 
1382 static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
1383 
1384 /*
1385  * Notify the user(s) about new machine check events.
1386  * Can be called from interrupt context, but not from machine check/NMI
1387  * context.
1388  */
1389 int mce_notify_irq(void)
1390 {
1391         /* Not more than two messages every minute */
1392         static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1393 
1394         if (test_and_clear_bit(0, &mce_need_notify)) {
1395                 /* wake processes polling /dev/mcelog */
1396                 wake_up_interruptible(&mce_chrdev_wait);
1397 
1398                 if (mce_helper[0])
1399                         schedule_work(&mce_trigger_work);
1400 
1401                 if (__ratelimit(&ratelimit))
1402                         pr_info(HW_ERR "Machine check events logged\n");
1403 
1404                 return 1;
1405         }
1406         return 0;
1407 }
1408 EXPORT_SYMBOL_GPL(mce_notify_irq);
1409 
1410 static int __mcheck_cpu_mce_banks_init(void)
1411 {
1412         int i;
1413         u8 num_banks = mca_cfg.banks;
1414 
1415         mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
1416         if (!mce_banks)
1417                 return -ENOMEM;
1418 
1419         for (i = 0; i < num_banks; i++) {
1420                 struct mce_bank *b = &mce_banks[i];
1421 
1422                 b->ctl = -1ULL;
1423                 b->init = 1;
1424         }
1425         return 0;
1426 }
1427 
1428 /*
1429  * Initialize Machine Checks for a CPU.
1430  */
/*
 * Initialize Machine Checks for a CPU.
 * Reads MCG_CAP to discover the bank count and optional capabilities,
 * and allocates mce_banks[] on first call. Returns 0 or -ENOMEM.
 */
static int __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	/* Number of reporting banks, from the MCG_CAP count field. */
	b = cap & MCG_BANKCNT_MASK;
	if (!mca_cfg.banks)
		pr_info("CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		pr_warn("Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
	mca_cfg.banks = b;

	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;

	/* Software error recovery (SER) capability. */
	if (cap & MCG_SER_P)
		mca_cfg.ser = true;

	return 0;
}
1468 
/* Vendor-independent per-CPU MCE setup: log leftovers, enable CR4.MCE. */
static void __mcheck_cpu_init_generic(void)
{
	enum mcp_flags m_fl = 0;
	mce_banks_t all_banks;
	u64 cap;

	if (!mca_cfg.bootlog)
		m_fl = MCP_DONTLOG;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC | m_fl, &all_banks);

	/* Turn on machine check exception delivery for this CPU. */
	cr4_set_bits(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	/* If a global control register exists, enable all its bits. */
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
}
1490 
1491 static void __mcheck_cpu_init_clear_banks(void)
1492 {
1493         int i;
1494 
1495         for (i = 0; i < mca_cfg.banks; i++) {
1496                 struct mce_bank *b = &mce_banks[i];
1497 
1498                 if (!b->init)
1499                         continue;
1500                 wrmsrl(msr_ops.ctl(i), b->ctl);
1501                 wrmsrl(msr_ops.status(i), 0);
1502         }
1503 }
1504 
1505 /*
1506  * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1507  * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1508  * Vol 3B Table 15-20). But this confuses both the code that determines
1509  * whether the machine check occurred in kernel or user mode, and also
1510  * the severity assessment code. Pretend that EIPV was set, and take the
1511  * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1512  */
static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	/* The IFU erratum signature only appears in bank 0. */
	if (bank != 0)
		return;
	/* If RIPV or EIPV is set, we are not in the buggy all-clear case. */
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;
	/*
	 * Require the exact instruction-fetch error signature:
	 * UC|EN|MISCV|ADDRV|S|AR set with MCACOD == MCACOD_INSTR,
	 * and OVER/PCC clear.
	 */
	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
			  MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
			  MCACOD)) !=
			 (MCI_STATUS_UC|MCI_STATUS_EN|
			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
			  MCI_STATUS_AR|MCACOD_INSTR))
		return;

	/* Pretend EIPV was set and take ip/cs from the saved registers. */
	m->mcgstatus |= MCG_STATUS_EIPV;
	m->ip = regs->ip;
	m->cs = regs->cs;
}
1532 
1533 /* Add per CPU specific workarounds here */
static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("unknown CPU type - not enabling MCE support\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && cfg->banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 < 17 && cfg->bootlog < 0) {
			/*
			 * Lots of broken BIOS around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			cfg->bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && cfg->banks > 0)
			mce_banks[0].ctl = 0;

		/*
		 * overflow_recov is supported for F15h Models 00h-0fh
		 * even though we don't have a CPUID bit for it.
		 */
		if (c->x86 == 0x15 && c->x86_model <= 0xf)
			mce_flags.overflow_recov = 1;

		/*
		 * Turn off MC4_MISC thresholding banks on those models since
		 * they're not supported there.
		 */
		if (c->x86 == 0x15 &&
		    (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
			int i;
			u64 hwcr;
			bool need_toggle;
			u32 msrs[] = {
				0x00000413, /* MC4_MISC0 */
				0xc0000408, /* MC4_MISC1 */
			};

			rdmsrl(MSR_K7_HWCR, hwcr);

			/* McStatusWrEn has to be set */
			need_toggle = !(hwcr & BIT(18));

			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

			/* Clear CntP bit safely */
			for (i = 0; i < ARRAY_SIZE(msrs); i++)
				msr_clear_bit(msrs[i], 62);

			/* restore old settings */
			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr);
		}
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */

		if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
			cfg->bootlog = 0;

		/* Model 45 == Sandy Bridge-EP; see quirk_sandybridge_ifu(). */
		if (c->x86 == 6 && c->x86_model == 45)
			quirk_no_way_out = quirk_sandybridge_ifu;
		/*
		 * MCG_CAP.MCG_SER_P is necessary but not sufficient to know
		 * whether this processor will actually generate recoverable
		 * machine checks. Check to see if this is an E7 model Xeon.
		 * We can't do a model number check because E5 and E7 use the
		 * same model number. E5 doesn't support recovery, E7 does.
		 */
		if (mca_cfg.recovery || (mca_cfg.ser &&
			!strncmp(c->x86_model_id,
				 "Intel(R) Xeon(R) CPU E7-", 24)))
			set_cpu_cap(c, X86_FEATURE_MCE_RECOVERY);
	}
	/* Negative means no vendor default applied: disable the rendezvous. */
	if (cfg->monarch_timeout < 0)
		cfg->monarch_timeout = 0;
	/* NOTE(review): presumably gives console output time on panic - confirm. */
	if (cfg->bootlog != 0)
		cfg->panic_timeout = 30;

	return 0;
}
1655 
1656 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1657 {
1658         if (c->x86 != 5)
1659                 return 0;
1660 
1661         switch (c->x86_vendor) {
1662         case X86_VENDOR_INTEL:
1663                 intel_p5_mcheck_init(c);
1664                 return 1;
1665                 break;
1666         case X86_VENDOR_CENTAUR:
1667                 winchip_mcheck_init(c);
1668                 return 1;
1669                 break;
1670         default:
1671                 return 0;
1672         }
1673 
1674         return 0;
1675 }
1676 
/* Per-vendor MCE feature setup, called from mcheck_cpu_init(). */
static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		/* Intel: let the CMCI code adjust the polling interval. */
		mce_adjust_timer = cmci_intel_adjust_timer;
		break;

	case X86_VENDOR_AMD: {
		/* Cache CPUID feature bits relevant to MCE handling. */
		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
		mce_flags.succor         = !!cpu_has(c, X86_FEATURE_SUCCOR);
		mce_flags.smca           = !!cpu_has(c, X86_FEATURE_SMCA);

		/*
		 * Install proper ops for Scalable MCA enabled processors
		 */
		if (mce_flags.smca) {
			msr_ops.ctl     = smca_ctl_reg;
			msr_ops.status  = smca_status_reg;
			msr_ops.addr    = smca_addr_reg;
			msr_ops.misc    = smca_misc_reg;
		}
		mce_amd_feature_init(c);

		break;
		}

	default:
		break;
	}
}
1708 
1709 static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1710 {
1711         switch (c->x86_vendor) {
1712         case X86_VENDOR_INTEL:
1713                 mce_intel_feature_clear(c);
1714                 break;
1715         default:
1716                 break;
1717         }
1718 }
1719 
1720 static void mce_start_timer(unsigned int cpu, struct timer_list *t)
1721 {
1722         unsigned long iv = check_interval * HZ;
1723 
1724         if (mca_cfg.ignore_ce || !iv)
1725                 return;
1726 
1727         per_cpu(mce_next_interval, cpu) = iv;
1728 
1729         t->expires = round_jiffies(jiffies + iv);
1730         add_timer_on(t, cpu);
1731 }
1732 
/* Set up and arm this CPU's poll timer during CPU init. */
static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned int cpu = smp_processor_id();

	/* The CPU number is passed to mce_timer_fn() as its 'data' arg. */
	setup_pinned_timer(t, mce_timer_fn, cpu);
	mce_start_timer(cpu, t);
}
1741 
/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	/* Default vector until mcheck_cpu_init() installs do_machine_check(). */
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
}
1748 
/*
 * Call the installed machine check handler for this CPU setup.
 * Starts out pointing at unexpected_machine_check(); switched to
 * do_machine_check() by mcheck_cpu_init() once MCA is fully set up.
 */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;
1752 
/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	/* MCA disabled on the command line or by an earlier failure. */
	if (mca_cfg.disabled)
		return;

	/* Nonzero means the legacy handler fully took over this CPU. */
	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	/* Bank discovery or quirk failure disables MCA for good. */
	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mca_cfg.disabled = true;
		return;
	}

	if (mce_gen_pool_init()) {
		mca_cfg.disabled = true;
		pr_emerg("Couldn't allocate MCE records pool!\n");
		return;
	}

	/* From here on, int18 is routed to the real MCE handler. */
	machine_check_vector = do_machine_check;

	/* Order matters: generic MSR setup, vendor hooks, banks, timer. */
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_init_timer();
}
1786 
1787 /*
1788  * Called for each booted CPU to clear some machine checks opt-ins
1789  */
1790 void mcheck_cpu_clear(struct cpuinfo_x86 *c)
1791 {
1792         if (mca_cfg.disabled)
1793                 return;
1794 
1795         if (!mce_available(c))
1796                 return;
1797 
1798         /*
1799          * Possibly to clear general settings generic to x86
1800          * __mcheck_cpu_clear_generic(c);
1801          */
1802         __mcheck_cpu_clear_vendor(c);
1803 
1804 }
1805 
/*
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
 */

/* Protects the open bookkeeping below. */
static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;	/* #times opened */
static int mce_chrdev_open_exclu;	/* already open exclusive? */
1813 
1814 static int mce_chrdev_open(struct inode *inode, struct file *file)
1815 {
1816         spin_lock(&mce_chrdev_state_lock);
1817 
1818         if (mce_chrdev_open_exclu ||
1819             (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
1820                 spin_unlock(&mce_chrdev_state_lock);
1821 
1822                 return -EBUSY;
1823         }
1824 
1825         if (file->f_flags & O_EXCL)
1826                 mce_chrdev_open_exclu = 1;
1827         mce_chrdev_open_count++;
1828 
1829         spin_unlock(&mce_chrdev_state_lock);
1830 
1831         return nonseekable_open(inode, file);
1832 }
1833 
1834 static int mce_chrdev_release(struct inode *inode, struct file *file)
1835 {
1836         spin_lock(&mce_chrdev_state_lock);
1837 
1838         mce_chrdev_open_count--;
1839         mce_chrdev_open_exclu = 0;
1840 
1841         spin_unlock(&mce_chrdev_state_lock);
1842 
1843         return 0;
1844 }
1845 
/* IPI callback: record the current TSC of the executing CPU into @data. */
static void collect_tscs(void *data)
{
	unsigned long *tscs = data;

	tscs[smp_processor_id()] = rdtsc();
}
1852 
/* Set once APEI has no more records (or errored); stops further reads. */
static int mce_apei_read_done;

/*
 * Collect MCE record of previous boot in persistent storage via APEI ERST.
 * Copies at most one record to *ubuf and advances *ubuf past it on
 * success.  Returns 0 on success or "no record", negative errno on error.
 */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
	int rc;
	u64 record_id;
	struct mce m;

	if (usize < sizeof(struct mce))
		return -EINVAL;

	rc = apei_read_mce(&m, &record_id);
	/* Error or no more MCE record */
	if (rc <= 0) {
		mce_apei_read_done = 1;
		/*
		 * When ERST is disabled, mce_chrdev_read() should return
		 * "no record" instead of "no device."
		 */
		if (rc == -ENODEV)
			return 0;
		return rc;
	}
	rc = -EFAULT;
	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
		return rc;
	/*
	 * In fact, we should have cleared the record after that has
	 * been flushed to the disk or sent to network in
	 * /sbin/mcelog, but we have no interface to support that now,
	 * so just clear it to avoid duplication.
	 */
	rc = apei_clear_mce(record_id);
	if (rc) {
		mce_apei_read_done = 1;
		return rc;
	}
	/* Record consumed: advance the caller's user-space cursor. */
	*ubuf += sizeof(struct mce);

	return 0;
}
1895 
1896 static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
1897                                 size_t usize, loff_t *off)
1898 {
1899         char __user *buf = ubuf;
1900         unsigned long *cpu_tsc;
1901         unsigned prev, next;
1902         int i, err;
1903 
1904         cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
1905         if (!cpu_tsc)
1906                 return -ENOMEM;
1907 
1908         mutex_lock(&mce_chrdev_read_mutex);
1909 
1910         if (!mce_apei_read_done) {
1911                 err = __mce_read_apei(&buf, usize);
1912                 if (err || buf != ubuf)
1913                         goto out;
1914         }
1915 
1916         next = mce_log_get_idx_check(mcelog.next);
1917 
1918         /* Only supports full reads right now */
1919         err = -EINVAL;
1920         if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1921                 goto out;
1922 
1923         err = 0;
1924         prev = 0;
1925         do {
1926                 for (i = prev; i < next; i++) {
1927                         unsigned long start = jiffies;
1928                         struct mce *m = &mcelog.entry[i];
1929 
1930                         while (!m->finished) {
1931                                 if (time_after_eq(jiffies, start + 2)) {
1932                                         memset(m, 0, sizeof(*m));
1933                                         goto timeout;
1934                                 }
1935                                 cpu_relax();
1936                         }
1937                         smp_rmb();
1938                         err |= copy_to_user(buf, m, sizeof(*m));
1939                         buf += sizeof(*m);
1940 timeout:
1941                         ;
1942                 }
1943 
1944                 memset(mcelog.entry + prev, 0,
1945                        (next - prev) * sizeof(struct mce));
1946                 prev = next;
1947                 next = cmpxchg(&mcelog.next, prev, 0);
1948         } while (next != prev);
1949 
1950         synchronize_sched();
1951 
1952         /*
1953          * Collect entries that were still getting written before the
1954          * synchronize.
1955          */
1956         on_each_cpu(collect_tscs, cpu_tsc, 1);
1957 
1958         for (i = next; i < MCE_LOG_LEN; i++) {
1959                 struct mce *m = &mcelog.entry[i];
1960 
1961                 if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
1962                         err |= copy_to_user(buf, m, sizeof(*m));
1963                         smp_rmb();
1964                         buf += sizeof(*m);
1965                         memset(m, 0, sizeof(*m));
1966                 }
1967         }
1968 
1969         if (err)
1970                 err = -EFAULT;
1971 
1972 out:
1973         mutex_unlock(&mce_chrdev_read_mutex);
1974         kfree(cpu_tsc);
1975 
1976         return err ? err : buf - ubuf;
1977 }
1978 
1979 static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
1980 {
1981         poll_wait(file, &mce_chrdev_wait, wait);
1982         if (READ_ONCE(mcelog.next))
1983                 return POLLIN | POLLRDNORM;
1984         if (!mce_apei_read_done && apei_check_mce())
1985                 return POLLIN | POLLRDNORM;
1986         return 0;
1987 }
1988 
1989 static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1990                                 unsigned long arg)
1991 {
1992         int __user *p = (int __user *)arg;
1993 
1994         if (!capable(CAP_SYS_ADMIN))
1995                 return -EPERM;
1996 
1997         switch (cmd) {
1998         case MCE_GET_RECORD_LEN:
1999                 return put_user(sizeof(struct mce), p);
2000         case MCE_GET_LOG_LEN:
2001                 return put_user(MCE_LOG_LEN, p);
2002         case MCE_GETCLEAR_FLAGS: {
2003                 unsigned flags;
2004 
2005                 do {
2006                         flags = mcelog.flags;
2007                 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
2008 
2009                 return put_user(flags, p);
2010         }
2011         default:
2012                 return -ENOTTY;
2013         }
2014 }
2015 
/* Optional backend for writes to /dev/mcelog (e.g. error injection). */
static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
			    size_t usize, loff_t *off);

/* Install the handler invoked by mce_chrdev_write(). */
void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
			     const char __user *ubuf,
			     size_t usize, loff_t *off))
{
	mce_write = fn;
}
EXPORT_SYMBOL_GPL(register_mce_write_callback);
2026 
2027 static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
2028                                 size_t usize, loff_t *off)
2029 {
2030         if (mce_write)
2031                 return mce_write(filp, ubuf, usize, off);
2032         else
2033                 return -EINVAL;
2034 }
2035 
/* File operations for /dev/mcelog. */
static const struct file_operations mce_chrdev_ops = {
	.open			= mce_chrdev_open,
	.release		= mce_chrdev_release,
	.read			= mce_chrdev_read,
	.write			= mce_chrdev_write,
	.poll			= mce_chrdev_poll,
	.unlocked_ioctl		= mce_chrdev_ioctl,
	.llseek			= no_llseek,
};
2045 
2046 static struct miscdevice mce_chrdev_device = {
2047         MISC_MCELOG_MINOR,
2048         "mcelog",
2049         &mce_chrdev_ops,
2050 };
2051 
2052 static void __mce_disable_bank(void *arg)
2053 {
2054         int bank = *((int *)arg);
2055         __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
2056         cmci_disable_bank(bank);
2057 }
2058 
2059 void mce_disable_bank(int bank)
2060 {
2061         if (bank >= mca_cfg.banks) {
2062                 pr_warn(FW_BUG
2063                         "Ignoring request to disable invalid MCA bank %d.\n",
2064                         bank);
2065                 return;
2066         }
2067         set_bit(bank, mce_banks_ce_disabled);
2068         on_each_cpu(__mce_disable_bank, &bank, 1);
2069 }
2070 
2071 /*
2072  * mce=off Disables machine check
2073  * mce=no_cmci Disables CMCI
2074  * mce=no_lmce Disables LMCE
2075  * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
2076  * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
2077  * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
2078  *      monarchtimeout is how long to wait for other CPUs on machine
2079  *      check, or 0 to not wait
2080  * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
2081  * mce=nobootlog Don't log MCEs from before booting.
2082  * mce=bios_cmci_threshold Don't program the CMCI threshold
2083  */
2084 static int __init mcheck_enable(char *str)
2085 {
2086         struct mca_config *cfg = &mca_cfg;
2087 
2088         if (*str == 0) {
2089                 enable_p5_mce();
2090                 return 1;
2091         }
2092         if (*str == '=')
2093                 str++;
2094         if (!strcmp(str, "off"))
2095                 cfg->disabled = true;
2096         else if (!strcmp(str, "no_cmci"))
2097                 cfg->cmci_disabled = true;
2098         else if (!strcmp(str, "no_lmce"))
2099                 cfg->lmce_disabled = true;
2100         else if (!strcmp(str, "dont_log_ce"))
2101                 cfg->dont_log_ce = true;
2102         else if (!strcmp(str, "ignore_ce"))
2103                 cfg->ignore_ce = true;
2104         else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2105                 cfg->bootlog = (str[0] == 'b');
2106         else if (!strcmp(str, "bios_cmci_threshold"))
2107                 cfg->bios_cmci_threshold = true;
2108         else if (!strcmp(str, "recovery"))
2109                 cfg->recovery = true;
2110         else if (isdigit(str[0])) {
2111                 if (get_option(&str, &cfg->tolerant) == 2)
2112                         get_option(&str, &(cfg->monarch_timeout));
2113         } else {
2114                 pr_info("mce argument %s ignored. Please use /sys\n", str);
2115                 return 0;
2116         }
2117         return 1;
2118 }
2119 __setup("mce", mcheck_enable);
2120 
/* Early MCA init: thermal, decode chain, severity table, deferred work. */
int __init mcheck_init(void)
{
	mcheck_intel_therm_init();
	/* Register the SRAO notifier on the decode chain. */
	mce_register_decode_chain(&mce_srao_nb);
	mcheck_vendor_init_severity();

	INIT_WORK(&mce_work, mce_process_work);
	init_irq_work(&mce_irq_work, mce_irq_work_cb);

	return 0;
}
2132 
2133 /*
2134  * mce_syscore: PM support
2135  */
2136 
2137 /*
2138  * Disable machine checks on suspend and shutdown. We can't really handle
2139  * them later.
2140  */
2141 static void mce_disable_error_reporting(void)
2142 {
2143         int i;
2144 
2145         for (i = 0; i < mca_cfg.banks; i++) {
2146                 struct mce_bank *b = &mce_banks[i];
2147 
2148                 if (b->init)
2149                         wrmsrl(msr_ops.ctl(i), 0);
2150         }
2151         return;
2152 }
2153 
2154 static void vendor_disable_error_reporting(void)
2155 {
2156         /*
2157          * Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
2158          * Disabling them for just a single offlined CPU is bad, since it will
2159          * inhibit reporting for all shared resources on the socket like the
2160          * last level cache (LLC), the integrated memory controller (iMC), etc.
2161          */
2162         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2163                 return;
2164 
2165         mce_disable_error_reporting();
2166 }
2167 
/* Suspend: silence MCA banks; we can't handle MCEs during suspend. */
static int mce_syscore_suspend(void)
{
	vendor_disable_error_reporting();
	return 0;
}

/* Shutdown: same as suspend — no machine checks past this point. */
static void mce_syscore_shutdown(void)
{
	vendor_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
	__mcheck_cpu_init_clear_banks();
}

/* PM hooks; registered from mcheck_init_device(). */
static struct syscore_ops mce_syscore_ops = {
	.suspend	= mce_syscore_suspend,
	.shutdown	= mce_syscore_shutdown,
	.resume		= mce_syscore_resume,
};
2196 
2197 /*
2198  * mce_device: Sysfs support
2199  */
2200 
2201 static void mce_cpu_restart(void *data)
2202 {
2203         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2204                 return;
2205         __mcheck_cpu_init_generic();
2206         __mcheck_cpu_init_clear_banks();
2207         __mcheck_cpu_init_timer();
2208 }
2209 
/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	/* Stop all poll timers first so none fires mid-reconfiguration. */
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}
2216 
/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

/* Re-enable CMCI on this CPU; @all non-NULL also restarts the poll timer. */
static void mce_enable_ce(void *all)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	/* Re-arm CMCI before rescanning banks for pending events. */
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}
2234 
/* Sysfs bus: /sys/devices/system/machinecheck. */
static struct bus_type mce_subsys = {
	.name		= "machinecheck",
	.dev_name	= "machinecheck",
};

/* Per-CPU sysfs device, created/destroyed on CPU hotplug. */
DEFINE_PER_CPU(struct device *, mce_device);

/* Optional vendor hook invoked on CPU online/dead (threshold banks). */
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
2243 
/* Map a bank's embedded sysfs attribute back to its struct mce_bank. */
static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}
2248 
2249 static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2250                          char *buf)
2251 {
2252         return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
2253 }
2254 
2255 static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2256                         const char *buf, size_t size)
2257 {
2258         u64 new;
2259 
2260         if (kstrtou64(buf, 0, &new) < 0)
2261                 return -EINVAL;
2262 
2263         attr_to_bank(attr)->ctl = new;
2264         mce_restart();
2265 
2266         return size;
2267 }
2268 
2269 static ssize_t
2270 show_trigger(struct device *s, struct device_attribute *attr, char *buf)
2271 {
2272         strcpy(buf, mce_helper);
2273         strcat(buf, "\n");
2274         return strlen(mce_helper) + 1;
2275 }
2276 
2277 static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
2278                                 const char *buf, size_t siz)
2279 {
2280         char *p;
2281 
2282         strncpy(mce_helper, buf, sizeof(mce_helper));
2283         mce_helper[sizeof(mce_helper)-1] = 0;
2284         p = strchr(mce_helper, '\n');
2285 
2286         if (p)
2287                 *p = 0;
2288 
2289         return strlen(mce_helper) + !!p;
2290 }
2291 
/* sysfs write of ignore_ce: toggle all corrected-error processing. */
static ssize_t set_ignore_ce(struct device *s,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	/* Only act on an actual state change. */
	if (mca_cfg.ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.ignore_ce = true;
		} else {
			/* enable ce features */
			mca_cfg.ignore_ce = false;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	return size;
}
2315 
/* sysfs write of cmci_disabled: toggle CMCI on every CPU. */
static ssize_t set_cmci_disabled(struct device *s,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	/* Only act on an actual state change. */
	if (mca_cfg.cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.cmci_disabled = true;
		} else {
			/* enable cmci */
			mca_cfg.cmci_disabled = false;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	return size;
}
2338 
2339 static ssize_t store_int_with_restart(struct device *s,
2340                                       struct device_attribute *attr,
2341                                       const char *buf, size_t size)
2342 {
2343         ssize_t ret = device_store_int(s, attr, buf, size);
2344         mce_restart();
2345         return ret;
2346 }
2347 
/* Per-subsystem sysfs attributes exposed under each machinecheck device. */
static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);

/* check_interval needs a custom store hook to restart the timers. */
static struct dev_ext_attribute dev_attr_check_interval = {
	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
	&check_interval
};

static struct dev_ext_attribute dev_attr_ignore_ce = {
	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
	&mca_cfg.ignore_ce
};

static struct dev_ext_attribute dev_attr_cmci_disabled = {
	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
	&mca_cfg.cmci_disabled
};

/* NULL-terminated list walked by mce_device_create()/_remove(). */
static struct device_attribute *mce_device_attrs[] = {
	&dev_attr_tolerant.attr,
	&dev_attr_check_interval.attr,
	&dev_attr_trigger,
	&dev_attr_monarch_timeout.attr,
	&dev_attr_dont_log_ce.attr,
	&dev_attr_ignore_ce.attr,
	&dev_attr_cmci_disabled.attr,
	NULL
};
2378 
/* Tracks which CPUs have a registered mce sysfs device. */
static cpumask_var_t mce_device_initialized;

/* device core release hook: the device struct was kzalloc'd by us. */
static void mce_device_release(struct device *dev)
{
	kfree(dev);
}
2385 
/* Per cpu device init. All of the cpus still share the same ctrl bank: */
static int mce_device_create(unsigned int cpu)
{
	struct device *dev;
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	dev = kzalloc(sizeof *dev, GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->id  = cpu;
	dev->bus = &mce_subsys;
	dev->release = &mce_device_release;

	err = device_register(dev);
	if (err) {
		/* After device_register() failure, put_device() frees dev. */
		put_device(dev);
		return err;
	}

	/* Common attributes, then one attribute file per MCA bank. */
	for (i = 0; mce_device_attrs[i]; i++) {
		err = device_create_file(dev, mce_device_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < mca_cfg.banks; j++) {
		err = device_create_file(dev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = dev;

	return 0;
	/* Unwind only the files created so far, then drop the device. */
error2:
	while (--j >= 0)
		device_remove_file(dev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		device_remove_file(dev, mce_device_attrs[i]);

	device_unregister(dev);

	return err;
}
2434 
2435 static void mce_device_remove(unsigned int cpu)
2436 {
2437         struct device *dev = per_cpu(mce_device, cpu);
2438         int i;
2439 
2440         if (!cpumask_test_cpu(cpu, mce_device_initialized))
2441                 return;
2442 
2443         for (i = 0; mce_device_attrs[i]; i++)
2444                 device_remove_file(dev, mce_device_attrs[i]);
2445 
2446         for (i = 0; i < mca_cfg.banks; i++)
2447                 device_remove_file(dev, &mce_banks[i].attr);
2448 
2449         device_unregister(dev);
2450         cpumask_clear_cpu(cpu, mce_device_initialized);
2451         per_cpu(mce_device, cpu) = NULL;
2452 }
2453 
/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	/* Keep CMCI claims across suspend/resume (TASKS_FROZEN). */
	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();

	vendor_disable_error_reporting();
}
2467 
/* Re-enable machine checks when a CPU comes back (hotplug/down-failed). */
static void mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	/* Skip CMCI re-arm on thaw; it was never cleared (see disable). */
	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		/* Restore CTL only for banks this kernel initialized. */
		if (b->init)
			wrmsrl(msr_ops.ctl(i), b->ctl);
	}
}
2485 
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		/* Create the sysfs device, then let the vendor hook run. */
		mce_device_create(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
		/* Reverse order of ONLINE: vendor hook first, then sysfs. */
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_device_remove(cpu);
		mce_intel_hcpu_update(cpu);

		/* intentionally ignoring frozen here */
		if (!(action & CPU_TASKS_FROZEN))
			cmci_rediscover();
		break;
	case CPU_DOWN_PREPARE:
		/* Quiesce MCA on the dying CPU and stop its poll timer. */
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		del_timer_sync(t);
		break;
	case CPU_DOWN_FAILED:
		/* Offlining aborted: undo DOWN_PREPARE. */
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		mce_start_timer(cpu, t);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier = {
	.notifier_call = mce_cpu_callback,
};
2525 
2526 static __init void mce_init_banks(void)
2527 {
2528         int i;
2529 
2530         for (i = 0; i < mca_cfg.banks; i++) {
2531                 struct mce_bank *b = &mce_banks[i];
2532                 struct device_attribute *a = &b->attr;
2533 
2534                 sysfs_attr_init(&a->attr);
2535                 a->attr.name    = b->attrname;
2536                 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2537 
2538                 a->attr.mode    = 0644;
2539                 a->show         = show_bank;
2540                 a->store        = set_bank;
2541         }
2542 }
2543 
/*
 * Device-model init: per-CPU sysfs devices, hotplug notifier, syscore
 * ops and the /dev/mcelog character device.
 */
static __init int mcheck_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data)) {
		err = -EIO;
		goto err_out;
	}

	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
		err = -ENOMEM;
		goto err_out;
	}

	mce_init_banks();

	err = subsys_system_register(&mce_subsys, NULL);
	if (err)
		goto err_out_mem;

	cpu_notifier_register_begin();
	for_each_online_cpu(i) {
		err = mce_device_create(i);
		if (err) {
			/*
			 * Register notifier anyway (and do not unreg it) so
			 * that we don't leave undeleted timers, see notifier
			 * callback above.
			 */
			__register_hotcpu_notifier(&mce_cpu_notifier);
			cpu_notifier_register_done();
			goto err_device_create;
		}
	}

	__register_hotcpu_notifier(&mce_cpu_notifier);
	cpu_notifier_register_done();

	register_syscore_ops(&mce_syscore_ops);

	/* register character device /dev/mcelog */
	err = misc_register(&mce_chrdev_device);
	if (err)
		goto err_register;

	return 0;

err_register:
	unregister_syscore_ops(&mce_syscore_ops);

err_device_create:
	/*
	 * We didn't keep track of which devices were created above, but
	 * even if we had, the set of online cpus might have changed.
	 * Play safe and remove for every possible cpu, since
	 * mce_device_remove() will do the right thing.
	 */
	for_each_possible_cpu(i)
		mce_device_remove(i);

err_out_mem:
	free_cpumask_var(mce_device_initialized);

err_out:
	pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);

	return err;
}
device_initcall_sync(mcheck_init_device);
2614 
/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	/* "nomce" is equivalent to "mce=off". */
	mca_cfg.disabled = true;
	return 1;
}
__setup("nomce", mcheck_disable);
2624 
2625 #ifdef CONFIG_DEBUG_FS
/* Return the (lazily created, cached) "mce" debugfs directory. */
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}
2635 
/* Reset the MCE rendezvous state machine (used by the fake-panic knob). */
static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_panicked, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}
2644 
/* debugfs read of fake_panic. */
static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

/* debugfs write of fake_panic; resets the rendezvous state first. */
static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");
2660 
2661 static int __init mcheck_debugfs_init(void)
2662 {
2663         struct dentry *dmce, *ffake_panic;
2664 
2665         dmce = mce_get_debugfs_dir();
2666         if (!dmce)
2667                 return -ENOMEM;
2668         ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2669                                           &fake_panic_fops);
2670         if (!ffake_panic)
2671                 return -ENOMEM;
2672 
2673         return 0;
2674 }
2675 #else
2676 static int __init mcheck_debugfs_init(void) { return -EINVAL; }
2677 #endif
2678 
static int __init mcheck_late_init(void)
{
	mcheck_debugfs_init();

	/*
	 * Flush out everything that has been logged during early boot, now that
	 * everything has been initialized (workqueues, decoders, ...).
	 */
	mce_schedule_work();

	return 0;
}
late_initcall(mcheck_late_init);
2692 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp