TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/nmi.c


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011  Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/sched/clock.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/cpu_entry_area.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>

struct nmi_desc {
        raw_spinlock_t lock;
        struct list_head head;
};

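/*
 * One action list per NMI type. The array indices follow the NMI type
 * constants from <asm/nmi.h>: NMI_LOCAL, NMI_UNKNOWN, NMI_SERR and
 * NMI_IO_CHECK, with NMI_MAX as the number of types.
 */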
static struct nmi_desc nmi_desc[NMI_MAX] = {
        {
                .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
                .head = LIST_HEAD_INIT(nmi_desc[0].head),
        },
        {
                .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
                .head = LIST_HEAD_INIT(nmi_desc[1].head),
        },
        {
                .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
                .head = LIST_HEAD_INIT(nmi_desc[2].head),
        },
        {
                .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
                .head = LIST_HEAD_INIT(nmi_desc[3].head),
        },
};

struct nmi_stats {
        unsigned int normal;
        unsigned int unknown;
        unsigned int external;
        unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis __read_mostly;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed simultaneously;
 * it may only be taken from NMI context.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
        unknown_nmi_panic = 1;
        return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
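
/*
 * Note: besides the "unknown_nmi_panic" boot parameter handled above, the
 * same flag is exposed at runtime as the kernel.unknown_nmi_panic sysctl.
 */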

#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;

static int __init nmi_warning_debugfs(void)
{
        debugfs_create_u64("nmi_longest_ns", 0644,
                        arch_debugfs_dir, &nmi_longest_ns);
        return 0;
}
fs_initcall(nmi_warning_debugfs);
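
/*
 * With debugfs mounted at the usual location, the warning threshold is
 * tunable at /sys/kernel/debug/x86/nmi_longest_ns (arch_debugfs_dir is
 * the "x86" debugfs directory).
 */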

static void nmi_check_duration(struct nmiaction *action, u64 duration)
{
        int remainder_ns, decimal_msecs;

        if (duration < nmi_longest_ns || duration < action->max_duration)
                return;

        action->max_duration = duration;

        /* do_div() divides @duration in place and returns the remainder. */
        remainder_ns = do_div(duration, (1000 * 1000));
        decimal_msecs = remainder_ns / 1000;

        printk_ratelimited(KERN_INFO
                "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
                action->handler, duration, decimal_msecs);
}

static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        struct nmiaction *a;
        int handled = 0;

        rcu_read_lock();

        /*
         * NMIs are edge-triggered, which means if you have enough
         * of them concurrently, you can lose some because only one
         * can be latched at any given time.  Walk the whole list
         * to handle those situations.
         */
        list_for_each_entry_rcu(a, &desc->head, list) {
                int thishandled;
                u64 delta;

                delta = sched_clock();
                thishandled = a->handler(type, regs);
                handled += thishandled;
                delta = sched_clock() - delta;
                trace_nmi_handler(a->handler, (int)delta, thishandled);

                nmi_check_duration(a, delta);
        }

        rcu_read_unlock();

        /* return total number of NMI events handled */
        return handled;
}
NOKPROBE_SYMBOL(nmi_handle);

int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        unsigned long flags;

        if (!action->handler)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        /*
         * Indicate if there are multiple registrations on the
         * internal NMI handler call chains (SERR and IO_CHECK).
         */
        WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
        WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

        /*
         * Some handlers need to be executed first, otherwise a fake
         * event confuses some handlers (kdump uses this flag).
         */
        if (action->flags & NMI_FLAG_FIRST)
                list_add_rcu(&action->list, &desc->head);
        else
                list_add_tail_rcu(&action->list, &desc->head);

        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        struct nmiaction *n;
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);

        list_for_each_entry_rcu(n, &desc->head, list) {
                /*
                 * The name passed in to describe the NMI handler
                 * is used as the lookup key.
                 */
                if (!strcmp(n->name, name)) {
                        WARN(in_nmi(),
                                "Trying to free NMI (%s) from NMI context!\n", n->name);
                        list_del_rcu(&n->list);
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
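
/*
 * Illustrative sketch (not part of this file, hence compiled out): how a
 * hypothetical user of this API would register a handler on the NMI_LOCAL
 * chain via the register_nmi_handler() wrapper from <asm/nmi.h> and later
 * remove it by name.  The handler and its "example" name are assumptions
 * made for the example only.
 */
#if 0
static int example_nmi_handler(unsigned int type, struct pt_regs *regs)
{
        /*
         * A real handler would check whether its device raised this NMI
         * and return the number of events it handled; 0 means "not ours"
         * so the remaining handlers on the chain get a look.
         */
        return 0;
}

static int __init example_init(void)
{
        /* flags == 0: no NMI_FLAG_FIRST, run in registration order. */
        return register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0,
                                    "example");
}

static void example_teardown(void)
{
        /* Must not be called from NMI context; waits for an RCU grace period. */
        unregister_nmi_handler(NMI_LOCAL, "example");
}
#endif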

static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
        /* check to see if anyone registered against these types of errors */
        if (nmi_handle(NMI_SERR, regs))
                return;

        pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        if (panic_on_unrecovered_nmi)
                nmi_panic(regs, "NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");

        /* Clear and disable the PCI SERR error line. */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
        outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);

static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        /* check to see if anyone registered against these types of errors */
        if (nmi_handle(NMI_IO_CHECK, regs))
                return;

        pr_emerg(
        "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());
        show_regs(regs);

        if (panic_on_io_nmi) {
                nmi_panic(regs, "NMI IOCK error: Not continuing");

                /*
                 * If we end up here, it means we have received an NMI while
                 * processing panic(). Simply return without delaying and
                 * re-enabling NMIs.
                 */
                return;
        }

        /* Re-enable the IOCK line and wait (20000 * 100us, about 2 seconds) */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);

        i = 20000;
        while (--i) {
                touch_nmi_watchdog();
                udelay(100);
        }

        reason &= ~NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);

static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        int handled;

        /*
         * Back-to-back NMIs are dealt with one level up.  This makes
         * having multiple 'unknown' handlers of limited use, as only
         * the first one is ever run (unless it can actually determine
         * if it caused the NMI).
         */
        handled = nmi_handle(NMI_UNKNOWN, regs);
        if (handled) {
                __this_cpu_add(nmi_stats.unknown, handled);
                return;
        }

        __this_cpu_add(nmi_stats.unknown, 1);

        pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        pr_emerg("Do you have a strange power saving mode enabled?\n");
        if (unknown_nmi_panic || panic_on_unrecovered_nmi)
                nmi_panic(regs, "NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int handled;
        bool b2b = false;

        /*
         * CPU-specific NMIs must be processed before non-CPU-specific
         * NMIs, otherwise we may lose them, because a CPU-specific
         * NMI cannot be detected/processed on other CPUs.
         */

        /*
         * Back-to-back NMIs are interesting because they can either
         * be two NMIs or more (anything over two is dropped because
         * NMIs are edge-triggered).  If this is the second half of
         * a back-to-back NMI, assume we dropped things and process
         * more handlers.  Otherwise, reset the 'swallow' NMI behaviour.
         */
        if (regs->ip == __this_cpu_read(last_nmi_rip))
                b2b = true;
        else
                __this_cpu_write(swallow_nmi, false);

        __this_cpu_write(last_nmi_rip, regs->ip);

        handled = nmi_handle(NMI_LOCAL, regs);
        __this_cpu_add(nmi_stats.normal, handled);
        if (handled) {
                /*
                 * There are cases when an NMI handler handles multiple
                 * events in the current NMI.  One of these events may
                 * be queued for the next NMI.  Because the event is
                 * already handled, the next NMI will result in an
                 * unknown NMI.  Instead, let's flag it for a potential
                 * NMI to swallow.
                 */
                if (handled > 1)
                        __this_cpu_write(swallow_nmi, true);
                return;
        }

        /*
         * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
         *
         * Another CPU may be processing panic routines while holding
         * nmi_reason_lock. Check if the CPU issued the IPI for crash dumping,
         * and if so, call its callback directly.  If there is no CPU preparing
         * a crash dump, we simply loop here.
         */
        while (!raw_spin_trylock(&nmi_reason_lock)) {
                run_crash_ipi_callback(regs);
                cpu_relax();
        }

        reason = x86_platform.get_nmi_reason();

        if (reason & NMI_REASON_MASK) {
                if (reason & NMI_REASON_SERR)
                        pci_serr_error(reason, regs);
                else if (reason & NMI_REASON_IOCHK)
                        io_check_error(reason, regs);
#ifdef CONFIG_X86_32
                /*
                 * Reassert NMI in case it became active
                 * meanwhile as it's edge-triggered:
                 */
                reassert_nmi();
#endif
                __this_cpu_add(nmi_stats.external, 1);
                raw_spin_unlock(&nmi_reason_lock);
                return;
        }
        raw_spin_unlock(&nmi_reason_lock);

        /*
         * Only one NMI can be latched at a time.  To handle
         * this we may process multiple nmi handlers at once to
         * cover the case where an NMI is dropped.  The downside
         * to this approach is we may process an NMI prematurely,
         * while its real NMI is sitting latched.  This will cause
         * an unknown NMI on the next run of the NMI processing.
         *
         * We tried to flag that condition above, by setting the
         * swallow_nmi flag when we process more than one event.
         * This condition is also only present on the second half
         * of a back-to-back NMI, so we flag that condition too.
         *
         * If both are true, we assume we already processed this
         * NMI previously and we swallow it.  Otherwise we reset
         * the logic.
         *
         * There are scenarios where we may accidentally swallow
         * a 'real' unknown NMI.  For example, while processing
         * a perf NMI another perf NMI comes in along with a
         * 'real' unknown NMI.  These two NMIs get combined into
         * one (as described above).  When the next NMI gets
         * processed, it will be flagged by perf as handled, but
         * no one will know that a 'real' unknown NMI was sent as
         * well.  As a result it gets swallowed.  Or if the first
         * perf NMI returns two events handled then the second
         * NMI will get eaten by the logic below, again losing a
         * 'real' unknown NMI.  But this is the best we can do
         * for now.
         */
        if (b2b && __this_cpu_read(swallow_nmi))
                __this_cpu_add(nmi_stats.swallow, 1);
        else
                unknown_nmi_error(reason, regs);
}
NOKPROBE_SYMBOL(default_do_nmi);

/*
 * An NMI can page fault or hit a breakpoint, which will cause it to lose
 * its NMI context with the CPU when the breakpoint or page fault handler
 * does an IRET.
 *
 * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
 * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
 * if the outer NMI came from kernel mode, but we can still nest if the
 * outer NMI came from user mode.
 *
 * To handle these nested NMIs, we have three states:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI executes an iret, another NMI can preempt it. We do not
 * want to allow this new NMI to run, but we want to execute it when the
 * first one finishes.  We set the state to "latched", and the exit of
 * the first NMI will perform a dec_return; if the result is zero
 * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
 * dec_return would have set the state to NMI_EXECUTING (what we want it
 * to be when we are running). In this case, we simply jump back to
 * rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2,
 * because the NMI could have preempted another page fault and corrupt
 * the CR2 that is about to be read. As nested NMIs must be restarted
 * and they cannot take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
        NMI_NOT_RUNNING = 0,
        NMI_EXECUTING,
        NMI_LATCHED,
};
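
/*
 * In short: do_nmi() moves NMI_NOT_RUNNING -> NMI_EXECUTING on entry; a
 * nested NMI arriving in between bumps the state to NMI_LATCHED; the
 * this_cpu_dec_return() at the end of do_nmi() then yields a non-zero
 * value (back to NMI_EXECUTING) and the handler loops via nmi_restart.
 */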
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);

#ifdef CONFIG_X86_64
/*
 * On x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
 * some care, the inner breakpoint will clobber the outer breakpoint's
 * stack.
 *
 * If a breakpoint is being processed and the debug stack is in use, and
 * an NMI comes in and also hits a breakpoint, the stack pointer will be
 * set to the same fixed address as that of the interrupted breakpoint,
 * corrupting that stack. To handle this case, check whether the stack
 * that was interrupted is the debug stack, and if so, change the IDT so
 * that new breakpoints use the current stack instead of switching to
 * the fixed address. On return from the NMI, switch back to the
 * original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);

static bool notrace is_debug_stack(unsigned long addr)
{
        struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks);
        unsigned long top = CEA_ESTACK_TOP(cs, DB);
        unsigned long bot = CEA_ESTACK_BOT(cs, DB1);

        if (__this_cpu_read(debug_stack_usage))
                return true;
        /*
         * Note, this covers the guard page between DB and DB1 as well to
         * avoid two checks. But by all means @addr can never point into
         * the guard page.
         */
        return addr >= bot && addr < top;
}
NOKPROBE_SYMBOL(is_debug_stack);
#endif

dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
        if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
                return;

        if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
                this_cpu_write(nmi_state, NMI_LATCHED);
                return;
        }
        this_cpu_write(nmi_state, NMI_EXECUTING);
        this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:

#ifdef CONFIG_X86_64
        /*
         * If we interrupted a breakpoint, it is possible that
         * the NMI handler will have breakpoints too. We need to
         * change the IDT such that breakpoints that happen here
         * continue to use the NMI stack.
         */
        if (unlikely(is_debug_stack(regs->sp))) {
                debug_stack_set_zero();
                this_cpu_write(update_debug_stack, 1);
        }
#endif

        nmi_enter();

        inc_irq_stat(__nmi_count);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();

#ifdef CONFIG_X86_64
        if (unlikely(this_cpu_read(update_debug_stack))) {
                debug_stack_reset();
                this_cpu_write(update_debug_stack, 0);
        }
#endif

        if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
                write_cr2(this_cpu_read(nmi_cr2));
        if (this_cpu_dec_return(nmi_state))
                goto nmi_restart;

        if (user_mode(regs))
                mds_user_clear_cpu_buffers();
}
NOKPROBE_SYMBOL(do_nmi);

void stop_nmi(void)
{
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
}

/* Reset the back-to-back NMI logic. */
void local_touch_nmi(void)
{
        __this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);
