TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/irq_32.c

// SPDX-License-Identifier: GPL-2.0
/*
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/nospec-branch.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW

int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;

        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
DEFINE_PER_CPU(struct irq_stack *, softirq_stack);

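/*
 * Switch the stack pointer to @stack, call @func through the
 * retpoline-safe CALL_NOSPEC thunk, then switch back.  The original
 * %esp is kept in %ebx (callee-saved) across the call.
 */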
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     CALL_NOSPEC
                     "movl      %%ebx,%%esp     \n"
                     : "=b" (stack)
                     : "0" (stack),
                       [thunk_target] "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}

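/* Base address of the stack we are currently running on (stacks are THREAD_SIZE aligned). */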
static inline void *current_stack(void)
{
        return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}

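/*
 * Run desc->handle_irq(desc) on the per-CPU hardirq stack.  Returns 0 if we
 * are already on the hardirq stack (nested interrupt) and the caller must
 * handle the interrupt on the current stack, 1 otherwise.
 */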
static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
        struct irq_stack *curstk, *irqstk;
        u32 *isp, *prev_esp, arg1;

        curstk = (struct irq_stack *) current_stack();
        irqstk = __this_cpu_read(hardirq_stack);

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curstk == irqstk))
                return 0;

        isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

        /* Save the next esp at the bottom of the stack */
        prev_esp = (u32 *)irqstk;
        *prev_esp = current_stack_pointer;

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

        asm volatile("xchgl     %%ebx,%%esp     \n"
                     CALL_NOSPEC
                     "movl      %%ebx,%%esp     \n"
                     : "=a" (arg1), "=b" (isp)
                     :  "0" (desc),   "1" (isp),
                        [thunk_target] "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
        struct irq_stack *irqstk;

        if (per_cpu(hardirq_stack, cpu))
                return;

        irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREADINFO_GFP,
                                               THREAD_SIZE_ORDER));
        per_cpu(hardirq_stack, cpu) = irqstk;

        irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREADINFO_GFP,
                                               THREAD_SIZE_ORDER));
        per_cpu(softirq_stack, cpu) = irqstk;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
}

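/*
 * Run pending softirqs on the per-CPU softirq stack rather than on the
 * stack we were called on.
 */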
void do_softirq_own_stack(void)
{
        struct irq_stack *irqstk;
        u32 *isp, *prev_esp;

        irqstk = __this_cpu_read(softirq_stack);

        /* build the stack frame on the softirq stack */
        isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

        /* Push the previous esp onto the stack */
        prev_esp = (u32 *)irqstk;
        *prev_esp = current_stack_pointer;

        call_on_stack(__do_softirq, isp);
}

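/*
 * Called from do_IRQ(): dispatch @desc, preferring the hardirq stack unless
 * the interrupt arrived from user mode or we are already running on the
 * hardirq stack.  Returns false for a bogus (NULL or error) descriptor.
 */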
bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
        int overflow = check_stack_overflow();

        if (IS_ERR_OR_NULL(desc))
                return false;

        if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                generic_handle_irq_desc(desc);
        }

        return true;
}
