Linux/arch/powerpc/kernel/irq.c

/*
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on
 * CONFIG_PPC_8xx to reduce code space and undefined function references.
 */

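/*
 * Illustrative sketch only (the real 8xx PIC code lives under
 * arch/powerpc/platforms/): with an enable-style mask whose IRQ0 is the
 * most-significant bit, disabling source "irq" and pushing the result
 * into the SIU mask register might look like
 *
 *	cached_irq_mask &= ~(1u << (31 - irq));	    bit clear = disabled
 *	out_be32(&siu->sc_simask, cached_irq_mask);
 *
 * where "siu" and "sc_simask" are assumed names, not definitions from
 * this file.
 */
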
#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

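        /*
         * GPR13 holds the per-cpu PACA pointer on ppc64, so this loads
         * local_paca->irq_happened directly, avoiding the tracing and
         * debug hooks a C-level accessor could pull in.
         */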
        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}

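/*
 * decrementers_next_tb holds the timebase value of the next scheduled
 * decrementer event on this CPU, so if the current timebase has already
 * passed it, a decrementer interrupt was due (and may have been swallowed
 * while we were hard-disabled).
 */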
static inline notrace int decrementer_check_overflow(void)
{
        u64 now = get_tb_or_rtc();
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

        return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or the trap vector to replay
 * (0x500, 0x900, 0xf00, 0xe60, and 0x280 or 0xa00 for doorbells) if
 * there's an EE, DEC, PMI, HMI or doorbell to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
        /*
         * We use local_paca rather than get_paca() to avoid all
         * the debug_smp_processor_id() business in this low level
         * function
         */
        unsigned char happened = local_paca->irq_happened;

        /*
         * We are responding to the next interrupt, so interrupt-off
         * latencies should be reset here.
         */
        trace_hardirqs_on();
        trace_hardirqs_off();

        if (happened & PACA_IRQ_HARD_DIS) {
                /* Clear bit 0 which we wouldn't clear otherwise */
                local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

                /*
                 * We may have missed a decrementer interrupt if hard disabled.
                 * Check the decrementer register in case we had a rollover
                 * while hard disabled.
                 */
                if (!(happened & PACA_IRQ_DEC)) {
                        if (decrementer_check_overflow()) {
                                local_paca->irq_happened |= PACA_IRQ_DEC;
                                happened |= PACA_IRQ_DEC;
                        }
                }
        }

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * Check if a Hypervisor Maintenance interrupt happened.
         * This is a higher priority interrupt than the others, so
         * replay it first.
         */
        if (happened & PACA_IRQ_HMI) {
                local_paca->irq_happened &= ~PACA_IRQ_HMI;
                return 0xe60;
        }

        if (happened & PACA_IRQ_DEC) {
                local_paca->irq_happened &= ~PACA_IRQ_DEC;
                return 0x900;
        }

        if (happened & PACA_IRQ_PMI) {
                local_paca->irq_happened &= ~PACA_IRQ_PMI;
                return 0xf00;
        }

        if (happened & PACA_IRQ_EE) {
                local_paca->irq_happened &= ~PACA_IRQ_EE;
                return 0x500;
        }

#ifdef CONFIG_PPC_BOOK3E
        /*
         * Check if an EPR external interrupt happened. This bit is
         * typically set if we need to handle another "edge" interrupt
         * from within the MPIC "EPR" handler.
         */
        if (happened & PACA_IRQ_EE_EDGE) {
                local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
                return 0x500;
        }

        if (happened & PACA_IRQ_DBELL) {
                local_paca->irq_happened &= ~PACA_IRQ_DBELL;
                return 0x280;
        }
#else
        if (happened & PACA_IRQ_DBELL) {
                local_paca->irq_happened &= ~PACA_IRQ_DBELL;
                return 0xa00;
        }
#endif /* CONFIG_PPC_BOOK3E */

        /* There should be nothing left! */
        BUG_ON(local_paca->irq_happened != 0);

        return 0;
}

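/*
 * arch_local_irq_restore() consumes the vector returned by
 * __check_irq_replay() above, handing it to __replay_interrupt() so the
 * matching exception handler runs as if the interrupt had just been taken.
 */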
notrace void arch_local_irq_restore(unsigned long mask)
{
        unsigned char irq_happened;
        unsigned int replay;

        /* Write the new soft-enabled value */
        irq_soft_mask_set(mask);
        if (mask)
                return;

        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
         * happened. If none happened, we know we can just return.
         *
         * We may have preempted before the check below, in which case
         * we are checking the "new" CPU instead of the old one. This
         * is only a problem if an event happened on the "old" CPU.
         *
         * External interrupt events will have caused interrupts to
         * be hard-disabled, so there is no problem, we
         * cannot have preempted.
         */
        irq_happened = get_irq_happened();
        if (!irq_happened)
                return;

        /*
         * We need to hard disable to get a trusted value from
         * __check_irq_replay(). We also need to soft-disable
         * again to avoid warnings in there due to the use of
         * per-cpu variables.
         *
         * We know that if the value in irq_happened is exactly 0x01
         * then we are already hard disabled (there are other less
         * common cases that we'll ignore for now), so we skip the
         * (expensive) mtmsrd.
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
        else {
                /*
                 * We should already be hard disabled here. We had bugs
                 * where that wasn't the case so let's double-check it
                 * and warn if we are wrong. Only do that when IRQ tracing
                 * is enabled as mfmsr() can be costly.
                 */
                if (WARN_ON(mfmsr() & MSR_EE))
                        __hard_irq_disable();
        }
#endif

        irq_soft_mask_set(IRQS_ALL_DISABLED);
        trace_hardirqs_off();

        /*
         * Check if anything needs to be re-emitted. We haven't
         * soft-enabled yet to avoid warnings in decrementer_check_overflow
         * accessing per-cpu variables
         */
        replay = __check_irq_replay();

        /* We can soft-enable now */
        trace_hardirqs_on();
        irq_soft_mask_set(IRQS_ENABLED);

        /*
         * And replay if we have to. This will return with interrupts
         * hard-enabled.
         */
        if (replay) {
                __replay_interrupt(replay);
                return;
        }

        /* Finally, let's ensure we are hard enabled */
        __hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

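/*
 * For context, a sketch of how this is reached from generic code (the
 * wrappers live in include/linux/irqflags.h; paraphrased, not verbatim):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		ends up in arch_local_irq_save()
 *	...critical section...
 *	local_irq_restore(flags);	ends up in arch_local_irq_restore(flags)
 *
 * On ppc64 "flags" is the lazy soft-mask state, not the MSR: the restore
 * path above only touches MSR[EE] when some event was caught while
 * soft-disabled.
 */
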
/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                local_irq_enable();
        } else
                __hard_irq_enable();
}

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        /*
         * Mark interrupts as soft-enabled and clear the
         * PACA_IRQ_HARD_DIS from the pending mask since we
         * are about to hard enable as well as a side effect
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        irq_soft_mask_set(IRQS_ENABLED);

        /* Tell the caller to enter the low power state */
        return true;
}

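/*
 * Hypothetical caller sketch (names assumed, not from this file): a
 * ppc_md.power_save implementation would typically do
 *
 *	if (!prep_irq_for_idle())
 *		return;			something is pending, do not sleep
 *	enter_low_power_state();	e.g. an H_CEDE hypercall under pHyp
 *
 * relying on the wakeup event itself to bring interrupts back on.
 */
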
#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not report
 * long IRQs-off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
        WARN_ON(!irqs_disabled());

        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET        0xff

static const u8 srr1_to_lazyirq[0x10] = {
        0, 0, 0,
        PACA_IRQ_DBELL,
        IRQ_SYSTEM_RESET,
        PACA_IRQ_DBELL,
        PACA_IRQ_DEC,
        0,
        PACA_IRQ_EE,
        PACA_IRQ_EE,
        PACA_IRQ_HMI,
        0, 0, 0, 0, 0 };

void replay_system_reset(void)
{
        struct pt_regs regs;

        ppc_save_regs(&regs);
        regs.trap = 0x100;
        get_paca()->in_nmi = 1;
        system_reset_exception(&regs);
        get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
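        /*
         * SRR1[42:45] (ISA bit numbering, i.e. bits 21:18 counting from
         * the LSB) carry the POWER8 wakeup reason; shifting right by 18
         * leaves a 4-bit index into srr1_to_lazyirq[] above.
         */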
        unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
        u8 reason = srr1_to_lazyirq[idx];

        /*
         * Take the system reset now, which is immediately after registers
         * are restored from idle. It's an NMI, so interrupts need not be
         * re-enabled before it is taken.
         */
        if (unlikely(reason == IRQ_SYSTEM_RESET)) {
                replay_system_reset();
                return;
        }

        /*
         * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
         * so this can be called unconditionally with the SRR1 wake
         * reason as returned by the idle code, which uses 0 to mean no
         * interrupt.
         *
         * If a future CPU was to designate this as an interrupt reason,
         * then a new index for no interrupt must be assigned.
         */
        local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
        /*
         * This must only be called with interrupts soft-disabled,
         * the replay will happen when re-enabling.
         */
        WARN_ON(!arch_irqs_disabled());

        /*
         * Interrupts must always be hard disabled before irq_happened is
         * modified (to prevent lost update in case of interrupt between
         * load and store).
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /* Indicate in the PACA that we have an interrupt to replay */
        local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
        seq_printf(p, "  Local timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
        seq_printf(p, "  Local timer interrupts for others\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, "  Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, "  Machine check exceptions\n");

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                seq_printf(p, "%*s: ", prec, "HMI");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                        per_cpu(irq_stat, j).hmi_exceptions);
                seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
        }

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
        seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
        seq_printf(p, "%*s: ", prec, "WDG");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
        seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                seq_printf(p, "%*s: ", prec, "DBL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
                seq_printf(p, "  Doorbell interrupts\n");
        }
#endif

        return 0;
}

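/*
 * Illustrative /proc/interrupts excerpt produced by the handler above
 * (single CPU shown, counts invented):
 *
 *	LOC:    123456   Local timer interrupts for timer event device
 *	LOC:        17   Local timer interrupts for others
 *	SPU:         0   Spurious interrupts
 *	PMI:        42   Performance monitoring interrupts
 */
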
/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;
        sum += per_cpu(irq_stat, cpu).timer_irqs_others;
        sum += per_cpu(irq_stat, cpu).hmi_exceptions;
        sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
        sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
        sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

        return sum;
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

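        /*
         * The thread_info sits at the base of the stack and the stack
         * grows down toward it, so the stack pointer's offset within
         * THREAD_SIZE is also the amount of space left.
         */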
        sp = current_stack_pointer() & (THREAD_SIZE-1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                pr_err("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
#endif
}

void __do_irq(struct pt_regs *regs)
{
        unsigned int irq;

        irq_enter();

        trace_irq_entry(regs);

        check_stack_overflow();

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = ppc_md.get_irq();

        /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();

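        /*
         * A zero from ppc_md.get_irq() means the PIC found no valid
         * source; virq 0 is never a mapped interrupt on powerpc, so it
         * doubles as "nothing pending" and is counted as spurious.
         */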
        /* And finally process it */
        if (unlikely(!irq))
                __this_cpu_inc(irq_stat.spurious_irqs);
        else
                generic_handle_irq(irq);

        trace_irq_exit(regs);

        irq_exit();
}

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct thread_info *curtp, *irqtp, *sirqtp;

        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[raw_smp_processor_id()];
        sirqtp = softirq_ctx[raw_smp_processor_id()];

        /* Already there? */
        if (unlikely(curtp == irqtp || curtp == sirqtp)) {
                __do_irq(regs);
                set_irq_regs(old_regs);
                return;
        }

        /* Prepare the thread_info in the irq stack */
        irqtp->task = curtp->task;
        irqtp->flags = 0;

        /* Copy the preempt_count so that the [soft]irq checks work. */
        irqtp->preempt_count = curtp->preempt_count;

        /* Switch stack and call */
        call_do_irq(regs, irqtp);

        /* Clear the irq stack's task pointer */
        irqtp->task = NULL;

        /* Copy back updates to the thread_info */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);

        set_irq_regs(old_regs);
}

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i, cpu_nr;

        for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
                cpu_nr = i;
#else
#ifdef CONFIG_SMP
                cpu_nr = get_hard_smp_processor_id(i);
#else
                cpu_nr = 0;
#endif
#endif

                memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = critirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = dbgirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                klp_init_thread_info(tp);

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                klp_init_thread_info(tp);
        }
}

void do_softirq_own_stack(void)
{
        struct thread_info *curtp, *irqtp;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        irqtp->flags = 0;
        call_do_softirq(irqtp);
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
        int cpuid;

        if (cpumask_equal(mask, cpu_online_mask)) {
                static int irq_rover;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
do_round_robin:
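                /*
                 * Note: the "goto do_round_robin" in the else branch
                 * jumps into this block's scope. That is legal C (no
                 * VLA declarations are skipped) and safe here, since
                 * irq_rover and irq_rover_lock are static and flags is
                 * assigned by raw_spin_lock_irqsave() before use.
                 */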
                raw_spin_lock_irqsave(&irq_rover_lock, flags);

                irq_rover = cpumask_next(irq_rover, cpu_online_mask);
                if (irq_rover >= nr_cpu_ids)
                        irq_rover = cpumask_first(cpu_online_mask);

                cpuid = irq_rover;

                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpuid = cpumask_first_and(mask, cpu_online_mask);
                if (cpuid >= nr_cpu_ids)
                        goto do_round_robin;
        }

        return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
        return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
        return 0;
}

#ifdef CONFIG_PPC64
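/*
 * distribute_irqs (defined near the top of this file, default 1) is
 * consulted by interrupt controller code when deciding whether to spread
 * device interrupts across CPUs; booting with "noirqdistrib" on the
 * kernel command line forces it to 0.
 */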
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */

