TOMOYO Linux Cross Reference
Linux/arch/powerpc/kernel/irq.c

/*
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on
 * CONFIG_PPC_8xx to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}
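
/*
 * For orientation: on ppc64, r13 always holds the local PACA pointer,
 * so the lbz above is roughly an untraced equivalent of
 *
 *      return local_paca->irq_happened;
 *
 * avoiding the debug_smp_processor_id() checks that get_paca() can
 * pull in (see the comment in __check_irq_replay() below).
 */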

static inline notrace int decrementer_check_overflow(void)
{
        u64 now = get_tb_or_rtc();
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

        return now >= *next_tb;
}
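
/*
 * Note: decrementers_next_tb is the per-cpu timebase value at which the
 * next timer event is due, so "now >= *next_tb" means a decrementer
 * event became due (and may have been lost) while interrupts were hard
 * disabled.
 */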

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
 * there's an EE, DEC or DBELL to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
        /*
         * We use local_paca rather than get_paca() to avoid all
         * the debug_smp_processor_id() business in this low level
         * function
         */
        unsigned char happened = local_paca->irq_happened;

        /*
         * We are responding to the next interrupt, so interrupt-off
         * latencies should be reset here.
         */
        trace_hardirqs_on();
        trace_hardirqs_off();

        /*
         * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
         * not be set, which means interrupts have only just been hard
         * disabled as part of the local_irq_restore or interrupt return
         * code. In that case, skip the decrementer check because it's
         * expensive to read the TB.
         *
         * HARD_DIS then gets cleared here, but it's reconciled later.
         * Either local_irq_disable will replay the interrupt and that
         * will reconcile state like other hard interrupts. Or interrupt
         * return will replay the interrupt and in that case it sets
         * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
         */
        if (happened & PACA_IRQ_HARD_DIS) {
                local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

                /*
                 * We may have missed a decrementer interrupt if hard disabled.
                 * Check the decrementer register in case we had a rollover
                 * while hard disabled.
                 */
                if (!(happened & PACA_IRQ_DEC)) {
                        if (decrementer_check_overflow()) {
                                local_paca->irq_happened |= PACA_IRQ_DEC;
                                happened |= PACA_IRQ_DEC;
                        }
                }
        }

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * Check if a Hypervisor Maintenance interrupt happened.
         * This is a higher priority interrupt than the others, so
         * replay it first.
         */
        if (happened & PACA_IRQ_HMI) {
                local_paca->irq_happened &= ~PACA_IRQ_HMI;
                return 0xe60;
        }

        if (happened & PACA_IRQ_DEC) {
                local_paca->irq_happened &= ~PACA_IRQ_DEC;
                return 0x900;
        }

        if (happened & PACA_IRQ_PMI) {
                local_paca->irq_happened &= ~PACA_IRQ_PMI;
                return 0xf00;
        }

        if (happened & PACA_IRQ_EE) {
                local_paca->irq_happened &= ~PACA_IRQ_EE;
                return 0x500;
        }

#ifdef CONFIG_PPC_BOOK3E
        /*
         * Check if an EPR external interrupt happened. This bit is typically
         * set if we need to handle another "edge" interrupt from within the
         * MPIC "EPR" handler.
         */
        if (happened & PACA_IRQ_EE_EDGE) {
                local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
                return 0x500;
        }

        if (happened & PACA_IRQ_DBELL) {
                local_paca->irq_happened &= ~PACA_IRQ_DBELL;
                return 0x280;
        }
#else
        if (happened & PACA_IRQ_DBELL) {
                local_paca->irq_happened &= ~PACA_IRQ_DBELL;
                return 0xa00;
        }
#endif /* CONFIG_PPC_BOOK3E */

        /* There should be nothing left! */
        BUG_ON(local_paca->irq_happened != 0);

        return 0;
}
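
/*
 * Illustrative caller, for orientation (the real consumers are
 * arch_local_irq_restore() below and the exception exit path in
 * entry_64.S):
 *
 *      unsigned int vec = __check_irq_replay();
 *      if (vec)
 *              __replay_interrupt(vec);        branches to the 0x500/0x900/... handler
 */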

notrace void arch_local_irq_restore(unsigned long mask)
{
        unsigned char irq_happened;
        unsigned int replay;

        /* Write the new soft-enabled value */
        irq_soft_mask_set(mask);
        if (mask)
                return;

        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
         * happened. If none happened, we know we can just return.
         *
         * We may have preempted before the check below, in which case
         * we are checking the "new" CPU instead of the old one. This
         * is only a problem if an event happened on the "old" CPU.
         *
         * External interrupt events will have caused interrupts to
         * be hard-disabled, so there is no problem: we cannot have
         * preempted.
         */
        irq_happened = get_irq_happened();
        if (!irq_happened) {
                /*
                 * FIXME. Here we'd like to be able to do:
                 *
                 * #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
                 *   WARN_ON(!(mfmsr() & MSR_EE));
                 * #endif
                 *
                 * But currently it hits in a few paths, we should fix those and
                 * enable the warning.
                 */
                return;
        }

        /*
         * We need to hard disable to get a trusted value from
         * __check_irq_replay(). We also need to soft-disable
         * again to avoid warnings in there due to the use of
         * per-cpu variables.
         */
        if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
                WARN_ON(!(mfmsr() & MSR_EE));
#endif
                __hard_irq_disable();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
        } else {
                /*
                 * We should already be hard disabled here. We had bugs
                 * where that wasn't the case so let's double check it and
                 * warn if we are wrong. Only do that when IRQ tracing
                 * is enabled as mfmsr() can be costly.
                 */
                if (WARN_ON(mfmsr() & MSR_EE))
                        __hard_irq_disable();
#endif
        }

        irq_soft_mask_set(IRQS_ALL_DISABLED);
        trace_hardirqs_off();

        /*
         * Check if anything needs to be re-emitted. We haven't
         * soft-enabled yet to avoid warnings in decrementer_check_overflow
         * accessing per-cpu variables
         */
        replay = __check_irq_replay();

        /* We can soft-enable now */
        trace_hardirqs_on();
        irq_soft_mask_set(IRQS_ENABLED);

        /*
         * And replay if we have to. This will return with interrupts
         * hard-enabled.
         */
        if (replay) {
                __replay_interrupt(replay);
                return;
        }

        /* Finally, let's ensure we are hard enabled */
        __hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
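
/*
 * Typical path in, for illustration: a generic local_irq_save()/
 * local_irq_restore() pair ends up here, e.g.
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);
 *      ...critical section...
 *      local_irq_restore(flags);       resolves to arch_local_irq_restore()
 *
 * Restoring a zero (IRQS_ENABLED) mask is what triggers the replay
 * logic above.
 */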

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                local_irq_enable();
        } else
                __hard_irq_enable();
}

/*
 * This is a helper to use when about to enter a low-power idle state,
 * when entering it has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        /*
         * Mark interrupts as soft-enabled and clear the
         * PACA_IRQ_HARD_DIS from the pending mask since we
         * are about to hard enable as well as a side effect
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        irq_soft_mask_set(IRQS_ENABLED);

        /* Tell the caller to enter the low power state */
        return true;
}
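
/*
 * Sketch of the expected calling pattern, with a hypothetical
 * ppc_md.power_save hook (modeled on the pseries H_CEDE case):
 *
 *      static void example_power_save(void)
 *      {
 *              if (!prep_irq_for_idle())
 *                      return;                 an event is pending, do not sleep
 *              enter_low_power_state();        hypothetical; may hard-enable
 *      }
 */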

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not record
 * long irqs-off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
        WARN_ON(!irqs_disabled());

        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET        0xff

static const u8 srr1_to_lazyirq[0x10] = {
        0, 0, 0,
        PACA_IRQ_DBELL,
        IRQ_SYSTEM_RESET,
        PACA_IRQ_DBELL,
        PACA_IRQ_DEC,
        0,
        PACA_IRQ_EE,
        PACA_IRQ_EE,
        PACA_IRQ_HMI,
        0, 0, 0, 0, 0 };
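
/*
 * Worked example: SRR1[42:45] holds the wake reason. ISA bit 45 of a
 * 64-bit register is bit 18 in LSB-0 numbering, so the
 * "(srr1 & SRR1_WAKEMASK_P8) >> 18" below extracts a 0..15 index into
 * this table. Reason 0b0100 (index 4) is a system reset, per the
 * comment above; reason 0b0110 (index 6) maps to PACA_IRQ_DEC, i.e. a
 * decrementer wakeup.
 */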

void replay_system_reset(void)
{
        struct pt_regs regs;

        ppc_save_regs(&regs);
        regs.trap = 0x100;
        get_paca()->in_nmi = 1;
        system_reset_exception(&regs);
        get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
        unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
        u8 reason = srr1_to_lazyirq[idx];

        /*
         * Take the system reset now, which is immediately after registers
         * are restored from idle. It's an NMI, so interrupts need not be
         * re-enabled before it is taken.
         */
        if (unlikely(reason == IRQ_SYSTEM_RESET)) {
                replay_system_reset();
                return;
        }

        /*
         * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
         * so this can be called unconditionally with the SRR1 wake
         * reason as returned by the idle code, which uses 0 to mean no
         * interrupt.
         *
         * If a future CPU was to designate this as an interrupt reason,
         * then a new index for no interrupt must be assigned.
         */
        local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
        /*
         * This must only be called with interrupts soft-disabled,
         * the replay will happen when re-enabling.
         */
        WARN_ON(!arch_irqs_disabled());

        /*
         * Interrupts must always be hard disabled before irq_happened is
         * modified (to prevent lost update in case of interrupt between
         * load and store).
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /* Indicate in the PACA that we have an interrupt to replay */
        local_paca->irq_happened |= PACA_IRQ_EE;
}
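
/*
 * Net effect: the next soft-enable on this CPU (e.g. the next
 * arch_local_irq_restore() with a zero mask) sees PACA_IRQ_EE in
 * irq_happened and replays the 0x500 external interrupt handler.
 */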

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
        seq_printf(p, "  Local timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "BCT");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
        seq_printf(p, "  Broadcast timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
        seq_printf(p, "  Local timer interrupts for others\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, "  Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, "  Machine check exceptions\n");

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                seq_printf(p, "%*s: ", prec, "HMI");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                        per_cpu(irq_stat, j).hmi_exceptions);
                seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
        }

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
        seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
        seq_printf(p, "%*s: ", prec, "WDG");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
        seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                seq_printf(p, "%*s: ", prec, "DBL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
                seq_printf(p, "  Doorbell interrupts\n");
        }
#endif

        return 0;
}
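
/*
 * Illustrative /proc/interrupts excerpt produced by the code above on
 * a two-CPU system (counts are made up):
 *
 *      LOC:    104378     99812   Local timer interrupts for timer event device
 *      SPU:         0         2   Spurious interrupts
 *      PMI:        17        25   Performance monitoring interrupts
 */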

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;
        sum += per_cpu(irq_stat, cpu).timer_irqs_others;
        sum += per_cpu(irq_stat, cpu).hmi_exceptions;
        sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
        sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
        sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

        return sum;
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = current_stack_pointer() & (THREAD_SIZE-1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < 2048)) {
                pr_err("do_IRQ: stack overflow: %ld\n", sp);
                dump_stack();
        }
#endif
}
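
/*
 * Why the mask works: kernel stacks are THREAD_SIZE aligned, so
 * "sp & (THREAD_SIZE-1)" is the stack pointer's offset from the bottom
 * of its stack region; since the stack grows downward, that offset is
 * exactly the number of bytes still free.
 */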

void __do_irq(struct pt_regs *regs)
{
        unsigned int irq;

        irq_enter();

        trace_irq_entry(regs);

        check_stack_overflow();

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = ppc_md.get_irq();

        /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();

        /* And finally process it */
        if (unlikely(!irq))
                __this_cpu_inc(irq_stat.spurious_irqs);
        else
                generic_handle_irq(irq);

        trace_irq_exit(regs);

        irq_exit();
}

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        void *cursp, *irqsp, *sirqsp;

        /* Switch to the irq stack to handle this */
        cursp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
        irqsp = hardirq_ctx[raw_smp_processor_id()];
        sirqsp = softirq_ctx[raw_smp_processor_id()];

        /* Already there ? */
        if (unlikely(cursp == irqsp || cursp == sirqsp)) {
                __do_irq(regs);
                set_irq_regs(old_regs);
                return;
        }
        /* Switch stack and call */
        call_do_irq(regs, irqsp);

        set_irq_regs(old_regs);
}
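
/*
 * The "already there?" test above relies on the same alignment trick
 * as check_stack_overflow(): all three stacks are THREAD_SIZE aligned,
 * so masking the stack pointer with ~(THREAD_SIZE - 1) yields the base
 * of whichever stack we are running on, which can be compared directly
 * against the per-cpu hardirq/softirq stack bases.
 */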

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void   *critirq_ctx[NR_CPUS] __read_mostly;
void    *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;

void do_softirq_own_stack(void)
{
        call_do_softirq(softirq_ctx[smp_processor_id()]);
}

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
        int cpuid;

        if (cpumask_equal(mask, cpu_online_mask)) {
                static int irq_rover;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
do_round_robin:
                raw_spin_lock_irqsave(&irq_rover_lock, flags);

                irq_rover = cpumask_next(irq_rover, cpu_online_mask);
                if (irq_rover >= nr_cpu_ids)
                        irq_rover = cpumask_first(cpu_online_mask);

                cpuid = irq_rover;

                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpuid = cpumask_first_and(mask, cpu_online_mask);
                if (cpuid >= nr_cpu_ids)
                        goto do_round_robin;
        }

        return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
        return hard_smp_processor_id();
}
#endif
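
/*
 * Note on the SMP variant above: when the affinity mask covers all
 * online CPUs, interrupts are spread round-robin under irq_rover_lock.
 * The unusual "goto do_round_robin" jumps back into that branch when
 * the requested mask contains no online CPU, falling back to the
 * round-robin path.
 */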

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */
