TOMOYO Linux Cross Reference
Linux/arch/powerpc/kernel/irq.c

/*
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
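
/*
 * Illustration only (a hedged sketch, not code from this file): in
 * PowerPC bit numbering "bit 0" is the most significant bit, so
 * enabling IRQn on the 8xx means setting 0x80000000u >> n in SIMASK.
 * A hypothetical helper matching the comment above might look like:
 *
 *	static inline u32 siu_mask_bit(unsigned int irq)
 *	{
 *		return 0x80000000u >> irq;	// IRQ0 == MSB
 *	}
 */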

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}
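
/*
 * Note: on ppc64 the fixed register r13 always holds the pointer to
 * this CPU's paca_struct, so these asm accessors can read and write
 * PACA fields as a constant offset from GPR 13, with no pointer load
 * and no tracing instrumentation (hence "notrace").
 */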

static inline notrace void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

static inline notrace int decrementer_check_overflow(void)
{
        u64 now = get_tb_or_rtc();
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

        return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or one of the vectors
 * 0x500/0x900/0x280/0xa00/0xe60/0xe80 if there's an EE, DEC,
 * DBELL or HMI to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
        /*
         * We use local_paca rather than get_paca() to avoid all
         * the debug_smp_processor_id() business in this low level
         * function
         */
        unsigned char happened = local_paca->irq_happened;

        /* Clear bit 0 which we wouldn't clear otherwise */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * We may have missed a decrementer interrupt. We check the
         * decrementer itself rather than the paca irq_happened field
         * in case we also had a rollover while hard disabled
         */
        local_paca->irq_happened &= ~PACA_IRQ_DEC;
        if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
                return 0x900;

        /* Next, check if an external interrupt happened */
        local_paca->irq_happened &= ~PACA_IRQ_EE;
        if (happened & PACA_IRQ_EE)
                return 0x500;

#ifdef CONFIG_PPC_BOOK3E
        /* Finally check if an EPR external interrupt happened;
         * this bit is typically set if we need to handle another
         * "edge" interrupt from within the MPIC "EPR" handler
         */
        local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
        if (happened & PACA_IRQ_EE_EDGE)
                return 0x500;

        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL)
                return 0x280;
#else
        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL) {
                if (cpu_has_feature(CPU_FTR_HVMODE))
                        return 0xe80;
                return 0xa00;
        }
#endif /* CONFIG_PPC_BOOK3E */

        /* Check if a hypervisor Maintenance interrupt happened */
        local_paca->irq_happened &= ~PACA_IRQ_HMI;
        if (happened & PACA_IRQ_HMI)
                return 0xe60;

        /* There should be nothing left! */
        BUG_ON(local_paca->irq_happened != 0);

        return 0;
}
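
/*
 * Quick reference for the vectors replayed above (offsets match the
 * Book3S exception vectors; Book3E uses 0x280 for doorbells):
 *	0x500	external interrupt (EE)
 *	0x900	decrementer (DEC)
 *	0x280	doorbell (Book3E)
 *	0xa00	doorbell
 *	0xe60	hypervisor maintenance (HMI)
 *	0xe80	hypervisor doorbell (HV mode)
 */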

notrace void arch_local_irq_restore(unsigned long en)
{
        unsigned char irq_happened;
        unsigned int replay;

        /* Write the new soft-enabled value */
        set_soft_enabled(en);
        if (!en)
                return;
        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
         * happened. If none happened, we know we can just return.
         *
         * We may have preempted before the check below, in which case
         * we are checking the "new" CPU instead of the old one. This
         * is only a problem if an event happened on the "old" CPU.
         *
         * External interrupt events will have caused interrupts to
         * be hard-disabled, so there is no problem, we
         * cannot have preempted.
         */
        irq_happened = get_irq_happened();
        if (!irq_happened)
                return;

        /*
         * We need to hard disable to get a trusted value from
         * __check_irq_replay(). We also need to soft-disable
         * again to avoid warnings in there due to the use of
         * per-cpu variables.
         *
         * We know that if the value in irq_happened is exactly 0x01
         * then we are already hard disabled (there are other less
         * common cases that we'll ignore for now), so we skip the
         * (expensive) mtmsrd.
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
        else {
                /*
                 * We should already be hard disabled here. We had bugs
                 * where that wasn't the case so let's double-check it
                 * and warn if we are wrong. Only do that when IRQ tracing
                 * is enabled as mfmsr() can be costly.
                 */
                if (WARN_ON(mfmsr() & MSR_EE))
                        __hard_irq_disable();
        }
#endif /* CONFIG_TRACE_IRQFLAGS */

        set_soft_enabled(0);

        /*
         * Check if anything needs to be re-emitted. We haven't
         * soft-enabled yet to avoid warnings in decrementer_check_overflow
         * accessing per-cpu variables
         */
        replay = __check_irq_replay();

        /* We can soft-enable now */
        set_soft_enabled(1);

        /*
         * And replay if we have to. This will return with interrupts
         * hard-enabled.
         */
        if (replay) {
                __replay_interrupt(replay);
                return;
        }

        /* Finally, let's ensure we are hard enabled */
        __hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
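
/*
 * Usage sketch (illustrative, not part of this file): under this lazy
 * scheme a generic critical section such as
 *
 *	unsigned long flags;
 *	local_irq_save(flags);		// only clears paca->soft_enabled
 *	// ... critical section ...
 *	local_irq_restore(flags);	// lands in arch_local_irq_restore()
 *
 * never writes MSR[EE] unless an interrupt actually arrived while we
 * were soft-disabled; only then do we pay for the mtmsrd and a replay.
 */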

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                local_irq_enable();
        } else
                __hard_irq_enable();
}

/*
 * This is a helper to use when about to enter an idle low-power state,
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        hard_irq_disable();

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        /*
         * Mark interrupts as soft-enabled and clear the
         * PACA_IRQ_HARD_DIS from the pending mask since we
         * are about to hard enable as well as a side effect
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        local_paca->soft_enabled = 1;

        /* Tell the caller to enter the low power state */
        return true;
}
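
/*
 * Hypothetical caller pattern (sketch only) in a pseries power_save
 * hook, per the H_CEDE example in the comment above:
 *
 *	if (!prep_irq_for_idle())
 *		return;			// event pending, skip low power
 *	plpar_hcall_norets(H_CEDE);	// may return with EE set
 */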

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
        seq_puts(p, "  Local timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
        seq_puts(p, "  Local timer interrupts for others\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_puts(p, "  Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_puts(p, "  Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_puts(p, "  Machine check exceptions\n");

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                seq_printf(p, "%*s: ", prec, "HMI");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                        per_cpu(irq_stat, j).hmi_exceptions);
                seq_puts(p, "  Hypervisor Maintenance Interrupts\n");
        }

#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                seq_printf(p, "%*s: ", prec, "DBL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
                seq_puts(p, "  Doorbell interrupts\n");
        }
#endif

        return 0;
}
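
/*
 * Illustrative /proc/interrupts tail rendered by the code above on a
 * two-CPU box (counts are made up):
 *
 *	LOC:   123456    98765   Local timer interrupts for timer event device
 *	LOC:       12        7   Local timer interrupts for others
 *	SPU:        0        1   Spurious interrupts
 *	PMI:        0        0   Performance monitoring interrupts
 *	MCE:        0        0   Machine check exceptions
 */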

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;
        sum += per_cpu(irq_stat, cpu).timer_irqs_others;
        sum += per_cpu(irq_stat, cpu).hmi_exceptions;
#ifdef CONFIG_PPC_DOORBELL
        sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

        return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
        struct irq_desc *desc;
        unsigned int irq;
        static int warned;
        cpumask_var_t mask;
        const struct cpumask *map = cpu_online_mask;

        alloc_cpumask_var(&mask, GFP_KERNEL);

        for_each_irq_desc(irq, desc) {
                struct irq_data *data;
                struct irq_chip *chip;

                data = irq_desc_get_irq_data(desc);
                if (irqd_is_per_cpu(data))
                        continue;

                chip = irq_data_get_irq_chip(data);

                cpumask_and(mask, irq_data_get_affinity_mask(data), map);
                if (cpumask_any(mask) >= nr_cpu_ids) {
                        pr_warn("Breaking affinity for irq %i\n", irq);
                        cpumask_copy(mask, map);
                }
                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(data, mask, true);
                else if (desc->action && !(warned++))
                        pr_err("Cannot set affinity for irq %i\n", irq);
        }

        free_cpumask_var(mask);

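        /*
         * Briefly open the interrupt window so that anything already
         * latched for this (outgoing) CPU gets delivered before the
         * caller takes it offline.
         */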
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = current_stack_pointer() & (THREAD_SIZE-1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                pr_err("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
#endif
}

void __do_irq(struct pt_regs *regs)
{
        unsigned int irq;

        irq_enter();

        trace_irq_entry(regs);

        check_stack_overflow();

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = ppc_md.get_irq();

        /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();

        /* And finally process it */
        if (unlikely(irq == NO_IRQ))
                __this_cpu_inc(irq_stat.spurious_irqs);
        else
                generic_handle_irq(irq);

        trace_irq_exit(regs);

        irq_exit();
}

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct thread_info *curtp, *irqtp, *sirqtp;

        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[raw_smp_processor_id()];
        sirqtp = softirq_ctx[raw_smp_processor_id()];

        /* Already there? */
        if (unlikely(curtp == irqtp || curtp == sirqtp)) {
                __do_irq(regs);
                set_irq_regs(old_regs);
                return;
        }

        /* Prepare the thread_info in the irq stack */
        irqtp->task = curtp->task;
        irqtp->flags = 0;

        /* Copy the preempt_count so that the [soft]irq checks work. */
        irqtp->preempt_count = curtp->preempt_count;

        /* Switch stack and call */
        call_do_irq(regs, irqtp);

        /* Restore stack limit */
        irqtp->task = NULL;

        /* Copy back updates to the thread_info */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);

        set_irq_regs(old_regs);
}
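
/*
 * Note for the reader (describes code outside this file): call_do_irq()
 * is a small assembly helper (see misc_32.S/misc_64.S) that swaps r1
 * onto the per-cpu hard irq stack prepared above and then calls
 * __do_irq() on that stack.
 */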

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i, cpu_nr;

        for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
                cpu_nr = i;
#else
#ifdef CONFIG_SMP
                cpu_nr = get_hard_smp_processor_id(i);
#else
                cpu_nr = 0;
#endif
#endif

                memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = critirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = dbgirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
        }
}

void do_softirq_own_stack(void)
{
        struct thread_info *curtp, *irqtp;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        irqtp->flags = 0;
        call_do_softirq(irqtp);
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
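
/*
 * Example use (hypothetical caller): a driver that wants the hardware
 * interrupt number behind a Linux virq for debugging might do:
 *
 *	pr_debug("virq %u maps to hwirq %lu\n",
 *		 virq, (unsigned long)virq_to_hw(virq));
 */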

#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
        int cpuid;

        if (cpumask_equal(mask, cpu_online_mask)) {
                static int irq_rover;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
do_round_robin:
                raw_spin_lock_irqsave(&irq_rover_lock, flags);

                irq_rover = cpumask_next(irq_rover, cpu_online_mask);
                if (irq_rover >= nr_cpu_ids)
                        irq_rover = cpumask_first(cpu_online_mask);

                cpuid = irq_rover;

                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpuid = cpumask_first_and(mask, cpu_online_mask);
                if (cpuid >= nr_cpu_ids)
                        goto do_round_robin;
        }

        return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
        return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
        return 0;
}

#ifdef CONFIG_PPC64
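/*
 * "noirqdistrib" on the kernel command line clears distribute_irqs
 * (defined above); platform interrupt controller code (the XICS layer,
 * for instance) can consult it to keep device interrupts on a default
 * server instead of spreading them across CPUs. Sketch: booting with
 *
 *	linux ... noirqdistrib
 *
 * leaves distribute_irqs == 0.
 */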
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */
