~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/ia64/kernel/irq.c

Version: ~ [ linux-5.1.2 ] ~ [ linux-5.0.16 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.43 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.119 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.176 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.179 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.139 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.67 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.39.4 ] ~ [ linux-2.6.38.8 ] ~ [ linux-2.6.37.6 ] ~ [ linux-2.6.36.4 ] ~ [ linux-2.6.35.14 ] ~ [ linux-2.6.34.15 ] ~ [ linux-2.6.33.20 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *      linux/arch/ia64/kernel/irq.c
  3  *
  4  *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  5  *
  6  * This file contains the code used by various IRQ handling routines:
  7  * asking for different IRQ's should be done through these routines
  8  * instead of just grabbing them. Thus setups with different IRQ numbers
  9  * shouldn't result in any weird surprises, and installing new handlers
 10  * should be easier.
 11  */
 12 
 13 /*
 14  * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 15  *
 16  * IRQs are in fact implemented a bit like signal handlers for the kernel.
 17  * Naturally it's not a 1:1 relation, but there are similarities.
 18  */
 19 
 20 #include <linux/config.h>
 21 #include <linux/errno.h>
 22 #include <linux/module.h>
 23 #include <linux/signal.h>
 24 #include <linux/sched.h>
 25 #include <linux/ioport.h>
 26 #include <linux/interrupt.h>
 27 #include <linux/timex.h>
 28 #include <linux/slab.h>
 29 #include <linux/random.h>
 30 #include <linux/smp_lock.h>
 31 #include <linux/init.h>
 32 #include <linux/kernel_stat.h>
 33 #include <linux/irq.h>
 34 #include <linux/proc_fs.h>
 35 #include <linux/seq_file.h>
 36 #include <linux/kallsyms.h>
 37 
 38 #include <asm/atomic.h>
 39 #include <asm/io.h>
 40 #include <asm/smp.h>
 41 #include <asm/system.h>
 42 #include <asm/bitops.h>
 43 #include <asm/uaccess.h>
 44 #include <asm/pgalloc.h>
 45 #include <asm/delay.h>
 46 #include <asm/irq.h>
 47 
 48 
 49 
 50 /*
 51  * Linux has a controller-independent x86 interrupt architecture.
 52  * every controller has a 'controller-template', that is used
 53  * by the main code to do the right thing. Each driver-visible
 54  * interrupt source is transparently wired to the appropriate
 55  * controller. Thus drivers need not be aware of the
 56  * interrupt-controller.
 57  *
 58  * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 59  * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 60  * (IO-APICs assumed to be messaging to Pentium local-APICs)
 61  *
 62  * the code is designed to be easily extended with new/different
 63  * interrupt controllers, without having to do assembly magic.
 64  */
 65 
 66 /*
 67  * Controller mappings for all interrupt sources:
 68  */
irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,		/* nothing installed yet: line starts masked */
		.handler = &no_irq_type,	/* "none" controller until a real one claims it */
		.lock = SPIN_LOCK_UNLOCKED
	}
};
 76 
#ifdef CONFIG_IA64_GENERIC
/*
 * Default implementations used by the generic (multi-platform) ia64
 * kernel build: irq numbers and interrupt vectors are mapped 1:1
 * here, so all three helpers are trivial conversions.
 */
irq_desc_t * __ia64_irq_desc (unsigned int irq)
{
	return _irq_desc + irq;
}

ia64_vector __ia64_irq_to_vector (unsigned int irq)
{
	return (ia64_vector) irq;
}

unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return (unsigned int) vec;
}
#endif
 93 
 94 static void register_irq_proc (unsigned int irq);
 95 
 96 /*
 97  * Special irq handlers.
 98  */
 99 
100 irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
101 { return IRQ_NONE; }
102 
103 /*
104  * Generic no controller code
105  */
106 
/* Do-nothing controller operations backing the "none" interrupt type. */
static void enable_none(unsigned int irq)
{
}

static unsigned int startup_none(unsigned int irq)
{
	return 0;
}

static void disable_none(unsigned int irq)
{
}
static void ack_none(unsigned int irq)
{
/*
 * 'what should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this themselves; it doesn't deserve
 * a generic callback, I think.
 */
#ifdef CONFIG_X86
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
#endif
#ifdef CONFIG_IA64
	/* On ia64 we only log the spurious vector; there is nothing to ack. */
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
#endif
}
135 
136 /* startup is the same as "enable", shutdown is same as "disable" */
137 #define shutdown_none   disable_none
138 #define end_none        enable_none
139 
140 struct hw_interrupt_type no_irq_type = {
141         "none",
142         startup_none,
143         shutdown_none,
144         enable_none,
145         disable_none,
146         ack_none,
147         end_none
148 };
149 
/* Spurious/unhandled interrupt events; shown as the "ERR:" row in /proc/interrupts. */
atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
/* APIC ack mismatches; shown as the "MIS:" row. */
atomic_t irq_mis_count;
#endif
#endif
156 
157 /*
158  * Generic, controller-independent functions:
159  */
160 
/*
 * seq_file backend for /proc/interrupts: a header row of online CPUs,
 * one row per irq that has an action installed (per-CPU counts,
 * controller name, names of all chained handlers), then NMI/ERR (and
 * LOC/MIS where configured) summary rows.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i, j;
	struct irqaction * action;
	irq_desc_t *idesc;
	unsigned long flags;

	seq_puts(p, "           ");
	for (j=0; j<NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "CPU%d       ",j);
	seq_putc(p, '\n');

	for (i = 0 ; i < NR_IRQS ; i++) {
		idesc = irq_descp(i);
		/* Hold the descriptor lock so the action chain can't change under us. */
		spin_lock_irqsave(&idesc->lock, flags);
		action = idesc->action;
		if (!action)
			goto skip;	/* unused irq: print nothing */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", idesc->handler->typename);
		seq_printf(p, "  %s", action->name);

		/* Additional (shared) handlers go on the same line. */
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&idesc->lock, flags);
	}
	seq_puts(p, "NMI: ");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", nmi_count(j));
	seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
	seq_puts(p, "LOC: ");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
	seq_putc(p, '\n');
#endif
	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
	seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
#endif
	return 0;
}
218 
#ifdef CONFIG_SMP
/*
 * Busy-wait until no handler for @irq is executing on any CPU,
 * i.e. until IRQ_INPROGRESS is clear in the descriptor status.
 * Caller must be able to spin (cpu_relax() loop, no sleeping).
 */
inline void synchronize_irq(unsigned int irq)
{
	while (irq_descp(irq)->status & IRQ_INPROGRESS)
		cpu_relax();
}
#endif
226 
227 /*
228  * This should really return information about whether
229  * we should do bottom half handling etc. Right now we
230  * end up _always_ checking the bottom half, which is a
231  * waste of time and is not what some drivers would
232  * prefer.
233  */
/*
 * Run every action on @action's chain, OR-ing the handlers' return
 * codes into the result.  Local interrupts are enabled while the
 * handlers run unless the first action set SA_INTERRUPT, and are
 * disabled again before returning.
 */
int handle_IRQ_event(unsigned int irq,
		struct pt_regs *regs, struct irqaction *action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int retval = 0;

	/* SA_INTERRUPT handlers run with local interrupts kept disabled. */
	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		status |= action->flags;
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	/* Feed the entropy pool if any handler on the chain asked for it. */
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();
	return retval;
}
253 
254 static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
255 {
256         struct irqaction *action;
257 
258         if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
259                 printk(KERN_ERR "irq event %d: bogus return value %x\n",
260                                 irq, action_ret);
261         } else {
262                 printk(KERN_ERR "irq %d: nobody cared!\n", irq);
263         }
264         dump_stack();
265         printk(KERN_ERR "handlers:\n");
266         action = desc->action;
267         do {
268                 printk(KERN_ERR "[<%p>]", action->handler);
269                 print_symbol(" (%s)",
270                         (unsigned long)action->handler);
271                 printk("\n");
272                 action = action->next;
273         } while (action);
274 }
275 
276 static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
277 {
278         static int count = 100;
279 
280         if (count) {
281                 count--;
282                 __report_bad_irq(irq, desc, action_ret);
283         }
284 }
285 
286 static int noirqdebug;
287 
288 static int __init noirqdebug_setup(char *str)
289 {
290         noirqdebug = 1;
291         printk("IRQ lockup detection disabled\n");
292         return 1;
293 }
294 
295 __setup("noirqdebug", noirqdebug_setup);
296 
297 /*
298  * If 99,900 of the previous 100,000 interrupts have not been handled then
299  * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
300  * turn the IRQ off.
301  *
302  * (The other 100-of-100,000 interrupts may have been a correctly-functioning
303  *  device sharing an IRQ with the failing one)
304  *
305  * Called under desc->lock
306  */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		/* A bogus code (neither HANDLED nor NONE) is reported immediately. */
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	/* Re-evaluate the stuck-irq heuristic once every 100,000 interrupts. */
	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}
334 
335 /*
336  * Generic enable/disable code: this just calls
337  * down into the PIC-specific version for the actual
338  * hardware disable after having gotten the irq
339  * controller lock.
340  */
341 
342 /**
343  *      disable_irq_nosync - disable an irq without waiting
344  *      @irq: Interrupt to disable
345  *
346  *      Disable the selected interrupt line.  Disables and Enables are
347  *      nested.
348  *      Unlike disable_irq(), this function does not ensure existing
349  *      instances of the IRQ handler have completed before returning.
350  *
351  *      This function may be called from IRQ context.
352  */
353 
354 inline void disable_irq_nosync(unsigned int irq)
355 {
356         irq_desc_t *desc = irq_descp(irq);
357         unsigned long flags;
358 
359         spin_lock_irqsave(&desc->lock, flags);
360         if (!desc->depth++) {
361                 desc->status |= IRQ_DISABLED;
362                 desc->handler->disable(irq);
363         }
364         spin_unlock_irqrestore(&desc->lock, flags);
365 }
366 
367 /**
368  *      disable_irq - disable an irq and wait for completion
369  *      @irq: Interrupt to disable
370  *
371  *      Disable the selected interrupt line.  Enables and Disables are
372  *      nested.
373  *      This function waits for any pending IRQ handlers for this interrupt
374  *      to complete before returning. If you use this function while
375  *      holding a resource the IRQ handler may need you will deadlock.
376  *
377  *      This function may be called - with care - from IRQ context.
378  */
379 
380 void disable_irq(unsigned int irq)
381 {
382         irq_desc_t *desc = irq_descp(irq);
383 
384         disable_irq_nosync(irq);
385         if (desc->action)
386                 synchronize_irq(irq);
387 }
388 
389 /**
390  *      enable_irq - enable handling of an irq
391  *      @irq: Interrupt to enable
392  *
393  *      Undoes the effect of one call to disable_irq().  If this
394  *      matches the last disable, processing of interrupts on this
395  *      IRQ line is re-enabled.
396  *
397  *      This function may be called from IRQ context.
398  */
399 
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_descp(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		/* Last nested disable being undone: actually unmask the line. */
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		/*
		 * An interrupt that arrived while disabled (PENDING set,
		 * not already being replayed) is resent so it isn't lost.
		 */
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		/* More enables than disables: report the unbalanced caller. */
		printk(KERN_ERR "enable_irq(%u) unbalanced from %p\n",
		       irq, (void *) __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
426 
427 /*
428  * do_IRQ handles all normal device IRQ's (the special
429  * SMP cross-CPU interrupts have their own specific
430  * handlers).
431  */
unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	irq_desc_t *desc = irq_descp(irq);
	struct irqaction * action;
	irqreturn_t action_ret;
	unsigned int status;
	int cpu;

	irq_enter();
	cpu = smp_processor_id(); /* for CONFIG_PREEMPT, this must come after irq_enter()! */

	kstat_cpu(cpu).irqs[irq]++;

	if (desc->status & IRQ_PER_CPU) {
		/* no locking required for CPU-local interrupts: */
		desc->handler->ack(irq);
		action_ret = handle_IRQ_event(irq, regs, desc->action);
		desc->handler->end(irq);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
	} else {
		spin_lock(&desc->lock);
		desc->handler->ack(irq);
		/*
		 * REPLAY is when Linux resends an IRQ that was dropped earlier
		 * WAITING is used by probe to mark irqs that are being tested
		 */
		status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
		status |= IRQ_PENDING; /* we _want_ to handle it */

		/*
		 * If the IRQ is disabled for whatever reason, we cannot
		 * use the action we have.
		 */
		action = NULL;
		if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
			action = desc->action;
			status &= ~IRQ_PENDING; /* we commit to handling */
			status |= IRQ_INPROGRESS; /* we are handling it */
		}
		desc->status = status;

		/*
		 * If there is no IRQ handler or it was disabled, exit early.
		 * Since we set PENDING, if another processor is handling
		 * a different instance of this same irq, the other processor
		 * will take care of it.
		 */
		if (unlikely(!action))
			goto out;

		/*
		 * Edge triggered interrupts need to remember
		 * pending events.
		 * This applies to any hw interrupts that allow a second
		 * instance of the same irq to arrive while we are in do_IRQ
		 * or in the handler. But the code here only handles the _second_
		 * instance of the irq, not the third or fourth. So it is mostly
		 * useful for irq hardware that does not mask cleanly in an
		 * SMP environment.
		 */
		for (;;) {
			/*
			 * Drop the lock while the handlers run; PENDING set
			 * again by a concurrent do_IRQ means loop once more.
			 */
			spin_unlock(&desc->lock);
			action_ret = handle_IRQ_event(irq, regs, action);
			spin_lock(&desc->lock);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
			if (!(desc->status & IRQ_PENDING))
				break;
			desc->status &= ~IRQ_PENDING;
		}
		desc->status &= ~IRQ_INPROGRESS;
	  out:
		/*
		 * The ->end() handler has to deal with interrupts which got
		 * disabled while the handler was running.
		 */
		desc->handler->end(irq);
		spin_unlock(&desc->lock);
	}
	irq_exit();
	return 1;
}
525 
526 /**
527  *      request_irq - allocate an interrupt line
528  *      @irq: Interrupt line to allocate
529  *      @handler: Function to be called when the IRQ occurs
530  *      @irqflags: Interrupt type flags
531  *      @devname: An ascii name for the claiming device
532  *      @dev_id: A cookie passed back to the handler function
533  *
534  *      This call allocates interrupt resources and enables the
535  *      interrupt line and IRQ handling. From the point this
536  *      call is made your handler function may be invoked. Since
537  *      your handler function must clear any interrupt the board 
538  *      raises, you must take care both to initialise your hardware
539  *      and to set up the interrupt handler in the right order.
540  *
541  *      Dev_id must be globally unique. Normally the address of the
542  *      device data structure is used as the cookie. Since the handler
543  *      receives this value it makes sense to use it.
544  *
545  *      If your interrupt is shared you must pass a non NULL dev_id
546  *      as this is required when freeing the interrupt.
547  *
548  *      Flags:
549  *
550  *      SA_SHIRQ                Interrupt is shared
551  *
552  *      SA_INTERRUPT            Disable local interrupts while processing
553  *
554  *      SA_SAMPLE_RANDOM        The interrupt can be used for entropy
555  *
556  */
557 
558 int request_irq(unsigned int irq,
559                 irqreturn_t (*handler)(int, void *, struct pt_regs *),
560                 unsigned long irqflags,
561                 const char * devname,
562                 void *dev_id)
563 {
564         int retval;
565         struct irqaction * action;
566 
567 #if 1
568         /*
569          * Sanity-check: shared interrupts should REALLY pass in
570          * a real dev-ID, otherwise we'll have trouble later trying
571          * to figure out which interrupt is which (messes up the
572          * interrupt freeing logic etc).
573          */
574         if (irqflags & SA_SHIRQ) {
575                 if (!dev_id)
576                         printk(KERN_ERR "Bad boy: %s called us without a dev_id!\n", devname);
577         }
578 #endif
579 
580         if (irq >= NR_IRQS)
581                 return -EINVAL;
582         if (!handler)
583                 return -EINVAL;
584 
585         action = (struct irqaction *)
586                         kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
587         if (!action)
588                 return -ENOMEM;
589 
590         action->handler = handler;
591         action->flags = irqflags;
592         action->mask = 0;
593         action->name = devname;
594         action->next = NULL;
595         action->dev_id = dev_id;
596 
597         retval = setup_irq(irq, action);
598         if (retval)
599                 kfree(action);
600         return retval;
601 }
602 
603 EXPORT_SYMBOL(request_irq);
604 
605 /**
606  *      free_irq - free an interrupt
607  *      @irq: Interrupt line to free
608  *      @dev_id: Device identity to free
609  *
610  *      Remove an interrupt handler. The handler is removed and if the
611  *      interrupt line is no longer in use by any driver it is disabled.
612  *      On a shared IRQ the caller must ensure the interrupt is disabled
613  *      on the card it drives before calling this function. The function
614  *      does not return until any executing interrupts for this IRQ
615  *      have completed.
616  *
617  *      This function must not be called from interrupt context.
618  */
619 
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_descp(irq);
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			/* Handlers are matched by their dev_id cookie. */
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			/* Last handler gone: shut the line down completely. */
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		/* Walked off the end of the chain without a dev_id match. */
		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}
658 
659 EXPORT_SYMBOL(free_irq);
660 
661 /*
662  * IRQ autodetection code..
663  *
664  * This depends on the fact that any interrupt that
665  * comes in on to an unassigned handler will get stuck
666  * with "IRQ_WAITING" cleared and the interrupt
667  * disabled.
668  */
669 
670 static DECLARE_MUTEX(probe_sem);
671 
672 /**
673  *      probe_irq_on    - begin an interrupt autodetect
674  *
675  *      Commence probing for an interrupt. The interrupts are scanned
676  *      and a mask of potential interrupt lines is returned.
677  *
678  */
679 
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	/* Serialize probing; released by probe_irq_off()/probe_irq_mask(). */
	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--)  {
		desc = irq_descp(i);

		spin_lock_irq(&desc->lock);
		if (!desc->action)
			desc->handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_descp(i);

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_descp(i);
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				/* Only the first 32 lines fit in the returned mask. */
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}
753 
754 EXPORT_SYMBOL(probe_irq_on);
755 
756 /**
757  *      probe_irq_mask - scan a bitmap of interrupt lines
758  *      @val:   mask of interrupts to consider
759  *
760  *      Scan the ISA bus interrupt lines and return a bitmap of
761  *      active interrupts. The interrupt probe logic state is then
762  *      returned to its previous value.
763  *
764  *      Note: we need to scan all the irq's even though we will
765  *      only return ISA irq numbers - just so that we reset them
766  *      all to a known state.
767  */
768 unsigned int probe_irq_mask(unsigned long val)
769 {
770         int i;
771         unsigned int mask;
772 
773         mask = 0;
774         for (i = 0; i < 16; i++) {
775                 irq_desc_t *desc = irq_descp(i);
776                 unsigned int status;
777 
778                 spin_lock_irq(&desc->lock);
779                 status = desc->status;
780 
781                 if (status & IRQ_AUTODETECT) {
782                         if (!(status & IRQ_WAITING))
783                                 mask |= 1 << i;
784 
785                         desc->status = status & ~IRQ_AUTODETECT;
786                         desc->handler->shutdown(i);
787                 }
788                 spin_unlock_irq(&desc->lock);
789         }
790         up(&probe_sem);
791 
792         return mask & val;
793 }
794 
795 /**
796  *      probe_irq_off   - end an interrupt autodetect
797  *      @val: mask of potential interrupts (unused)
798  *
799  *      Scans the unused interrupt lines and returns the line which
800  *      appears to have triggered the interrupt. If no interrupt was
801  *      found then zero is returned. If more than one interrupt is
802  *      found then minus the first candidate is returned to indicate
 803  *      there is doubt.
804  *
805  *      The interrupt probe logic state is returned to its previous
806  *      value.
807  *
808  *      BUGS: When used in a module (which arguably shouldn't happen)
809  *      nothing prevents two IRQ probe callers from overlapping. The
810  *      results of this are non-optimal.
811  */
812 
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_descp(i);
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* A cleared WAITING bit means this line fired during probing. */
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			/* Tear down the autodetect state regardless. */
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	/* End of the probe_irq_on() critical section. */
	up(&probe_sem);

	/* Multiple candidates: return minus the first one to signal doubt. */
	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}
843 
844 EXPORT_SYMBOL(probe_irq_off);
845 
/*
 * Install @new on @irq's action chain and start the line up if it was
 * previously unused.  Returns 0 on success, -ENOSYS if no controller
 * owns the irq, or -EBUSY if the line is in use and either side does
 * not allow sharing.
 */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_descp(irq);

	if (desc->handler == &no_irq_type)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* Per-CPU irqs run unlocked in do_IRQ and use the LSAPIC controller. */
	if (new->flags & SA_PERCPU_IRQ) {
		desc->status |= IRQ_PER_CPU;
		desc->handler = &irq_type_ia64_lsapic;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	/* First handler on the line: clear stale state and start it up. */
	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}
909 
910 static struct proc_dir_entry * root_irq_dir;
911 static struct proc_dir_entry * irq_dir [NR_IRQS];
912 
913 #define HEX_DIGITS (2*sizeof(cpumask_t))
914 
915 static unsigned int parse_hex_value(const char *buffer,
916                 unsigned long count, cpumask_t *ret)
917 {
918         unsigned char hexnum[HEX_DIGITS];
919         cpumask_t value = CPU_MASK_NONE;
920         unsigned long i;
921 
922         if (!count)
923                 return -EINVAL;
924         if (count > HEX_DIGITS)
925                 count = HEX_DIGITS;
926         if (copy_from_user(hexnum, buffer, count))
927                 return -EFAULT;
928 
929         /*
930          * Parse the first 8 characters as a hex string, any non-hex char
931          * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
932          */
933         for (i = 0; i < count; i++) {
934                 unsigned int c = hexnum[i];
935                 int k;
936 
937                 switch (c) {
938                         case '' ... '9': c -= ''; break;
939                         case 'a' ... 'f': c -= 'a'-10; break;
940                         case 'A' ... 'F': c -= 'A'-10; break;
941                 default:
942                         goto out;
943                 }
944                 cpus_shift_left(value, value, 4);
945                 for (k = 0; k < 4; ++k)
946                         if (test_bit(k, (unsigned long *)&c))
947                                 cpu_set(k, value);
948         }
949 out:
950         *ret = value;
951         return 0;
952 }
953 
954 #ifdef CONFIG_SMP
955 
956 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
957 
958 static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
959 
960 static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
961 
962 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
963 {
964         cpumask_t mask = CPU_MASK_NONE;
965 
966         cpu_set(cpu_logical_id(hwid), mask);
967 
968         if (irq < NR_IRQS) {
969                 irq_affinity[irq] = mask;
970                 irq_redir[irq] = (char) (redir & 0xff);
971         }
972 }
973 
974 static int irq_affinity_read_proc (char *page, char **start, off_t off,
975                         int count, int *eof, void *data)
976 {
977         int k, len;
978         cpumask_t tmp = irq_affinity[(long)data];
979 
980         if (count < HEX_DIGITS+1)
981                 return -EINVAL;
982 
983         len = 0;
984         for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
985                 int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
986                 len += j;
987                 page += j;
988                 cpus_shift_right(tmp, tmp, 16);
989         }
990         len += sprintf(page, "\n");
991         return len;
992 }
993 
/*
 * /proc/irq/<n>/smp_affinity write handler: accept an optional leading
 * 'r'/'R' (request a redirectable interrupt) followed by a hex cpu
 * mask, and hand the result to the controller's ->set_affinity() hook.
 */
static int irq_affinity_write_proc (struct file *file, const char *buffer,
                                    unsigned long count, void *data)
{
        unsigned int irq = (unsigned long) data;
        int full_count = count, err;
        cpumask_t new_value, tmp;
        const char *buf = buffer;
        irq_desc_t *desc = irq_descp(irq);
        int redir;

        /* Controllers without an affinity hook cannot honour a write. */
        if (!desc->handler->set_affinity)
                return -EIO;

        /*
         * NOTE(review): 'buffer' is the raw userspace pointer, yet it is
         * dereferenced here directly rather than via get_user()/
         * copy_from_user() — confirm this is safe on this architecture.
         * Also 'count' is not reduced as 'buf' is advanced past the 'r'
         * prefix and spaces, so parse_hex_value() may read slightly past
         * the written data (it is bounded by HEX_DIGITS, though).
         */
        if (buf[0] == 'r' || buf[0] == 'R') {
                ++buf;
                while (*buf == ' ') ++buf;
                redir = 1;
        } else
                redir = 0;

        err = parse_hex_value(buf, count, &new_value);
        if (err)
                return err;

        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        cpus_and(tmp, new_value, cpu_online_map);
        if (cpus_empty(tmp))
                return -EINVAL;

        /* The redirect request rides on a flag bit above the irq number. */
        desc->handler->set_affinity(irq | (redir? IA64_IRQ_REDIRECTED : 0), new_value);
        return full_count;
}
1030 
1031 #endif /* CONFIG_SMP */
1032 
1033 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
1034                         int count, int *eof, void *data)
1035 {
1036         cpumask_t *mask = (cpumask_t *)data;
1037         int k, len = 0;
1038 
1039         if (count < HEX_DIGITS+1)
1040                 return -EINVAL;
1041 
1042         for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
1043                 int j = sprintf(page, "%04hx", (u16)cpus_coerce(*mask));
1044                 len += j;
1045                 page += j;
1046                 cpus_shift_right(*mask, *mask, 16);
1047         }
1048         len += sprintf(page, "\n");
1049         return len;
1050 }
1051 
1052 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
1053                                         unsigned long count, void *data)
1054 {
1055         cpumask_t *mask = (cpumask_t *)data;
1056         unsigned long full_count = count, err;
1057         cpumask_t new_value;
1058 
1059         err = parse_hex_value(buffer, count, &new_value);
1060         if (err)
1061                 return err;
1062 
1063         *mask = new_value;
1064         return full_count;
1065 }
1066 
1067 #define MAX_NAMELEN 10
1068 
1069 static void register_irq_proc (unsigned int irq)
1070 {
1071         char name [MAX_NAMELEN];
1072 
1073         if (!root_irq_dir || (irq_descp(irq)->handler == &no_irq_type) || irq_dir[irq])
1074                 return;
1075 
1076         memset(name, 0, MAX_NAMELEN);
1077         sprintf(name, "%d", irq);
1078 
1079         /* create /proc/irq/1234 */
1080         irq_dir[irq] = proc_mkdir(name, root_irq_dir);
1081 
1082 #ifdef CONFIG_SMP
1083         {
1084                 struct proc_dir_entry *entry;
1085 
1086                 /* create /proc/irq/1234/smp_affinity */
1087                 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
1088 
1089                 if (entry) {
1090                         entry->nlink = 1;
1091                         entry->data = (void *)(long)irq;
1092                         entry->read_proc = irq_affinity_read_proc;
1093                         entry->write_proc = irq_affinity_write_proc;
1094                 }
1095 
1096                 smp_affinity_entry[irq] = entry;
1097         }
1098 #endif
1099 }
1100 
1101 cpumask_t prof_cpu_mask = CPU_MASK_ALL;
1102 
1103 void init_irq_proc (void)
1104 {
1105         struct proc_dir_entry *entry;
1106         int i;
1107 
1108         /* create /proc/irq */
1109         root_irq_dir = proc_mkdir("irq", 0);
1110 
1111         /* create /proc/irq/prof_cpu_mask */
1112         entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
1113 
1114         if (!entry)
1115                 return;
1116 
1117         entry->nlink = 1;
1118         entry->data = (void *)&prof_cpu_mask;
1119         entry->read_proc = prof_cpu_mask_read_proc;
1120         entry->write_proc = prof_cpu_mask_write_proc;
1121 
1122         /*
1123          * Create entries for all existing IRQs.
1124          */
1125         for (i = 0; i < NR_IRQS; i++) {
1126                 if (irq_descp(i)->handler == &no_irq_type)
1127                         continue;
1128                 register_irq_proc(i);
1129         }
1130 }
1131 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp