Linux/include/linux/interrupt.h

/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE       0x00000000
#define IRQF_TRIGGER_RISING     0x00000001
#define IRQF_TRIGGER_FALLING    0x00000002
#define IRQF_TRIGGER_HIGH       0x00000004
#define IRQF_TRIGGER_LOW        0x00000008
#define IRQF_TRIGGER_MASK       (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
                                 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE      0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per CPU
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state.  See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices users need to implement wakeup detection in
 *                their interrupt handlers.
 */
#define IRQF_SHARED             0x00000080
#define IRQF_PROBE_SHARED       0x00000100
#define __IRQF_TIMER            0x00000200
#define IRQF_PERCPU             0x00000400
#define IRQF_NOBALANCING        0x00000800
#define IRQF_IRQPOLL            0x00001000
#define IRQF_ONESHOT            0x00002000
#define IRQF_NO_SUSPEND         0x00004000
#define IRQF_FORCE_RESUME       0x00008000
#define IRQF_NO_THREAD          0x00010000
#define IRQF_EARLY_RESUME       0x00020000
#define IRQF_COND_SUSPEND       0x00040000

#define IRQF_TIMER              (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
        IRQC_IS_HARDIRQ = 0,
        IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:    interrupt handler function
 * @name:       name of the device
 * @dev_id:     cookie to identify the device
 * @percpu_dev_id:      cookie to identify the device
 * @next:       pointer to the next irqaction for shared interrupts
 * @irq:        interrupt number
 * @flags:      flags (see IRQF_* above)
 * @thread_fn:  interrupt handler function for threaded interrupts
 * @thread:     thread pointer for threaded interrupts
 * @secondary:  pointer to secondary irqaction (force threading)
 * @thread_flags:       flags related to @thread
 * @thread_mask:        bitmask for keeping track of @thread activity
 * @dir:        pointer to the proc/irq/NN/name entry
 */
struct irqaction {
        irq_handler_t           handler;
        void                    *dev_id;
        void __percpu           *percpu_dev_id;
        struct irqaction        *next;
        irq_handler_t           thread_fn;
        struct task_struct      *thread;
        struct irqaction        *secondary;
        unsigned int            irq;
        unsigned int            flags;
        unsigned long           thread_flags;
        unsigned long           thread_mask;
        const char              *name;
        struct proc_dir_entry   *dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED        (1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
                     irq_handler_t thread_fn,
                     unsigned long flags, const char *name, void *dev);

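/*
 * Example (illustrative sketch, not part of the upstream header): a
 * threaded handler for a slow bus-attached device. The primary handler
 * only checks and acknowledges the hardware; the sleeping work runs in
 * the irq thread. All "foo" names and helpers are hypothetical.
 *
 *      static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *      {
 *              struct foo_device *foo = dev_id;
 *
 *              if (!foo_irq_is_ours(foo))      // hypothetical helper
 *                      return IRQ_NONE;
 *              return IRQ_WAKE_THREAD;         // run foo_slow_work()
 *      }
 *
 *      static irqreturn_t foo_slow_work(int irq, void *dev_id)
 *      {
 *              foo_read_regs_over_i2c(dev_id); // may sleep here
 *              return IRQ_HANDLED;
 *      }
 *
 *      // IRQF_ONESHOT keeps the line masked until the thread finishes.
 *      err = request_threaded_irq(foo->irq, foo_quick_check, foo_slow_work,
 *                                 IRQF_ONESHOT, "foo", foo);
 */
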
/**
 * request_irq - Add a handler for an interrupt line
 * @irq:        The interrupt line to allocate
 * @handler:    Function to be called when the IRQ occurs.
 *              Primary handler for threaded interrupts.
 *              If NULL, the default primary handler is installed.
 * @flags:      Handling flags
 * @name:       Name of the device generating this interrupt
 * @dev:        A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
            const char *name, void *dev)
{
        return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

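/*
 * Example (sketch, not from this header): minimal hardirq-only usage,
 * including the matching free. The "bar" names are hypothetical; on a
 * shared line @dev must be a unique non-NULL cookie.
 *
 *      static irqreturn_t bar_isr(int irq, void *dev_id)
 *      {
 *              struct bar_device *bar = dev_id;
 *
 *              if (!bar_irq_pending(bar))      // hypothetical check
 *                      return IRQ_NONE;        // not ours (shared line)
 *              bar_ack_irq(bar);
 *              return IRQ_HANDLED;
 *      }
 *
 *      err = request_irq(bar->irq, bar_isr, IRQF_SHARED, "bar", bar);
 *      if (err)
 *              return err;
 *      ...
 *      free_irq(bar->irq, bar);        // cookie must match request_irq()
 */
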
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
                        unsigned long flags, const char *name, void *dev_id);

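/*
 * Example (sketch): request_any_context_irq() returns a negative errno
 * on failure, or IRQC_IS_HARDIRQ / IRQC_IS_NESTED on success to tell
 * the caller which context the handler will run in. The "baz" names
 * are hypothetical.
 *
 *      ret = request_any_context_irq(baz->irq, baz_isr, 0, "baz", baz);
 *      if (ret < 0)
 *              return ret;
 *      baz->handler_is_threaded = (ret == IRQC_IS_NESTED);
 */
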
extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
                     unsigned long flags, const char *devname,
                     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
            const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
                   const char *devname, void __percpu *percpu_dev_id)
{
        return __request_percpu_irq(irq, handler, 0,
                                    devname, percpu_dev_id);
}

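/*
 * Example (sketch): per-CPU interrupts take a __percpu cookie instead
 * of a plain pointer, and each CPU then enables its own copy of the
 * line. Timer drivers are the typical users; the "qux" names are
 * hypothetical, and IRQ_TYPE_NONE comes from <linux/irq.h>.
 *
 *      static DEFINE_PER_CPU(struct qux_state, qux_states);
 *
 *      err = request_percpu_irq(irq, qux_timer_isr, "qux_timer",
 *                               &qux_states);
 *      ...
 *      // later, on each CPU (e.g. from a hotplug callback):
 *      enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */
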
extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
                   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
                          irq_handler_t handler, irq_handler_t thread_fn,
                          unsigned long irqflags, const char *devname,
                          void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
                 unsigned long irqflags, const char *devname, void *dev_id)
{
        return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
                                         devname, dev_id);
}

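/*
 * Example (sketch): the devm_ variant ties the registration's lifetime
 * to the struct device, so a driver's probe() needs no matching free on
 * the error or remove paths. Names other than the API calls are
 * hypothetical.
 *
 *      static int quux_probe(struct platform_device *pdev)
 *      {
 *              struct quux *quux = devm_kzalloc(&pdev->dev, sizeof(*quux),
 *                                               GFP_KERNEL);
 *              int irq = platform_get_irq(pdev, 0);
 *
 *              if (!quux)
 *                      return -ENOMEM;
 *              if (irq < 0)
 *                      return irq;
 *              return devm_request_irq(&pdev->dev, irq, quux_isr, 0,
 *                                      dev_name(&pdev->dev), quux);
 *      }
 */
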
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
                 irq_handler_t handler, unsigned long irqflags,
                 const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()  do { } while (0)
#else
# define local_irq_enable_in_hardirq()  local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

extern int irq_inject_interrupt(unsigned int irq);

/* The following three functions are for core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:                Interrupt to which notification applies
 * @kref:               Reference count, for internal use
 * @work:               Work item, for internal use
 * @notify:             Function to be called on change.  This will be
 *                      called in process context.
 * @release:            Function to be called on release.  This will be
 *                      called in process context.  Once registered, the
 *                      structure must only be freed when this function is
 *                      called or later.
 */
struct irq_affinity_notify {
        unsigned int irq;
        struct kref kref;
        struct work_struct work;
        void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
        void (*release)(struct kref *ref);
};

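/*
 * Example (sketch): a driver that caches its IRQ's "home" CPU can keep
 * that cache coherent with an affinity notifier. Embedding the notify
 * structure and using container_of() in the callbacks is the expected
 * pattern; the "corge" names are hypothetical.
 *
 *      static void corge_notify(struct irq_affinity_notify *notify,
 *                               const cpumask_t *mask)
 *      {
 *              struct corge *c = container_of(notify, struct corge,
 *                                             affinity_notify);
 *
 *              c->home_cpu = cpumask_first(mask);      // process context
 *      }
 *
 *      static void corge_release(struct kref *ref)
 *      {
 *              // last reference dropped: safe to free the structure now
 *      }
 *
 *      c->affinity_notify.notify = corge_notify;
 *      c->affinity_notify.release = corge_release;
 *      irq_set_affinity_notifier(c->irq, &c->affinity_notify);
 */
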
#define IRQ_AFFINITY_MAX_SETS  4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:        Don't apply affinity to @pre_vectors at beginning of
 *                      the MSI(-X) vector space
 * @post_vectors:       Don't apply affinity to @post_vectors at end of
 *                      the MSI(-X) vector space
 * @nr_sets:            The number of interrupt sets for which affinity
 *                      spreading is required
 * @set_size:           Array holding the size of each interrupt set
 * @calc_sets:          Callback for calculating the number and size
 *                      of interrupt sets
 * @priv:               Private data for usage by @calc_sets, usually a
 *                      pointer to driver/device specific data.
 */
struct irq_affinity {
        unsigned int    pre_vectors;
        unsigned int    post_vectors;
        unsigned int    nr_sets;
        unsigned int    set_size[IRQ_AFFINITY_MAX_SETS];
        void            (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
        void            *priv;
};

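/*
 * Example (sketch, assuming the PCI MSI-X allocation path): reserve the
 * first vector for an admin queue and spread the remaining vectors
 * across CPUs. pci_alloc_irq_vectors_affinity() is the usual consumer
 * of this structure; "pdev" and the vector counts are hypothetical.
 *
 *      struct irq_affinity affd = {
 *              .pre_vectors = 1,       // vector 0: admin queue, not spread
 *      };
 *
 *      nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
 *                                             PCI_IRQ_MSIX, &affd);
 */
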
/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:       cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
        struct cpumask  mask;
        unsigned int    is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
                              bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:        Interrupt to set affinity
 * @cpumask:    cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
        return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:        Interrupt to set affinity
 * @cpumask:    cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low-level CPU hotplug code, where we need to make per-CPU
 * interrupts affine before the CPU becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
        return __irq_set_affinity(irq, cpumask, true);
}

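/*
 * Example (sketch): pin an IRQ to a single CPU, checking first whether
 * the line supports affinity changes at all. "target_cpu" is
 * hypothetical.
 *
 *      if (irq_can_set_affinity(irq))
 *              err = irq_set_affinity(irq, cpumask_of(target_cpu));
 */
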
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
                                       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
        return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
        return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
        return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
                                        const struct cpumask *m)
{
        return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
        return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
        return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
                          const struct irq_affinity *affd)
{
        return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs where it is known that a
 * particular irq context is disabled and is the only irq-context user
 * of a lock, so that it is safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
        disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
        local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
        disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
        local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
        disable_irq(irq);
#ifdef CONFIG_LOCKDEP
        local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
        local_irq_enable();
#endif
        enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
        local_irq_restore(*flags);
#endif
        enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
        return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
        return irq_set_irq_wake(irq, 0);
}

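/*
 * Example (sketch): a driver whose interrupt should wake the system
 * typically flips wake on/off in its suspend/resume callbacks, guided
 * by device_may_wakeup(). The "grault" names are hypothetical.
 *
 *      static int grault_suspend(struct device *dev)
 *      {
 *              struct grault *g = dev_get_drvdata(dev);
 *
 *              if (device_may_wakeup(dev))
 *                      enable_irq_wake(g->irq);
 *              return 0;
 *      }
 *
 *      static int grault_resume(struct device *dev)
 *      {
 *              struct grault *g = dev_get_drvdata(dev);
 *
 *              if (device_may_wakeup(dev))
 *                      disable_irq_wake(g->irq);
 *              return 0;
 *      }
 */
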
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
        IRQCHIP_STATE_PENDING,          /* Is interrupt pending? */
        IRQCHIP_STATE_ACTIVE,           /* Is interrupt in progress? */
        IRQCHIP_STATE_MASKED,           /* Is interrupt masked? */
        IRQCHIP_STATE_LINE_LEVEL,       /* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
                                 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
                                 bool state);

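/*
 * Example (sketch): query whether an interrupt is still pending at the
 * irqchip level and clear it if so, e.g. when tearing down a forwarded
 * interrupt.
 *
 *      bool pending;
 *
 *      err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *      if (!err && pending)
 *              err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
 *                                          false);
 */
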
#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
#  define force_irqthreads      (true)
# else
extern bool force_irqthreads;
# endif
#else
#define force_irqthreads        (0)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)  (__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)   (__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()      do { } while (0)
#endif

/* PLEASE avoid allocating new softirqs unless you _really_ need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
        HI_SOFTIRQ=0,
        TIMER_SOFTIRQ,
        NET_TX_SOFTIRQ,
        NET_RX_SOFTIRQ,
        BLOCK_SOFTIRQ,
        IRQ_POLL_SOFTIRQ,
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
        HRTIMER_SOFTIRQ,
        RCU_SOFTIRQ,    /* Preferably, RCU should always be the last softirq */

        NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* Map softirq index to softirq name. Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
        void    (*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
        __do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

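/*
 * Example (sketch, modelled loosely on the block layer): how an
 * existing softirq is wired up at boot and raised from hardirq context.
 * Note the comment above the enum: new softirq numbers should not be
 * added. The handler body here is hypothetical.
 *
 *      static void blk_done_softirq(struct softirq_action *h)
 *      {
 *              // drain this CPU's completion list; softirq context
 *      }
 *
 *      open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);  // boot-time setup
 *      ...
 *      raise_softirq_irqoff(BLOCK_SOFTIRQ);    // caller has irqs disabled
 */
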
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
        return this_cpu_read(ksoftirqd);
}

/* Tasklets --- the multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs some inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
        struct tasklet_struct *next;
        unsigned long state;
        atomic_t count;
        void (*func)(unsigned long);
        unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
        TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
        TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
        return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
        smp_mb__before_atomic();
        clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
        while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
        atomic_inc(&t->count);
        smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
        tasklet_disable_nosync(t);
        tasklet_unlock_wait(t);
        smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
        smp_mb__before_atomic();
        atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
                         void (*func)(unsigned long), unsigned long data);

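/*
 * Example (sketch): the classic top/bottom-half split using a tasklet.
 * The hardirq handler captures hardware state and defers the rest;
 * the "garply" names and helpers are hypothetical.
 *
 *      static struct garply_device garply_dev;
 *
 *      static void garply_do_bh(unsigned long data)
 *      {
 *              struct garply_device *g = (struct garply_device *)data;
 *
 *              // softirq context: no sleeping allowed here
 *              garply_process_events(g);
 *      }
 *
 *      DECLARE_TASKLET(garply_tasklet, garply_do_bh,
 *                      (unsigned long)&garply_dev);
 *
 *      static irqreturn_t garply_isr(int irq, void *dev_id)
 *      {
 *              garply_ack_hw(dev_id);
 *              tasklet_schedule(&garply_tasklet);
 *              return IRQ_HANDLED;
 *      }
 *
 *      // teardown, after the interrupt has been freed:
 *      tasklet_kill(&garply_tasklet);
 */
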
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated IRQs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
        return 0;
}
static inline int probe_irq_off(unsigned long val)
{
        return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
        return 0;
}
#else
extern unsigned long probe_irq_on(void);        /* returns 0 on failure */
extern int probe_irq_off(unsigned long);        /* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);      /* returns mask of ISA interrupts */
#endif

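/*
 * Example (sketch): the probing recipe from the comment above, in code.
 * The device-specific steps are hypothetical helpers; msleep() stands
 * in for "wait for the device to interrupt".
 *
 *      unsigned long mask;
 *      int irq;
 *
 *      wiz_mask_device_irq(dev);       // step 1: quiesce the device
 *      mask = probe_irq_on();          // take over unassigned idle IRQs
 *      wiz_trigger_test_irq(dev);      // make the device raise its IRQ
 *      msleep(20);                     // give the interrupt time to fire
 *      irq = probe_irq_off(mask);      // 0 = none, negative = several
 *      if (irq > 0)
 *              dev->irq = irq;
 */
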
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entry point of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry     __attribute__((__section__(".irqentry.text")))
#endif

#define __softirq_entry  __attribute__((__section__(".softirqentry.text")))

#endif

