~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/linux/interrupt.h

Version: ~ [ linux-5.14-rc3 ] ~ [ linux-5.13.5 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.53 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.135 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.198 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.240 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.276 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.276 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.18.140 ] ~ [ linux-3.16.85 ] ~ [ linux-3.14.79 ] ~ [ linux-3.12.74 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 /* interrupt.h */
  3 #ifndef _LINUX_INTERRUPT_H
  4 #define _LINUX_INTERRUPT_H
  5 
  6 #include <linux/kernel.h>
  7 #include <linux/bitops.h>
  8 #include <linux/cpumask.h>
  9 #include <linux/irqreturn.h>
 10 #include <linux/irqnr.h>
 11 #include <linux/hardirq.h>
 12 #include <linux/irqflags.h>
 13 #include <linux/hrtimer.h>
 14 #include <linux/kref.h>
 15 #include <linux/workqueue.h>
 16 
 17 #include <linux/atomic.h>
 18 #include <asm/ptrace.h>
 19 #include <asm/irq.h>
 20 #include <asm/sections.h>
 21 
 22 /*
 23  * These correspond to the IORESOURCE_IRQ_* defines in
 24  * linux/ioport.h to select the interrupt line behaviour.  When
 25  * requesting an interrupt without specifying a IRQF_TRIGGER, the
 26  * setting should be assumed to be "as already configured", which
 27  * may be as per machine or firmware initialisation.
 28  */
 29 #define IRQF_TRIGGER_NONE       0x00000000
 30 #define IRQF_TRIGGER_RISING     0x00000001
 31 #define IRQF_TRIGGER_FALLING    0x00000002
 32 #define IRQF_TRIGGER_HIGH       0x00000004
 33 #define IRQF_TRIGGER_LOW        0x00000008
 34 #define IRQF_TRIGGER_MASK       (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
 35                                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
 36 #define IRQF_TRIGGER_PROBE      0x00000010
 37 
 38 /*
 39  * These flags used only by the kernel as part of the
 40  * irq handling routines.
 41  *
 42  * IRQF_SHARED - allow sharing the irq among several devices
 43  * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 44  * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 45  * IRQF_PERCPU - Interrupt is per cpu
 46  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 47  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 48  *                registered first in a shared interrupt is considered for
 49  *                performance reasons)
 50  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 51  *                Used by threaded interrupts which need to keep the
 52  *                irq line disabled until the threaded handler has been run.
 53  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
 54  *                   that this interrupt will wake the system from a suspended
 55  *                   state.  See Documentation/power/suspend-and-interrupts.rst
 56  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 57  * IRQF_NO_THREAD - Interrupt cannot be threaded
 58  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 59  *                resume time.
 60  * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 61  *                interrupt handler after suspending interrupts. For system
 62  *                wakeup devices users need to implement wakeup detection in
 63  *                their interrupt handlers.
 64  */
 65 #define IRQF_SHARED             0x00000080
 66 #define IRQF_PROBE_SHARED       0x00000100
 67 #define __IRQF_TIMER            0x00000200
 68 #define IRQF_PERCPU             0x00000400
 69 #define IRQF_NOBALANCING        0x00000800
 70 #define IRQF_IRQPOLL            0x00001000
 71 #define IRQF_ONESHOT            0x00002000
 72 #define IRQF_NO_SUSPEND         0x00004000
 73 #define IRQF_FORCE_RESUME       0x00008000
 74 #define IRQF_NO_THREAD          0x00010000
 75 #define IRQF_EARLY_RESUME       0x00020000
 76 #define IRQF_COND_SUSPEND       0x00040000
 77 
 78 #define IRQF_TIMER              (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 79 
 80 /*
 81  * These values can be returned by request_any_context_irq() and
 82  * describe the context the interrupt will be run in.
 83  *
 84  * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 85  * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 86  */
 87 enum {
 88         IRQC_IS_HARDIRQ = 0,
 89         IRQC_IS_NESTED,
 90 };
 91 
 92 typedef irqreturn_t (*irq_handler_t)(int, void *);
 93 
 94 /**
 95  * struct irqaction - per interrupt action descriptor
 96  * @handler:    interrupt handler function
 97  * @name:       name of the device
 98  * @dev_id:     cookie to identify the device
 99  * @percpu_dev_id:      cookie to identify the device
100  * @next:       pointer to the next irqaction for shared interrupts
101  * @irq:        interrupt number
102  * @flags:      flags (see IRQF_* above)
103  * @thread_fn:  interrupt handler function for threaded interrupts
104  * @thread:     thread pointer for threaded interrupts
105  * @secondary:  pointer to secondary irqaction (force threading)
106  * @thread_flags:       flags related to @thread
107  * @thread_mask:        bitmask for keeping track of @thread activity
108  * @dir:        pointer to the proc/irq/NN/name entry
109  */
110 struct irqaction {
111         irq_handler_t           handler;
112         void                    *dev_id;
113         void __percpu           *percpu_dev_id;
114         struct irqaction        *next;
115         irq_handler_t           thread_fn;
116         struct task_struct      *thread;
117         struct irqaction        *secondary;
118         unsigned int            irq;
119         unsigned int            flags;
120         unsigned long           thread_flags;
121         unsigned long           thread_mask;
122         const char              *name;
123         struct proc_dir_entry   *dir;
124 } ____cacheline_internodealigned_in_smp;
125 
126 extern irqreturn_t no_action(int cpl, void *dev_id);
127 
128 /*
129  * If a (PCI) device interrupt is not connected we set dev->irq to
130  * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
131  * can distingiush that case from other error returns.
132  *
133  * 0x80000000 is guaranteed to be outside the available range of interrupts
134  * and easy to distinguish from other possible incorrect values.
135  */
136 #define IRQ_NOTCONNECTED        (1U << 31)
137 
138 extern int __must_check
139 request_threaded_irq(unsigned int irq, irq_handler_t handler,
140                      irq_handler_t thread_fn,
141                      unsigned long flags, const char *name, void *dev);
142 
143 static inline int __must_check
144 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
145             const char *name, void *dev)
146 {
147         return request_threaded_irq(irq, handler, NULL, flags, name, dev);
148 }
149 
150 extern int __must_check
151 request_any_context_irq(unsigned int irq, irq_handler_t handler,
152                         unsigned long flags, const char *name, void *dev_id);
153 
154 extern int __must_check
155 __request_percpu_irq(unsigned int irq, irq_handler_t handler,
156                      unsigned long flags, const char *devname,
157                      void __percpu *percpu_dev_id);
158 
159 extern int __must_check
160 request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
161             const char *name, void *dev);
162 
163 static inline int __must_check
164 request_percpu_irq(unsigned int irq, irq_handler_t handler,
165                    const char *devname, void __percpu *percpu_dev_id)
166 {
167         return __request_percpu_irq(irq, handler, 0,
168                                     devname, percpu_dev_id);
169 }
170 
171 extern int __must_check
172 request_percpu_nmi(unsigned int irq, irq_handler_t handler,
173                    const char *devname, void __percpu *dev);
174 
175 extern const void *free_irq(unsigned int, void *);
176 extern void free_percpu_irq(unsigned int, void __percpu *);
177 
178 extern const void *free_nmi(unsigned int irq, void *dev_id);
179 extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
180 
181 struct device;
182 
183 extern int __must_check
184 devm_request_threaded_irq(struct device *dev, unsigned int irq,
185                           irq_handler_t handler, irq_handler_t thread_fn,
186                           unsigned long irqflags, const char *devname,
187                           void *dev_id);
188 
189 static inline int __must_check
190 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
191                  unsigned long irqflags, const char *devname, void *dev_id)
192 {
193         return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
194                                          devname, dev_id);
195 }
196 
197 extern int __must_check
198 devm_request_any_context_irq(struct device *dev, unsigned int irq,
199                  irq_handler_t handler, unsigned long irqflags,
200                  const char *devname, void *dev_id);
201 
202 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
203 
204 /*
205  * On lockdep we dont want to enable hardirqs in hardirq
206  * context. Use local_irq_enable_in_hardirq() to annotate
207  * kernel code that has to do this nevertheless (pretty much
208  * the only valid case is for old/broken hardware that is
209  * insanely slow).
210  *
211  * NOTE: in theory this might break fragile code that relies
212  * on hardirq delivery - in practice we dont seem to have such
213  * places left. So the only effect should be slightly increased
214  * irqs-off latencies.
215  */
216 #ifdef CONFIG_LOCKDEP
217 # define local_irq_enable_in_hardirq()  do { } while (0)
218 #else
219 # define local_irq_enable_in_hardirq()  local_irq_enable()
220 #endif
221 
222 extern void disable_irq_nosync(unsigned int irq);
223 extern bool disable_hardirq(unsigned int irq);
224 extern void disable_irq(unsigned int irq);
225 extern void disable_percpu_irq(unsigned int irq);
226 extern void enable_irq(unsigned int irq);
227 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
228 extern bool irq_percpu_is_enabled(unsigned int irq);
229 extern void irq_wake_thread(unsigned int irq, void *dev_id);
230 
231 extern void disable_nmi_nosync(unsigned int irq);
232 extern void disable_percpu_nmi(unsigned int irq);
233 extern void enable_nmi(unsigned int irq);
234 extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
235 extern int prepare_percpu_nmi(unsigned int irq);
236 extern void teardown_percpu_nmi(unsigned int irq);
237 
238 /* The following three functions are for the core kernel use only. */
239 extern void suspend_device_irqs(void);
240 extern void resume_device_irqs(void);
241 
242 /**
243  * struct irq_affinity_notify - context for notification of IRQ affinity changes
244  * @irq:                Interrupt to which notification applies
245  * @kref:               Reference count, for internal use
246  * @work:               Work item, for internal use
247  * @notify:             Function to be called on change.  This will be
248  *                      called in process context.
249  * @release:            Function to be called on release.  This will be
250  *                      called in process context.  Once registered, the
251  *                      structure must only be freed when this function is
252  *                      called or later.
253  */
254 struct irq_affinity_notify {
255         unsigned int irq;
256         struct kref kref;
257         struct work_struct work;
258         void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
259         void (*release)(struct kref *ref);
260 };
261 
262 #define IRQ_AFFINITY_MAX_SETS  4
263 
264 /**
265  * struct irq_affinity - Description for automatic irq affinity assignements
266  * @pre_vectors:        Don't apply affinity to @pre_vectors at beginning of
267  *                      the MSI(-X) vector space
268  * @post_vectors:       Don't apply affinity to @post_vectors at end of
269  *                      the MSI(-X) vector space
270  * @nr_sets:            The number of interrupt sets for which affinity
271  *                      spreading is required
272  * @set_size:           Array holding the size of each interrupt set
273  * @calc_sets:          Callback for calculating the number and size
274  *                      of interrupt sets
275  * @priv:               Private data for usage by @calc_sets, usually a
276  *                      pointer to driver/device specific data.
277  */
278 struct irq_affinity {
279         unsigned int    pre_vectors;
280         unsigned int    post_vectors;
281         unsigned int    nr_sets;
282         unsigned int    set_size[IRQ_AFFINITY_MAX_SETS];
283         void            (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
284         void            *priv;
285 };
286 
287 /**
288  * struct irq_affinity_desc - Interrupt affinity descriptor
289  * @mask:       cpumask to hold the affinity assignment
290  * @is_managed: 1 if the interrupt is managed internally
291  */
292 struct irq_affinity_desc {
293         struct cpumask  mask;
294         unsigned int    is_managed : 1;
295 };
296 
297 #if defined(CONFIG_SMP)
298 
299 extern cpumask_var_t irq_default_affinity;
300 
301 /* Internal implementation. Use the helpers below */
302 extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
303                               bool force);
304 
305 /**
306  * irq_set_affinity - Set the irq affinity of a given irq
307  * @irq:        Interrupt to set affinity
308  * @cpumask:    cpumask
309  *
310  * Fails if cpumask does not contain an online CPU
311  */
312 static inline int
313 irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
314 {
315         return __irq_set_affinity(irq, cpumask, false);
316 }
317 
318 /**
319  * irq_force_affinity - Force the irq affinity of a given irq
320  * @irq:        Interrupt to set affinity
321  * @cpumask:    cpumask
322  *
323  * Same as irq_set_affinity, but without checking the mask against
324  * online cpus.
325  *
326  * Solely for low level cpu hotplug code, where we need to make per
327  * cpu interrupts affine before the cpu becomes online.
328  */
329 static inline int
330 irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
331 {
332         return __irq_set_affinity(irq, cpumask, true);
333 }
334 
335 extern int irq_can_set_affinity(unsigned int irq);
336 extern int irq_select_affinity(unsigned int irq);
337 
338 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
339 
340 extern int
341 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
342 
343 struct irq_affinity_desc *
344 irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
345 
346 unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
347                                        const struct irq_affinity *affd);
348 
349 #else /* CONFIG_SMP */
350 
351 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
352 {
353         return -EINVAL;
354 }
355 
356 static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
357 {
358         return 0;
359 }
360 
361 static inline int irq_can_set_affinity(unsigned int irq)
362 {
363         return 0;
364 }
365 
366 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
367 
368 static inline int irq_set_affinity_hint(unsigned int irq,
369                                         const struct cpumask *m)
370 {
371         return -EINVAL;
372 }
373 
374 static inline int
375 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
376 {
377         return 0;
378 }
379 
380 static inline struct irq_affinity_desc *
381 irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
382 {
383         return NULL;
384 }
385 
386 static inline unsigned int
387 irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
388                           const struct irq_affinity *affd)
389 {
390         return maxvec;
391 }
392 
393 #endif /* CONFIG_SMP */
394 
395 /*
396  * Special lockdep variants of irq disabling/enabling.
397  * These should be used for locking constructs that
398  * know that a particular irq context which is disabled,
399  * and which is the only irq-context user of a lock,
400  * that it's safe to take the lock in the irq-disabled
401  * section without disabling hardirqs.
402  *
403  * On !CONFIG_LOCKDEP they are equivalent to the normal
404  * irq disable/enable methods.
405  */
406 static inline void disable_irq_nosync_lockdep(unsigned int irq)
407 {
408         disable_irq_nosync(irq);
409 #ifdef CONFIG_LOCKDEP
410         local_irq_disable();
411 #endif
412 }
413 
414 static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
415 {
416         disable_irq_nosync(irq);
417 #ifdef CONFIG_LOCKDEP
418         local_irq_save(*flags);
419 #endif
420 }
421 
422 static inline void disable_irq_lockdep(unsigned int irq)
423 {
424         disable_irq(irq);
425 #ifdef CONFIG_LOCKDEP
426         local_irq_disable();
427 #endif
428 }
429 
430 static inline void enable_irq_lockdep(unsigned int irq)
431 {
432 #ifdef CONFIG_LOCKDEP
433         local_irq_enable();
434 #endif
435         enable_irq(irq);
436 }
437 
438 static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
439 {
440 #ifdef CONFIG_LOCKDEP
441         local_irq_restore(*flags);
442 #endif
443         enable_irq(irq);
444 }
445 
446 /* IRQ wakeup (PM) control: */
447 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
448 
449 static inline int enable_irq_wake(unsigned int irq)
450 {
451         return irq_set_irq_wake(irq, 1);
452 }
453 
454 static inline int disable_irq_wake(unsigned int irq)
455 {
456         return irq_set_irq_wake(irq, 0);
457 }
458 
459 /*
460  * irq_get_irqchip_state/irq_set_irqchip_state specific flags
461  */
462 enum irqchip_irq_state {
463         IRQCHIP_STATE_PENDING,          /* Is interrupt pending? */
464         IRQCHIP_STATE_ACTIVE,           /* Is interrupt in progress? */
465         IRQCHIP_STATE_MASKED,           /* Is interrupt masked? */
466         IRQCHIP_STATE_LINE_LEVEL,       /* Is IRQ line high? */
467 };
468 
469 extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
470                                  bool *state);
471 extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
472                                  bool state);
473 
474 #ifdef CONFIG_IRQ_FORCED_THREADING
475 extern bool force_irqthreads;
476 #else
477 #define force_irqthreads        (0)
478 #endif
479 
480 #ifndef local_softirq_pending
481 
482 #ifndef local_softirq_pending_ref
483 #define local_softirq_pending_ref irq_stat.__softirq_pending
484 #endif
485 
486 #define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
487 #define set_softirq_pending(x)  (__this_cpu_write(local_softirq_pending_ref, (x)))
488 #define or_softirq_pending(x)   (__this_cpu_or(local_softirq_pending_ref, (x)))
489 
490 #endif /* local_softirq_pending */
491 
492 /* Some architectures might implement lazy enabling/disabling of
493  * interrupts. In some cases, such as stop_machine, we might want
494  * to ensure that after a local_irq_disable(), interrupts have
495  * really been disabled in hardware. Such architectures need to
496  * implement the following hook.
497  */
498 #ifndef hard_irq_disable
499 #define hard_irq_disable()      do { } while(0)
500 #endif
501 
502 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
503    frequency threaded job scheduling. For almost all the purposes
504    tasklets are more than enough. F.e. all serial device BHs et
505    al. should be converted to tasklets, not to softirqs.
506  */
507 
508 enum
509 {
510         HI_SOFTIRQ=0,
511         TIMER_SOFTIRQ,
512         NET_TX_SOFTIRQ,
513         NET_RX_SOFTIRQ,
514         BLOCK_SOFTIRQ,
515         IRQ_POLL_SOFTIRQ,
516         TASKLET_SOFTIRQ,
517         SCHED_SOFTIRQ,
518         HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
519                             numbering. Sigh! */
520         RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */
521 
522         NR_SOFTIRQS
523 };
524 
525 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
526 
527 /* map softirq index to softirq name. update 'softirq_to_name' in
528  * kernel/softirq.c when adding a new softirq.
529  */
530 extern const char * const softirq_to_name[NR_SOFTIRQS];
531 
532 /* softirq mask and active fields moved to irq_cpustat_t in
533  * asm/hardirq.h to get better cache usage.  KAO
534  */
535 
536 struct softirq_action
537 {
538         void    (*action)(struct softirq_action *);
539 };
540 
541 asmlinkage void do_softirq(void);
542 asmlinkage void __do_softirq(void);
543 
544 #ifdef __ARCH_HAS_DO_SOFTIRQ
545 void do_softirq_own_stack(void);
546 #else
547 static inline void do_softirq_own_stack(void)
548 {
549         __do_softirq();
550 }
551 #endif
552 
553 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
554 extern void softirq_init(void);
555 extern void __raise_softirq_irqoff(unsigned int nr);
556 
557 extern void raise_softirq_irqoff(unsigned int nr);
558 extern void raise_softirq(unsigned int nr);
559 
560 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
561 
562 static inline struct task_struct *this_cpu_ksoftirqd(void)
563 {
564         return this_cpu_read(ksoftirqd);
565 }
566 
567 /* Tasklets --- multithreaded analogue of BHs.
568 
569    Main feature differing them of generic softirqs: tasklet
570    is running only on one CPU simultaneously.
571 
572    Main feature differing them of BHs: different tasklets
573    may be run simultaneously on different CPUs.
574 
575    Properties:
576    * If tasklet_schedule() is called, then tasklet is guaranteed
577      to be executed on some cpu at least once after this.
578    * If the tasklet is already scheduled, but its execution is still not
579      started, it will be executed only once.
580    * If this tasklet is already running on another CPU (or schedule is called
581      from tasklet itself), it is rescheduled for later.
582    * Tasklet is strictly serialized wrt itself, but not
583      wrt another tasklets. If client needs some intertask synchronization,
584      he makes it with spinlocks.
585  */
586 
587 struct tasklet_struct
588 {
589         struct tasklet_struct *next;
590         unsigned long state;
591         atomic_t count;
592         void (*func)(unsigned long);
593         unsigned long data;
594 };
595 
596 #define DECLARE_TASKLET(name, func, data) \
597 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
598 
599 #define DECLARE_TASKLET_DISABLED(name, func, data) \
600 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
601 
602 
603 enum
604 {
605         TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
606         TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
607 };
608 
609 #ifdef CONFIG_SMP
610 static inline int tasklet_trylock(struct tasklet_struct *t)
611 {
612         return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
613 }
614 
615 static inline void tasklet_unlock(struct tasklet_struct *t)
616 {
617         smp_mb__before_atomic();
618         clear_bit(TASKLET_STATE_RUN, &(t)->state);
619 }
620 
621 static inline void tasklet_unlock_wait(struct tasklet_struct *t)
622 {
623         while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
624 }
625 #else
626 #define tasklet_trylock(t) 1
627 #define tasklet_unlock_wait(t) do { } while (0)
628 #define tasklet_unlock(t) do { } while (0)
629 #endif
630 
631 extern void __tasklet_schedule(struct tasklet_struct *t);
632 
633 static inline void tasklet_schedule(struct tasklet_struct *t)
634 {
635         if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
636                 __tasklet_schedule(t);
637 }
638 
639 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
640 
641 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
642 {
643         if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
644                 __tasklet_hi_schedule(t);
645 }
646 
647 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
648 {
649         atomic_inc(&t->count);
650         smp_mb__after_atomic();
651 }
652 
653 static inline void tasklet_disable(struct tasklet_struct *t)
654 {
655         tasklet_disable_nosync(t);
656         tasklet_unlock_wait(t);
657         smp_mb();
658 }
659 
660 static inline void tasklet_enable(struct tasklet_struct *t)
661 {
662         smp_mb__before_atomic();
663         atomic_dec(&t->count);
664 }
665 
666 extern void tasklet_kill(struct tasklet_struct *t);
667 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
668 extern void tasklet_init(struct tasklet_struct *t,
669                          void (*func)(unsigned long), unsigned long data);
670 
671 /*
672  * Autoprobing for irqs:
673  *
674  * probe_irq_on() and probe_irq_off() provide robust primitives
675  * for accurate IRQ probing during kernel initialization.  They are
676  * reasonably simple to use, are not "fooled" by spurious interrupts,
677  * and, unlike other attempts at IRQ probing, they do not get hung on
678  * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
679  *
680  * For reasonably foolproof probing, use them as follows:
681  *
682  * 1. clear and/or mask the device's internal interrupt.
683  * 2. sti();
684  * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
685  * 4. enable the device and cause it to trigger an interrupt.
686  * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
687  * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
688  * 7. service the device to clear its pending interrupt.
689  * 8. loop again if paranoia is required.
690  *
691  * probe_irq_on() returns a mask of allocated irq's.
692  *
693  * probe_irq_off() takes the mask as a parameter,
694  * and returns the irq number which occurred,
695  * or zero if none occurred, or a negative irq number
696  * if more than one irq occurred.
697  */
698 
699 #if !defined(CONFIG_GENERIC_IRQ_PROBE) 
700 static inline unsigned long probe_irq_on(void)
701 {
702         return 0;
703 }
704 static inline int probe_irq_off(unsigned long val)
705 {
706         return 0;
707 }
708 static inline unsigned int probe_irq_mask(unsigned long val)
709 {
710         return 0;
711 }
712 #else
713 extern unsigned long probe_irq_on(void);        /* returns 0 on failure */
714 extern int probe_irq_off(unsigned long);        /* returns 0 or negative on failure */
715 extern unsigned int probe_irq_mask(unsigned long);      /* returns mask of ISA interrupts */
716 #endif
717 
718 #ifdef CONFIG_PROC_FS
719 /* Initialize /proc/irq/ */
720 extern void init_irq_proc(void);
721 #else
722 static inline void init_irq_proc(void)
723 {
724 }
725 #endif
726 
727 #ifdef CONFIG_IRQ_TIMINGS
728 void irq_timings_enable(void);
729 void irq_timings_disable(void);
730 u64 irq_timings_next_event(u64 now);
731 #endif
732 
733 struct seq_file;
734 int show_interrupts(struct seq_file *p, void *v);
735 int arch_show_interrupts(struct seq_file *p, int prec);
736 
737 extern int early_irq_init(void);
738 extern int arch_probe_nr_irqs(void);
739 extern int arch_early_irq_init(void);
740 
741 /*
742  * We want to know which function is an entrypoint of a hardirq or a softirq.
743  */
744 #define __irq_entry              __attribute__((__section__(".irqentry.text")))
745 #define __softirq_entry  \
746         __attribute__((__section__(".softirqentry.text")))
747 
748 #endif
749 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp