
TOMOYO Linux Cross Reference
Linux/kernel/irq/chip.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
  4  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
  5  *
  6  * This file contains the core interrupt handling code, for irq-chip based
  7  * architectures. Detailed information is available in
  8  * Documentation/core-api/genericirq.rst
  9  */
 10 
 11 #include <linux/irq.h>
 12 #include <linux/msi.h>
 13 #include <linux/module.h>
 14 #include <linux/interrupt.h>
 15 #include <linux/kernel_stat.h>
 16 #include <linux/irqdomain.h>
 17 
 18 #include <trace/events/irq.h>
 19 
 20 #include "internals.h"
 21 
 22 static irqreturn_t bad_chained_irq(int irq, void *dev_id)
 23 {
 24         WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
 25         return IRQ_NONE;
 26 }
 27 
 28 /*
 29  * Chained handlers should never call action on their IRQ. This default
 30  * action will emit a warning if that happens.
 31  */
 32 struct irqaction chained_action = {
 33         .handler = bad_chained_irq,
 34 };
 35 
 36 /**
 37  *      irq_set_chip - set the irq chip for an irq
 38  *      @irq:   irq number
 39  *      @chip:  pointer to irq chip description structure
 40  */
 41 int irq_set_chip(unsigned int irq, struct irq_chip *chip)
 42 {
 43         unsigned long flags;
 44         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 45 
 46         if (!desc)
 47                 return -EINVAL;
 48 
 49         if (!chip)
 50                 chip = &no_irq_chip;
 51 
 52         desc->irq_data.chip = chip;
 53         irq_put_desc_unlock(desc, flags);
 54         /*
 55          * For !CONFIG_SPARSE_IRQ make the irq show up in
 56          * allocated_irqs.
 57          */
 58         irq_mark_irq(irq);
 59         return 0;
 60 }
 61 EXPORT_SYMBOL(irq_set_chip);
 62 
 63 /**
 64  *      irq_set_irq_type - set the irq trigger type for an irq
 65  *      @irq:   irq number
 66  *      @type:  IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 67  */
 68 int irq_set_irq_type(unsigned int irq, unsigned int type)
 69 {
 70         unsigned long flags;
 71         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 72         int ret = 0;
 73 
 74         if (!desc)
 75                 return -EINVAL;
 76 
 77         ret = __irq_set_trigger(desc, type);
 78         irq_put_desc_busunlock(desc, flags);
 79         return ret;
 80 }
 81 EXPORT_SYMBOL(irq_set_irq_type);
 82 
 83 /**
 84  *      irq_set_handler_data - set irq handler data for an irq
 85  *      @irq:   Interrupt number
 86  *      @data:  Pointer to interrupt specific data
 87  *
 88  *      Set the per-irq handler data, used e.g. by chained interrupt handlers
 89  */
 90 int irq_set_handler_data(unsigned int irq, void *data)
 91 {
 92         unsigned long flags;
 93         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 94 
 95         if (!desc)
 96                 return -EINVAL;
 97         desc->irq_common_data.handler_data = data;
 98         irq_put_desc_unlock(desc, flags);
 99         return 0;
100 }
101 EXPORT_SYMBOL(irq_set_handler_data);
102 
103 /**
104  *      irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
105  *      @irq_base:      Interrupt number base
106  *      @irq_offset:    Interrupt number offset
107  *      @entry:         Pointer to MSI descriptor data
108  *
109  *      Set the MSI descriptor entry for an irq at offset
110  */
111 int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
112                          struct msi_desc *entry)
113 {
114         unsigned long flags;
115         struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
116 
117         if (!desc)
118                 return -EINVAL;
119         desc->irq_common_data.msi_desc = entry;
120         if (entry && !irq_offset)
121                 entry->irq = irq_base;
122         irq_put_desc_unlock(desc, flags);
123         return 0;
124 }
125 
126 /**
127  *      irq_set_msi_desc - set MSI descriptor data for an irq
128  *      @irq:   Interrupt number
129  *      @entry: Pointer to MSI descriptor data
130  *
131  *      Set the MSI descriptor entry for an irq
132  */
133 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
134 {
135         return irq_set_msi_desc_off(irq, 0, entry);
136 }
137 
138 /**
139  *      irq_set_chip_data - set irq chip data for an irq
140  *      @irq:   Interrupt number
141  *      @data:  Pointer to chip specific data
142  *
143  *      Set the hardware irq chip data for an irq
144  */
145 int irq_set_chip_data(unsigned int irq, void *data)
146 {
147         unsigned long flags;
148         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
149 
150         if (!desc)
151                 return -EINVAL;
152         desc->irq_data.chip_data = data;
153         irq_put_desc_unlock(desc, flags);
154         return 0;
155 }
156 EXPORT_SYMBOL(irq_set_chip_data);
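
/*
 * Usage sketch (hedged, hypothetical driver code): irq_set_chip_data() is
 * typically called from an irq_domain ->map() callback so that the chip
 * callbacks can later retrieve per-controller state via
 * irq_data_get_irq_chip_data(). Names prefixed with foo_ and the register
 * offset are illustrative assumptions, not APIs defined in this file.
 *
 *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hwirq)
 *	{
 *		struct foo_chip_priv *priv = d->host_data;
 *
 *		irq_set_chip_data(virq, priv);
 *		irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
 *		return 0;
 *	}
 *
 *	static void foo_irq_mask(struct irq_data *data)
 *	{
 *		struct foo_chip_priv *priv = irq_data_get_irq_chip_data(data);
 *
 *		writel(BIT(data->hwirq), priv->base + FOO_IRQ_MASK_SET);
 *	}
 */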
157 
158 struct irq_data *irq_get_irq_data(unsigned int irq)
159 {
160         struct irq_desc *desc = irq_to_desc(irq);
161 
162         return desc ? &desc->irq_data : NULL;
163 }
164 EXPORT_SYMBOL_GPL(irq_get_irq_data);
165 
166 static void irq_state_clr_disabled(struct irq_desc *desc)
167 {
168         irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
169 }
170 
171 static void irq_state_clr_masked(struct irq_desc *desc)
172 {
173         irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
174 }
175 
176 static void irq_state_clr_started(struct irq_desc *desc)
177 {
178         irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
179 }
180 
181 static void irq_state_set_started(struct irq_desc *desc)
182 {
183         irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
184 }
185 
186 enum {
187         IRQ_STARTUP_NORMAL,
188         IRQ_STARTUP_MANAGED,
189         IRQ_STARTUP_ABORT,
190 };
191 
192 #ifdef CONFIG_SMP
193 static int
194 __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
195 {
196         struct irq_data *d = irq_desc_get_irq_data(desc);
197 
198         if (!irqd_affinity_is_managed(d))
199                 return IRQ_STARTUP_NORMAL;
200 
201         irqd_clr_managed_shutdown(d);
202 
203         if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
204                 /*
205                  * Catch code which fiddles with enable_irq() on a managed
206                  * and potentially shutdown IRQ. Chained interrupt
207                  * installation or irq auto probing should not happen on
208                  * managed irqs either.
209                  */
210                 if (WARN_ON_ONCE(force))
211                         return IRQ_STARTUP_ABORT;
212                 /*
213                  * The interrupt was requested, but there is no online CPU
214                  * in its affinity mask. Put it into managed shutdown
215                  * state and let the cpu hotplug mechanism start it up once
216                  * a CPU in the mask becomes available.
217                  */
218                 return IRQ_STARTUP_ABORT;
219         }
220         /*
221          * Managed interrupts have reserved resources, so this should not
222          * happen.
223          */
224         if (WARN_ON(irq_domain_activate_irq(d, false)))
225                 return IRQ_STARTUP_ABORT;
226         return IRQ_STARTUP_MANAGED;
227 }
228 #else
229 static __always_inline int
230 __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
231 {
232         return IRQ_STARTUP_NORMAL;
233 }
234 #endif
235 
236 static int __irq_startup(struct irq_desc *desc)
237 {
238         struct irq_data *d = irq_desc_get_irq_data(desc);
239         int ret = 0;
240 
241         /* Warn if this interrupt is not activated but try nevertheless */
242         WARN_ON_ONCE(!irqd_is_activated(d));
243 
244         if (d->chip->irq_startup) {
245                 ret = d->chip->irq_startup(d);
246                 irq_state_clr_disabled(desc);
247                 irq_state_clr_masked(desc);
248         } else {
249                 irq_enable(desc);
250         }
251         irq_state_set_started(desc);
252         return ret;
253 }
254 
255 int irq_startup(struct irq_desc *desc, bool resend, bool force)
256 {
257         struct irq_data *d = irq_desc_get_irq_data(desc);
258         struct cpumask *aff = irq_data_get_affinity_mask(d);
259         int ret = 0;
260 
261         desc->depth = 0;
262 
263         if (irqd_is_started(d)) {
264                 irq_enable(desc);
265         } else {
266                 switch (__irq_startup_managed(desc, aff, force)) {
267                 case IRQ_STARTUP_NORMAL:
268                         ret = __irq_startup(desc);
269                         irq_setup_affinity(desc);
270                         break;
271                 case IRQ_STARTUP_MANAGED:
272                         irq_do_set_affinity(d, aff, false);
273                         ret = __irq_startup(desc);
274                         break;
275                 case IRQ_STARTUP_ABORT:
276                         irqd_set_managed_shutdown(d);
277                         return 0;
278                 }
279         }
280         if (resend)
281                 check_irq_resend(desc);
282 
283         return ret;
284 }
285 
286 int irq_activate(struct irq_desc *desc)
287 {
288         struct irq_data *d = irq_desc_get_irq_data(desc);
289 
290         if (!irqd_affinity_is_managed(d))
291                 return irq_domain_activate_irq(d, false);
292         return 0;
293 }
294 
295 int irq_activate_and_startup(struct irq_desc *desc, bool resend)
296 {
297         if (WARN_ON(irq_activate(desc)))
298                 return 0;
299         return irq_startup(desc, resend, IRQ_START_FORCE);
300 }
301 
302 static void __irq_disable(struct irq_desc *desc, bool mask);
303 
304 void irq_shutdown(struct irq_desc *desc)
305 {
306         if (irqd_is_started(&desc->irq_data)) {
307                 desc->depth = 1;
308                 if (desc->irq_data.chip->irq_shutdown) {
309                         desc->irq_data.chip->irq_shutdown(&desc->irq_data);
310                         irq_state_set_disabled(desc);
311                         irq_state_set_masked(desc);
312                 } else {
313                         __irq_disable(desc, true);
314                 }
315                 irq_state_clr_started(desc);
316         }
317 }
318 
319 
320 void irq_shutdown_and_deactivate(struct irq_desc *desc)
321 {
322         irq_shutdown(desc);
323         /*
324          * This must be called even if the interrupt was never started up,
325          * because the activation can happen before the interrupt is
326          * available for request/startup. It has its own state tracking so
327          * it's safe to call it unconditionally.
328          */
329         irq_domain_deactivate_irq(&desc->irq_data);
330 }
331 
332 void irq_enable(struct irq_desc *desc)
333 {
334         if (!irqd_irq_disabled(&desc->irq_data)) {
335                 unmask_irq(desc);
336         } else {
337                 irq_state_clr_disabled(desc);
338                 if (desc->irq_data.chip->irq_enable) {
339                         desc->irq_data.chip->irq_enable(&desc->irq_data);
340                         irq_state_clr_masked(desc);
341                 } else {
342                         unmask_irq(desc);
343                 }
344         }
345 }
346 
347 static void __irq_disable(struct irq_desc *desc, bool mask)
348 {
349         if (irqd_irq_disabled(&desc->irq_data)) {
350                 if (mask)
351                         mask_irq(desc);
352         } else {
353                 irq_state_set_disabled(desc);
354                 if (desc->irq_data.chip->irq_disable) {
355                         desc->irq_data.chip->irq_disable(&desc->irq_data);
356                         irq_state_set_masked(desc);
357                 } else if (mask) {
358                         mask_irq(desc);
359                 }
360         }
361 }
362 
363 /**
364  * irq_disable - Mark interrupt disabled
365  * @desc:       irq descriptor which should be disabled
366  *
367  * If the chip does not implement the irq_disable callback, we
368  * use a lazy disable approach. That means we mark the interrupt
369  * disabled, but leave the hardware unmasked. That's an
370  * optimization because we avoid the hardware access for the
371  * common case where no interrupt happens after we marked it
372  * disabled. If an interrupt happens, then the interrupt flow
373  * handler masks the line at the hardware level and marks it
374  * pending.
375  *
376  * If the interrupt chip does not implement the irq_disable callback,
377  * a driver can disable the lazy approach for a particular irq line by
378  * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
379  * be used for devices which cannot disable the interrupt at the
380  * device level under certain circumstances and have to use
381  * disable_irq[_nosync] instead.
382  */
383 void irq_disable(struct irq_desc *desc)
384 {
385         __irq_disable(desc, irq_settings_disable_unlazy(desc));
386 }
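
/*
 * Usage sketch (hedged): a driver whose device cannot gate the interrupt at
 * the source, and which therefore depends on disable_irq[_nosync]() really
 * masking the line, can opt out of the lazy disable at setup time. The irq
 * number below is illustrative.
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	...
 *	disable_irq(irq);	// now masks the line at the hardware level
 */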
387 
388 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
389 {
390         if (desc->irq_data.chip->irq_enable)
391                 desc->irq_data.chip->irq_enable(&desc->irq_data);
392         else
393                 desc->irq_data.chip->irq_unmask(&desc->irq_data);
394         cpumask_set_cpu(cpu, desc->percpu_enabled);
395 }
396 
397 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
398 {
399         if (desc->irq_data.chip->irq_disable)
400                 desc->irq_data.chip->irq_disable(&desc->irq_data);
401         else
402                 desc->irq_data.chip->irq_mask(&desc->irq_data);
403         cpumask_clear_cpu(cpu, desc->percpu_enabled);
404 }
405 
406 static inline void mask_ack_irq(struct irq_desc *desc)
407 {
408         if (desc->irq_data.chip->irq_mask_ack) {
409                 desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
410                 irq_state_set_masked(desc);
411         } else {
412                 mask_irq(desc);
413                 if (desc->irq_data.chip->irq_ack)
414                         desc->irq_data.chip->irq_ack(&desc->irq_data);
415         }
416 }
417 
418 void mask_irq(struct irq_desc *desc)
419 {
420         if (irqd_irq_masked(&desc->irq_data))
421                 return;
422 
423         if (desc->irq_data.chip->irq_mask) {
424                 desc->irq_data.chip->irq_mask(&desc->irq_data);
425                 irq_state_set_masked(desc);
426         }
427 }
428 
429 void unmask_irq(struct irq_desc *desc)
430 {
431         if (!irqd_irq_masked(&desc->irq_data))
432                 return;
433 
434         if (desc->irq_data.chip->irq_unmask) {
435                 desc->irq_data.chip->irq_unmask(&desc->irq_data);
436                 irq_state_clr_masked(desc);
437         }
438 }
439 
440 void unmask_threaded_irq(struct irq_desc *desc)
441 {
442         struct irq_chip *chip = desc->irq_data.chip;
443 
444         if (chip->flags & IRQCHIP_EOI_THREADED)
445                 chip->irq_eoi(&desc->irq_data);
446 
447         unmask_irq(desc);
448 }
449 
450 /*
451  *      handle_nested_irq - Handle a nested irq from an irq thread
452  *      @irq:   the interrupt number
453  *
454  *      Handle interrupts which are nested into a threaded interrupt
455  *      handler. The handler function is called inside the calling
456  *      thread's context.
457  */
458 void handle_nested_irq(unsigned int irq)
459 {
460         struct irq_desc *desc = irq_to_desc(irq);
461         struct irqaction *action;
462         irqreturn_t action_ret;
463 
464         might_sleep();
465 
466         raw_spin_lock_irq(&desc->lock);
467 
468         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
469 
470         action = desc->action;
471         if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
472                 desc->istate |= IRQS_PENDING;
473                 goto out_unlock;
474         }
475 
476         kstat_incr_irqs_this_cpu(desc);
477         irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
478         raw_spin_unlock_irq(&desc->lock);
479 
480         action_ret = IRQ_NONE;
481         for_each_action_of_desc(desc, action)
482                 action_ret |= action->thread_fn(action->irq, action->dev_id);
483 
484         if (!noirqdebug)
485                 note_interrupt(desc, action_ret);
486 
487         raw_spin_lock_irq(&desc->lock);
488         irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
489 
490 out_unlock:
491         raw_spin_unlock_irq(&desc->lock);
492 }
493 EXPORT_SYMBOL_GPL(handle_nested_irq);
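
/*
 * Usage sketch (hedged, hypothetical driver code): handle_nested_irq() is
 * meant to be called from the threaded handler of the parent interrupt,
 * e.g. for an I2C GPIO expander whose child interrupts run as nested
 * threaded handlers. The foo_* names and status register layout are
 * assumptions.
 *
 *	static irqreturn_t foo_expander_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_expander *exp = dev_id;
 *		unsigned long pending = foo_read_irq_status(exp);
 *		int hwirq;
 *
 *		for_each_set_bit(hwirq, &pending, exp->nirqs)
 *			handle_nested_irq(irq_find_mapping(exp->domain, hwirq));
 *
 *		return IRQ_HANDLED;
 *	}
 */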
494 
495 static bool irq_check_poll(struct irq_desc *desc)
496 {
497         if (!(desc->istate & IRQS_POLL_INPROGRESS))
498                 return false;
499         return irq_wait_for_poll(desc);
500 }
501 
502 static bool irq_may_run(struct irq_desc *desc)
503 {
504         unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
505 
506         /*
507          * If the interrupt is not in progress and is not an armed
508          * wakeup interrupt, proceed.
509          */
510         if (!irqd_has_set(&desc->irq_data, mask))
511                 return true;
512 
513         /*
514          * If the interrupt is an armed wakeup source, mark it pending
515          * and suspended, disable it and notify the pm core about the
516          * event.
517          */
518         if (irq_pm_check_wakeup(desc))
519                 return false;
520 
521         /*
522          * Handle a potential concurrent poll on a different core.
523          */
524         return irq_check_poll(desc);
525 }
526 
527 /**
528  *      handle_simple_irq - Simple and software-decoded IRQs.
529  *      @desc:  the interrupt description structure for this irq
530  *
531  *      Simple interrupts are either sent from a demultiplexing interrupt
532  *      handler or come from hardware, where no interrupt hardware control
533  *      is necessary.
534  *
535  *      Note: The caller is expected to handle the ack, clear, mask and
536  *      unmask issues if necessary.
537  */
538 void handle_simple_irq(struct irq_desc *desc)
539 {
540         raw_spin_lock(&desc->lock);
541 
542         if (!irq_may_run(desc))
543                 goto out_unlock;
544 
545         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
546 
547         if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
548                 desc->istate |= IRQS_PENDING;
549                 goto out_unlock;
550         }
551 
552         kstat_incr_irqs_this_cpu(desc);
553         handle_irq_event(desc);
554 
555 out_unlock:
556         raw_spin_unlock(&desc->lock);
557 }
558 EXPORT_SYMBOL_GPL(handle_simple_irq);
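
/*
 * Usage sketch (hedged, hypothetical driver code): child interrupts behind a
 * software demultiplexer are commonly given handle_simple_irq as their flow
 * handler, while the parent's chained flow handler decodes the source and
 * forwards it with generic_handle_irq(). The foo_* names are assumptions;
 * chained_irq_enter()/chained_irq_exit() come from
 * <linux/irqchip/chained_irq.h>.
 *
 *	static void foo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct foo_mux *mux = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int hwirq;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(mux->base + FOO_MUX_STATUS);
 *		for_each_set_bit(hwirq, &pending, FOO_MUX_NR_IRQS)
 *			generic_handle_irq(irq_find_mapping(mux->domain, hwirq));
 *		chained_irq_exit(chip, desc);
 *	}
 */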
559 
560 /**
561  *      handle_untracked_irq - Simple and software-decoded IRQs.
562  *      @desc:  the interrupt description structure for this irq
563  *
564  *      Untracked interrupts are sent from a demultiplexing interrupt
565  *      handler when the demultiplexer does not know which device in its
566  *      multiplexed irq domain generated the interrupt. IRQs handled
567  *      through here are not subject to stats tracking, randomness, or
568  *      spurious interrupt detection.
569  *
570  *      Note: Like handle_simple_irq, the caller is expected to handle
571  *      the ack, clear, mask and unmask issues if necessary.
572  */
573 void handle_untracked_irq(struct irq_desc *desc)
574 {
575         unsigned int flags = 0;
576 
577         raw_spin_lock(&desc->lock);
578 
579         if (!irq_may_run(desc))
580                 goto out_unlock;
581 
582         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
583 
584         if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
585                 desc->istate |= IRQS_PENDING;
586                 goto out_unlock;
587         }
588 
589         desc->istate &= ~IRQS_PENDING;
590         irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
591         raw_spin_unlock(&desc->lock);
592 
593         __handle_irq_event_percpu(desc, &flags);
594 
595         raw_spin_lock(&desc->lock);
596         irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
597 
598 out_unlock:
599         raw_spin_unlock(&desc->lock);
600 }
601 EXPORT_SYMBOL_GPL(handle_untracked_irq);
602 
603 /*
604  * Called unconditionally from handle_level_irq() and only for oneshot
605  * interrupts from handle_fasteoi_irq()
606  */
607 static void cond_unmask_irq(struct irq_desc *desc)
608 {
609         /*
610          * We need to unmask in the following cases:
611          * - Standard level irq (IRQF_ONESHOT is not set)
612          * - Oneshot irq which did not wake the thread (caused by a
613          *   spurious interrupt or a primary handler handling it
614          *   completely).
615          */
616         if (!irqd_irq_disabled(&desc->irq_data) &&
617             irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
618                 unmask_irq(desc);
619 }
620 
621 /**
622  *      handle_level_irq - Level type irq handler
623  *      @desc:  the interrupt description structure for this irq
624  *
625  *      Level type interrupts are active as long as the hardware line has
626  *      the active level. This may require masking the interrupt and unmasking
627  *      it after the associated handler has acknowledged the device, so that
628  *      the interrupt line is back to inactive.
629  */
630 void handle_level_irq(struct irq_desc *desc)
631 {
632         raw_spin_lock(&desc->lock);
633         mask_ack_irq(desc);
634 
635         if (!irq_may_run(desc))
636                 goto out_unlock;
637 
638         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
639 
640         /*
641          * If it's disabled or no action is available,
642          * keep it masked and get out of here
643          */
644         if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
645                 desc->istate |= IRQS_PENDING;
646                 goto out_unlock;
647         }
648 
649         kstat_incr_irqs_this_cpu(desc);
650         handle_irq_event(desc);
651 
652         cond_unmask_irq(desc);
653 
654 out_unlock:
655         raw_spin_unlock(&desc->lock);
656 }
657 EXPORT_SYMBOL_GPL(handle_level_irq);
658 
659 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
660 static inline void preflow_handler(struct irq_desc *desc)
661 {
662         if (desc->preflow_handler)
663                 desc->preflow_handler(&desc->irq_data);
664 }
665 #else
666 static inline void preflow_handler(struct irq_desc *desc) { }
667 #endif
668 
669 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
670 {
671         if (!(desc->istate & IRQS_ONESHOT)) {
672                 chip->irq_eoi(&desc->irq_data);
673                 return;
674         }
675         /*
676          * We need to unmask in the following cases:
677          * - Oneshot irq which did not wake the thread (caused by a
678          *   spurious interrupt or a primary handler handling it
679          *   completely).
680          */
681         if (!irqd_irq_disabled(&desc->irq_data) &&
682             irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
683                 chip->irq_eoi(&desc->irq_data);
684                 unmask_irq(desc);
685         } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
686                 chip->irq_eoi(&desc->irq_data);
687         }
688 }
689 
690 /**
691  *      handle_fasteoi_irq - irq handler for transparent controllers
692  *      @desc:  the interrupt description structure for this irq
693  *
694  *      Only a single callback will be issued to the chip: an ->eoi()
695  *      call when the interrupt has been serviced. This enables support
696  *      for modern forms of interrupt handlers, which handle the flow
697  *      details in hardware, transparently.
698  */
699 void handle_fasteoi_irq(struct irq_desc *desc)
700 {
701         struct irq_chip *chip = desc->irq_data.chip;
702 
703         raw_spin_lock(&desc->lock);
704 
705         if (!irq_may_run(desc))
706                 goto out;
707 
708         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
709 
710         /*
711          * If it's disabled or no action is available
712          * then mask it and get out of here:
713          */
714         if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
715                 desc->istate |= IRQS_PENDING;
716                 mask_irq(desc);
717                 goto out;
718         }
719 
720         kstat_incr_irqs_this_cpu(desc);
721         if (desc->istate & IRQS_ONESHOT)
722                 mask_irq(desc);
723 
724         preflow_handler(desc);
725         handle_irq_event(desc);
726 
727         cond_unmask_eoi_irq(desc, chip);
728 
729         raw_spin_unlock(&desc->lock);
730         return;
731 out:
732         if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
733                 chip->irq_eoi(&desc->irq_data);
734         raw_spin_unlock(&desc->lock);
735 }
736 EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
737 
738 /**
739  *      handle_fasteoi_nmi - irq handler for NMI interrupt lines
740  *      @desc:  the interrupt description structure for this irq
741  *
742  *      A simple NMI-safe handler, considering the restrictions
743  *      from request_nmi.
744  *
745  *      Only a single callback will be issued to the chip: an ->eoi()
746  *      call when the interrupt has been serviced. This enables support
747  *      for modern forms of interrupt handlers, which handle the flow
748  *      details in hardware, transparently.
749  */
750 void handle_fasteoi_nmi(struct irq_desc *desc)
751 {
752         struct irq_chip *chip = irq_desc_get_chip(desc);
753         struct irqaction *action = desc->action;
754         unsigned int irq = irq_desc_get_irq(desc);
755         irqreturn_t res;
756 
757         __kstat_incr_irqs_this_cpu(desc);
758 
759         trace_irq_handler_entry(irq, action);
760         /*
761          * NMIs cannot be shared, there is only one action.
762          */
763         res = action->handler(irq, action->dev_id);
764         trace_irq_handler_exit(irq, action, res);
765 
766         if (chip->irq_eoi)
767                 chip->irq_eoi(&desc->irq_data);
768 }
769 EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
770 
771 /**
772  *      handle_edge_irq - edge type IRQ handler
773  *      @desc:  the interrupt description structure for this irq
774  *
775  *      An interrupt occurs on the falling and/or rising edge of a hardware
776  *      signal. The occurrence is latched into the irq controller hardware
777  *      and must be acked in order to be reenabled. After the ack another
778  *      interrupt can happen on the same source even before the first one
779  *      is handled by the associated event handler. If this happens it
780  *      might be necessary to disable (mask) the interrupt depending on the
781  *      controller hardware. This requires re-enabling the interrupt inside
782  *      of the loop which handles the interrupts which have arrived while
783  *      the handler was running. If all pending interrupts are handled, the
784  *      loop is left.
785  */
786 void handle_edge_irq(struct irq_desc *desc)
787 {
788         raw_spin_lock(&desc->lock);
789 
790         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
791 
792         if (!irq_may_run(desc)) {
793                 desc->istate |= IRQS_PENDING;
794                 mask_ack_irq(desc);
795                 goto out_unlock;
796         }
797 
798         /*
799          * If it's disabled or no action is available then mask it and get
800          * out of here.
801          */
802         if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
803                 desc->istate |= IRQS_PENDING;
804                 mask_ack_irq(desc);
805                 goto out_unlock;
806         }
807 
808         kstat_incr_irqs_this_cpu(desc);
809 
810         /* Start handling the irq */
811         desc->irq_data.chip->irq_ack(&desc->irq_data);
812 
813         do {
814                 if (unlikely(!desc->action)) {
815                         mask_irq(desc);
816                         goto out_unlock;
817                 }
818 
819                 /*
820                  * When another irq arrived while we were handling
821                  * one, we could have masked the irq.
822                  * Re-enable it if it was not disabled in the meantime.
823                  */
824                 if (unlikely(desc->istate & IRQS_PENDING)) {
825                         if (!irqd_irq_disabled(&desc->irq_data) &&
826                             irqd_irq_masked(&desc->irq_data))
827                                 unmask_irq(desc);
828                 }
829 
830                 handle_irq_event(desc);
831 
832         } while ((desc->istate & IRQS_PENDING) &&
833                  !irqd_irq_disabled(&desc->irq_data));
834 
835 out_unlock:
836         raw_spin_unlock(&desc->lock);
837 }
838 EXPORT_SYMBOL(handle_edge_irq);
839 
840 #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
841 /**
842  *      handle_edge_eoi_irq - edge eoi type IRQ handler
843  *      @desc:  the interrupt description structure for this irq
844  *
845  * Similar to the above handle_edge_irq, but using eoi and w/o the
846  * mask/unmask logic.
847  */
848 void handle_edge_eoi_irq(struct irq_desc *desc)
849 {
850         struct irq_chip *chip = irq_desc_get_chip(desc);
851 
852         raw_spin_lock(&desc->lock);
853 
854         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
855 
856         if (!irq_may_run(desc)) {
857                 desc->istate |= IRQS_PENDING;
858                 goto out_eoi;
859         }
860 
861         /*
862          * If it's disabled or no action is available then mask it and get
863          * out of here.
864          */
865         if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
866                 desc->istate |= IRQS_PENDING;
867                 goto out_eoi;
868         }
869 
870         kstat_incr_irqs_this_cpu(desc);
871 
872         do {
873                 if (unlikely(!desc->action))
874                         goto out_eoi;
875 
876                 handle_irq_event(desc);
877 
878         } while ((desc->istate & IRQS_PENDING) &&
879                  !irqd_irq_disabled(&desc->irq_data));
880 
881 out_eoi:
882         chip->irq_eoi(&desc->irq_data);
883         raw_spin_unlock(&desc->lock);
884 }
885 #endif
886 
887 /**
888  *      handle_percpu_irq - Per CPU local irq handler
889  *      @desc:  the interrupt description structure for this irq
890  *
891  *      Per CPU interrupts on SMP machines without locking requirements
892  */
893 void handle_percpu_irq(struct irq_desc *desc)
894 {
895         struct irq_chip *chip = irq_desc_get_chip(desc);
896 
897         /*
898          * PER CPU interrupts are not serialized. Do not touch
899          * desc->tot_count.
900          */
901         __kstat_incr_irqs_this_cpu(desc);
902 
903         if (chip->irq_ack)
904                 chip->irq_ack(&desc->irq_data);
905 
906         handle_irq_event_percpu(desc);
907 
908         if (chip->irq_eoi)
909                 chip->irq_eoi(&desc->irq_data);
910 }
911 
912 /**
913  * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
914  * @desc:       the interrupt description structure for this irq
915  *
916  * Per CPU interrupts on SMP machines without locking requirements. Same as
917  * handle_percpu_irq() above but with the following extras:
918  *
919  * action->percpu_dev_id is a pointer to percpu variables which
920  * contain the real device id for the cpu on which this handler is
921  * called
922  */
923 void handle_percpu_devid_irq(struct irq_desc *desc)
924 {
925         struct irq_chip *chip = irq_desc_get_chip(desc);
926         struct irqaction *action = desc->action;
927         unsigned int irq = irq_desc_get_irq(desc);
928         irqreturn_t res;
929 
930         /*
931          * PER CPU interrupts are not serialized. Do not touch
932          * desc->tot_count.
933          */
934         __kstat_incr_irqs_this_cpu(desc);
935 
936         if (chip->irq_ack)
937                 chip->irq_ack(&desc->irq_data);
938 
939         if (likely(action)) {
940                 trace_irq_handler_entry(irq, action);
941                 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
942                 trace_irq_handler_exit(irq, action, res);
943         } else {
944                 unsigned int cpu = smp_processor_id();
945                 bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
946 
947                 if (enabled)
948                         irq_percpu_disable(desc, cpu);
949 
950                 pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
951                             enabled ? " and unmasked" : "", irq, cpu);
952         }
953 
954         if (chip->irq_eoi)
955                 chip->irq_eoi(&desc->irq_data);
956 }
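
/*
 * Usage sketch (hedged): per-CPU interrupts that end up in
 * handle_percpu_devid_irq() are requested with request_percpu_irq(), whose
 * percpu dev_id cookie is what raw_cpu_ptr() dereferences above, and are then
 * enabled on each CPU individually. The foo_* names are illustrative.
 *
 *	static DEFINE_PER_CPU(struct foo_timer, foo_timer_state);
 *
 *	err = request_percpu_irq(irq, foo_timer_handler, "foo-timer",
 *				 &foo_timer_state);
 *	...
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);	// on each CPU
 *	...
 *	disable_percpu_irq(irq);		// on each CPU, before teardown
 */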
957 
958 /**
959  * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
960  *                                   dev ids
961  * @desc:       the interrupt description structure for this irq
962  *
963  * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
964  * as a percpu pointer.
965  */
966 void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
967 {
968         struct irq_chip *chip = irq_desc_get_chip(desc);
969         struct irqaction *action = desc->action;
970         unsigned int irq = irq_desc_get_irq(desc);
971         irqreturn_t res;
972 
973         __kstat_incr_irqs_this_cpu(desc);
974 
975         trace_irq_handler_entry(irq, action);
976         res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
977         trace_irq_handler_exit(irq, action, res);
978 
979         if (chip->irq_eoi)
980                 chip->irq_eoi(&desc->irq_data);
981 }
982 
983 static void
984 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
985                      int is_chained, const char *name)
986 {
987         if (!handle) {
988                 handle = handle_bad_irq;
989         } else {
990                 struct irq_data *irq_data = &desc->irq_data;
991 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
992                 /*
993                  * With hierarchical domains we might run into a
994                  * situation where the outermost chip is not yet set
995                  * up, but the inner chips are there.  Instead of
996                  * bailing we install the handler, but obviously we
997                  * cannot enable/startup the interrupt at this point.
998                  */
999                 while (irq_data) {
1000                         if (irq_data->chip != &no_irq_chip)
1001                                 break;
1002                         /*
1003                          * Bail out if the outer chip is not set up
1004                          * and the interrupt is supposed to be started
1005                          * right away.
1006                          */
1007                         if (WARN_ON(is_chained))
1008                                 return;
1009                         /* Try the parent */
1010                         irq_data = irq_data->parent_data;
1011                 }
1012 #endif
1013                 if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
1014                         return;
1015         }
1016 
1017         /* Uninstall? */
1018         if (handle == handle_bad_irq) {
1019                 if (desc->irq_data.chip != &no_irq_chip)
1020                         mask_ack_irq(desc);
1021                 irq_state_set_disabled(desc);
1022                 if (is_chained)
1023                         desc->action = NULL;
1024                 desc->depth = 1;
1025         }
1026         desc->handle_irq = handle;
1027         desc->name = name;
1028 
1029         if (handle != handle_bad_irq && is_chained) {
1030                 unsigned int type = irqd_get_trigger_type(&desc->irq_data);
1031 
1032                 /*
1033                  * We're about to start this interrupt immediately,
1034                  * hence the need to set the trigger configuration.
1035                  * But the .set_type callback may have overridden the
1036                  * flow handler, ignoring that we're dealing with a
1037                  * chained interrupt. Reset it immediately because we
1038                  * do know better.
1039                  */
1040                 if (type != IRQ_TYPE_NONE) {
1041                         __irq_set_trigger(desc, type);
1042                         desc->handle_irq = handle;
1043                 }
1044 
1045                 irq_settings_set_noprobe(desc);
1046                 irq_settings_set_norequest(desc);
1047                 irq_settings_set_nothread(desc);
1048                 desc->action = &chained_action;
1049                 irq_activate_and_startup(desc, IRQ_RESEND);
1050         }
1051 }
1052 
1053 void
1054 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
1055                   const char *name)
1056 {
1057         unsigned long flags;
1058         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1059 
1060         if (!desc)
1061                 return;
1062 
1063         __irq_do_set_handler(desc, handle, is_chained, name);
1064         irq_put_desc_busunlock(desc, flags);
1065 }
1066 EXPORT_SYMBOL_GPL(__irq_set_handler);
1067 
1068 void
1069 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
1070                                  void *data)
1071 {
1072         unsigned long flags;
1073         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1074 
1075         if (!desc)
1076                 return;
1077 
1078         desc->irq_common_data.handler_data = data;
1079         __irq_do_set_handler(desc, handle, 1, NULL);
1080 
1081         irq_put_desc_busunlock(desc, flags);
1082 }
1083 EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
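
/*
 * Usage sketch (hedged, hypothetical driver code): a GPIO or interrupt
 * multiplexer typically installs its demultiplexing flow handler on the
 * parent line from its probe routine; the data set here is what the chained
 * flow handler later retrieves with irq_desc_get_handler_data(). The foo_*
 * names are assumptions.
 *
 *	parent_irq = platform_get_irq(pdev, 0);
 *	if (parent_irq < 0)
 *		return parent_irq;
 *	irq_set_chained_handler_and_data(parent_irq, foo_demux_handler, mux);
 */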
1084 
1085 void
1086 irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
1087                               irq_flow_handler_t handle, const char *name)
1088 {
1089         irq_set_chip(irq, chip);
1090         __irq_set_handler(irq, handle, 0, name);
1091 }
1092 EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
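
/*
 * Usage sketch (hedged, hypothetical driver code): besides setting the chip
 * and flow handler at mapping time, drivers that support both edge and level
 * triggers often switch the flow handler from their ->irq_set_type()
 * callback with irq_set_handler_locked() from <linux/irq.h>. The foo_*
 * names are assumptions.
 *
 *	irq_set_chip_and_handler_name(virq, &foo_irq_chip, handle_level_irq,
 *				      "foo");
 *
 *	static int foo_irq_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		if (type & IRQ_TYPE_LEVEL_MASK)
 *			irq_set_handler_locked(d, handle_level_irq);
 *		else
 *			irq_set_handler_locked(d, handle_edge_irq);
 *		foo_hw_configure_trigger(d->hwirq, type);
 *		return 0;
 *	}
 */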
1093 
1094 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
1095 {
1096         unsigned long flags, trigger, tmp;
1097         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1098 
1099         if (!desc)
1100                 return;
1101 
1102         /*
1103          * Warn when a driver sets the no autoenable flag on an already
1104          * active interrupt.
1105          */
1106         WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
1107 
1108         irq_settings_clr_and_set(desc, clr, set);
1109 
1110         trigger = irqd_get_trigger_type(&desc->irq_data);
1111 
1112         irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
1113                    IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
1114         if (irq_settings_has_no_balance_set(desc))
1115                 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1116         if (irq_settings_is_per_cpu(desc))
1117                 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1118         if (irq_settings_can_move_pcntxt(desc))
1119                 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
1120         if (irq_settings_is_level(desc))
1121                 irqd_set(&desc->irq_data, IRQD_LEVEL);
1122 
1123         tmp = irq_settings_get_trigger_mask(desc);
1124         if (tmp != IRQ_TYPE_NONE)
1125                 trigger = tmp;
1126 
1127         irqd_set(&desc->irq_data, trigger);
1128 
1129         irq_put_desc_unlock(desc, flags);
1130 }
1131 EXPORT_SYMBOL_GPL(irq_modify_status);
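
/*
 * Usage sketch (hedged): irq_modify_status() is usually reached through the
 * irq_set_status_flags()/irq_clear_status_flags() wrappers in <linux/irq.h>.
 * A common case is keeping an interrupt disabled across request_irq() so the
 * driver can enable it explicitly once the device is ready. The foo_* names
 * are illustrative.
 *
 *	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 *	err = request_irq(irq, foo_handler, 0, "foo", foo);
 *	...
 *	enable_irq(irq);	// explicit enable once the device is ready
 */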
1132 
1133 /**
1134  *      irq_cpu_online - Invoke all irq_cpu_online functions.
1135  *
1136  *      Iterate through all irqs and invoke the chip.irq_cpu_online()
1137  *      for each.
1138  */
1139 void irq_cpu_online(void)
1140 {
1141         struct irq_desc *desc;
1142         struct irq_chip *chip;
1143         unsigned long flags;
1144         unsigned int irq;
1145 
1146         for_each_active_irq(irq) {
1147                 desc = irq_to_desc(irq);
1148                 if (!desc)
1149                         continue;
1150 
1151                 raw_spin_lock_irqsave(&desc->lock, flags);
1152 
1153                 chip = irq_data_get_irq_chip(&desc->irq_data);
1154                 if (chip && chip->irq_cpu_online &&
1155                     (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1156                      !irqd_irq_disabled(&desc->irq_data)))
1157                         chip->irq_cpu_online(&desc->irq_data);
1158 
1159                 raw_spin_unlock_irqrestore(&desc->lock, flags);
1160         }
1161 }
1162 
1163 /**
1164  *      irq_cpu_offline - Invoke all irq_cpu_offline functions.
1165  *
1166  *      Iterate through all irqs and invoke the chip.irq_cpu_offline()
1167  *      for each.
1168  */
1169 void irq_cpu_offline(void)
1170 {
1171         struct irq_desc *desc;
1172         struct irq_chip *chip;
1173         unsigned long flags;
1174         unsigned int irq;
1175 
1176         for_each_active_irq(irq) {
1177                 desc = irq_to_desc(irq);
1178                 if (!desc)
1179                         continue;
1180 
1181                 raw_spin_lock_irqsave(&desc->lock, flags);
1182 
1183                 chip = irq_data_get_irq_chip(&desc->irq_data);
1184                 if (chip && chip->irq_cpu_offline &&
1185                     (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1186                      !irqd_irq_disabled(&desc->irq_data)))
1187                         chip->irq_cpu_offline(&desc->irq_data);
1188 
1189                 raw_spin_unlock_irqrestore(&desc->lock, flags);
1190         }
1191 }
1192 
1193 #ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
1194 
1195 #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
1196 /**
1197  *      handle_fasteoi_ack_irq - irq handler for edge hierarchy
1198  *      stacked on transparent controllers
1199  *
1200  *      @desc:  the interrupt description structure for this irq
1201  *
1202  *      Like handle_fasteoi_irq(), but for use with hierarchy where
1203  *      the irq_chip also needs to have its ->irq_ack() function
1204  *      called.
1205  */
1206 void handle_fasteoi_ack_irq(struct irq_desc *desc)
1207 {
1208         struct irq_chip *chip = desc->irq_data.chip;
1209 
1210         raw_spin_lock(&desc->lock);
1211 
1212         if (!irq_may_run(desc))
1213                 goto out;
1214 
1215         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1216 
1217         /*
1218          * If it's disabled or no action is available
1219          * then mask it and get out of here:
1220          */
1221         if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1222                 desc->istate |= IRQS_PENDING;
1223                 mask_irq(desc);
1224                 goto out;
1225         }
1226 
1227         kstat_incr_irqs_this_cpu(desc);
1228         if (desc->istate & IRQS_ONESHOT)
1229                 mask_irq(desc);
1230 
1231         /* Start handling the irq */
1232         desc->irq_data.chip->irq_ack(&desc->irq_data);
1233 
1234         preflow_handler(desc);
1235         handle_irq_event(desc);
1236 
1237         cond_unmask_eoi_irq(desc, chip);
1238 
1239         raw_spin_unlock(&desc->lock);
1240         return;
1241 out:
1242         if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1243                 chip->irq_eoi(&desc->irq_data);
1244         raw_spin_unlock(&desc->lock);
1245 }
1246 EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
1247 
1248 /**
1249  *      handle_fasteoi_mask_irq - irq handler for level hierarchy
1250  *      stacked on transparent controllers
1251  *
1252  *      @desc:  the interrupt description structure for this irq
1253  *
1254  *      Like handle_fasteoi_irq(), but for use with hierarchy where
1255  *      the irq_chip also needs to have its ->irq_mask_ack() function
1256  *      called.
1257  */
1258 void handle_fasteoi_mask_irq(struct irq_desc *desc)
1259 {
1260         struct irq_chip *chip = desc->irq_data.chip;
1261 
1262         raw_spin_lock(&desc->lock);
1263         mask_ack_irq(desc);
1264 
1265         if (!irq_may_run(desc))
1266                 goto out;
1267 
1268         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1269 
1270         /*
1271          * If it's disabled or no action is available
1272          * then mask it and get out of here:
1273          */
1274         if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1275                 desc->istate |= IRQS_PENDING;
1276                 mask_irq(desc);
1277                 goto out;
1278         }
1279 
1280         kstat_incr_irqs_this_cpu(desc);
1281         if (desc->istate & IRQS_ONESHOT)
1282                 mask_irq(desc);
1283 
1284         preflow_handler(desc);
1285         handle_irq_event(desc);
1286 
1287         cond_unmask_eoi_irq(desc, chip);
1288 
1289         raw_spin_unlock(&desc->lock);
1290         return;
1291 out:
1292         if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1293                 chip->irq_eoi(&desc->irq_data);
1294         raw_spin_unlock(&desc->lock);
1295 }
1296 EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
1297 
1298 #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
1299 
1300 /**
1301  * irq_chip_set_parent_state - set the state of a parent interrupt.
1302  *
1303  * @data: Pointer to interrupt specific data
1304  * @which: State to be restored (one of IRQCHIP_STATE_*)
1305  * @val: Value corresponding to @which
1306  *
1307  * Conditionally successful: returns 0 if the underlying irqchip does not implement it.
1308  */
1309 int irq_chip_set_parent_state(struct irq_data *data,
1310                               enum irqchip_irq_state which,
1311                               bool val)
1312 {
1313         data = data->parent_data;
1314 
1315         if (!data || !data->chip->irq_set_irqchip_state)
1316                 return 0;
1317 
1318         return data->chip->irq_set_irqchip_state(data, which, val);
1319 }
1320 EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);
1321 
1322 /**
1323  * irq_chip_get_parent_state - get the state of a parent interrupt.
1324  *
1325  * @data: Pointer to interrupt specific data
1326  * @which: one of IRQCHIP_STATE_* the caller wants to know
1327  * @state: a pointer to a boolean where the state is to be stored
1328  *
1329  * Conditionally successful: returns 0 if the underlying irqchip does not implement it.
1330  */
1331 int irq_chip_get_parent_state(struct irq_data *data,
1332                               enum irqchip_irq_state which,
1333                               bool *state)
1334 {
1335         data = data->parent_data;
1336 
1337         if (!data || !data->chip->irq_get_irqchip_state)
1338                 return 0;
1339 
1340         return data->chip->irq_get_irqchip_state(data, which, state);
1341 }
1342 EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
1343 
1344 /**
1345  * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
1346  * NULL)
1347  * @data:       Pointer to interrupt specific data
1348  */
1349 void irq_chip_enable_parent(struct irq_data *data)
1350 {
1351         data = data->parent_data;
1352         if (data->chip->irq_enable)
1353                 data->chip->irq_enable(data);
1354         else
1355                 data->chip->irq_unmask(data);
1356 }
1357 EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
1358 
1359 /**
1360  * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
1361  * NULL)
1362  * @data:       Pointer to interrupt specific data
1363  */
1364 void irq_chip_disable_parent(struct irq_data *data)
1365 {
1366         data = data->parent_data;
1367         if (data->chip->irq_disable)
1368                 data->chip->irq_disable(data);
1369         else
1370                 data->chip->irq_mask(data);
1371 }
1372 EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
1373 
1374 /**
1375  * irq_chip_ack_parent - Acknowledge the parent interrupt
1376  * @data:       Pointer to interrupt specific data
1377  */
1378 void irq_chip_ack_parent(struct irq_data *data)
1379 {
1380         data = data->parent_data;
1381         data->chip->irq_ack(data);
1382 }
1383 EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
1384 
1385 /**
1386  * irq_chip_mask_parent - Mask the parent interrupt
1387  * @data:       Pointer to interrupt specific data
1388  */
1389 void irq_chip_mask_parent(struct irq_data *data)
1390 {
1391         data = data->parent_data;
1392         data->chip->irq_mask(data);
1393 }
1394 EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
1395 
1396 /**
1397  * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
1398  * @data:       Pointer to interrupt specific data
1399  */
1400 void irq_chip_mask_ack_parent(struct irq_data *data)
1401 {
1402         data = data->parent_data;
1403         data->chip->irq_mask_ack(data);
1404 }
1405 EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);
1406 
1407 /**
1408  * irq_chip_unmask_parent - Unmask the parent interrupt
1409  * @data:       Pointer to interrupt specific data
1410  */
1411 void irq_chip_unmask_parent(struct irq_data *data)
1412 {
1413         data = data->parent_data;
1414         data->chip->irq_unmask(data);
1415 }
1416 EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
1417 
1418 /**
1419  * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1420  * @data:       Pointer to interrupt specific data
1421  */
1422 void irq_chip_eoi_parent(struct irq_data *data)
1423 {
1424         data = data->parent_data;
1425         data->chip->irq_eoi(data);
1426 }
1427 EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
1428 
1429 /**
1430  * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1431  * @data:       Pointer to interrupt specific data
1432  * @dest:       The affinity mask to set
1433  * @force:      Flag to enforce setting (disable online checks)
1434  *
1435  * Conditional, as the underlying parent chip might not implement it.
1436  */
1437 int irq_chip_set_affinity_parent(struct irq_data *data,
1438                                  const struct cpumask *dest, bool force)
1439 {
1440         data = data->parent_data;
1441         if (data->chip->irq_set_affinity)
1442                 return data->chip->irq_set_affinity(data, dest, force);
1443 
1444         return -ENOSYS;
1445 }
1446 EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
1447 
1448 /**
1449  * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1450  * @data:       Pointer to interrupt specific data
1451  * @type:       IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1452  *
1453  * Conditional, as the underlying parent chip might not implement it.
1454  */
1455 int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1456 {
1457         data = data->parent_data;
1458 
1459         if (data->chip->irq_set_type)
1460                 return data->chip->irq_set_type(data, type);
1461 
1462         return -ENOSYS;
1463 }
1464 EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
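
/*
 * Usage sketch (hedged): in a hierarchical irq domain the child irq_chip can
 * delegate most operations to its parent using the *_parent helpers defined
 * above. The chip below is an illustrative assumption, not one defined in
 * the kernel.
 *
 *	static struct irq_chip foo_child_chip = {
 *		.name			= "foo-child",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_type		= irq_chip_set_type_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */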
1465 
1466 /**
1467  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1468  * @data:       Pointer to interrupt specific data
1469  *
1470  * Iterate through the domain hierarchy of the interrupt and check
1471  * whether a hw retrigger function exists. If yes, invoke it.
1472  */
1473 int irq_chip_retrigger_hierarchy(struct irq_data *data)
1474 {
1475         for (data = data->parent_data; data; data = data->parent_data)
1476                 if (data->chip && data->chip->irq_retrigger)
1477                         return data->chip->irq_retrigger(data);
1478 
1479         return 0;
1480 }
1481 
1482 /**
1483  * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1484  * @data:       Pointer to interrupt specific data
1485  * @vcpu_info:  The vcpu affinity information
1486  */
1487 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1488 {
1489         data = data->parent_data;
1490         if (data->chip->irq_set_vcpu_affinity)
1491                 return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1492 
1493         return -ENOSYS;
1494 }
1495 
1496 /**
1497  * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1498  * @data:       Pointer to interrupt specific data
1499  * @on:         Whether to set or reset the wake-up capability of this irq
1500  *
1501  * Conditional, as the underlying parent chip might not implement it.
1502  */
1503 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1504 {
1505         data = data->parent_data;
1506 
1507         if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
1508                 return 0;
1509 
1510         if (data->chip->irq_set_wake)
1511                 return data->chip->irq_set_wake(data, on);
1512 
1513         return -ENOSYS;
1514 }
1515 EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
1516 
1517 /**
1518  * irq_chip_request_resources_parent - Request resources on the parent interrupt
1519  * @data:       Pointer to interrupt specific data
1520  */
1521 int irq_chip_request_resources_parent(struct irq_data *data)
1522 {
1523         data = data->parent_data;
1524 
1525         if (data->chip->irq_request_resources)
1526                 return data->chip->irq_request_resources(data);
1527 
1528         return -ENOSYS;
1529 }
1530 EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
1531 
1532 /**
1533  * irq_chip_release_resources_parent - Release resources on the parent interrupt
1534  * @data:       Pointer to interrupt specific data
1535  */
1536 void irq_chip_release_resources_parent(struct irq_data *data)
1537 {
1538         data = data->parent_data;
1539         if (data->chip->irq_release_resources)
1540                 data->chip->irq_release_resources(data);
1541 }
1542 EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
1543 #endif
1544 
1545 /**
1546  * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
1547  * @data:       Pointer to interrupt specific data
1548  * @msg:        Pointer to the MSI message
1549  *
1550  * For hierarchical domains we find the first chip in the hierarchy
1551  * which implements the irq_compose_msi_msg callback. For non-hierarchical
1552  * domains we use the top level chip.
1553  */
1554 int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1555 {
1556         struct irq_data *pos = NULL;
1557 
1558 #ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
1559         for (; data; data = data->parent_data)
1560 #endif
1561                 if (data->chip && data->chip->irq_compose_msi_msg)
1562                         pos = data;
1563         if (!pos)
1564                 return -ENOSYS;
1565 
1566         pos->chip->irq_compose_msi_msg(pos, msg);
1567 
1568         return 0;
1569 }
1570 
1571 /**
1572  * irq_chip_pm_get - Enable power for an IRQ chip
1573  * @data:       Pointer to interrupt specific data
1574  *
1575  * Enable the power to the IRQ chip referenced by the interrupt data
1576  * structure.
1577  */
1578 int irq_chip_pm_get(struct irq_data *data)
1579 {
1580         int retval;
1581 
1582         if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
1583                 retval = pm_runtime_get_sync(data->chip->parent_device);
1584                 if (retval < 0) {
1585                         pm_runtime_put_noidle(data->chip->parent_device);
1586                         return retval;
1587                 }
1588         }
1589 
1590         return 0;
1591 }
1592 
1593 /**
1594  * irq_chip_pm_put - Disable power for an IRQ chip
1595  * @data:       Pointer to interrupt specific data
1596  *
1597  * Disable the power to the IRQ chip referenced by the interrupt data
1598  * structure. Note that power will only be disabled once this
1599  * function has been called for all IRQs that have called irq_chip_pm_get().
1600  */
1601 int irq_chip_pm_put(struct irq_data *data)
1602 {
1603         int retval = 0;
1604 
1605         if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
1606                 retval = pm_runtime_put(data->chip->parent_device);
1607 
1608         return (retval < 0) ? retval : 0;
1609 }
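
/*
 * Usage sketch (hedged): an irqchip whose registers are only reachable while
 * its provider device is runtime-resumed can point chip->parent_device at
 * that device; the interrupt request/free paths then call irq_chip_pm_get()
 * and irq_chip_pm_put() around the lifetime of each requested interrupt.
 * The foo_* names are illustrative assumptions.
 *
 *	foo->chip.name		= "foo";
 *	foo->chip.parent_device	= &pdev->dev;
 *	foo->chip.irq_mask	= foo_irq_mask;
 *	foo->chip.irq_unmask	= foo_irq_unmask;
 */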
1610 
