Linux/kernel/irq_work.c

/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

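/*
 * Example (editor's sketch, not part of this file): typical use of the
 * framework. An entry is queued from any context, including NMI, and its
 * callback later runs in hardirq context. my_work, my_func and
 * some_nmi_path are hypothetical names.
 */
static void my_func(struct irq_work *entry)
{
        pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work my_work = { .func = my_func };

static void some_nmi_path(void)
{
        irq_work_queue(&my_work);       /* NMI-safe; no-op if already pending */
}
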
/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start from an optimistic guess at the current flags, but only
         * trust a flag value once a cmpxchg() result has confirmed it.
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_CLAIMED;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}

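/*
 * Example (editor's sketch, not part of this file): the claim above is
 * what makes double-queueing cheap. Queueing an already-pending entry
 * fails the claim and is a no-op. queue_twice() is a hypothetical helper
 * and assumes @w is idle on entry.
 */
static void queue_twice(struct irq_work *w)
{
        bool first  = irq_work_queue(w);        /* claims PENDING|BUSY, enqueues */
        bool second = irq_work_queue(w);        /* claim fails: already PENDING */

        WARN_ON(!first || second);              /* expect true, then false */
}
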
void __weak arch_irq_work_raise(void)
{
        /*
         * Architectures with no way to self-interrupt fall back to
         * running the work from the next timer tick.
         */
}

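/*
 * Example (editor's sketch): an architecture overrides the weak stub by
 * providing its own arch_irq_work_raise() that raises a self-IPI. x86 of
 * this era does roughly the following in arch/x86/kernel/irq_work.c
 * (reproduced from memory; treat as a sketch, not a verbatim copy):
 */
#ifdef CONFIG_X86_LOCAL_APIC
void arch_irq_work_raise(void)
{
        if (!arch_irq_work_has_interrupt())
                return;

        apic->send_IPI_self(IRQ_WORK_VECTOR);   /* interrupt this CPU */
        apic_wait_icr_idle();
}
#endif
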
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

#ifdef CONFIG_SMP

        /* The arch's remote IPI send/receive backends aren't NMI-safe */
        WARN_ON_ONCE(in_nmi());

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
                arch_send_call_function_single_ipi(cpu);

#else /* #ifdef CONFIG_SMP */
        irq_work_queue(work);
#endif /* #else #ifdef CONFIG_SMP */

        return true;
}

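/*
 * Example (editor's sketch): poke a specific remote CPU. Unlike
 * irq_work_queue(), this path must not be used from NMI context (see the
 * WARN_ON_ONCE above). remote_work and kick_cpu are hypothetical names;
 * my_func is reused from the first sketch.
 */
static struct irq_work remote_work = { .func = my_func };

static void kick_cpu(int cpu)
{
        if (!irq_work_queue_on(&remote_work, cpu))
                pr_debug("remote_work already pending somewhere\n");
}
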
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();

        /*
         * If the work is "lazy", defer it to the next timer tick and
         * only raise an interrupt when the tick is stopped.
         */
        if (work->flags & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }

        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

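/*
 * Example (editor's sketch): lazy work rides the next timer tick instead
 * of forcing a self-IPI; printk of this era uses the same pattern for its
 * klogd wakeup. lazy_work and lazy_func are hypothetical names.
 */
static void lazy_func(struct irq_work *entry)
{
        /* runs from irq_work_tick() on the next tick */
}

static struct irq_work lazy_work = {
        .flags  = IRQ_WORK_LAZY,
        .func   = lazy_func,
};
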
bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        /*
         * The raised list needs the tick only on architectures without
         * a dedicated irq_work interrupt; the lazy list always needs it.
         */
        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, llnode) {
                /*
                 * Clear the PENDING bit; from this point on the @work
                 * can be re-used. Make the change visible immediately,
                 * so that another CPU claiming this work while the
                 * callback runs doesn't wrongly assume we will still
                 * see and handle its update.
                 */
                flags = work->flags & ~IRQ_WORK_PENDING;
                xchg(&work->flags, flags);

                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
}

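/*
 * Example (editor's sketch): because PENDING is cleared before the
 * callback is invoked, a callback may re-queue its own entry, e.g. to
 * poll again later. self_arm and keep_polling are hypothetical names.
 */
static bool keep_polling(void);         /* hypothetical condition */

static void self_arm(struct irq_work *entry)
{
        if (keep_polling())
                irq_work_queue(entry);  /* claims PENDING again, re-enqueues */
}
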
/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        /*
         * From the timer tick: run raised work here only if the arch
         * has no dedicated irq_work interrupt; lazy work always runs
         * from here.
         */
        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work: ensure that by the time this
 * returns, the entry is no longer in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

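/*
 * Example (editor's sketch): the canonical caller of irq_work_sync() is
 * teardown code that must wait out a still-running callback before
 * freeing the containing object. struct foo and foo_destroy are
 * hypothetical; the sketch assumes <linux/slab.h> for kfree().
 */
struct foo {
        struct irq_work work;
        /* ... */
};

static void foo_destroy(struct foo *f)
{
        irq_work_sync(&f->work);        /* spin until IRQ_WORK_BUSY clears */
        kfree(f);
}
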
