TOMOYO Linux Cross Reference
Linux/kernel/rcutiny.c

/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
        if (rcu_dynticks_nesting) {
                RCU_TRACE(trace_rcu_dyntick("--=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
        rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
        if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
            DYNTICK_TASK_NEST_VALUE)
                rcu_dynticks_nesting = 0;
        else
                rcu_dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
        rcu_idle_enter_common(oldval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting--;
        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
        rcu_idle_enter_common(oldval);
        local_irq_restore(flags);
}

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
        if (oldval) {
                RCU_TRACE(trace_rcu_dyntick("++=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
        if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
                rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
        else
                rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
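
/*
 * Illustrative sketch (not code from this file): an architecture's idle
 * loop is expected to bracket its low-power wait with the hooks above,
 * roughly as follows, where arch_wait_for_interrupt() is a hypothetical
 * stand-in for the arch-specific "wait for interrupt" primitive.  RCU
 * may ignore this CPU entirely between rcu_idle_enter() and
 * rcu_idle_exit(), so no RCU read-side critical sections may appear
 * in between:
 *
 *	while (!need_resched()) {
 *		rcu_idle_enter();
 *		arch_wait_for_interrupt();
 *		rcu_idle_exit();
 *	}
 */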

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting++;
        WARN_ON_ONCE(rcu_dynticks_nesting == 0);
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
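
/*
 * Illustrative call sequence (a sketch, not code from this file): the
 * irq entry/exit paths pair rcu_irq_enter() and rcu_irq_exit() around
 * each handler, so an interrupt taken from idle momentarily makes the
 * CPU visible to RCU.  handle_the_interrupt() is a hypothetical stand-in
 * for the handler invocation:
 *
 *	rcu_irq_enter();	// nesting: 0 -> 1, CPU leaves idle
 *	handle_the_interrupt();
 *	rcu_irq_exit();		// nesting: 1 -> 0, CPU re-enters idle
 *
 * A nested interrupt simply takes the count to 2 and back, which is
 * why rcu_idle_enter_common() and rcu_idle_exit_common() act only
 * when the count reaches or leaves zero.
 */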

#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
        return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
        return rcu_dynticks_nesting <= 0;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  The caller has
 * disabled irqs to avoid confusion due to interrupt handlers invoking
 * call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                return 1;
        }

        return 0;
}
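
/*
 * Sketch of the callback-list geometry assumed above (illustrative):
 * ->rcucblist heads a singly-linked list of callbacks, ->donetail
 * points to the ->next field of the last callback whose grace period
 * has elapsed (or to ->rcucblist itself if there is none), and
 * ->curtail points to the ->next field of the last callback queued.
 * With cb1 done and cb2 and cb3 still waiting:
 *
 *	rcucblist -> cb1 -> cb2 -> cb3 -> NULL
 *	              ^              ^
 *	          donetail        curtail
 *
 * Advancing donetail to curtail, as rcu_qsctr_help() does on a
 * quiescent state, marks every queued callback as ready to invoke.
 */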

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting,
 * so that both helpers are always invoked.
 */
void rcu_sched_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user || rcu_is_cpu_rrupt_from_idle())
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
        rcu_preempt_check_callbacks();
}
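
/*
 * Illustrative call path (a sketch, not code from this file): the
 * scheduling-clock path, e.g. update_process_times(), is expected to
 * hand each tick to RCU roughly as
 *
 *	rcu_check_callbacks(smp_processor_id(), user_tick);
 *
 * where user_tick is nonzero when the tick interrupted user-mode
 * execution, which is by definition a quiescent state for rcu_sched.
 */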

/*
 * Invoke the RCU callbacks whose grace period has elapsed on the
 * specified rcu_ctrlblk structure.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        char *rn = NULL;
        struct rcu_head *next, *list;
        unsigned long flags;
        RCU_TRACE(int cb_count = 0);

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail) {
                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
                                              ACCESS_ONCE(rcp->rcucblist),
                                              need_resched(),
                                              is_idle_task(current),
                                              rcu_is_callbacks_kthread()));
                return;
        }

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        RCU_TRACE(rn = rcp->name);
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                __rcu_reclaim(rn, list);
                local_bh_enable();
                list = next;
                RCU_TRACE(cb_count++);
        }
        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
        RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
                                      is_idle_task(current),
                                      rcu_is_callbacks_kthread()));
}
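
/*
 * Sketch of the splice performed above (illustrative): with cb1 and
 * cb2 ready to invoke and cb3 still waiting for a grace period,
 *
 *	before:	rcucblist -> cb1 -> cb2 -> cb3 -> NULL
 *		donetail = &cb2->next, curtail = &cb3->next
 *
 *	after:	list      -> cb1 -> cb2 -> NULL		// invoked locally
 *		rcucblist -> cb3 -> NULL
 *		donetail = &rcucblist, curtail = &cb3->next
 *
 * The callbacks are then invoked with irqs enabled, with bottom halves
 * disabled around each invocation.
 */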

static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
        rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_sched() in RCU read-side critical section");
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
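
/*
 * Example update-side usage (illustrative; gp, mylock, newp, and oldp
 * are hypothetical, not part of this file):
 *
 *	spin_lock(&mylock);
 *	oldp = rcu_dereference_protected(gp, lockdep_is_held(&mylock));
 *	rcu_assign_pointer(gp, newp);
 *	spin_unlock(&mylock);
 *	synchronize_sched();	// wait for pre-existing readers to finish
 *	kfree(oldp);
 */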

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
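
/*
 * Example usage (illustrative; struct foo, foo_reclaim(), and old_fp
 * are hypothetical, not part of this file):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	call_rcu_sched(&old_fp->rcu, foo_reclaim);
 *
 * Unlike synchronize_sched(), call_rcu_sched() never blocks, so it is
 * safe to use where sleeping is forbidden.
 */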

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

