Linux/kernel/rcu/update.c

  1 /*
  2  * Read-Copy Update mechanism for mutual exclusion
  3  *
  4  * This program is free software; you can redistribute it and/or modify
  5  * it under the terms of the GNU General Public License as published by
  6  * the Free Software Foundation; either version 2 of the License, or
  7  * (at your option) any later version.
  8  *
  9  * This program is distributed in the hope that it will be useful,
 10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12  * GNU General Public License for more details.
 13  *
 14  * You should have received a copy of the GNU General Public License
 15  * along with this program; if not, you can access it online at
 16  * http://www.gnu.org/licenses/gpl-2.0.html.
 17  *
 18  * Copyright IBM Corporation, 2001
 19  *
 20  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 21  *          Manfred Spraul <manfred@colorfullife.com>
 22  *
 23  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 24  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 25  * Papers:
 26  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 27  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 28  *
 29  * For detailed explanation of Read-Copy Update mechanism see -
 30  *              http://lse.sourceforge.net/locking/rcupdate.html
 31  *
 32  */
 33 #include <linux/types.h>
 34 #include <linux/kernel.h>
 35 #include <linux/init.h>
 36 #include <linux/spinlock.h>
 37 #include <linux/smp.h>
 38 #include <linux/interrupt.h>
 39 #include <linux/sched/signal.h>
 40 #include <linux/sched/debug.h>
 41 #include <linux/atomic.h>
 42 #include <linux/bitops.h>
 43 #include <linux/percpu.h>
 44 #include <linux/notifier.h>
 45 #include <linux/cpu.h>
 46 #include <linux/mutex.h>
 47 #include <linux/export.h>
 48 #include <linux/hardirq.h>
 49 #include <linux/delay.h>
 50 #include <linux/moduleparam.h>
 51 #include <linux/kthread.h>
 52 #include <linux/tick.h>
 53 #include <linux/rcupdate_wait.h>
 54 #include <linux/sched/isolation.h>
 55 
 56 #define CREATE_TRACE_POINTS
 57 
 58 #include "rcu.h"
 59 
 60 #ifdef MODULE_PARAM_PREFIX
 61 #undef MODULE_PARAM_PREFIX
 62 #endif
 63 #define MODULE_PARAM_PREFIX "rcupdate."
 64 
 65 #ifndef CONFIG_TINY_RCU
 66 extern int rcu_expedited; /* from sysctl */
 67 module_param(rcu_expedited, int, 0);
 68 extern int rcu_normal; /* from sysctl */
 69 module_param(rcu_normal, int, 0);
 70 static int rcu_normal_after_boot;
 71 module_param(rcu_normal_after_boot, int, 0);
 72 #endif /* #ifndef CONFIG_TINY_RCU */
 73 
 74 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 75 /**
 76  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 77  *
 78  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 79  * RCU-sched read-side critical section.  In the absence of
 80  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 81  * critical section unless it can prove otherwise.  Note that disabling
 82  * preemption (including disabling irqs) counts as an RCU-sched
 83  * read-side critical section.  This is useful for debug checks in functions
 84  * that require that they be called within an RCU-sched read-side
 85  * critical section.
 86  *
 87  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 88  * and while lockdep is disabled.
 89  *
 90  * Note that if the CPU is in the idle loop from an RCU point of
 91  * view (ie: that we are in the section between rcu_idle_enter() and
 92  * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 93  * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 94  * that are in such a section, considering them to be in an extended
 95  * quiescent state, so such a CPU is effectively never in an RCU read-side
 96  * critical section regardless of what RCU primitives it invokes.  This
 97  * state of affairs is required: we need to keep an RCU-free window in
 98  * idle where the CPU may possibly enter a low-power mode. This way we
 99  * can report an extended quiescent state to other CPUs that have started
100  * a grace period. Otherwise we would delay any grace period as long as
101  * we run in the idle task.
102  *
103  * Similarly, we avoid claiming an SRCU read lock held if the current
104  * CPU is offline.
105  */
106 int rcu_read_lock_sched_held(void)
107 {
108         int lockdep_opinion = 0;
109 
110         if (!debug_lockdep_rcu_enabled())
111                 return 1;
112         if (!rcu_is_watching())
113                 return 0;
114         if (!rcu_lockdep_current_cpu_online())
115                 return 0;
116         if (debug_locks)
117                 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
118         return lockdep_opinion || !preemptible();
119 }
120 EXPORT_SYMBOL(rcu_read_lock_sched_held);
121 #endif
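
As a usage sketch only (not part of this file; the structure and function names are hypothetical), a subsystem handing out pointers protected by RCU-sched can fold rcu_read_lock_sched_held() into a lockdep-checked accessor:

#include <linux/rcupdate.h>

/* Hypothetical example: RCU-sched protected pointer with a checked accessor. */
struct foo_conf {
        int threshold;
};

static struct foo_conf __rcu *foo_active_conf;

static int foo_get_threshold(void)
{
        struct foo_conf *conf;

        /* Complain (under lockdep) unless the caller has preemption disabled. */
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "foo_get_threshold() needs rcu_read_lock_sched()");
        conf = rcu_dereference_sched(foo_active_conf);
        return conf ? conf->threshold : 0;
}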
122 
123 #ifndef CONFIG_TINY_RCU
124 
125 /*
126  * Should expedited grace-period primitives always fall back to their
127  * non-expedited counterparts?  Intended for use within RCU.  Note
128  * that if the user specifies both rcu_expedited and rcu_normal, then
129  * rcu_normal wins.  (Except during the time period during boot from
130  * when the first task is spawned until the rcu_set_runtime_mode()
131  * core_initcall() is invoked, at which point everything is expedited.)
132  */
133 bool rcu_gp_is_normal(void)
134 {
135         return READ_ONCE(rcu_normal) &&
136                rcu_scheduler_active != RCU_SCHEDULER_INIT;
137 }
138 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
139 
140 static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
141 
142 /*
143  * Should normal grace-period primitives be expedited?  Intended for
144  * use within RCU.  Note that this function takes the rcu_expedited
145  * sysfs/boot variable and rcu_scheduler_active into account as well
146  * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
147  * until rcu_gp_is_expedited() returns false is a -really- bad idea.
148  */
149 bool rcu_gp_is_expedited(void)
150 {
151         return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
152                rcu_scheduler_active == RCU_SCHEDULER_INIT;
153 }
154 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
155 
156 /**
157  * rcu_expedite_gp - Expedite future RCU grace periods
158  *
159  * After a call to this function, future calls to synchronize_rcu() and
160  * friends act as the corresponding synchronize_rcu_expedited() function
161  * had instead been called.
162  */
163 void rcu_expedite_gp(void)
164 {
165         atomic_inc(&rcu_expedited_nesting);
166 }
167 EXPORT_SYMBOL_GPL(rcu_expedite_gp);
168 
169 /**
170  * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
171  *
172  * Undo a prior call to rcu_expedite_gp().  If all prior calls to
173  * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
174  * and if the rcu_expedited sysfs/boot parameter is not set, then all
175  * subsequent calls to synchronize_rcu() and friends will return to
176  * their normal non-expedited behavior.
177  */
178 void rcu_unexpedite_gp(void)
179 {
180         atomic_dec(&rcu_expedited_nesting);
181 }
182 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
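
A minimal sketch of the intended pairing (the caller is hypothetical): a latency-sensitive phase is bracketed by these calls, and the nesting counter lets concurrent users compose correctly:

#include <linux/rcupdate.h>

/* Hypothetical example: expedite grace periods around a short critical phase. */
static void foo_fast_reconfigure(void)
{
        rcu_expedite_gp();      /* Our vote nests with any other expediters. */
        synchronize_rcu();      /* Now behaves like synchronize_rcu_expedited(). */
        rcu_unexpedite_gp();    /* Withdraw our vote; others may still expedite. */
}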
183 
184 /*
185  * Inform RCU of the end of the in-kernel boot sequence.
186  */
187 void rcu_end_inkernel_boot(void)
188 {
189         rcu_unexpedite_gp();
190         if (rcu_normal_after_boot)
191                 WRITE_ONCE(rcu_normal, 1);
192 }
193 
194 #endif /* #ifndef CONFIG_TINY_RCU */
195 
196 /*
197  * Test each non-SRCU synchronous grace-period wait API.  This is
198  * useful just after a change in mode for these primitives, and
199  * during early boot.
200  */
201 void rcu_test_sync_prims(void)
202 {
203         if (!IS_ENABLED(CONFIG_PROVE_RCU))
204                 return;
205         synchronize_rcu();
206         synchronize_rcu_expedited();
207 }
208 
209 #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
210 
211 /*
212  * Switch to run-time mode once RCU has fully initialized.
213  */
214 static int __init rcu_set_runtime_mode(void)
215 {
216         rcu_test_sync_prims();
217         rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
218         rcu_test_sync_prims();
219         return 0;
220 }
221 core_initcall(rcu_set_runtime_mode);
222 
223 #endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */
224 
225 #ifdef CONFIG_DEBUG_LOCK_ALLOC
226 static struct lock_class_key rcu_lock_key;
227 struct lockdep_map rcu_lock_map =
228         STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
229 EXPORT_SYMBOL_GPL(rcu_lock_map);
230 
231 static struct lock_class_key rcu_bh_lock_key;
232 struct lockdep_map rcu_bh_lock_map =
233         STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
234 EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
235 
236 static struct lock_class_key rcu_sched_lock_key;
237 struct lockdep_map rcu_sched_lock_map =
238         STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
239 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
240 
241 static struct lock_class_key rcu_callback_key;
242 struct lockdep_map rcu_callback_map =
243         STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
244 EXPORT_SYMBOL_GPL(rcu_callback_map);
245 
246 int notrace debug_lockdep_rcu_enabled(void)
247 {
248         return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
249                current->lockdep_recursion == 0;
250 }
251 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
252 
253 /**
254  * rcu_read_lock_held() - might we be in RCU read-side critical section?
255  *
256  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
257  * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
258  * this assumes we are in an RCU read-side critical section unless it can
259  * prove otherwise.  This is useful for debug checks in functions that
260  * require that they be called within an RCU read-side critical section.
261  *
262  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
263  * and while lockdep is disabled.
264  *
265  * Note that rcu_read_lock() and the matching rcu_read_unlock() must
266  * occur in the same context, for example, it is illegal to invoke
267  * rcu_read_unlock() in process context if the matching rcu_read_lock()
268  * was invoked from within an irq handler.
269  *
270  * Note that rcu_read_lock() is disallowed if the CPU is either idle or
271  * offline from an RCU perspective, so check for those as well.
272  */
273 int rcu_read_lock_held(void)
274 {
275         if (!debug_lockdep_rcu_enabled())
276                 return 1;
277         if (!rcu_is_watching())
278                 return 0;
279         if (!rcu_lockdep_current_cpu_online())
280                 return 0;
281         return lock_is_held(&rcu_lock_map);
282 }
283 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
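
As an illustrative sketch (hypothetical names), the typical consumer is a debug check in a reader that must run under rcu_read_lock():

#include <linux/rculist.h>

/* Hypothetical example: list walk that requires rcu_read_lock(). */
struct foo_item {
        struct list_head list;
        int key;
};

static struct foo_item *foo_find(struct list_head *head, int key)
{
        struct foo_item *p;

        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "foo_find() must be called under rcu_read_lock()");
        list_for_each_entry_rcu(p, head, list)
                if (p->key == key)
                        return p;
        return NULL;
}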
284 
285 /**
286  * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
287  *
288  * Check for bottom half being disabled, which covers both the
289  * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
290  * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
291  * will show the situation.  This is useful for debug checks in functions
292  * that require that they be called within an RCU read-side critical
293  * section.
294  *
295  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
296  *
297  * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
298  * offline from an RCU perspective, so check for those as well.
299  */
300 int rcu_read_lock_bh_held(void)
301 {
302         if (!debug_lockdep_rcu_enabled())
303                 return 1;
304         if (!rcu_is_watching())
305                 return 0;
306         if (!rcu_lockdep_current_cpu_online())
307                 return 0;
308         return in_softirq() || irqs_disabled();
309 }
310 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
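
A brief sketch (hypothetical names): a softirq-time accessor for an RCU-bh protected pointer can assert this helper before dereferencing:

#include <linux/rcupdate.h>

/* Hypothetical example: accessor intended to run with BH disabled. */
struct foo_rx_params {
        int budget;
};

static struct foo_rx_params __rcu *foo_rx_params_ptr;

static struct foo_rx_params *foo_rx_params_deref(void)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
                         "foo_rx_params_deref() needs rcu_read_lock_bh()");
        return rcu_dereference_bh(foo_rx_params_ptr);
}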
311 
312 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
313 
314 /**
315  * wakeme_after_rcu() - Callback function to awaken a task after grace period
316  * @head: Pointer to rcu_head member within rcu_synchronize structure
317  *
318  * Awaken the corresponding task now that a grace period has elapsed.
319  */
320 void wakeme_after_rcu(struct rcu_head *head)
321 {
322         struct rcu_synchronize *rcu;
323 
324         rcu = container_of(head, struct rcu_synchronize, head);
325         complete(&rcu->completion);
326 }
327 EXPORT_SYMBOL_GPL(wakeme_after_rcu);
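
A sketch of how this callback is typically consumed (the function name here is hypothetical); this open-codes the single-flavor pattern that __wait_rcu_gp() below generalizes to an array of grace-period types:

#include <linux/rcupdate_wait.h>

/* Hypothetical example: hand-rolled synchronous grace-period wait. */
static void foo_synchronize(void)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        call_rcu(&rcu.head, wakeme_after_rcu);  /* Queue the wakeup callback. */
        wait_for_completion(&rcu.completion);   /* Sleep until the GP ends. */
        destroy_rcu_head_on_stack(&rcu.head);
}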
328 
329 void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
330                    struct rcu_synchronize *rs_array)
331 {
332         int i;
333         int j;
334 
335         /* Initialize and register callbacks for each crcu_array element. */
336         for (i = 0; i < n; i++) {
337                 if (checktiny &&
338                     (crcu_array[i] == call_rcu ||
339                      crcu_array[i] == call_rcu_bh)) {
340                         might_sleep();
341                         continue;
342                 }
343                 init_rcu_head_on_stack(&rs_array[i].head);
344                 init_completion(&rs_array[i].completion);
345                 for (j = 0; j < i; j++)
346                         if (crcu_array[j] == crcu_array[i])
347                                 break;
348                 if (j == i)
349                         (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
350         }
351 
352         /* Wait for all callbacks to be invoked. */
353         for (i = 0; i < n; i++) {
354                 if (checktiny &&
355                     (crcu_array[i] == call_rcu ||
356                      crcu_array[i] == call_rcu_bh))
357                         continue;
358                 for (j = 0; j < i; j++)
359                         if (crcu_array[j] == crcu_array[i])
360                                 break;
361                 if (j == i)
362                         wait_for_completion(&rs_array[i].completion);
363                 destroy_rcu_head_on_stack(&rs_array[i].head);
364         }
365 }
366 EXPORT_SYMBOL_GPL(__wait_rcu_gp);
367 
368 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
369 void init_rcu_head(struct rcu_head *head)
370 {
371         debug_object_init(head, &rcuhead_debug_descr);
372 }
373 EXPORT_SYMBOL_GPL(init_rcu_head);
374 
375 void destroy_rcu_head(struct rcu_head *head)
376 {
377         debug_object_free(head, &rcuhead_debug_descr);
378 }
379 EXPORT_SYMBOL_GPL(destroy_rcu_head);
380 
381 static bool rcuhead_is_static_object(void *addr)
382 {
383         return true;
384 }
385 
386 /**
387  * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
388  * @head: pointer to rcu_head structure to be initialized
389  *
390  * This function informs debugobjects of a new rcu_head structure that
391  * has been allocated as an auto variable on the stack.  This function
392  * is not required for rcu_head structures that are statically defined or
393  * that are dynamically allocated on the heap.  This function has no
394  * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
395  */
396 void init_rcu_head_on_stack(struct rcu_head *head)
397 {
398         debug_object_init_on_stack(head, &rcuhead_debug_descr);
399 }
400 EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
401 
402 /**
403  * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
404  * @head: pointer to rcu_head structure to be initialized
405  *
406  * This function informs debugobjects that an on-stack rcu_head structure
407  * is about to go out of scope.  As with init_rcu_head_on_stack(), this
408  * function is not required for rcu_head structures that are statically
409  * defined or that are dynamically allocated on the heap.  Also as with
410  * init_rcu_head_on_stack(), this function has no effect for
411  * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
412  */
413 void destroy_rcu_head_on_stack(struct rcu_head *head)
414 {
415         debug_object_free(head, &rcuhead_debug_descr);
416 }
417 EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
418 
419 struct debug_obj_descr rcuhead_debug_descr = {
420         .name = "rcu_head",
421         .is_static_object = rcuhead_is_static_object,
422 };
423 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
424 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
425 
426 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
427 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
428                                unsigned long secs,
429                                unsigned long c_old, unsigned long c)
430 {
431         trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
432 }
433 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
434 #else
435 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
436         do { } while (0)
437 #endif
438 
439 #ifdef CONFIG_RCU_STALL_COMMON
440 
441 #ifdef CONFIG_PROVE_RCU
442 #define RCU_STALL_DELAY_DELTA          (5 * HZ)
443 #else
444 #define RCU_STALL_DELAY_DELTA          0
445 #endif
446 
447 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
448 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
449 static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
450 
451 module_param(rcu_cpu_stall_suppress, int, 0644);
452 module_param(rcu_cpu_stall_timeout, int, 0644);
453 
454 int rcu_jiffies_till_stall_check(void)
455 {
456         int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
457 
458         /*
459          * Limit check must be consistent with the Kconfig limits
460          * for CONFIG_RCU_CPU_STALL_TIMEOUT.
461          */
462         if (till_stall_check < 3) {
463                 WRITE_ONCE(rcu_cpu_stall_timeout, 3);
464                 till_stall_check = 3;
465         } else if (till_stall_check > 300) {
466                 WRITE_ONCE(rcu_cpu_stall_timeout, 300);
467                 till_stall_check = 300;
468         }
469         return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
470 }
471 EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
472 
473 void rcu_sysrq_start(void)
474 {
475         if (!rcu_cpu_stall_suppress)
476                 rcu_cpu_stall_suppress = 2;
477 }
478 
479 void rcu_sysrq_end(void)
480 {
481         if (rcu_cpu_stall_suppress == 2)
482                 rcu_cpu_stall_suppress = 0;
483 }
484 
485 static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
486 {
487         rcu_cpu_stall_suppress = 1;
488         return NOTIFY_DONE;
489 }
490 
491 static struct notifier_block rcu_panic_block = {
492         .notifier_call = rcu_panic,
493 };
494 
495 static int __init check_cpu_stall_init(void)
496 {
497         atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
498         return 0;
499 }
500 early_initcall(check_cpu_stall_init);
501 
502 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
503 
504 #ifdef CONFIG_TASKS_RCU
505 
506 /*
507  * Simple variant of RCU whose quiescent states are voluntary context
508  * switch, cond_resched_rcu_qs(), user-space execution, and idle.
509  * As such, grace periods can take one good long time.  There are no
510  * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
511  * because this implementation is intended to get the system into a safe
512  * state for some of the manipulations involved in tracing and the like.
513  * Finally, this implementation does not support high call_rcu_tasks()
514  * rates from multiple CPUs.  If this is required, per-CPU callback lists
515  * will be needed.
516  */
517 
518 /* Global list of callbacks and associated lock. */
519 static struct rcu_head *rcu_tasks_cbs_head;
520 static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
521 static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
522 static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
523 
524 /* Track exiting tasks in order to allow them to be waited for. */
525 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
526 
527 /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
528 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
529 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
530 module_param(rcu_task_stall_timeout, int, 0644);
531 
532 static struct task_struct *rcu_tasks_kthread_ptr;
533 
534 /**
535  * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
536  * @rhp: structure to be used for queueing the RCU updates.
537  * @func: actual callback function to be invoked after the grace period
538  *
539  * The callback function will be invoked some time after a full grace
540  * period elapses, in other words after all currently executing RCU
541  * read-side critical sections have completed. call_rcu_tasks() assumes
542  * that the read-side critical sections end at a voluntary context
543  * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
544  * or transition to usermode execution.  As such, there are no read-side
545  * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
546  * this primitive is intended to determine that all tasks have passed
547  * through a safe state, not so much for data-structure synchronization.
548  *
549  * See the description of call_rcu() for more detailed information on
550  * memory ordering guarantees.
551  */
552 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
553 {
554         unsigned long flags;
555         bool needwake;
556 
557         rhp->next = NULL;
558         rhp->func = func;
559         raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
560         needwake = !rcu_tasks_cbs_head;
561         *rcu_tasks_cbs_tail = rhp;
562         rcu_tasks_cbs_tail = &rhp->next;
563         raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
564         /* We can't create the thread unless interrupts are enabled. */
565         if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
566                 wake_up(&rcu_tasks_cbs_wq);
567 }
568 EXPORT_SYMBOL_GPL(call_rcu_tasks);
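
As a usage sketch (the structure and function names are hypothetical), a tracer that must keep a trampoline alive until no task can still be executing in it defers the free through call_rcu_tasks():

#include <linux/slab.h>

/* Hypothetical example: defer freeing a trampoline past a tasks-RCU GP. */
struct foo_tramp {
        struct rcu_head rh;
        void *insns;
};

static void foo_tramp_free_cb(struct rcu_head *rhp)
{
        struct foo_tramp *tr = container_of(rhp, struct foo_tramp, rh);

        kfree(tr->insns);
        kfree(tr);
}

static void foo_tramp_retire(struct foo_tramp *tr)
{
        /* After the callback fires, no task is still running in tr->insns. */
        call_rcu_tasks(&tr->rh, foo_tramp_free_cb);
}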
569 
570 /**
571  * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
572  *
573  * Control will return to the caller some time after a full rcu-tasks
574  * grace period has elapsed, in other words after all currently
575  * executing rcu-tasks read-side critical sections have completed.  These
576  * read-side critical sections are delimited by calls to schedule(),
577  * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
578  * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
579  *
580  * This is a very specialized primitive, intended only for a few uses in
581  * tracing and other situations requiring manipulation of function
582  * preambles and profiling hooks.  The synchronize_rcu_tasks() function
583  * is not (yet) intended for heavy use from multiple CPUs.
584  *
585  * Note that this guarantee implies further memory-ordering guarantees.
586  * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
587  * each CPU is guaranteed to have executed a full memory barrier since the
588  * end of its last RCU-tasks read-side critical section whose beginning
589  * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
590  * having an RCU-tasks read-side critical section that extends beyond
591  * the return from synchronize_rcu_tasks() is guaranteed to have executed
592  * a full memory barrier after the beginning of synchronize_rcu_tasks()
593  * and before the beginning of that RCU-tasks read-side critical section.
594  * Note that these guarantees include CPUs that are offline, idle, or
595  * executing in user mode, as well as CPUs that are executing in the kernel.
596  *
597  * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
598  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
599  * to have executed a full memory barrier during the execution of
600  * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
601  * (but again only if the system has more than one CPU).
602  */
603 void synchronize_rcu_tasks(void)
604 {
605         /* Complain if the scheduler has not started.  */
606         RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
607                          "synchronize_rcu_tasks called too soon");
608 
609         /* Wait for the grace period. */
610         wait_rcu_gp(call_rcu_tasks);
611 }
612 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
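
The synchronous form of the same idea, as a short sketch (hypothetical names): tear the trampoline down only after every task is known to have left it:

#include <linux/vmalloc.h>

/* Hypothetical example: synchronous trampoline teardown. */
static void foo_tramp_unmap(void *insns)
{
        synchronize_rcu_tasks();        /* Every task has voluntarily switched. */
        vfree(insns);                   /* Now safe to free the executable mapping. */
}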
613 
614 /**
615  * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
616  *
617  * Although the current implementation is guaranteed to wait, it is not
618  * obligated to, for example, if there are no pending callbacks.
619  */
620 void rcu_barrier_tasks(void)
621 {
622         /* There is only one callback queue, so this is easy.  ;-) */
623         synchronize_rcu_tasks();
624 }
625 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
626 
627 /* See if tasks are still holding out, complain if so. */
628 static void check_holdout_task(struct task_struct *t,
629                                bool needreport, bool *firstreport)
630 {
631         int cpu;
632 
633         if (!READ_ONCE(t->rcu_tasks_holdout) ||
634             t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
635             !READ_ONCE(t->on_rq) ||
636             (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
637              !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
638                 WRITE_ONCE(t->rcu_tasks_holdout, false);
639                 list_del_init(&t->rcu_tasks_holdout_list);
640                 put_task_struct(t);
641                 return;
642         }
643         rcu_request_urgent_qs_task(t);
644         if (!needreport)
645                 return;
646         if (*firstreport) {
647                 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
648                 *firstreport = false;
649         }
650         cpu = task_cpu(t);
651         pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
652                  t, ".I"[is_idle_task(t)],
653                  "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
654                  t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
655                  t->rcu_tasks_idle_cpu, cpu);
656         sched_show_task(t);
657 }
658 
659 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
660 static int __noreturn rcu_tasks_kthread(void *arg)
661 {
662         unsigned long flags;
663         struct task_struct *g, *t;
664         unsigned long lastreport;
665         struct rcu_head *list;
666         struct rcu_head *next;
667         LIST_HEAD(rcu_tasks_holdouts);
668         int fract;
669 
670         /* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
671         housekeeping_affine(current, HK_FLAG_RCU);
672 
673         /*
674          * Each pass through the following loop makes one check for
675          * newly arrived callbacks, and, if there are some, waits for
676          * one RCU-tasks grace period and then invokes the callbacks.
677          * This loop is terminated by the system going down.  ;-)
678          */
679         for (;;) {
680 
681                 /* Pick up any new callbacks. */
682                 raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
683                 list = rcu_tasks_cbs_head;
684                 rcu_tasks_cbs_head = NULL;
685                 rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
686                 raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
687 
688                 /* If there were none, wait a bit and start over. */
689                 if (!list) {
690                         wait_event_interruptible(rcu_tasks_cbs_wq,
691                                                  rcu_tasks_cbs_head);
692                         if (!rcu_tasks_cbs_head) {
693                                 WARN_ON(signal_pending(current));
694                                 schedule_timeout_interruptible(HZ/10);
695                         }
696                         continue;
697                 }
698 
699                 /*
700                  * Wait for all pre-existing t->on_rq and t->nvcsw
701                  * transitions to complete.  Invoking synchronize_rcu()
702                  * suffices because all these transitions occur with
703                  * interrupts disabled.  Without this synchronize_rcu(),
704                  * a read-side critical section that started before the
705                  * grace period might be incorrectly seen as having started
706                  * after the grace period.
707                  *
708                  * This synchronize_rcu() also dispenses with the
709                  * need for a memory barrier on the first store to
710                  * ->rcu_tasks_holdout, as it forces the store to happen
711                  * after the beginning of the grace period.
712                  */
713                 synchronize_rcu();
714 
715                 /*
716                  * There were callbacks, so we need to wait for an
717                  * RCU-tasks grace period.  Start off by scanning
718                  * the task list for tasks that are not already
719                  * voluntarily blocked.  Mark these tasks and make
720                  * a list of them in rcu_tasks_holdouts.
721                  */
722                 rcu_read_lock();
723                 for_each_process_thread(g, t) {
724                         if (t != current && READ_ONCE(t->on_rq) &&
725                             !is_idle_task(t)) {
726                                 get_task_struct(t);
727                                 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
728                                 WRITE_ONCE(t->rcu_tasks_holdout, true);
729                                 list_add(&t->rcu_tasks_holdout_list,
730                                          &rcu_tasks_holdouts);
731                         }
732                 }
733                 rcu_read_unlock();
734 
735                 /*
736                  * Wait for tasks that are in the process of exiting.
737                  * This does only part of the job, ensuring that all
738                  * tasks that were previously exiting reach the point
739                  * where they have disabled preemption, allowing the
740                  * later synchronize_rcu() to finish the job.
741                  */
742                 synchronize_srcu(&tasks_rcu_exit_srcu);
743 
744                 /*
745                  * Each pass through the following loop scans the list
746                  * of holdout tasks, removing any that are no longer
747                  * holdouts.  When the list is empty, we are done.
748                  */
749                 lastreport = jiffies;
750 
751                 /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
752                 fract = 10;
753 
754                 for (;;) {
755                         bool firstreport;
756                         bool needreport;
757                         int rtst;
758                         struct task_struct *t1;
759 
760                         if (list_empty(&rcu_tasks_holdouts))
761                                 break;
762 
763                         /* Slowly back off waiting for holdouts */
764                         schedule_timeout_interruptible(HZ/fract);
765 
766                         if (fract > 1)
767                                 fract--;
768 
769                         rtst = READ_ONCE(rcu_task_stall_timeout);
770                         needreport = rtst > 0 &&
771                                      time_after(jiffies, lastreport + rtst);
772                         if (needreport)
773                                 lastreport = jiffies;
774                         firstreport = true;
775                         WARN_ON(signal_pending(current));
776                         list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
777                                                 rcu_tasks_holdout_list) {
778                                 check_holdout_task(t, needreport, &firstreport);
779                                 cond_resched();
780                         }
781                 }
782 
783                 /*
784                  * Because ->on_rq and ->nvcsw are not guaranteed
785                  * to have full memory barriers prior to them in the
786                  * schedule() path, memory reordering on other CPUs could
787                  * cause their RCU-tasks read-side critical sections to
788                  * extend past the end of the grace period.  However,
789                  * because these ->nvcsw updates are carried out with
790                  * interrupts disabled, we can use synchronize_rcu()
791                  * to force the needed ordering on all such CPUs.
792                  *
793                  * This synchronize_rcu() also confines all
794                  * ->rcu_tasks_holdout accesses to be within the grace
795                  * period, avoiding the need for memory barriers for
796                  * ->rcu_tasks_holdout accesses.
797                  *
798                  * In addition, this synchronize_rcu() waits for exiting
799                  * tasks to complete their final preempt_disable() region
800                  * of execution, cleaning up after the synchronize_srcu()
801                  * above.
802                  */
803                 synchronize_rcu();
804 
805                 /* Invoke the callbacks. */
806                 while (list) {
807                         next = list->next;
808                         local_bh_disable();
809                         list->func(list);
810                         local_bh_enable();
811                         list = next;
812                         cond_resched();
813                 }
814                 /* Paranoid sleep to keep this from entering a tight loop */
815                 schedule_timeout_uninterruptible(HZ/10);
816         }
817 }
818 
819 /* Spawn rcu_tasks_kthread() at core_initcall() time. */
820 static int __init rcu_spawn_tasks_kthread(void)
821 {
822         struct task_struct *t;
823 
824         t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
825         BUG_ON(IS_ERR(t));
826         smp_mb(); /* Ensure others see full kthread. */
827         WRITE_ONCE(rcu_tasks_kthread_ptr, t);
828         return 0;
829 }
830 core_initcall(rcu_spawn_tasks_kthread);
831 
832 /* Do the srcu_read_lock() for the above synchronize_srcu().  */
833 void exit_tasks_rcu_start(void)
834 {
835         preempt_disable();
836         current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
837         preempt_enable();
838 }
839 
840 /* Do the srcu_read_unlock() for the above synchronize_srcu().  */
841 void exit_tasks_rcu_finish(void)
842 {
843         preempt_disable();
844         __srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
845         preempt_enable();
846 }
847 
848 #endif /* #ifdef CONFIG_TASKS_RCU */
849 
850 #ifndef CONFIG_TINY_RCU
851 
852 /*
853  * Print any non-default Tasks RCU settings.
854  */
855 static void __init rcu_tasks_bootup_oddness(void)
856 {
857 #ifdef CONFIG_TASKS_RCU
858         if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
859                 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
860         else
861                 pr_info("\tTasks RCU enabled.\n");
862 #endif /* #ifdef CONFIG_TASKS_RCU */
863 }
864 
865 #endif /* #ifndef CONFIG_TINY_RCU */
866 
867 #ifdef CONFIG_PROVE_RCU
868 
869 /*
870  * Early boot self test parameters.
871  */
872 static bool rcu_self_test;
873 module_param(rcu_self_test, bool, 0444);
874 
875 static int rcu_self_test_counter;
876 
877 static void test_callback(struct rcu_head *r)
878 {
879         rcu_self_test_counter++;
880         pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
881 }
882 
883 DEFINE_STATIC_SRCU(early_srcu);
884 
885 static void early_boot_test_call_rcu(void)
886 {
887         static struct rcu_head head;
888         static struct rcu_head shead;
889 
890         call_rcu(&head, test_callback);
891         if (IS_ENABLED(CONFIG_SRCU))
892                 call_srcu(&early_srcu, &shead, test_callback);
893 }
894 
895 void rcu_early_boot_tests(void)
896 {
897         pr_info("Running RCU self tests\n");
898 
899         if (rcu_self_test)
900                 early_boot_test_call_rcu();
901         rcu_test_sync_prims();
902 }
903 
904 static int rcu_verify_early_boot_tests(void)
905 {
906         int ret = 0;
907         int early_boot_test_counter = 0;
908 
909         if (rcu_self_test) {
910                 early_boot_test_counter++;
911                 rcu_barrier();
912                 if (IS_ENABLED(CONFIG_SRCU)) {
913                         early_boot_test_counter++;
914                         srcu_barrier(&early_srcu);
915                 }
916         }
917         if (rcu_self_test_counter != early_boot_test_counter) {
918                 WARN_ON(1);
919                 ret = -1;
920         }
921 
922         return ret;
923 }
924 late_initcall(rcu_verify_early_boot_tests);
925 #else
926 void rcu_early_boot_tests(void) {}
927 #endif /* CONFIG_PROVE_RCU */
928 
929 #ifndef CONFIG_TINY_RCU
930 
931 /*
932  * Print any significant non-default boot-time settings.
933  */
934 void __init rcupdate_announce_bootup_oddness(void)
935 {
936         if (rcu_normal)
937                 pr_info("\tNo expedited grace period (rcu_normal).\n");
938         else if (rcu_normal_after_boot)
939                 pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
940         else if (rcu_expedited)
941                 pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
942         if (rcu_cpu_stall_suppress)
943                 pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
944         if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
945                 pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
946         rcu_tasks_bootup_oddness();
947 }
948 
949 #endif /* #ifndef CONFIG_TINY_RCU */
950 
