~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/linux/rcupdate.h

Version: ~ [ linux-5.6-rc1 ] ~ [ linux-5.5.2 ] ~ [ linux-5.4.17 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.102 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.170 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.213 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.213 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.81 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * Read-Copy Update mechanism for mutual exclusion
  3  *
  4  * This program is free software; you can redistribute it and/or modify
  5  * it under the terms of the GNU General Public License as published by
  6  * the Free Software Foundation; either version 2 of the License, or
  7  * (at your option) any later version.
  8  *
  9  * This program is distributed in the hope that it will be useful,
 10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12  * GNU General Public License for more details.
 13  *
 14  * You should have received a copy of the GNU General Public License
 15  * along with this program; if not, write to the Free Software
 16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 17  *
 18  * Copyright IBM Corporation, 2001
 19  *
 20  * Author: Dipankar Sarma <dipankar@in.ibm.com>
 21  *
 22  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 23  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 24  * Papers:
 25  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 26  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 27  *
 28  * For detailed explanation of Read-Copy Update mechanism see -
 29  *              http://lse.sourceforge.net/locking/rcupdate.html
 30  *
 31  */
 32 
 33 #ifndef __LINUX_RCUPDATE_H
 34 #define __LINUX_RCUPDATE_H
 35 
 36 #include <linux/types.h>
 37 #include <linux/cache.h>
 38 #include <linux/spinlock.h>
 39 #include <linux/threads.h>
 40 #include <linux/cpumask.h>
 41 #include <linux/seqlock.h>
 42 #include <linux/lockdep.h>
 43 #include <linux/completion.h>
 44 #include <linux/debugobjects.h>
 45 #include <linux/compiler.h>
 46 
 47 #ifdef CONFIG_RCU_TORTURE_TEST
 48 extern int rcutorture_runnable; /* for sysctl */
 49 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
 50 
 51 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 52 extern void rcutorture_record_test_transition(void);
 53 extern void rcutorture_record_progress(unsigned long vernum);
 54 #else
 55 static inline void rcutorture_record_test_transition(void)
 56 {
 57 }
 58 static inline void rcutorture_record_progress(unsigned long vernum)
 59 {
 60 }
 61 #endif
 62 
 63 #define UINT_CMP_GE(a, b)       (UINT_MAX / 2 >= (a) - (b))
 64 #define UINT_CMP_LT(a, b)       (UINT_MAX / 2 < (a) - (b))
 65 #define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))
 66 #define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))
 67 
 68 /* Exported common interfaces */
 69 
 70 #ifdef CONFIG_PREEMPT_RCU
 71 
 72 /**
 73  * call_rcu() - Queue an RCU callback for invocation after a grace period.
 74  * @head: structure to be used for queueing the RCU updates.
 75  * @func: actual callback function to be invoked after the grace period
 76  *
 77  * The callback function will be invoked some time after a full grace
 78  * period elapses, in other words after all pre-existing RCU read-side
 79  * critical sections have completed.  However, the callback function
 80  * might well execute concurrently with RCU read-side critical sections
 81  * that started after call_rcu() was invoked.  RCU read-side critical
 82  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 83  * and may be nested.
 84  */
 85 extern void call_rcu(struct rcu_head *head,
 86                               void (*func)(struct rcu_head *head));
 87 
 88 #else /* #ifdef CONFIG_PREEMPT_RCU */
 89 
 90 /* In classic RCU, call_rcu() is just call_rcu_sched(). */
 91 #define call_rcu        call_rcu_sched
 92 
 93 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 94 
 95 /**
 96  * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
 97  * @head: structure to be used for queueing the RCU updates.
 98  * @func: actual callback function to be invoked after the grace period
 99  *
100  * The callback function will be invoked some time after a full grace
101  * period elapses, in other words after all currently executing RCU
102  * read-side critical sections have completed. call_rcu_bh() assumes
103  * that the read-side critical sections end on completion of a softirq
104  * handler. This means that read-side critical sections in process
105  * context must not be interrupted by softirqs. This interface is to be
106  * used when most of the read-side critical sections are in softirq context.
107  * RCU read-side critical sections are delimited by :
108  *  - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context.
109  *  OR
110  *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
111  *  These may be nested.
112  */
113 extern void call_rcu_bh(struct rcu_head *head,
114                         void (*func)(struct rcu_head *head));
115 
116 /**
117  * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
118  * @head: structure to be used for queueing the RCU updates.
119  * @func: actual callback function to be invoked after the grace period
120  *
121  * The callback function will be invoked some time after a full grace
122  * period elapses, in other words after all currently executing RCU
123  * read-side critical sections have completed. call_rcu_sched() assumes
124  * that the read-side critical sections end on enabling of preemption
125  * or on voluntary preemption.
126  * RCU read-side critical sections are delimited by :
127  *  - rcu_read_lock_sched() and  rcu_read_unlock_sched(),
128  *  OR
129  *  anything that disables preemption.
130  *  These may be nested.
131  */
132 extern void call_rcu_sched(struct rcu_head *head,
133                            void (*func)(struct rcu_head *rcu));
134 
135 extern void synchronize_sched(void);
136 
137 #ifdef CONFIG_PREEMPT_RCU
138 
139 extern void __rcu_read_lock(void);
140 extern void __rcu_read_unlock(void);
141 void synchronize_rcu(void);
142 
143 /*
144  * Defined as a macro as it is a very low level header included from
145  * areas that don't even know about current.  This gives the rcu_read_lock()
146  * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
147  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
148  */
149 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
150 
151 #else /* #ifdef CONFIG_PREEMPT_RCU */
152 
153 static inline void __rcu_read_lock(void)
154 {
155         preempt_disable();
156 }
157 
158 static inline void __rcu_read_unlock(void)
159 {
160         preempt_enable();
161 }
162 
163 static inline void synchronize_rcu(void)
164 {
165         synchronize_sched();
166 }
167 
168 static inline int rcu_preempt_depth(void)
169 {
170         return 0;
171 }
172 
173 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
174 
175 /* Internal to kernel */
176 extern void rcu_sched_qs(int cpu);
177 extern void rcu_bh_qs(int cpu);
178 extern void rcu_check_callbacks(int cpu, int user);
179 struct notifier_block;
180 
181 #ifdef CONFIG_NO_HZ
182 
183 extern void rcu_enter_nohz(void);
184 extern void rcu_exit_nohz(void);
185 
186 #else /* #ifdef CONFIG_NO_HZ */
187 
188 static inline void rcu_enter_nohz(void)
189 {
190 }
191 
192 static inline void rcu_exit_nohz(void)
193 {
194 }
195 
196 #endif /* #else #ifdef CONFIG_NO_HZ */
197 
198 /*
199  * Infrastructure to implement the synchronize_() primitives in
200  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
201  */
202 
203 typedef void call_rcu_func_t(struct rcu_head *head,
204                              void (*func)(struct rcu_head *head));
205 void wait_rcu_gp(call_rcu_func_t crf);
206 
207 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
208 #include <linux/rcutree.h>
209 #elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
210 #include <linux/rcutiny.h>
211 #else
212 #error "Unknown RCU implementation specified to kernel configuration"
213 #endif
214 
215 /*
216  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
217  * initialization and destruction of rcu_head on the stack. rcu_head structures
218  * allocated dynamically in the heap or defined statically don't need any
219  * initialization.
220  */
221 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
222 extern void init_rcu_head_on_stack(struct rcu_head *head);
223 extern void destroy_rcu_head_on_stack(struct rcu_head *head);
224 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
225 static inline void init_rcu_head_on_stack(struct rcu_head *head)
226 {
227 }
228 
229 static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
230 {
231 }
232 #endif  /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
233 
234 #ifdef CONFIG_DEBUG_LOCK_ALLOC
235 
236 extern struct lockdep_map rcu_lock_map;
237 # define rcu_read_acquire() \
238                 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
239 # define rcu_read_release()     lock_release(&rcu_lock_map, 1, _THIS_IP_)
240 
241 extern struct lockdep_map rcu_bh_lock_map;
242 # define rcu_read_acquire_bh() \
243                 lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
244 # define rcu_read_release_bh()  lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
245 
246 extern struct lockdep_map rcu_sched_lock_map;
247 # define rcu_read_acquire_sched() \
248                 lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
249 # define rcu_read_release_sched() \
250                 lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
251 
252 extern int debug_lockdep_rcu_enabled(void);
253 
254 /**
255  * rcu_read_lock_held() - might we be in RCU read-side critical section?
256  *
257  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
258  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
259  * this assumes we are in an RCU read-side critical section unless it can
260  * prove otherwise.  This is useful for debug checks in functions that
261  * require that they be called within an RCU read-side critical section.
262  *
263  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
264  * and while lockdep is disabled.
265  */
266 static inline int rcu_read_lock_held(void)
267 {
268         if (!debug_lockdep_rcu_enabled())
269                 return 1;
270         return lock_is_held(&rcu_lock_map);
271 }
272 
273 /*
274  * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
275  * hell.
276  */
277 extern int rcu_read_lock_bh_held(void);
278 
279 /**
280  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
281  *
282  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
283  * RCU-sched read-side critical section.  In absence of
284  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
285  * critical section unless it can prove otherwise.  Note that disabling
286  * of preemption (including disabling irqs) counts as an RCU-sched
287  * read-side critical section.  This is useful for debug checks in functions
288  * that required that they be called within an RCU-sched read-side
289  * critical section.
290  *
291  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
292  * and while lockdep is disabled.
293  */
294 #ifdef CONFIG_PREEMPT_COUNT
295 static inline int rcu_read_lock_sched_held(void)
296 {
297         int lockdep_opinion = 0;
298 
299         if (!debug_lockdep_rcu_enabled())
300                 return 1;
301         if (debug_locks)
302                 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
303         return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
304 }
305 #else /* #ifdef CONFIG_PREEMPT_COUNT */
306 static inline int rcu_read_lock_sched_held(void)
307 {
308         return 1;
309 }
310 #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
311 
312 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
313 
314 # define rcu_read_acquire()             do { } while (0)
315 # define rcu_read_release()             do { } while (0)
316 # define rcu_read_acquire_bh()          do { } while (0)
317 # define rcu_read_release_bh()          do { } while (0)
318 # define rcu_read_acquire_sched()       do { } while (0)
319 # define rcu_read_release_sched()       do { } while (0)
320 
321 static inline int rcu_read_lock_held(void)
322 {
323         return 1;
324 }
325 
326 static inline int rcu_read_lock_bh_held(void)
327 {
328         return 1;
329 }
330 
331 #ifdef CONFIG_PREEMPT_COUNT
332 static inline int rcu_read_lock_sched_held(void)
333 {
334         return preempt_count() != 0 || irqs_disabled();
335 }
336 #else /* #ifdef CONFIG_PREEMPT_COUNT */
337 static inline int rcu_read_lock_sched_held(void)
338 {
339         return 1;
340 }
341 #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
342 
343 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
344 
345 #ifdef CONFIG_PROVE_RCU
346 
347 extern int rcu_my_thread_group_empty(void);
348 
349 /**
350  * rcu_lockdep_assert - emit lockdep splat if specified condition not met
351  * @c: condition to check
352  * @s: informative message
353  */
354 #define rcu_lockdep_assert(c, s)                                        \
355         do {                                                            \
356                 static bool __warned;                                   \
357                 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
358                         __warned = true;                                \
359                         lockdep_rcu_suspicious(__FILE__, __LINE__, s);  \
360                 }                                                       \
361         } while (0)
362 
363 #define rcu_sleep_check()                                               \
364         do {                                                            \
365                 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),     \
366                                    "Illegal context switch in RCU-bh"   \
367                                    " read-side critical section");      \
368                 rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),  \
369                                    "Illegal context switch in RCU-sched"\
370                                    " read-side critical section");      \
371         } while (0)
372 
373 #else /* #ifdef CONFIG_PROVE_RCU */
374 
375 #define rcu_lockdep_assert(c, s) do { } while (0)
376 #define rcu_sleep_check() do { } while (0)
377 
378 #endif /* #else #ifdef CONFIG_PROVE_RCU */
379 
380 /*
381  * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
382  * and rcu_assign_pointer().  Some of these could be folded into their
383  * callers, but they are left separate in order to ease introduction of
384  * multiple flavors of pointers to match the multiple flavors of RCU
385  * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in
386  * the future.
387  */
388 
389 #ifdef __CHECKER__
390 #define rcu_dereference_sparse(p, space) \
391         ((void)(((typeof(*p) space *)p) == p))
392 #else /* #ifdef __CHECKER__ */
393 #define rcu_dereference_sparse(p, space)
394 #endif /* #else #ifdef __CHECKER__ */
395 
396 #define __rcu_access_pointer(p, space) \
397         ({ \
398                 typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
399                 rcu_dereference_sparse(p, space); \
400                 ((typeof(*p) __force __kernel *)(_________p1)); \
401         })
402 #define __rcu_dereference_check(p, c, space) \
403         ({ \
404                 typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
405                 rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
406                                       " usage"); \
407                 rcu_dereference_sparse(p, space); \
408                 smp_read_barrier_depends(); \
409                 ((typeof(*p) __force __kernel *)(_________p1)); \
410         })
411 #define __rcu_dereference_protected(p, c, space) \
412         ({ \
413                 rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
414                                       " usage"); \
415                 rcu_dereference_sparse(p, space); \
416                 ((typeof(*p) __force __kernel *)(p)); \
417         })
418 
419 #define __rcu_access_index(p, space) \
420         ({ \
421                 typeof(p) _________p1 = ACCESS_ONCE(p); \
422                 rcu_dereference_sparse(p, space); \
423                 (_________p1); \
424         })
425 #define __rcu_dereference_index_check(p, c) \
426         ({ \
427                 typeof(p) _________p1 = ACCESS_ONCE(p); \
428                 rcu_lockdep_assert(c, \
429                                    "suspicious rcu_dereference_index_check()" \
430                                    " usage"); \
431                 smp_read_barrier_depends(); \
432                 (_________p1); \
433         })
434 #define __rcu_assign_pointer(p, v, space) \
435         ({ \
436                 smp_wmb(); \
437                 (p) = (typeof(*v) __force space *)(v); \
438         })
439 
440 
441 /**
442  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
443  * @p: The pointer to read
444  *
445  * Return the value of the specified RCU-protected pointer, but omit the
446  * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
447  * when the value of this pointer is accessed, but the pointer is not
448  * dereferenced, for example, when testing an RCU-protected pointer against
449  * NULL.  Although rcu_access_pointer() may also be used in cases where
450  * update-side locks prevent the value of the pointer from changing, you
451  * should instead use rcu_dereference_protected() for this use case.
452  */
453 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
454 
455 /**
456  * rcu_dereference_check() - rcu_dereference with debug checking
457  * @p: The pointer to read, prior to dereferencing
458  * @c: The conditions under which the dereference will take place
459  *
460  * Do an rcu_dereference(), but check that the conditions under which the
461  * dereference will take place are correct.  Typically the conditions
462  * indicate the various locking conditions that should be held at that
463  * point.  The check should return true if the conditions are satisfied.
464  * An implicit check for being in an RCU read-side critical section
465  * (rcu_read_lock()) is included.
466  *
467  * For example:
468  *
469  *      bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
470  *
471  * could be used to indicate to lockdep that foo->bar may only be dereferenced
472  * if either rcu_read_lock() is held, or that the lock required to replace
473  * the bar struct at foo->bar is held.
474  *
475  * Note that the list of conditions may also include indications of when a lock
476  * need not be held, for example during initialisation or destruction of the
477  * target struct:
478  *
479  *      bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
480  *                                            atomic_read(&foo->usage) == 0);
481  *
482  * Inserts memory barriers on architectures that require them
483  * (currently only the Alpha), prevents the compiler from refetching
484  * (and from merging fetches), and, more importantly, documents exactly
485  * which pointers are protected by RCU and checks that the pointer is
486  * annotated as __rcu.
487  */
488 #define rcu_dereference_check(p, c) \
489         __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
490 
491 /**
492  * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
493  * @p: The pointer to read, prior to dereferencing
494  * @c: The conditions under which the dereference will take place
495  *
496  * This is the RCU-bh counterpart to rcu_dereference_check().
497  */
498 #define rcu_dereference_bh_check(p, c) \
499         __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
500 
501 /**
502  * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
503  * @p: The pointer to read, prior to dereferencing
504  * @c: The conditions under which the dereference will take place
505  *
506  * This is the RCU-sched counterpart to rcu_dereference_check().
507  */
508 #define rcu_dereference_sched_check(p, c) \
509         __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
510                                 __rcu)
511 
512 #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
513 
514 /**
515  * rcu_access_index() - fetch RCU index with no dereferencing
516  * @p: The index to read
517  *
518  * Return the value of the specified RCU-protected index, but omit the
519  * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
520  * when the value of this index is accessed, but the index is not
521  * dereferenced, for example, when testing an RCU-protected index against
522  * -1.  Although rcu_access_index() may also be used in cases where
523  * update-side locks prevent the value of the index from changing, you
524  * should instead use rcu_dereference_index_protected() for this use case.
525  */
526 #define rcu_access_index(p) __rcu_access_index((p), __rcu)
527 
528 /**
529  * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
530  * @p: The pointer to read, prior to dereferencing
531  * @c: The conditions under which the dereference will take place
532  *
533  * Similar to rcu_dereference_check(), but omits the sparse checking.
534  * This allows rcu_dereference_index_check() to be used on integers,
535  * which can then be used as array indices.  Attempting to use
536  * rcu_dereference_check() on an integer will give compiler warnings
537  * because the sparse address-space mechanism relies on dereferencing
538  * the RCU-protected pointer.  Dereferencing integers is not something
539  * that even gcc will put up with.
540  *
541  * Note that this function does not implicitly check for RCU read-side
542  * critical sections.  If this function gains lots of uses, it might
543  * make sense to provide versions for each flavor of RCU, but it does
544  * not make sense as of early 2010.
545  */
546 #define rcu_dereference_index_check(p, c) \
547         __rcu_dereference_index_check((p), (c))
548 
549 /**
550  * rcu_dereference_protected() - fetch RCU pointer when updates prevented
551  * @p: The pointer to read, prior to dereferencing
552  * @c: The conditions under which the dereference will take place
553  *
554  * Return the value of the specified RCU-protected pointer, but omit
555  * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
556  * is useful in cases where update-side locks prevent the value of the
557  * pointer from changing.  Please note that this primitive does -not-
558  * prevent the compiler from repeating this reference or combining it
559  * with other references, so it should not be used without protection
560  * of appropriate locks.
561  *
562  * This function is only for update-side use.  Using this function
563  * when protected only by rcu_read_lock() will result in infrequent
564  * but very ugly failures.
565  */
566 #define rcu_dereference_protected(p, c) \
567         __rcu_dereference_protected((p), (c), __rcu)
568 
569 
570 /**
571  * rcu_dereference() - fetch RCU-protected pointer for dereferencing
572  * @p: The pointer to read, prior to dereferencing
573  *
574  * This is a simple wrapper around rcu_dereference_check().
575  */
576 #define rcu_dereference(p) rcu_dereference_check(p, 0)
577 
578 /**
579  * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
580  * @p: The pointer to read, prior to dereferencing
581  *
582  * Makes rcu_dereference_check() do the dirty work.
583  */
584 #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
585 
586 /**
587  * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
588  * @p: The pointer to read, prior to dereferencing
589  *
590  * Makes rcu_dereference_check() do the dirty work.
591  */
592 #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
593 
594 /**
595  * rcu_read_lock() - mark the beginning of an RCU read-side critical section
596  *
597  * When synchronize_rcu() is invoked on one CPU while other CPUs
598  * are within RCU read-side critical sections, then the
599  * synchronize_rcu() is guaranteed to block until after all the other
600  * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
601  * on one CPU while other CPUs are within RCU read-side critical
602  * sections, invocation of the corresponding RCU callback is deferred
603  * until after the all the other CPUs exit their critical sections.
604  *
605  * Note, however, that RCU callbacks are permitted to run concurrently
606  * with new RCU read-side critical sections.  One way that this can happen
607  * is via the following sequence of events: (1) CPU 0 enters an RCU
608  * read-side critical section, (2) CPU 1 invokes call_rcu() to register
609  * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
610  * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU
611  * callback is invoked.  This is legal, because the RCU read-side critical
612  * section that was running concurrently with the call_rcu() (and which
613  * therefore might be referencing something that the corresponding RCU
614  * callback would free up) has completed before the corresponding
615  * RCU callback is invoked.
616  *
617  * RCU read-side critical sections may be nested.  Any deferred actions
618  * will be deferred until the outermost RCU read-side critical section
619  * completes.
620  *
621  * You can avoid reading and understanding the next paragraph by
622  * following this rule: don't put anything in an rcu_read_lock() RCU
623  * read-side critical section that would block in a !PREEMPT kernel.
624  * But if you want the full story, read on!
625  *
626  * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
627  * is illegal to block while in an RCU read-side critical section.  In
628  * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
629  * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
630  * be preempted, but explicit blocking is illegal.  Finally, in preemptible
631  * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
632  * RCU read-side critical sections may be preempted and they may also
633  * block, but only when acquiring spinlocks that are subject to priority
634  * inheritance.
635  */
636 static inline void rcu_read_lock(void)
637 {
638         __rcu_read_lock();
639         __acquire(RCU);
640         rcu_read_acquire();
641 }
642 
643 /*
644  * So where is rcu_write_lock()?  It does not exist, as there is no
645  * way for writers to lock out RCU readers.  This is a feature, not
646  * a bug -- this property is what provides RCU's performance benefits.
647  * Of course, writers must coordinate with each other.  The normal
648  * spinlock primitives work well for this, but any other technique may be
649  * used as well.  RCU does not care how the writers keep out of each
650  * others' way, as long as they do so.
651  */
652 
653 /**
654  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
655  *
656  * See rcu_read_lock() for more information.
657  */
658 static inline void rcu_read_unlock(void)
659 {
660         rcu_read_release();
661         __release(RCU);
662         __rcu_read_unlock();
663 }
664 
665 /**
666  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
667  *
668  * This is equivalent of rcu_read_lock(), but to be used when updates
669  * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
670  * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
671  * softirq handler to be a quiescent state, a process in RCU read-side
672  * critical section must be protected by disabling softirqs. Read-side
673  * critical sections in interrupt context can use just rcu_read_lock(),
674  * though this should at least be commented to avoid confusing people
675  * reading the code.
676  */
677 static inline void rcu_read_lock_bh(void)
678 {
679         local_bh_disable();
680         __acquire(RCU_BH);
681         rcu_read_acquire_bh();
682 }
683 
684 /*
685  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
686  *
687  * See rcu_read_lock_bh() for more information.
688  */
689 static inline void rcu_read_unlock_bh(void)
690 {
691         rcu_read_release_bh();
692         __release(RCU_BH);
693         local_bh_enable();
694 }
695 
696 /**
697  * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
698  *
699  * This is equivalent of rcu_read_lock(), but to be used when updates
700  * are being done using call_rcu_sched() or synchronize_rcu_sched().
701  * Read-side critical sections can also be introduced by anything that
702  * disables preemption, including local_irq_disable() and friends.
703  */
704 static inline void rcu_read_lock_sched(void)
705 {
706         preempt_disable();
707         __acquire(RCU_SCHED);
708         rcu_read_acquire_sched();
709 }
710 
711 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
712 static inline notrace void rcu_read_lock_sched_notrace(void)
713 {
714         preempt_disable_notrace();
715         __acquire(RCU_SCHED);
716 }
717 
718 /*
719  * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
720  *
721  * See rcu_read_lock_sched for more information.
722  */
723 static inline void rcu_read_unlock_sched(void)
724 {
725         rcu_read_release_sched();
726         __release(RCU_SCHED);
727         preempt_enable();
728 }
729 
730 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
731 static inline notrace void rcu_read_unlock_sched_notrace(void)
732 {
733         __release(RCU_SCHED);
734         preempt_enable_notrace();
735 }
736 
737 /**
738  * rcu_assign_pointer() - assign to RCU-protected pointer
739  * @p: pointer to assign to
740  * @v: value to assign (publish)
741  *
742  * Assigns the specified value to the specified RCU-protected
743  * pointer, ensuring that any concurrent RCU readers will see
744  * any prior initialization.  Returns the value assigned.
745  *
746  * Inserts memory barriers on architectures that require them
747  * (which is most of them), and also prevents the compiler from
748  * reordering the code that initializes the structure after the pointer
749  * assignment.  More importantly, this call documents which pointers
750  * will be dereferenced by RCU read-side code.
751  *
752  * In some special cases, you may use RCU_INIT_POINTER() instead
753  * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
754  * to the fact that it does not constrain either the CPU or the compiler.
755  * That said, using RCU_INIT_POINTER() when you should have used
756  * rcu_assign_pointer() is a very bad thing that results in
757  * impossible-to-diagnose memory corruption.  So please be careful.
758  * See the RCU_INIT_POINTER() comment header for details.
759  */
760 #define rcu_assign_pointer(p, v) \
761         __rcu_assign_pointer((p), (v), __rcu)
762 
763 /**
764  * RCU_INIT_POINTER() - initialize an RCU protected pointer
765  *
766  * Initialize an RCU-protected pointer in special cases where readers
767  * do not need ordering constraints on the CPU or the compiler.  These
768  * special cases are:
769  *
770  * 1.   This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
771  * 2.   The caller has taken whatever steps are required to prevent
772  *      RCU readers from concurrently accessing this pointer -or-
773  * 3.   The referenced data structure has already been exposed to
774  *      readers either at compile time or via rcu_assign_pointer() -and-
775  *      a.      You have not made -any- reader-visible changes to
776  *              this structure since then -or-
777  *      b.      It is OK for readers accessing this structure from its
778  *              new location to see the old state of the structure.  (For
779  *              example, the changes were to statistical counters or to
780  *              other state where exact synchronization is not required.)
781  *
782  * Failure to follow these rules governing use of RCU_INIT_POINTER() will
783  * result in impossible-to-diagnose memory corruption.  As in the structures
784  * will look OK in crash dumps, but any concurrent RCU readers might
785  * see pre-initialized values of the referenced data structure.  So
786  * please be very careful how you use RCU_INIT_POINTER()!!!
787  *
788  * If you are creating an RCU-protected linked structure that is accessed
789  * by a single external-to-structure RCU-protected pointer, then you may
790  * use RCU_INIT_POINTER() to initialize the internal RCU-protected
791  * pointers, but you must use rcu_assign_pointer() to initialize the
792  * external-to-structure pointer -after- you have completely initialized
793  * the reader-accessible portions of the linked structure.
794  */
795 #define RCU_INIT_POINTER(p, v) \
796                 p = (typeof(*v) __force __rcu *)(v)
797 
798 static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
799 {
800         return offset < 4096;
801 }
802 
803 static __always_inline
804 void __kfree_rcu(struct rcu_head *head, unsigned long offset)
805 {
806         typedef void (*rcu_callback)(struct rcu_head *);
807 
808         BUILD_BUG_ON(!__builtin_constant_p(offset));
809 
810         /* See the kfree_rcu() header comment. */
811         BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
812 
813         call_rcu(head, (rcu_callback)offset);
814 }
815 
816 /**
817  * kfree_rcu() - kfree an object after a grace period.
818  * @ptr:        pointer to kfree
819  * @rcu_head:   the name of the struct rcu_head within the type of @ptr.
820  *
821  * Many rcu callbacks functions just call kfree() on the base structure.
822  * These functions are trivial, but their size adds up, and furthermore
823  * when they are used in a kernel module, that module must invoke the
824  * high-latency rcu_barrier() function at module-unload time.
825  *
826  * The kfree_rcu() function handles this issue.  Rather than encoding a
827  * function address in the embedded rcu_head structure, kfree_rcu() instead
828  * encodes the offset of the rcu_head structure within the base structure.
829  * Because the functions are not allowed in the low-order 4096 bytes of
830  * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
831  * If the offset is larger than 4095 bytes, a compile-time error will
832  * be generated in __kfree_rcu().  If this error is triggered, you can
833  * either fall back to use of call_rcu() or rearrange the structure to
834  * position the rcu_head structure into the first 4096 bytes.
835  *
836  * Note that the allowable offset might decrease in the future, for example,
837  * to allow something like kmem_cache_free_rcu().
838  */
839 #define kfree_rcu(ptr, rcu_head)                                        \
840         __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
841 
842 #endif /* __LINUX_RCUPDATE_H */
843 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp