/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct srcu_array {
	unsigned long lock_count[2];
	unsigned long unlock_count[2];
};

struct rcu_batch {
	struct rcu_head *head, **tail;
};

#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
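/*
 * An rcu_batch is a tail-pointer callback queue: ->tail always points
 * to the slot that will hold the next callback's address (initially
 * &->head, per RCU_BATCH_INIT()), so appending is O(1) with no list
 * walk.  A minimal sketch of the append step, in the shape used by the
 * SRCU implementation in kernel/rcu/srcu.c (shown here for
 * illustration only):
 *
 *	static inline void rcu_batch_queue(struct rcu_batch *b,
 *					   struct rcu_head *head)
 *	{
 *		head->next = NULL;	// terminate the new last element
 *		*b->tail = head;	// old tail slot now points at head
 *		b->tail = &head->next;	// next append links after head
 *	}
 */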
struct srcu_struct {
	unsigned long completed;
	struct srcu_array __percpu *per_cpu_ref;
	spinlock_t queue_lock; /* protect ->batch_queue, ->running */
	bool running;
	/* callbacks just queued */
	struct rcu_batch batch_queue;
	/* callbacks try to do the first check_zero */
	struct rcu_batch batch_check0;
	/* callbacks done with the first check_zero and the flip */
	struct rcu_batch batch_check1;
	struct rcu_batch batch_done;
	struct delayed_work work;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key);

#define init_srcu_struct(sp) \
({ \
	static struct lock_class_key __srcu_key; \
	\
	__init_srcu_struct((sp), #sp, &__srcu_key); \
})

#define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *sp);

#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

void process_srcu(struct work_struct *work);

#define __SRCU_STRUCT_INIT(name)					\
	{								\
		.completed = -300,					\
		.per_cpu_ref = &name##_srcu_array,			\
		.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock),	\
		.running = false,					\
		.batch_queue = RCU_BATCH_INIT(name.batch_queue),	\
		.batch_check0 = RCU_BATCH_INIT(name.batch_check0),	\
		.batch_check1 = RCU_BATCH_INIT(name.batch_check1),	\
		.batch_done = RCU_BATCH_INIT(name.batch_done),		\
		.work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
		__SRCU_DEP_MAP_INIT(name)				\
	}

/*
 * Define and initialize an srcu_struct at build time.
 * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
 *
 * Note that although DEFINE_STATIC_SRCU() hides the name from other
 * files, the per-CPU variable rules nevertheless require that the
 * chosen name be globally unique.  These rules also prohibit use of
 * DEFINE_STATIC_SRCU() within a function.  If these rules are too
 * restrictive, declare the srcu_struct manually.  For example, in
 * each file:
 *
 *	static struct srcu_struct my_srcu;
 *
 * Then, before the first use of each my_srcu, manually initialize it:
 *
 *	init_srcu_struct(&my_srcu);
 *
 * See include/linux/percpu-defs.h for the rules on per-CPU variables.
 */
#define __DEFINE_SRCU(name, is_static)					\
	static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
	is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
#define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name)	__DEFINE_SRCU(name, static)

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct in which to queue the callback
 * @head: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       void (*func)(struct rcu_head *head));
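/*
 * A minimal usage sketch (all my_* names, do_something_with(), and the
 * surrounding update-side locking are hypothetical, for illustration
 * only), assuming struct my_data embeds a struct rcu_head named rcu:
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *	static struct my_data __rcu *my_ptr;
 *
 *	// Reader: may block inside the critical section.
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_ptr, &my_srcu);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 *
 *	// Updater: publish the new version, then defer freeing the old
 *	// one until all pre-existing SRCU readers have finished.
 *	static void my_free_cb(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct my_data, rcu));
 *	}
 *
 *	old = my_ptr;			// under update-side locking
 *	rcu_assign_pointer(my_ptr, newp);
 *	if (old)
 *		call_srcu(&my_srcu, &old->rcu, my_free_cb);
 */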
void cleanup_srcu_struct(struct srcu_struct *sp);
int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
void synchronize_srcu_expedited(struct srcu_struct *sp);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void srcu_barrier(struct srcu_struct *sp);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
 * srcu_read_lock_held - might we be in SRCU read-side critical section?
 * @sp: the srcu_struct to check
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that SRCU is based on its own state machine and does not rely on
 * normal RCU, so it may be called from a CPU that RCU considers to be in
 * the idle loop or even offline.
 */
static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return lock_is_held(&sp->dep_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @sp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 * @c: condition to check for update-side use
 *
 * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
 * critical section will result in an RCU-lockdep splat, unless @c evaluates
 * to 1.  The @c argument will normally be a logical expression containing
 * lockdep_is_held() calls.
 */
#define srcu_dereference_check(p, sp, c) \
	__rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)

/**
 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @sp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Makes rcu_dereference_check() do the dirty work.  If PROVE_RCU
 * is enabled, invoking this outside of an RCU read-side critical
 * section will result in an RCU-lockdep splat.
 */
#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)

/**
 * srcu_read_lock - register a new reader for an SRCU-protected structure.
 * @sp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section.  Note that SRCU read-side
 * critical sections may be nested.  However, it is illegal to
 * call anything that waits on an SRCU grace period for the same
 * srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().
 *
 * Note that srcu_read_lock() and the matching srcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
 * was invoked in process context.
 */
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
	int retval;

	retval = __srcu_read_lock(sp);
	rcu_lock_acquire(&(sp)->dep_map);
	return retval;
}

/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
 * @sp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit an SRCU read-side critical section.
 */
static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
	__releases(sp)
{
	rcu_lock_release(&(sp)->dep_map);
	__srcu_read_unlock(sp, idx);
}

/**
 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
 *
 * Converts the preceding srcu_read_unlock into a two-way memory barrier.
 *
 * Call this after srcu_read_unlock, to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
 * the preceding srcu_read_unlock.
 */
static inline void smp_mb__after_srcu_read_unlock(void)
{
	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}

#endif /* _LINUX_SRCU_H */