Linux/include/linux/percpu-refcount.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you drop that initial refcount by calling
 * percpu_ref_kill() rather than percpu_ref_put().
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, marks the ref as shutting down so that
 * percpu_ref_put() will check for the ref hitting 0, and then drops the
 * initial ref (see percpu_ref_kill() below).
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the percpu_ref.
 * After that, there can't be any new users of the kioctx (from lookup_ioctx())
 * and the kioctx is released once the remaining references are put.
 *
 * Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
 * to synchronize with RCU protected lookup_ioctx().  percpu_ref operations don't
 * imply RCU grace periods of any kind and if a user wants to combine percpu_ref
 * with RCU protection, it must be done explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once.  percpu_ref_kill() returns void here, so it does not provide that
 * guarantee by itself; callers need their own mechanism to make sure teardown
 * only runs once.  The aio code, for example, serializes kill_ioctx() so that
 * it takes effect at most once for a given kioctx.
 */

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
        __PERCPU_REF_ATOMIC     = 1LU << 0,     /* operating in atomic mode */
        __PERCPU_REF_DEAD       = 1LU << 1,     /* (being) killed */
        __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

        __PERCPU_REF_FLAG_BITS  = 2,
};

/* @flags for percpu_ref_init() */
enum {
        /*
         * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
         * operation using percpu_ref_switch_to_percpu().  If initialized
         * with this flag, the ref will stay in atomic mode until
         * percpu_ref_switch_to_percpu() is invoked on it.
         * Implies ALLOW_REINIT.
         */
        PERCPU_REF_INIT_ATOMIC  = 1 << 0,

        /*
         * Start dead w/ ref == 0 in atomic mode.  Must be revived with
         * percpu_ref_reinit() before use.  Implies INIT_ATOMIC and
         * ALLOW_REINIT.
         */
        PERCPU_REF_INIT_DEAD    = 1 << 1,

        /*
         * Allow switching from atomic mode to percpu mode.
         */
        PERCPU_REF_ALLOW_REINIT = 1 << 2,
};

struct percpu_ref {
        atomic_long_t           count;
        /*
         * The low bits of the pointer hold the __PERCPU_REF_* flags above;
         * when __PERCPU_REF_ATOMIC is set, get/put manipulate the
         * atomic_long_t count instead of the percpu counters.
         */
        unsigned long           percpu_count_ptr;
        percpu_ref_func_t       *release;
        percpu_ref_func_t       *confirm_switch;
        bool                    force_atomic:1;
        bool                    allow_reinit:1;
        struct rcu_head         rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
                                 percpu_ref_func_t *release, unsigned int flags,
                                 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);

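/*
 * Illustrative sketch, not part of the upstream header: one way the
 * percpu_ref_init() @flags documented above are typically combined.  The
 * "percpu_ref_example_*" names are hypothetical.
 */
static inline int percpu_ref_example_setup(struct percpu_ref *ref,
                                           percpu_ref_func_t *release)
{
        /*
         * Start dead with ref == 0 in atomic mode; PERCPU_REF_INIT_DEAD
         * implies INIT_ATOMIC and ALLOW_REINIT, so the ref can be revived
         * later.
         */
        return percpu_ref_init(ref, release, PERCPU_REF_INIT_DEAD, GFP_KERNEL);
}

static inline void percpu_ref_example_go_live(struct percpu_ref *ref)
{
        /*
         * Revive the ref back to ref == 1.  Since PERCPU_REF_INIT_ATOMIC
         * wasn't passed above, the ref also leaves atomic mode again.
         */
        percpu_ref_reinit(ref);
}
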
/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
        percpu_ref_kill_and_confirm(ref, NULL);
}

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so would force the compiler to generate two conditional
 * branches, as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
                                          unsigned long __percpu **percpu_countp)
{
        unsigned long percpu_ptr;

        /*
         * The value of @ref->percpu_count_ptr is tested for
         * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
         * used as a pointer.  If the compiler generates a separate fetch
         * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
         * between, contaminating the pointer value, meaning that
         * READ_ONCE() is required when fetching it.
         *
         * The dependency ordering from the READ_ONCE() pairs
         * with smp_store_release() in __percpu_ref_switch_to_percpu().
         */
        percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

        /*
         * Theoretically, the following could test just ATOMIC; however,
         * then we'd have to mask off DEAD separately as DEAD may be
         * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
         * implies ATOMIC anyway.  Test them together.
         */
        if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
                return false;

        *percpu_countp = (unsigned long __percpu *)percpu_ptr;
        return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock();

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_add(*percpu_count, nr);
        else
                atomic_long_add(nr, &ref->count);

        rcu_read_unlock();
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
        percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget_many - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 * @nr: number of references to get
 *
 * Increment a percpu refcount by @nr unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
                                          unsigned long nr)
{
        unsigned long __percpu *percpu_count;
        bool ret;

        rcu_read_lock();

        if (__ref_is_percpu(ref, &percpu_count)) {
                this_cpu_add(*percpu_count, nr);
                ret = true;
        } else {
                ret = atomic_long_add_unless(&ref->count, nr, 0);
        }

        rcu_read_unlock();

        return ret;
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
        return percpu_ref_tryget_many(ref, 1);
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        bool ret = false;

        rcu_read_lock();

        if (__ref_is_percpu(ref, &percpu_count)) {
                this_cpu_inc(*percpu_count);
                ret = true;
        } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
                ret = atomic_long_inc_not_zero(&ref->count);
        }

        rcu_read_unlock();

        return ret;
}

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock();

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_sub(*percpu_count, nr);
        else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
                ref->release(ref);

        rcu_read_unlock();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
        percpu_ref_put_many(ref, 1);
}
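
/*
 * Illustrative sketch, not part of the upstream header: a request-submission
 * style helper in the spirit of the aio example above.  New users are only
 * admitted via percpu_ref_tryget_live(), so once the ref has been killed (and
 * confirmed), no further work can be started.  "do_work" is a hypothetical
 * callback.
 */
static inline bool percpu_ref_example_submit(struct percpu_ref *ref,
                                             void (*do_work)(void *), void *arg)
{
        if (!percpu_ref_tryget_live(ref))
                return false;           /* object is being torn down, reject */

        do_work(arg);                   /* safe: we hold a reference */
        percpu_ref_put(ref);            /* may invoke release if this was the last ref */

        return true;
}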

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
        return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;

        if (__ref_is_percpu(ref, &percpu_count))
                return false;
        return !atomic_long_read(&ref->count);
}
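
/*
 * Illustrative sketch, not part of the upstream header: the two-stage
 * shutdown pattern from the USAGE comment at the top of this file, with
 * hypothetical "percpu_ref_example_*" names.  A real user embeds the
 * percpu_ref in its own object and frees that object from @release (in aio,
 * only after an explicit RCU grace period).
 */
static inline int percpu_ref_example_create(struct percpu_ref *ref,
                                            percpu_ref_func_t *release)
{
        /* start with the initial ref == 1, operating in percpu mode */
        return percpu_ref_init(ref, release, 0, GFP_KERNEL);
}

static inline void percpu_ref_example_do_work(struct percpu_ref *ref)
{
        percpu_ref_get(ref);            /* e.g. one ref per outstanding request */
        /* ... operate on the object ... */
        percpu_ref_put(ref);
}

static inline void percpu_ref_example_shut_down(struct percpu_ref *ref)
{
        /*
         * Forbid new tryget_live() users, switch to atomic mode and drop
         * the initial ref; @release runs once the remaining references are
         * put.  percpu_ref_exit() is then typically called from the free
         * path to release the percpu counters.
         */
        percpu_ref_kill(ref);
}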

#endif
