Linux/include/linux/spinlock.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
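/*
 * Illustrative sketch (an editorial addition, not part of the upstream
 * header): the layering described above culminates in the spin_*() APIs.
 * "my_lock" and the critical section are hypothetical:
 *
 *      static DEFINE_SPINLOCK(my_lock);
 *
 *      spin_lock(&my_lock);
 *      ... critical section: at most one CPU executes this at a time ...
 *      spin_unlock(&my_lock);
 */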

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files; inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                               \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init((lock), #lock, &__key);            \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
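
/*
 * Illustrative sketch (an editorial addition): raw_spin_lock_init() is
 * used for raw locks embedded in dynamically allocated structures;
 * "struct foo" is hypothetical:
 *
 *      struct foo {
 *              raw_spinlock_t lock;
 *      };
 *
 *      struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *      if (f)
 *              raw_spin_lock_init(&f->lock);
 */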

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                          CPU1
 *
 *        WRITE_ONCE(X, 1);             WRITE_ONCE(Y, 1);
 *        spin_lock(S);                 smp_mb();
 *        smp_mb__after_spinlock();     r1 = READ_ONCE(X);
 *        r0 = READ_ONCE(Y);
 *        spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                  CPU1                            CPU2
 *
 *        spin_lock(S);         spin_lock(S);                   r1 = READ_ONCE(Y);
 *        WRITE_ONCE(X, 1);     smp_mb__after_spinlock();       smp_rmb();
 *        spin_unlock(S);       r0 = READ_ONCE(X);              r2 = READ_ONCE(X);
 *                              WRITE_ONCE(Y, 1);
 *                              spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()        do { } while (0)
#endif
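
/*
 * Illustrative sketch (an editorial addition): the usage pattern the
 * barrier supports, as in callers such as __schedule(); "s" is a
 * hypothetical raw lock:
 *
 *      spin_lock(&s);
 *      smp_mb__after_spinlock();
 *      ... accesses here are ordered as if the lock acquisition were a
 *          full barrier (RCsc), per properties (1) and (2) above ...
 */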

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops when they are not required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif
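
/*
 * Illustrative sketch (an editorial addition): raw_spin_lock_nested()
 * tells lockdep that taking two locks of the same class is intentional,
 * e.g. when locking both ends of a hypothetical transfer. "src"/"dst"
 * are hypothetical; SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>:
 *
 *      raw_spin_lock(&src->lock);
 *      raw_spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *      ... move data from src to dst ...
 *      raw_spin_unlock(&dst->lock);
 *      raw_spin_unlock(&src->lock);
 */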

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
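
/*
 * Illustrative sketch (an editorial addition): per the macros above,
 * the trylock variants return 1 on success and 0 otherwise, restoring
 * the IRQ state on failure. "my_lock" is hypothetical:
 *
 *      unsigned long flags;
 *
 *      if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *              ... lock held, local interrupts disabled ...
 *              raw_spin_unlock_irqrestore(&my_lock, flags);
 *      } else {
 *              ... contended: fall back or retry ...
 *      }
 */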

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)
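
/*
 * Illustrative sketch (an editorial addition): statically allocated
 * locks can use DEFINE_SPINLOCK(); dynamically allocated ones must use
 * spin_lock_init() so that, with CONFIG_DEBUG_SPINLOCK, each init site
 * gets its own lock_class_key (see raw_spin_lock_init() above).
 * "struct dev_ctx" is hypothetical:
 *
 *      struct dev_ctx {
 *              spinlock_t lock;
 *      };
 *
 *      spin_lock_init(&ctx->lock);
 */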

static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
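
/*
 * Illustrative sketch (an editorial addition): the canonical pairing
 * for locks that can also be taken from interrupt context; "dev" is
 * hypothetical:
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&dev->lock, flags);
 *      ... critical section, local interrupts disabled ...
 *      spin_unlock_irqrestore(&dev->lock, flags);
 */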

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
                __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
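
/*
 * Illustrative sketch (an editorial addition): the classic use of
 * atomic_dec_and_lock() is dropping the last reference while holding
 * the lock that protects the lookup structure, so no one can find the
 * object between the final decrement and its removal. Names are
 * hypothetical:
 *
 *      if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *              list_del(&obj->node);
 *              spin_unlock(&obj_list_lock);
 *              kfree(obj);
 *      }
 */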

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
                             gfp_t gfp, const char *name,
                             struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
        ({                                                                   \
                static struct lock_class_key key;                            \
                int ret;                                                     \
                                                                             \
                ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
                                               cpu_mult, gfp, #locks, &key); \
                ret;                                                         \
        })

void free_bucket_spinlocks(spinlock_t *locks);
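
/*
 * Illustrative sketch (an editorial addition): bucket spinlocks hash an
 * object to one lock in an array, with lock_mask filled in for indexing.
 * Names are hypothetical, and the 0-on-success return convention is
 * assumed:
 *
 *      spinlock_t *locks;
 *      unsigned int mask;
 *
 *      if (alloc_bucket_spinlocks(&locks, &mask, 1024, 0, GFP_KERNEL))
 *              return -ENOMEM;
 *
 *      spin_lock(&locks[hash & mask]);
 *      ... operate on the hash bucket for "hash" ...
 *      spin_unlock(&locks[hash & mask]);
 *
 *      free_bucket_spinlocks(locks);
 */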

#endif /* __LINUX_SPINLOCK_H */