Linux/include/linux/lockdep.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
                                    struct lockdep_map *from)
{
        int i;

        *to = *from;
        /*
         * Since the class cache can be modified concurrently we could observe
         * half pointers (64bit arch using 32bit copy insns). Therefore clear
         * the caches and take the performance hit.
         *
         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
         *     that relies on cache abuse.
         */
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct lock_class               *links_to;
        const struct lock_trace         *trace;
        int                             distance;

        /*
         * The parent field is used to implement breadth-first search; bit 0
         * is reused to indicate whether the lock has been visited during BFS.
         */
        struct lock_list                *parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
        /* see BUILD_BUG_ON()s in add_chain_cache() */
        unsigned int                    irq_context :  2,
                                        depth       :  6,
                                        base        : 24;
        /* 4 byte hole */
        struct hlist_node               entry;
        u64                             chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS           13
#define MAX_LOCKDEP_KEYS                (1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY               -1

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency-caching and we skip detection
         * passes and dependency-updates if there is a cache-hit, so
         * it is absolutely critical for 100% coverage of the validator
         * to have a unique key value for every unique dependency path
         * that can occur in the system, to make a unique hash value
         * as likely as possible - hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * with zero), here we store the previous hash value:
         */
        u64                             prev_chain_key;
        unsigned long                   acquire_ip;
        struct lockdep_map              *instance;
        struct lockdep_map              *nest_lock;
#ifdef CONFIG_LOCK_STAT
        u64                             waittime_stamp;
        u64                             holdtime_stamp;
#endif
        /*
         * class_idx is zero-indexed; it points to the element in
         * lock_classes this held lock instance belongs to. class_idx is in
         * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
         */
        unsigned int                    class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
        unsigned int trylock:1;                                         /* 16 bits */

        unsigned int read:2;        /* see lock_acquire() comment */
        unsigned int check:1;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
        unsigned int references:12;                                     /* 32 bits */
        unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS  16
#define LOCKDEP_OFF             (1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK  (LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()                                   \
do {                                                    \
        current->lockdep_recursion += LOCKDEP_OFF;      \
} while (0)

#define lockdep_on()                                    \
do {                                                    \
        current->lockdep_recursion -= LOCKDEP_OFF;      \
} while (0)
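
/*
 * Illustrative sketch (not part of this header): because the counter is
 * split at LOCKDEP_RECURSION_BITS, an explicit lockdep_off() shows up in
 * the high bits while internal recursion stays in the low bits, so
 * hypothetical debug helpers could tell the two apart like this:
 *
 *      static bool lockdep_explicitly_off(void)
 *      {
 *              return current->lockdep_recursion & ~LOCKDEP_RECURSION_MASK;
 *      }
 *
 *      static unsigned int lockdep_recursion_depth(void)
 *      {
 *              return current->lockdep_recursion & LOCKDEP_RECURSION_MASK;
 *      }
 */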

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
        struct lock_class_key *key, int subclass, short inner, short outer);

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass, short inner)
{
        lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass)
{
        lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)                            \
        lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,  \
                               (lock)->dep_map.wait_type_inner, \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)             \
        lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,  \
                               (lock)->dep_map.wait_type_inner, \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)          \
        lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
                               (lock)->dep_map.wait_type_inner, \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)                                 \
        lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
                               (lock)->dep_map.wait_type_inner,         \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
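
/*
 * Usage sketch (illustrative; "struct foo" and "foo_key" are hypothetical):
 * give a group of locks their own class when the class the initializer
 * assigned is too broad or too narrow for the real dependency scope:
 *
 *      static struct lock_class_key foo_key;
 *
 *      static void foo_init(struct foo *foo)
 *      {
 *              spin_lock_init(&foo->lock);
 *              lockdep_set_class(&foo->lock, &foo_key);
 *      }
 */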

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
                                    struct lock_class_key *key)
{
        return lock->key == key;
}
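
/*
 * Illustrative sketch (hypothetical names): verify that a lock still
 * carries the key a subsystem assigned to it earlier:
 *
 *      WARN_ON(!lockdep_match_class(&foo->lock, &foo_key));
 */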

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
                         struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);
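
/*
 * Illustrative sketch of how a locking primitive forwards these events to
 * lockdep, modelled on the *_acquire()/*_release() wrappers further down;
 * "struct my_lock" and its arch helpers are hypothetical. The acquire is
 * exclusive (read=0), fully validated (check=1), with no nest_lock:
 *
 *      static inline void my_lock(struct my_lock *l)
 *      {
 *              lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *              arch_my_lock(l);
 *      }
 *
 *      static inline void my_unlock(struct my_lock *l)
 *      {
 *              lock_release(&l->dep_map, _RET_IP_);
 *              arch_my_unlock(l);
 *      }
 */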

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
        return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)           lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)   lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
                unsigned int subclass, unsigned long ip)
{
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)      (debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)  do {                            \
                WARN_ON(debug_locks && !lockdep_is_held(l));    \
        } while (0)

#define lockdep_assert_held_write(l)    do {                    \
                WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));    \
        } while (0)

#define lockdep_assert_held_read(l)     do {                            \
                WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));    \
        } while (0)

#define lockdep_assert_held_once(l)     do {                            \
                WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));       \
        } while (0)
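
/*
 * Usage sketch (hypothetical "struct foo"): state a locking precondition
 * in code rather than in a comment that can go stale:
 *
 *      static void foo_update(struct foo *foo)
 *      {
 *              lockdep_assert_held(&foo->lock);
 *              foo->counter++;
 *      }
 *
 * This warns if the caller does not hold foo->lock in any mode; the
 * _write/_read variants additionally check the acquisition mode.
 */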

#define lockdep_recursing(tsk)  ((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)     lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
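
/*
 * Illustrative sketch of the pin API (the scheduler uses it for the
 * runqueue lock; "q" is hypothetical): pinning asserts that the lock is
 * held and flags any release that happens before the matching unpin:
 *
 *      struct pin_cookie cookie = lockdep_pin_lock(&q->lock);
 *      ... a section that must not drop q->lock ...
 *      lockdep_unpin_lock(&q->lock, cookie);
 */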

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)      do { } while (0)
# define lock_release(l, i)                     do { } while (0)
# define lock_downgrade(l, i)                   do { } while (0)
# define lock_set_class(l, n, k, s, i)          do { } while (0)
# define lock_set_subclass(l, s, i)             do { } while (0)
# define lockdep_init()                         do { } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)           do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)        do { } while (0)

# define lockdep_set_novalidate_class(lock)     do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result is not well defined and the caller
 * should instead guard the call with #ifdef.
 */

# define lockdep_reset()                do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)    do { } while (0)
# define lockdep_sys_exit()                     do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)      (0)

#define lockdep_is_held_type(l, r)              (1)

#define lockdep_assert_held(l)                  do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)            do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)             do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)             do { (void)(l); } while (0)

#define lockdep_recursing(tsk)                  (0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)                     ({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)                do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)                do { (void)(l); (void)(c); } while (0)

#endif /* !CONFIG_LOCKDEP */

enum xhlock_context_t {
        XHLOCK_HARD,
        XHLOCK_SOFT,
        XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)                        \
do {                                                            \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                lock(_lock);                                    \
        }                                                       \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);             \
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)                 \
({                                                              \
        int ____err = 0;                                        \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                ____err = lock(_lock);                          \
        }                                                       \
        if (!____err)                                           \
                lock_acquired(&(_lock)->dep_map, _RET_IP_);     \
        ____err;                                                \
})
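
/*
 * Illustrative sketch: a sleeping lock implementation can wrap its fast
 * and slow paths with LOCK_CONTENDED() so a failed trylock is recorded as
 * contention before blocking ("my_mutex" and its helpers are hypothetical):
 *
 *      void my_mutex_lock(struct my_mutex *lock)
 *      {
 *              might_sleep();
 *              LOCK_CONTENDED(lock, my_mutex_trylock, __my_mutex_lock);
 *      }
 *
 * The try function runs first; only if it fails does lock_contended()
 * fire, and lock_acquired() records the wait time once the real lock
 * function returns.
 */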

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING                    1
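
/*
 * Usage sketch (hypothetical "parent"/"child" objects whose mutexes share
 * one class): tell the validator that taking the second lock one level
 * deep is an intended hierarchy, not a self-deadlock:
 *
 *      mutex_lock(&parent->mutex);
 *      mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */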

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)           lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)              lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)    lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)                lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)        lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)                      lock_release(l, i)

#define rwlock_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)         lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, i)                    lock_release(l, i)

#define seqcount_acquire(l, s, t, i)            lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)       lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)                  lock_release(l, i)

#define mutex_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)                     lock_release(l, i)

#define rwsem_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)          lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)                     lock_release(l, i)

#define lock_map_acquire(l)                     lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)                lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)             lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)                     lock_release(l, _THIS_IP_)
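
/*
 * Illustrative sketch: the lock_map_*() helpers, together with
 * STATIC_LOCKDEP_MAP_INIT() above, can annotate a pseudo-lock that has no
 * lock word at all - e.g. "waiting for a work item", similar to what the
 * workqueue code does (names here are hypothetical):
 *
 *      static struct lock_class_key my_dep_key;
 *      static struct lockdep_map my_dep_map =
 *              STATIC_LOCKDEP_MAP_INIT("my_dep", &my_dep_key);
 *
 *      lock_map_acquire(&my_dep_map);
 *      do_the_dependent_work();
 *      lock_map_release(&my_dep_map);
 */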

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)                                               \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
# define might_lock_read(lock)                                          \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
# define might_lock_nested(lock, subclass)                              \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,         \
                     _THIS_IP_);                                        \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
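
/*
 * Usage sketch (hypothetical names; foo_alloc_slow() takes foo->lock):
 * a function that takes a lock only on a rare path can still report the
 * potential dependency on every call, so the rare path does not have to
 * actually run for lockdep to see it:
 *
 *      static void *foo_get_buffer(struct foo *foo)
 *      {
 *              might_lock(&foo->lock);
 *              if (!foo->cache)
 *                      return foo_alloc_slow(foo);
 *              return foo->cache;
 *      }
 */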

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled       (debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()                                   \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()                                  \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()                                         \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()                             \
do {                                                                    \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     __lockdep_enabled                  &&              \
                     (preempt_count() != 0              ||              \
                      !this_cpu_read(hardirqs_enabled)));               \
} while (0)

#define lockdep_assert_preemption_disabled()                            \
do {                                                                    \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     __lockdep_enabled                  &&              \
                     (preempt_count() == 0              &&              \
                      this_cpu_read(hardirqs_enabled)));                \
} while (0)
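
/*
 * Usage sketch (hypothetical per-CPU counter): make the required context
 * explicit at function entry:
 *
 *      static void foo_count_event(void)
 *      {
 *              lockdep_assert_preemption_disabled();
 *              __this_cpu_inc(foo_events);
 *      }
 *
 * Note that disabled interrupts satisfy the preemption-disabled check,
 * matching the conditions encoded above.
 */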

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {                       \
                WARN_ONCE(debug_locks && !current->lockdep_recursion && \
                          lockdep_hardirq_context() &&                  \
                          !(current->hardirq_threaded || current->irq_config),  \
                          "Not in threaded context on PREEMPT_RT as expected\n");       \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */