TOMOYO Linux Cross Reference
Linux/include/linux/lockdep.h

/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES          8UL

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES           (1+3*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and a single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with single depth.
 */
#define NR_LOCKDEP_CACHING_CLASSES      2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
        char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
        struct lockdep_subclass_key     subkeys[MAX_LOCKDEP_SUBCLASSES];
};

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS         4

/*
 * The lock-class itself:
 */
struct lock_class {
        /*
         * class-hash:
         */
        struct hlist_node               hash_entry;

        /*
         * global list of all lock-classes:
         */
        struct list_head                lock_entry;

        struct lockdep_subclass_key     *key;
        unsigned int                    subclass;
        unsigned int                    dep_gen_id;

        /*
         * IRQ/softirq usage tracking bits:
         */
        unsigned long                   usage_mask;
        struct stack_trace              usage_traces[XXX_LOCK_USAGE_STATES];

        /*
         * These fields represent a directed graph of lock dependencies,
         * to every node we attach a list of "forward" and a list of
         * "backward" graph nodes.
         */
        struct list_head                locks_after, locks_before;

        /*
         * Generation counter, when doing certain classes of graph walking,
         * to ensure that we check one node only once:
         */
        unsigned int                    version;

        /*
         * Statistics counter:
         */
        unsigned long                   ops;

        const char                      *name;
        int                             name_version;

#ifdef CONFIG_LOCK_STAT
        unsigned long                   contention_point[LOCKSTAT_POINTS];
        unsigned long                   contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
        s64                             min;
        s64                             max;
        s64                             total;
        unsigned long                   nr;
};

enum bounce_type {
        bounce_acquired_write,
        bounce_acquired_read,
        bounce_contended_write,
        bounce_contended_read,
        nr_bounce_types,

        bounce_acquired = bounce_acquired_write,
        bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
        unsigned long                   contention_point[LOCKSTAT_POINTS];
        unsigned long                   contending_point[LOCKSTAT_POINTS];
        struct lock_time                read_waittime;
        struct lock_time                write_waittime;
        struct lock_time                read_holdtime;
        struct lock_time                write_holdtime;
        unsigned long                   bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
        struct lock_class_key           *key;
        struct lock_class               *class_cache[NR_LOCKDEP_CACHING_CLASSES];
        const char                      *name;
#ifdef CONFIG_LOCK_STAT
        int                             cpu;
        unsigned long                   ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
                                    struct lockdep_map *from)
{
        int i;

        *to = *from;
        /*
         * Since the class cache can be modified concurrently we could observe
         * half pointers (64bit arch using 32bit copy insns). Therefore clear
         * the caches and take the performance hit.
         *
         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
         *     that relies on cache abuse.
         */
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct stack_trace              trace;
        int                             distance;

        /*
         * The parent field is used to implement breadth-first search, and
         * bit 0 is reused to indicate whether the lock has been accessed
         * in the BFS.
         */
        struct lock_list                *parent;
};
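
/*
 * Editor's example (not part of the original header): the "bit 0" remark
 * above works because struct lock_list is pointer-aligned, so bit 0 of a
 * valid ->parent pointer is always clear and can carry a "visited" flag
 * during the BFS. A minimal sketch, with hypothetical example_* names:
 */
static inline void example_bfs_mark_accessed(struct lock_list *lock)
{
        /* tag the parent pointer: marks this node as visited */
        lock->parent = (struct lock_list *)((unsigned long)lock->parent | 1UL);
}

static inline int example_bfs_accessed(struct lock_list *lock)
{
        /* test the tag bit without disturbing the pointer itself */
        return (unsigned long)lock->parent & 1UL;
}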

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
        /* see BUILD_BUG_ON()s in lookup_chain_cache() */
        unsigned int                    irq_context :  2,
                                        depth       :  6,
                                        base        : 24;
        /* 4 byte hole */
        struct hlist_node               entry;
        u64                             chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS           13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS                ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency-caching and we skip detection
         * passes and dependency-updates if there is a cache-hit, so
         * it is absolutely critical for 100% coverage of the validator
         * to have a unique key value for every unique dependency path
         * that can occur in the system, to make a unique hash value
         * as likely as possible - hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * with zero), here we store the previous hash value:
         */
        u64                             prev_chain_key;
        unsigned long                   acquire_ip;
        struct lockdep_map              *instance;
        struct lockdep_map              *nest_lock;
#ifdef CONFIG_LOCK_STAT
        u64                             waittime_stamp;
        u64                             holdtime_stamp;
#endif
        unsigned int                    class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
        unsigned int trylock:1;                                         /* 16 bits */

        unsigned int read:2;        /* see lock_acquire() comment */
        unsigned int check:1;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
        unsigned int references:12;                                     /* 32 bits */
        unsigned int pin_count;
};
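
/*
 * Editor's example (not part of the original header): the chain key
 * described above grows one step per acquired lock, in the spirit of the
 * sketch below. The mixing step here is a stand-in illustrating the
 * fold-in pattern, not the validator's actual hash function:
 */
static inline u64 example_extend_chain_key(u64 prev_key, unsigned int class_idx)
{
        /* fold the new class index into the running one-way hash */
        return (prev_key << MAX_LOCKDEP_KEYS_BITS) ^
               (prev_key >> (64 - MAX_LOCKDEP_KEYS_BITS)) ^ (u64)class_idx;
}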

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }
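
/*
 * Editor's example (not part of the original header): declaring a
 * standalone, statically initialized map (names hypothetical):
 *
 *      static struct lock_class_key example_map_key;
 *      static struct lockdep_map example_map =
 *              STATIC_LOCKDEP_MAP_INIT("example_map", &example_map_key);
 */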

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
                lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
                lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
                lockdep_init_map(&(lock)->dep_map, #lock, \
                                 (lock)->dep_map.key, sub)
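
/*
 * Editor's example (not part of the original header): a typical class
 * split, giving one special instance its own key so the validator tracks
 * it separately. Assumes the spinlock API from <linux/spinlock.h>; all
 * example_* names are hypothetical:
 *
 *      static struct lock_class_key example_inner_key;
 *
 *      static void example_init_inner_lock(spinlock_t *lock)
 *      {
 *              spin_lock_init(lock);
 *              lockdep_set_class(lock, &example_inner_key);
 *      }
 */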

#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
                                    struct lock_class_key *key)
{
        return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
                         struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
                         unsigned long ip);
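
/*
 * Editor's example (not part of the original header): a locking primitive
 * normally brackets its low-level acquire/release with these hooks, much
 * like the spin_acquire()/spin_release() wrappers later in this file.
 * Hypothetical "example" primitive:
 *
 *      void example_lock(struct example *e)
 *      {
 *              lock_acquire(&e->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *              do_raw_example_lock(e);
 *      }
 *
 *      void example_unlock(struct example *e)
 *      {
 *              lock_release(&e->dep_map, 0, _RET_IP_);
 *              do_raw_example_unlock(e);
 *      }
 *
 * Note that lock_acquire() runs before the lock is actually taken, so a
 * deadlock is reported before the system would hang on it.
 */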

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(struct lockdep_map *lock, int read);

static inline int lock_is_held(struct lockdep_map *lock)
{
        return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)           lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)   lock_is_held_type(&(lock)->dep_map, (r))
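
/*
 * Editor's example (not part of the original header): lockdep_is_held()
 * typically backs "must be called with X held" checks (hypothetical
 * structure, assumed to embed a dep_map via its lock):
 *
 *      static void example_update(struct example *e)
 *      {
 *              WARN_ON_ONCE(debug_locks && !lockdep_is_held(&e->lock));
 *              e->counter++;
 *      }
 *
 * lockdep_is_held_type() refines the check: r == 0 requires an exclusive
 * (write) hold, r == 1 a read hold, and lock_is_held() accepts either.
 */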

extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
                unsigned int subclass, unsigned long ip)
{
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
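
/*
 * Editor's example (not part of the original header): pinning catches an
 * unexpected unlock of a lock the caller relies on staying held across a
 * callout (the scheduler uses this pattern for rq->lock). Sketch, with a
 * hypothetical callee:
 *
 *      struct pin_cookie cookie = lockdep_pin_lock(lock);
 *
 *      example_call_that_must_not_drop_lock();
 *
 *      lockdep_unpin_lock(lock, cookie);
 *
 * Releasing the lock while it is pinned triggers a lockdep warning, and
 * the cookie makes each pin/unpin pair explicit.
 */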

# define INIT_LOCKDEP                           .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)      (debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)  do {                            \
                WARN_ON(debug_locks && !lockdep_is_held(l));    \
        } while (0)

#define lockdep_assert_held_exclusive(l)        do {                    \
                WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));    \
        } while (0)

#define lockdep_assert_held_read(l)     do {                            \
                WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));    \
        } while (0)

#define lockdep_assert_held_once(l)     do {                            \
                WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));       \
        } while (0)
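
/*
 * Editor's example (not part of the original header): a function enforcing
 * its locking contract instead of merely documenting it (hypothetical
 * names):
 *
 *      static void example_dequeue(struct example_queue *q)
 *      {
 *              lockdep_assert_held(&q->lock);
 *              list_del(q->head.next);
 *      }
 *
 * Unlike a comment, the assertion WARN()s at runtime when the caller does
 * not actually hold q->lock, as long as debug_locks is still enabled.
 */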

#define lockdep_recursing(tsk)  ((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)     lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)      do { } while (0)
# define lock_release(l, n, i)                  do { } while (0)
# define lock_set_class(l, n, k, s, i)          do { } while (0)
# define lock_set_subclass(l, s, i)             do { } while (0)
# define lockdep_set_current_reclaim_state(g)   do { } while (0)
# define lockdep_clear_current_reclaim_state()  do { } while (0)
# define lockdep_trace_alloc(g)                 do { } while (0)
# define lockdep_info()                         do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)           do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)        do { } while (0)

# define lockdep_set_novalidate_class(lock)     do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !CONFIG_LOCKDEP case, since the result is not well defined and callers
 * should rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()                do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)    do { } while (0)
# define lockdep_sys_exit()                     do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)      (0)

#define lockdep_is_held_type(l, r)              (1)

#define lockdep_assert_held(l)                  do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)        do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)             do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)             do { (void)(l); } while (0)

#define lockdep_recursing(tsk)                  (0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)                     ({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)                do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)                do { (void)(l); (void)(c); } while (0)

#endif /* !CONFIG_LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)                        \
do {                                                            \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                lock(_lock);                                    \
        }                                                       \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);             \
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)                 \
({                                                              \
        int ____err = 0;                                        \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                ____err = lock(_lock);                          \
        }                                                       \
        if (!____err)                                           \
                lock_acquired(&(_lock)->dep_map, _RET_IP_);     \
        ____err;                                                \
})
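
/*
 * Editor's example (not part of the original header): this is how the
 * sleeping-lock slow paths hook up contention statistics; rwsem's
 * down_write() does essentially:
 *
 *      LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 *
 * The uncontended fast path records nothing extra; only when the trylock
 * fails does lock_contended() log the contention point before the real
 * acquire, and lock_acquired() then timestamps the eventual success.
 */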

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep we don't want the hand-coded irq-enable of the
 * _raw_*_lock_flags() code, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING                    1
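
/*
 * Editor's example (not part of the original header): the classic use is
 * taking two locks of the same class in a fixed order, via the _nested()
 * API from <linux/spinlock.h>:
 *
 *      spin_lock(&parent->lock);
 *      spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *      ...
 *      spin_unlock(&child->lock);
 *      spin_unlock(&parent->lock);
 *
 * Without the subclass annotation lockdep would report the second acquire
 * as a possible recursive deadlock, since both locks share one class.
 */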

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)           lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)              lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)    lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)                lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)        lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)                   lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)         lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)                 lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)            lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)       lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)               lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)                  lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)          lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)                  lock_release(l, n, i)

#define lock_map_acquire(l)                     lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)                lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)             lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)                     lock_release(l, 1, _THIS_IP_)
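
/*
 * Editor's example (not part of the original header): the lock_map_*()
 * helpers drive a bare lockdep_map that models a non-lock construct. The
 * workqueue code uses this pattern to catch flush-vs-work deadlocks;
 * sketched with a hypothetical runner:
 *
 *      lock_map_acquire(&work->lockdep_map);
 *      example_run_work(work);
 *      lock_map_release(&work->lockdep_map);
 *
 * Any lock taken inside the work item becomes ordered against the map, so
 * flushing the work while holding such a lock is reported as a deadlock.
 */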

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)                                               \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
} while (0)
# define might_lock_read(lock)                                          \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
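
/*
 * Editor's example (not part of the original header): might_lock()
 * documents a dependency that is only taken on a slow path, so lockdep
 * learns it even on runs where that path is never hit (hypothetical
 * names):
 *
 *      static void *example_get_buffer(struct example *e)
 *      {
 *              might_lock(&e->mutex);
 *              if (likely(e->cached))
 *                      return e->cached;
 *              return example_slow_alloc(e);   (takes e->mutex)
 *      }
 */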

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */