
TOMOYO Linux Cross Reference
Linux/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LOCKS_H
#define LOCKS_H

#include <errno.h>	/* EBUSY, returned by pthread_mutex_trylock() */
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>

#include "assume.h"
#include "bug_on.h"
#include "preempt.h"

int nondet_int(void);

#define __acquire(x)
#define __acquires(x)
#define __release(x)
#define __releases(x)
/* Only use one lock mechanism. Select which one. */
#ifdef PTHREAD_LOCK
struct lock_impl {
        pthread_mutex_t mutex;
};

static inline void lock_impl_lock(struct lock_impl *lock)
{
        BUG_ON(pthread_mutex_lock(&lock->mutex));
}

static inline void lock_impl_unlock(struct lock_impl *lock)
{
        BUG_ON(pthread_mutex_unlock(&lock->mutex));
}

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
        int err = pthread_mutex_trylock(&lock->mutex);

        if (!err)
                return true;
        else if (err == EBUSY)
                return false;
        BUG();
}

static inline void lock_impl_init(struct lock_impl *lock)
{
        pthread_mutex_init(&lock->mutex, NULL);
}

#define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER}

#else /* !defined(PTHREAD_LOCK) */
/* Spinlock that assumes that it always gets the lock immediately. */

struct lock_impl {
        bool locked;
};

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
#ifdef RUN
        /* TODO: Should this be a test and set? */
        return __sync_bool_compare_and_swap(&lock->locked, false, true);
#else
        __CPROVER_atomic_begin();
        bool old_locked = lock->locked;
        lock->locked = true;
        __CPROVER_atomic_end();

        /* Minimal barrier to prevent accesses leaking out of lock. */
        __CPROVER_fence("RRfence", "RWfence");

        return !old_locked;
#endif
}

static inline void lock_impl_lock(struct lock_impl *lock)
{
        /*
         * CBMC doesn't support busy waiting, so just assume that the
         * lock is available.
         */
        assume(lock_impl_trylock(lock));

        /*
         * If the lock was already held by this thread then the assumption
         * is unsatisfiable (deadlock).
         */
}
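
/*
 * Editor's illustration, not part of the original header: how the assume()
 * based acquisition above behaves under CBMC. Both outcomes of
 * lock_impl_trylock() are explored, and assume() discards the executions in
 * which the lock was unavailable, so no busy waiting needs to be modelled.
 * The function name below is hypothetical.
 */
static inline void lock_impl_demo_self_deadlock(struct lock_impl *lock)
{
        lock_impl_lock(lock);
        /*
         * A second acquisition can only satisfy the assumption if some other
         * thread releases the lock in between; with no such thread, every
         * execution reaching this point is pruned, which is how a
         * self-deadlock shows up (as unsatisfiability, not as a failure).
         */
        lock_impl_lock(lock);
}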

static inline void lock_impl_unlock(struct lock_impl *lock)
{
#ifdef RUN
        BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));
#else
        /* Minimal barrier to prevent accesses leaking out of lock. */
        __CPROVER_fence("RWfence", "WWfence");

        __CPROVER_atomic_begin();
        bool old_locked = lock->locked;
        lock->locked = false;
        __CPROVER_atomic_end();

        BUG_ON(!old_locked);
#endif
}

static inline void lock_impl_init(struct lock_impl *lock)
{
        lock->locked = false;
}

#define LOCK_IMPL_INITIALIZER {.locked = false}

#endif /* !defined(PTHREAD_LOCK) */
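
/*
 * Editor's illustration, not part of the original header: both lock_impl
 * variants above expose the same interface, so callers need not care whether
 * the pthread mutex (built with PTHREAD_LOCK defined) or the CBMC model is
 * in use. A minimal sketch of that interface; the function name is
 * hypothetical.
 */
static inline bool lock_impl_demo_interface(void)
{
        struct lock_impl demo;

        lock_impl_init(&demo);          /* or initialize with LOCK_IMPL_INITIALIZER */
        if (!lock_impl_trylock(&demo))  /* a private lock cannot be contended */
                return false;
        lock_impl_unlock(&demo);
        return true;
}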

/*
 * Implement spinlocks using the lock mechanism. Wrap the lock to prevent mixing
 * locks of different types.
 */
typedef struct {
        struct lock_impl internal_lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER}
#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED

static inline void spin_lock_init(spinlock_t *lock)
{
        lock_impl_init(&lock->internal_lock);
}

static inline void spin_lock(spinlock_t *lock)
{
        /*
         * Spin locks also need to be removed in order to eliminate all
         * memory barriers. They are only used by the write side anyway.
         */
#ifndef NO_SYNC_SMP_MB
        preempt_disable();
        lock_impl_lock(&lock->internal_lock);
#endif
}

static inline void spin_unlock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
        lock_impl_unlock(&lock->internal_lock);
        preempt_enable();
#endif
}
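
/*
 * Editor's illustration, not part of the original header: as the comment in
 * spin_lock() notes, defining NO_SYNC_SMP_MB makes spin_lock()/spin_unlock()
 * compile to empty functions so that all barriers disappear. A write-side
 * critical section therefore reads the same either way; the function name
 * below is hypothetical.
 */
static inline void spinlock_demo_writer(spinlock_t *lock, int *shared)
{
        spin_lock(lock);
        (*shared)++;            /* protected update (when locking is enabled) */
        spin_unlock(lock);
}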

/* Don't bother with interrupts */
#define spin_lock_irq(lock) spin_lock(lock)
#define spin_unlock_irq(lock) spin_unlock(lock)
#define spin_lock_irqsave(lock, flags) spin_lock(lock)
#define spin_unlock_irqrestore(lock, flags) spin_unlock(lock)

/*
 * This is supposed to return an int, but I think that a bool should work as
 * well.
 */
static inline bool spin_trylock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
        preempt_disable();
        return lock_impl_trylock(&lock->internal_lock);
#else
        return true;
#endif
}
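
/*
 * Editor's illustration, not part of the original header: callers use the
 * return value of spin_trylock() only as a truth value, so returning bool
 * rather than int fits the usual pattern. Hypothetical example:
 */
static inline bool spin_trylock_demo(spinlock_t *lock, int *shared)
{
        if (!spin_trylock(lock))
                return false;   /* lock unavailable, caller retries or bails out */
        (*shared)++;
        spin_unlock(lock);
        return true;
}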

struct completion {
        /* Hopefully this won't overflow. */
        unsigned int count;
};

#define COMPLETION_INITIALIZER(x) {.count = 0}
#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)

static inline void init_completion(struct completion *c)
{
        c->count = 0;
}

static inline void wait_for_completion(struct completion *c)
{
        unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);

        assume(prev_count);
}

static inline void complete(struct completion *c)
{
        unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);

        BUG_ON(prev_count == UINT_MAX);
}
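
/*
 * Editor's illustration, not part of the original header: the counter-based
 * completion above turns waiting into pruning. wait_for_completion()
 * decrements the count and assume()s that the old value was nonzero, so CBMC
 * only keeps interleavings in which a matching complete() has already run;
 * no blocking is modelled. The two hypothetical thread bodies below sketch
 * the intended pairing.
 */
static inline void completion_demo_producer(struct completion *done)
{
        complete(done);                 /* count goes from 0 to 1 */
}

static inline void completion_demo_consumer(struct completion *done)
{
        wait_for_completion(done);      /* only reachable after the producer has run */
}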

/* This function probably isn't very useful for CBMC. */
static inline bool try_wait_for_completion(struct completion *c)
{
        BUG();
}

static inline bool completion_done(struct completion *c)
{
        return c->count;
}

/* TODO: Implement complete_all */
static inline void complete_all(struct completion *c)
{
        BUG();
}

#endif
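
The following is an editor's sketch, not part of locks.h or of the srcu-cbmc
test suite: a minimal, hypothetical harness showing how the primitives above
combine. It relies on CBMC's modelling of pthread_create()/pthread_join(), and
all names in it are invented for illustration.

#include <pthread.h>
#include "locks.h"

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_COMPLETION(demo_done);
static int demo_shared;

static void *demo_writer(void *arg)
{
        spin_lock(&demo_lock);
        demo_shared++;
        spin_unlock(&demo_lock);
        complete(&demo_done);
        return NULL;
}

int main(void)
{
        pthread_t t;

        BUG_ON(pthread_create(&t, NULL, demo_writer, NULL));
        /* Only interleavings in which the writer has signalled survive. */
        wait_for_completion(&demo_done);
        BUG_ON(pthread_join(t, NULL));
        return 0;
}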
