TOMOYO Linux Cross Reference
Linux/kernel/locking/spinlock_rt.c


// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non RT semantics:
 *
 * - Unlike plain rtmutexes, spinlocks and rwlocks are state
 *   preserving. The task state is saved before blocking on the underlying
 *   rtmutex, and restored when the lock has been acquired. Regular wakeups
 *   during that time are redirected to the saved state so no wakeup is
 *   missed.
 *
 * - Non RT spin/rwlocks disable preemption and possibly also interrupts.
 *   Disabling preemption has the side effect of disabling migration and
 *   preventing RCU grace periods.
 *
 *   The RT substitutions explicitly disable migration and take
 *   rcu_read_lock() across the lock held section.
 */
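/*
 * Illustrative sketch (not part of the original file): with this
 * substitution in place, a regular lock section on RT looks like
 *
 *	spin_lock(&lock);	// may block on the underlying rtmutex
 *	...			// preemptible, but migration is disabled
 *				// and an RCU read side section is held
 *	spin_unlock(&lock);
 *
 * so most lock users keep working unchanged while the critical section
 * itself becomes preemptible.
 */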
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"

/*
 * __might_resched() skips the state check as rtlocks are state
 * preserving. Take RCU nesting into account as spin/read/write_lock() can
 * legitimately nest into an RCU read side critical section.
 */
#define RTLOCK_RESCHED_OFFSETS						\
	(rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)

#define rtlock_might_resched()						\
	__might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)

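/*
 * Fast path: try to transition the lock owner from NULL to current with a
 * single cmpxchg. Only on contention fall back to the rtmutex slow path.
 */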
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}

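/*
 * Acquire the lock first, then enter the RCU read side critical section
 * and disable migration, as described in the comment at the top of this
 * file.
 */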
static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
	rtlock_might_resched();
	rtlock_lock(&lock->lock);
	rcu_read_lock();
	migrate_disable();
}

void __sched rt_spin_lock(spinlock_t *lock)
{
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
				    struct lockdep_map *nest_lock)
{
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif

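/*
 * Unlock in reverse order: drop the lockdep annotation, reenable
 * migration, leave the RCU read side critical section and finally release
 * the rtmutex, using the fast path cmpxchg (current -> NULL) when there
 * are no waiters.
 */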
void __sched rt_spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();

	if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
		rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
	spin_lock(lock);
	spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);

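/*
 * Trylock: the fast path cmpxchg again tries to transition NULL ->
 * current; on failure rt_mutex_slowtrylock() checks whether the lock is
 * really contended. The RCU read side section and migration disable are
 * only taken when the lock has actually been acquired.
 */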
static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
	int ret = 1;

	if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
		ret = rt_mutex_slowtrylock(&lock->lock);

	if (ret) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}

int __sched rt_spin_trylock(spinlock_t *lock)
{
	return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);

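/*
 * spin_trylock_bh() semantics: bottom halves are disabled before the
 * attempt and reenabled when the lock could not be acquired, so on
 * success the caller leaves with both the lock held and BH disabled.
 */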
int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
	int ret;

	local_bh_disable();
	ret = __rt_spin_trylock(lock);
	if (!ret)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);

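/*
 * The wait type is LD_WAIT_CONFIG: these locks spin on !RT and sleep on
 * RT, and lockdep validates the nesting rules accordingly.
 */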
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
			 struct lock_class_key *key, bool percpu)
{
	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
			      LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif

/*
 * RT-specific reader/writer locks
 */
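/*
 * The macros and helpers below adapt the rtmutex primitives to the
 * interface expected by the reader/writer lock core, which is textually
 * included via rwbase_rt.c further down.
 */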
#define rwbase_set_and_save_current_state(state)	\
	current_save_and_set_rtlock_wait_state()

#define rwbase_restore_current_state()			\
	current_restore_rtlock_saved_state()

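/*
 * @state is part of the rwbase interface but deliberately ignored here:
 * rtlock waits always happen in TASK_RTLOCK_WAIT state, and these lock
 * operations can neither fail nor be interrupted, hence the constant
 * return value of 0.
 */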
static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
	return 0;
}

static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
{
	rtlock_slowlock_locked(rtm);
	return 0;
}

static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
		return;

	rt_mutex_slowunlock(rtm);
}

static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(rtm);
}

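/*
 * rwlock waits on RT are not interruptible by signals, so the signal
 * pending check compiles away to a constant 0.
 */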
#define rwbase_signal_pending_state(state, current)	(0)

#define rwbase_schedule()				\
	schedule_rtlock()

#include "rwbase_rt.c"
/*
 * The common functions which get wrapped into the rwlock API.
 */
int __sched rt_read_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_read_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

int __sched rt_write_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_write_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_write_trylock);

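/*
 * The lock operations mirror the spinlock variants: debug resched check,
 * lockdep annotation, the actual rwbase acquisition in TASK_RTLOCK_WAIT
 * state, then RCU read section and migration disable.
 */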
void __sched rt_read_lock(rwlock_t *rwlock)
{
	rtlock_might_resched();
	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);

void __sched rt_write_lock(rwlock_t *rwlock)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock_nested);
#endif

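/*
 * Unlock undoes the lock sequence in reverse before dropping the rwbase
 * side of the lock.
 */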
void __sched rt_read_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();
	rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);

void __sched rt_write_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	rcu_read_unlock();
	migrate_enable();
	rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
		      struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
	lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif