TOMOYO Linux Cross Reference
Linux/arch/sh/include/asm/spinlock.h

/*
 * include/asm-sh/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

/*
 * The only locking implemented here uses SH-4A opcodes. For others,
 * split this out as per atomic-*.h.
 */
#ifndef CONFIG_CPU_SH4A
#error "Need movli.l/movco.l for spinlocks"
#endif

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define arch_spin_is_locked(x)		((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0 ! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
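
/*
 * Illustrative only, not part of the original source: a rough C-level
 * sketch of the load-locked/store-conditional loop above.  movli.l
 * begins an atomic sequence and movco.l succeeds only if no other CPU
 * touched the word in between; the cmp/pl then verifies that the value
 * we overwrote with 0 was positive, i.e. that the lock was free:
 *
 *	do {
 *		oldval = lock->lock;	// movli.l (load-locked)
 *		lock->lock = 0;		// movco.l (store-conditional)
 *	} while (sc_failed || oldval <= 0);
 *
 * sc_failed is a stand-in for the T bit, which movco.l clears when the
 * conditional store fails.
 */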

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0 ! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}
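
/*
 * Illustrative note, not from the original source: the trylock returns
 * the previous lock value, so a nonzero (positive) result means the
 * lock was free and has now been taken, while 0 means it was already
 * held.  The trailing synco is SH-4A's synchronization instruction,
 * used here so the acquisition is ordered ahead of subsequent accesses
 * to the protected data.
 */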

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)	((x)->lock > 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
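
/*
 * Illustrative summary, not from the original source: the rwlock
 * counter starts at RW_LOCK_BIAS when the lock is free.  Each reader
 * subtracts 1 and a writer subtracts the whole bias, so:
 *
 *	lock == RW_LOCK_BIAS		free
 *	0 < lock < RW_LOCK_BIAS		(RW_LOCK_BIAS - lock) readers
 *	lock == 0			one writer
 *
 * which is exactly what the two can_lock tests above check.
 */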

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0 ! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}
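
/*
 * Illustrative note, not from the original source: the two bf 1b
 * branches retry for different reasons.  The first spins while cmp/pl
 * sees a non-positive count (a writer holds the lock); the second
 * retries when the movco.l store-conditional failed because another
 * CPU modified the count in between.
 */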

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0 ! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0 ! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}
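
/*
 * Illustrative note, not from the original source: read_unlock needs
 * the ll/sc loop because other readers may be adjusting the count at
 * the same time, whereas write_unlock can use a plain store of
 * RW_LOCK_BIAS since the writer held the lock exclusively - just as
 * arch_spin_unlock() above stores 1 directly.
 */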

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0 ! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0 ! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}
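
/*
 * Illustrative note, not from the original source: the two trylocks
 * fail differently.  arch_read_trylock() branches past the movco.l
 * entirely when no read lock is available, leaving the word untouched,
 * while here the "2:" label sits before the movco.l, so on failure the
 * unmodified value is stored back as a harmless no-op.  The return
 * value compares the pre-subtraction count against RW_LOCK_BIAS to
 * report whether the write lock was actually taken.
 */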

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_H */