TOMOYO Linux Cross Reference
Linux/arch/arm64/include/asm/atomic.h

/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)  (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

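/*
 * Editorial sketch, not part of the upstream header: atomic_read() and
 * atomic_set() are plain, unordered accesses, so any ordering against
 * surrounding loads and stores has to come from explicit barriers. The
 * helper below is hypothetical and only illustrates that pairing.
 */
static inline void example_atomic_reset(atomic_t *v)
{
        atomic_set(v, 0);       /* plain store, no implied ordering */
        smp_mb();               /* order the reset against later accesses */
}
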
/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        asm volatile("// atomic_add\n"
"1:     ldxr    %w0, %2\n"
"       add     %w0, %w0, %w3\n"
"       stxr    %w1, %w0, %2\n"
"       cbnz    %w1, 1b"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i)
        : "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        asm volatile("// atomic_add_return\n"
"1:     ldaxr   %w0, %2\n"
"       add     %w0, %w0, %w3\n"
"       stlxr   %w1, %w0, %2\n"
"       cbnz    %w1, 1b"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i)
        : "cc", "memory");

        return result;
}
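
/*
 * Editorial note, not in the upstream file: the value-returning variants
 * use acquire/release exclusives (ldaxr/stlxr) plus a "memory" clobber,
 * which this version of the kernel relies on for the full-barrier
 * semantics expected of value-returning atomics (later kernels
 * strengthened this with an explicit dmb). The void atomic_add() above
 * and atomic_sub() below use plain ldxr/stxr and imply no ordering.
 */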

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        asm volatile("// atomic_sub\n"
"1:     ldxr    %w0, %2\n"
"       sub     %w0, %w0, %w3\n"
"       stxr    %w1, %w0, %2\n"
"       cbnz    %w1, 1b"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i)
        : "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        asm volatile("// atomic_sub_return\n"
"1:     ldaxr   %w0, %2\n"
"       sub     %w0, %w0, %w3\n"
"       stlxr   %w1, %w0, %2\n"
"       cbnz    %w1, 1b"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i)
        : "cc", "memory");

        return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        unsigned long tmp;
        int oldval;

        asm volatile("// atomic_cmpxchg\n"
"1:     ldaxr   %w1, %2\n"
"       cmp     %w1, %w3\n"
"       b.ne    2f\n"
"       stlxr   %w0, %w4, %2\n"
"       cbnz    %w0, 1b\n"
"2:"
        : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc", "memory");

        return oldval;
}
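
/*
 * Editorial sketch, not part of the upstream header: atomic_cmpxchg()
 * returns the value observed before the store attempt, so the usual
 * read-modify-write retry loop compares that return value against the
 * expected old value. The helper name is hypothetical.
 */
static inline void example_atomic_or(int mask, atomic_t *v)
{
        int old, new;

        do {
                old = atomic_read(v);
                new = old | mask;
        } while (atomic_cmpxchg(v, old, new) != old);
}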

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long tmp, tmp2;

        asm volatile("// atomic_clear_mask\n"
"1:     ldxr    %0, %2\n"
"       bic     %0, %0, %3\n"
"       stxr    %w1, %0, %2\n"
"       cbnz    %w1, 1b"
        : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
        : "Ir" (mask)
        : "cc");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c;
}
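
/*
 * Editorial sketch, not part of the upstream header: __atomic_add_unless()
 * returns the value it found, so "take a reference unless the count is
 * already zero" can be built on it as in the hypothetical helper below.
 */
static inline int example_get_unless_zero(atomic_t *ref)
{
        /* A non-zero return means the increment was performed. */
        return __atomic_add_unless(ref, 1, 0) != 0;
}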

#define atomic_inc(v)           atomic_add(1, v)
#define atomic_dec(v)           atomic_sub(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)        (*(volatile long long *)&(v)->counter)
#define atomic64_set(v,i)       (((v)->counter) = (i))

static inline void atomic64_add(u64 i, atomic64_t *v)
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_add\n"
"1:     ldxr    %0, %2\n"
"       add     %0, %0, %3\n"
"       stxr    %w1, %0, %2\n"
"       cbnz    %w1, 1b"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i)
        : "cc");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_add_return\n"
"1:     ldaxr   %0, %2\n"
"       add     %0, %0, %3\n"
"       stlxr   %w1, %0, %2\n"
"       cbnz    %w1, 1b"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i)
        : "cc", "memory");

        return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_sub\n"
"1:     ldxr    %0, %2\n"
"       sub     %0, %0, %3\n"
"       stxr    %w1, %0, %2\n"
"       cbnz    %w1, 1b"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i)
        : "cc");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_sub_return\n"
"1:     ldaxr   %0, %2\n"
"       sub     %0, %0, %3\n"
"       stlxr   %w1, %0, %2\n"
"       cbnz    %w1, 1b"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i)
        : "cc", "memory");

        return result;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
        long oldval;
        unsigned long res;

        asm volatile("// atomic64_cmpxchg\n"
"1:     ldaxr   %1, %2\n"
"       cmp     %1, %3\n"
"       b.ne    2f\n"
"       stlxr   %w0, %4, %2\n"
"       cbnz    %w0, 1b\n"
"2:"
        : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc", "memory");

        return oldval;
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
"1:     ldaxr   %0, %2\n"
"       subs    %0, %0, #1\n"
"       b.mi    2f\n"
"       stlxr   %w1, %0, %2\n"
"       cbnz    %w1, 1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
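
/*
 * Editorial sketch, not part of the upstream header:
 * atomic64_dec_if_positive() returns the decremented value on success, or
 * a negative value (without storing) if the counter was already zero or
 * negative. A common pattern is consuming one unit of a budget only when
 * the decrement actually happened; the helper below is hypothetical.
 */
static inline int example_try_consume(atomic64_t *budget)
{
        return atomic64_dec_if_positive(budget) >= 0;
}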

static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;

        c = atomic64_read(v);
        while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
                c = old;

        return c != u;
}

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)

#endif
#endif
