TOMOYO Linux Cross Reference
Linux/arch/arm/include/asm/atomic.h

/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)  (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

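/*
 * A minimal usage sketch (illustrative only; nr_users is a hypothetical
 * counter). atomic_set() is a plain store and atomic_read() a volatile
 * load; neither implies a memory barrier.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) > 0)
 *		do_something();
 */
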
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_add\n"
"1:     ldrex   %0, [%3]\n"
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();

        __asm__ __volatile__("@ atomic_add_return\n"
"1:     ldrex   %0, [%3]\n"
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

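/*
 * A minimal sketch of the calling convention (stats/pending and
 * wake_up_worker() are hypothetical): atomic_add() has no ordering
 * guarantee, while atomic_add_return() is bracketed by smp_mb() above
 * and so acts as a full barrier around the update.
 *
 *	atomic_add(1, &stats);			// unordered update
 *	if (atomic_add_return(1, &pending) == 1)
 *		wake_up_worker();		// fully ordered RMW
 */
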
static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_sub\n"
"1:     ldrex   %0, [%3]\n"
"       sub     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();

        __asm__ __volatile__("@ atomic_sub_return\n"
"1:     ldrex   %0, [%3]\n"
"       sub     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
                "teq    %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        smp_mb();

        return oldval;
}
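
/*
 * A minimal cmpxchg retry-loop sketch (illustrative; atomic_max() is a
 * hypothetical helper, not part of this header). atomic_cmpxchg()
 * returns the value it observed, so the caller loops until the
 * compare-and-swap succeeds or the update becomes unnecessary.
 *
 *	static void atomic_max(atomic_t *v, int i)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < i) {
 *			int prev = atomic_cmpxchg(v, old, i);
 *			if (prev == old)
 *				break;		// swap succeeded
 *			old = prev;		// raced; retry with new value
 *		}
 *	}
 */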

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__("@ atomic_clear_mask\n"
"1:     ldrex   %0, [%3]\n"
"       bic     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
        : "r" (addr), "Ir" (mask)
        : "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val += i;
        raw_local_irq_restore(flags);

        return val;
}
#define atomic_add(i, v)        (void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val -= i;
        raw_local_irq_restore(flags);

        return val;
}
#define atomic_sub(i, v)        (void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        *addr &= ~mask;
        raw_local_irq_restore(flags);
}
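
/*
 * A sketch of the reasoning behind this fallback (illustrative, not
 * original text): with SMP ruled out by the #error above, disabling
 * local interrupts makes a plain read-modify-write atomic, because the
 * only other context that could touch the counter on a UP machine is an
 * interrupt handler on the same CPU.
 *
 *	raw_local_irq_save(flags);	// no IRQ can preempt the RMW
 *	v->counter += i;		// plain C update is now safe
 *	raw_local_irq_restore(flags);
 */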

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c;
}
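
/*
 * A minimal usage sketch (illustrative; obj and its refcount are
 * hypothetical). This helper returns the old value, so comparing
 * against u tells the caller whether the add actually happened; the
 * common "take a reference only if not already zero" idiom is:
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) != 0)
 *		use(obj);	// reference successfully taken
 */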

#define atomic_inc(v)           atomic_add(1, v)
#define atomic_dec(v)           atomic_sub(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
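
/*
 * A minimal sketch of the classic put-side idiom built from these macros
 * (obj and release_obj() are hypothetical):
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		release_obj(obj);	// we dropped the last reference
 */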

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrd    %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        __asm__ __volatile__("@ atomic64_set\n"
"       strd    %2, %H2, [%1]"
        : "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        );
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        long long tmp;

        __asm__ __volatile__("@ atomic64_set\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       strexd  %0, %3, %H3, [%2]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
#endif

static inline void atomic64_add(long long i, atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        __asm__ __volatile__("@ atomic64_add\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       adds    %0, %0, %4\n"
"       adc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_add_return\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       adds    %0, %0, %4\n"
"       adc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline void atomic64_sub(long long i, atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        __asm__ __volatile__("@ atomic64_sub\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, %4\n"
"       sbc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_sub_return\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, %4\n"
"       sbc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
                                        long long new)
{
        long long oldval;
        unsigned long res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd         %1, %H1, [%3]\n"
                "mov            %0, #0\n"
                "teq            %1, %4\n"
                "teqeq          %H1, %H4\n"
                "strexdeq       %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        smp_mb();

        return oldval;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
        long long result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_xchg\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       strexd  %1, %4, %H4, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        smp_mb();

        return result;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, #1\n"
"       sbc     %H0, %H0, #0\n"
"       teq     %H0, #0\n"
"       bmi     2f\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}
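
/*
 * A minimal usage sketch (illustrative; sem is hypothetical).
 * atomic64_dec_if_positive() returns the decremented value, and skips
 * the store when that value would be negative, so a negative return
 * means the counter was already at zero:
 *
 *	if (atomic64_dec_if_positive(&sem->count) < 0)
 *		wait_for_resource();
 */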

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
        long long val;
        unsigned long tmp;
        int ret = 1;

        smp_mb();

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1:     ldrexd  %0, %H0, [%4]\n"
"       teq     %0, %5\n"
"       teqeq   %H0, %H5\n"
"       moveq   %1, #0\n"
"       beq     2f\n"
"       adds    %0, %0, %6\n"
"       adc     %H0, %H0, %H6\n"
"       strexd  %2, %0, %H0, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (ret)
                smp_mb();

        return ret;
}

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)
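
/*
 * A minimal usage sketch of the 64-bit helpers (illustrative;
 * bytes_written, log and write_entry() are hypothetical):
 *
 *	static atomic64_t bytes_written = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes_written);
 *	if (atomic64_inc_not_zero(&log->refcount))
 *		write_entry(log);	// refcount was nonzero, now held
 */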
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif
