Linux/arch/arm64/include/asm/atomic_ll_sc.h

/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */
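/*
 * Example expansion (illustrative sketch, not generated text): when these
 * routines are used inline, i.e. __LL_SC_INLINE/__LL_SC_PREFIX resolve to
 * an ordinary static inline definition, ATOMIC_OP(add, add) below
 * produces roughly:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		asm volatile("// atomic_add\n"
 *	"	prfm	pstl1strm, %2\n"	// prefetch the line for store
 *	"1:	ldxr	%w0, %2\n"		// load-exclusive v->counter
 *	"	add	%w0, %w0, %w3\n"	// apply the operation
 *	"	stxr	%w1, %w0, %2\n"		// store-exclusive, %w1 = status
 *	"	cbnz	%w1, 1b"		// lost exclusivity: retry
 *		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *		: "Ir" (i));
 *	}
 */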

#define ATOMIC_OP(op, asm_op)                                           \
__LL_SC_INLINE void                                                     \
__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v))                    \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "\n"                              \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ldxr    %w0, %2\n"                                              \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       stxr    %w1, %w0, %2\n"                                         \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \
__LL_SC_EXPORT(arch_atomic_##op);

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)            \
__LL_SC_INLINE int                                                      \
__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v))     \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "_return" #name "\n"              \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ld" #acq "xr    %w0, %2\n"                                      \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       st" #rel "xr    %w1, %w0, %2\n"                                 \
"       cbnz    %w1, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(arch_atomic_##op##_return##name);

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)             \
__LL_SC_INLINE int                                                      \
__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v))        \
{                                                                       \
        unsigned long tmp;                                              \
        int val, result;                                                \
                                                                        \
        asm volatile("// atomic_fetch_" #op #name "\n"                  \
"       prfm    pstl1strm, %3\n"                                        \
"1:     ld" #acq "xr    %w0, %3\n"                                      \
"       " #asm_op "     %w1, %w0, %w4\n"                                \
"       st" #rel "xr    %w2, %w1, %3\n"                                 \
"       cbnz    %w2, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(arch_atomic_fetch_##op##name);

#define ATOMIC_OPS(...)                                                 \
        ATOMIC_OP(__VA_ARGS__)                                          \
        ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)
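/*
 * For each ATOMIC_OPS(op, asm_op) instance above, the macros generate
 * (taking "add" as the example): arch_atomic_add(),
 * arch_atomic_add_return{,_relaxed,_acquire,_release}() and
 * arch_atomic_fetch_add{,_relaxed,_acquire,_release}().  The _return
 * variants hand back the new value, the fetch_ variants the old one.
 * Ordering follows the instruction/barrier selection above: the fully
 * ordered forms use ldxr + stlxr followed by dmb ish, _relaxed uses
 * plain ldxr/stxr, _acquire uses ldaxr, and _release uses stlxr.
 */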

#undef ATOMIC_OPS
#define ATOMIC_OPS(...)                                                 \
        ATOMIC_OP(__VA_ARGS__)                                          \
        ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, orr)
ATOMIC_OPS(xor, eor)
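/*
 * Note that ATOMIC_OPS() was redefined above without ATOMIC_OP_RETURN,
 * so the bitwise operations only get the void form and the fetch_
 * variants, e.g. arch_atomic_and() and
 * arch_atomic_fetch_and{,_relaxed,_acquire,_release}(); there is no
 * arch_atomic_and_return().
 */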

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op)                                         \
__LL_SC_INLINE void                                                     \
__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))               \
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "\n"                            \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ldxr    %0, %2\n"                                               \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       stxr    %w1, %0, %2\n"                                          \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \
__LL_SC_EXPORT(arch_atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)          \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "_return" #name "\n"            \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ld" #acq "xr    %0, %2\n"                                       \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       st" #rel "xr    %w1, %0, %2\n"                                  \
"       cbnz    %w1, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(arch_atomic64_##op##_return##name);

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)           \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v))   \
{                                                                       \
        long result, val;                                               \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_fetch_" #op #name "\n"                \
"       prfm    pstl1strm, %3\n"                                        \
"1:     ld" #acq "xr    %0, %3\n"                                       \
"       " #asm_op "     %1, %0, %4\n"                                   \
"       st" #rel "xr    %w2, %1, %3\n"                                  \
"       cbnz    %w2, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);

#define ATOMIC64_OPS(...)                                               \
        ATOMIC64_OP(__VA_ARGS__)                                        \
        ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)
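/*
 * The atomic64_t variants mirror the 32-bit ones above: same retry
 * loop and the same barrier/acquire/release selection, but operating
 * on a long counter, so the value operands use full X registers (%0)
 * rather than W registers (%w0); only the store-exclusive status flag
 * stays 32-bit.
 */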

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...)                                               \
        ATOMIC64_OP(__VA_ARGS__)                                        \
        ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(andnot, bic)
ATOMIC64_OPS(or, orr)
ATOMIC64_OPS(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

__LL_SC_INLINE long
__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
"       prfm    pstl1strm, %2\n"
"1:     ldxr    %0, %2\n"
"       subs    %0, %0, #1\n"
"       b.lt    2f\n"
"       stlxr   %w1, %0, %2\n"
"       cbnz    %w1, 1b\n"
"       dmb     ish\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
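/*
 * arch_atomic64_dec_if_positive() decrements v->counter only if the
 * result stays non-negative: when the decremented value would be
 * negative, the store is skipped (b.lt 2f) and the counter is left
 * untouched.  Either way the would-be new value is returned, so a
 * negative return means "no decrement happened".
 *
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *	if (arch_atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	// pool exhausted; counter unchanged
 */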

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)              \
__LL_SC_INLINE u##sz                                                    \
__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr,            \
                                         unsigned long old,             \
                                         u##sz new))                    \
{                                                                       \
        unsigned long tmp;                                              \
        u##sz oldval;                                                   \
                                                                        \
        /*                                                              \
         * Sub-word sizes require explicit casting so that the compare  \
         * part of the cmpxchg doesn't end up interpreting non-zero     \
         * upper bits of the register containing "old".                 \
         */                                                             \
        if (sz < 32)                                                    \
                old = (u##sz)old;                                       \
                                                                        \
        asm volatile(                                                   \
        "       prfm    pstl1strm, %[v]\n"                              \
        "1:     ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n"          \
        "       eor     %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"  \
        "       cbnz    %" #w "[tmp], 2f\n"                             \
        "       st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n"    \
        "       cbnz    %w[tmp], 1b\n"                                  \
        "       " #mb "\n"                                              \
        "2:"                                                            \
        : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),                   \
          [v] "+Q" (*(u##sz *)ptr)                                      \
        : [old] "Kr" (old), [new] "r" (new)                             \
        : cl);                                                          \
                                                                        \
        return oldval;                                                  \
}                                                                       \
__LL_SC_EXPORT(__cmpxchg_case_##name##sz);

__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory")
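/*
 * The instantiations above provide __cmpxchg_case_{8,16,32,64}() in
 * four ordering flavours: no prefix (relaxed), acq_ (ldaxr), rel_
 * (stlxr) and mb_ (stlxr plus a trailing dmb ish).  Each returns the
 * value observed at *ptr; the exchange succeeded iff that value equals
 * "old".  On a failed comparison the code branches straight to label
 * 2, so the mb_ variants only execute the dmb ish on the success path.
 */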

#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name, mb, rel, cl)                                \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,               \
                                      unsigned long old2,               \
                                      unsigned long new1,               \
                                      unsigned long new2,               \
                                      volatile void *ptr))              \
{                                                                       \
        unsigned long tmp, ret;                                         \
                                                                        \
        asm volatile("// __cmpxchg_double" #name "\n"                   \
        "       prfm    pstl1strm, %2\n"                                \
        "1:     ldxp    %0, %1, %2\n"                                   \
        "       eor     %0, %0, %3\n"                                   \
        "       eor     %1, %1, %4\n"                                   \
        "       orr     %1, %0, %1\n"                                   \
        "       cbnz    %1, 2f\n"                                       \
        "       st" #rel "xp    %w0, %5, %6, %2\n"                      \
        "       cbnz    %w0, 1b\n"                                      \
        "       " #mb "\n"                                              \
        "2:"                                                            \
        : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
        : cl);                                                          \
                                                                        \
        return ret;                                                     \
}                                                                       \
__LL_SC_EXPORT(__cmpxchg_double##name);

__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
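/*
 * __cmpxchg_double() and __cmpxchg_double_mb() operate on a pair of
 * adjacent 64-bit words at ptr (loaded and stored with ldxp/stxp, so
 * the pair is expected to be naturally aligned): if the pair equals
 * {old1, old2}, then {new1, new2} is stored and 0 is returned; a
 * non-zero return means the comparison failed and nothing was written.
 * As with the mb_ cmpxchg cases, the dmb ish in the _mb variant is
 * only reached on success.
 */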

#undef __CMPXCHG_DBL

#endif  /* __ASM_ATOMIC_LL_SC_H */

