TOMOYO Linux Cross Reference
Linux/arch/riscv/include/asm/atomic.h

/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)  { (i) }

#define __atomic_acquire_fence()                                        \
        __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()                                        \
        __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")

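/*
 * These fences hook into the generic atomic machinery.  A minimal sketch of
 * how include/linux/atomic.h builds an _acquire variant out of a _relaxed
 * one (simplified for illustration):
 *
 *        static inline int atomic_add_return_acquire(int i, atomic_t *v)
 *        {
 *                int ret = atomic_add_return_relaxed(i, v);
 *                __atomic_acquire_fence();
 *                return ret;
 *        }
 *
 * RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER come from asm/fence.h.
 */
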
static __always_inline int atomic_read(const atomic_t *v)
{
        return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
        WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline long atomic64_read(const atomic64_t *v)
{
        return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, long i)
{
        WRITE_ONCE(v->counter, i);
}
#endif
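
/*
 * Basic usage, for illustration (the names below are made up):
 *
 *        static atomic_t active_users = ATOMIC_INIT(0);
 *
 *        atomic_set(&active_users, 5);
 *        pr_info("users: %d\n", atomic_read(&active_users));
 *
 * atomic_read() and atomic_set() are not read-modify-write operations;
 * READ_ONCE()/WRITE_ONCE() only keep the compiler from tearing, caching,
 * or re-reading the access.
 */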

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.  (See the example expansion after the
 * ATOMIC_OPS() invocations below.)
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)              \
static __always_inline                                                  \
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)             \
{                                                                       \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type " zero, %1, %0"      \
                : "+A" (v->counter)                                     \
                : "r" (I)                                               \
                : "memory");                                            \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_OP (op, asm_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_OP (op, asm_op, I, w,  int,   )                          \
        ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)
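
/*
 * For illustration, ATOMIC_OPS(add, add, i) above generates (32-bit case)
 * roughly:
 *
 *        static __always_inline void atomic_add(int i, atomic_t *v)
 *        {
 *                __asm__ __volatile__ (
 *                        "       amoadd.w zero, %1, %0"
 *                        : "+A" (v->counter)
 *                        : "r" (i)
 *                        : "memory");
 *        }
 *
 * plus, when 64-bit atomics are native, an atomic64_add() using amoadd.d.
 * ATOMIC_OPS(sub, add, -i) reuses amoadd with a negated operand, since the
 * RISC-V A extension has no amosub instruction.
 */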

#undef ATOMIC_OP
#undef ATOMIC_OPS

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)        \
static __always_inline                                                  \
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,                  \
                                             atomic##prefix##_t *v)     \
{                                                                       \
        register c_type ret;                                            \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type " %1, %2, %0"        \
                : "+A" (v->counter), "=r" (ret)                         \
                : "r" (I)                                               \
                : "memory");                                            \
        return ret;                                                     \
}                                                                       \
static __always_inline                                                  \
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)     \
{                                                                       \
        register c_type ret;                                            \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type ".aqrl  %1, %2, %0"  \
                : "+A" (v->counter), "=r" (ret)                         \
                : "r" (I)                                               \
                : "memory");                                            \
        return ret;                                                     \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline                                                  \
c_type atomic##prefix##_##op##_return_relaxed(c_type i,                 \
                                              atomic##prefix##_t *v)    \
{                                                                       \
        return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;      \
}                                                                       \
static __always_inline                                                  \
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)  \
{                                                                       \
        return atomic##prefix##_fetch_##op(i, v) c_op I;                \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)                                 \
        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )              \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)                                 \
        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )              \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )              \
        ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64)              \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
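
/*
 * For illustration, ATOMIC_OPS(add, add, +, i) above generates (32-bit):
 *
 *        atomic_fetch_add_relaxed(i, v)   - amoadd.w, returns the old value
 *        atomic_fetch_add(i, v)           - amoadd.w.aqrl (fully ordered)
 *        atomic_add_return_relaxed(i, v)  = atomic_fetch_add_relaxed(i, v) + i
 *        atomic_add_return(i, v)          = atomic_fetch_add(i, v) + i
 *
 * plus the atomic64_* equivalents (amoadd.d) when 64-bit atomics are native.
 * The _acquire/_release flavours are synthesized from the _relaxed ones by
 * the generic atomic code, using the fences defined at the top of this file.
 */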

#define atomic_add_return_relaxed       atomic_add_return_relaxed
#define atomic_sub_return_relaxed       atomic_sub_return_relaxed
#define atomic_add_return               atomic_add_return
#define atomic_sub_return               atomic_sub_return

#define atomic_fetch_add_relaxed        atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed        atomic_fetch_sub_relaxed
#define atomic_fetch_add                atomic_fetch_add
#define atomic_fetch_sub                atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed     atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed     atomic64_sub_return_relaxed
#define atomic64_add_return             atomic64_add_return
#define atomic64_sub_return             atomic64_sub_return

#define atomic64_fetch_add_relaxed      atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed      atomic64_fetch_sub_relaxed
#define atomic64_fetch_add              atomic64_fetch_add
#define atomic64_fetch_sub              atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )                     \
        ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define atomic_fetch_and_relaxed        atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed         atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed        atomic_fetch_xor_relaxed
#define atomic_fetch_and                atomic_fetch_and
#define atomic_fetch_or                 atomic_fetch_or
#define atomic_fetch_xor                atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed      atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed       atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed      atomic64_fetch_xor_relaxed
#define atomic64_fetch_and              atomic64_fetch_and
#define atomic64_fetch_or               atomic64_fetch_or
#define atomic64_fetch_xor              atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
                "       beq      %[p],  %[u], 1f\n"
                "       add      %[rc], %[p], %[a]\n"
                "       sc.w.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
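
/*
 * Illustrative use: atomic_fetch_add_unless(v, a, u) adds @a to @v unless
 * the value is @u, and returns the value observed before the attempt.  The
 * generic atomic code builds atomic_add_unless() and atomic_inc_not_zero()
 * on top of it, e.g. an "only take a reference if the object is still live"
 * pattern (obj/refcnt are made-up names):
 *
 *        if (atomic_fetch_add_unless(&obj->refcnt, 1, 0) == 0)
 *                return false;
 */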

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
        long prev, rc;

        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
                "       beq      %[p],  %[u], 1f\n"
                "       add      %[rc], %[p], %[a]\n"
                "       sc.d.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics
 * as {cmp,}xchg and the value-returning operations, so they need a full
 * barrier.
 */
#define ATOMIC_OP(c_t, prefix, size)                                    \
static __always_inline                                                  \
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)         \
{                                                                       \
        return __xchg_relaxed(&(v->counter), n, size);                  \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)         \
{                                                                       \
        return __xchg_acquire(&(v->counter), n, size);                  \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)         \
{                                                                       \
        return __xchg_release(&(v->counter), n, size);                  \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)                 \
{                                                                       \
        return __xchg(&(v->counter), n, size);                          \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,             \
                                     c_t o, c_t n)                      \
{                                                                       \
        return __cmpxchg_relaxed(&(v->counter), o, n, size);            \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,             \
                                     c_t o, c_t n)                      \
{                                                                       \
        return __cmpxchg_acquire(&(v->counter), o, n, size);            \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,             \
                                     c_t o, c_t n)                      \
{                                                                       \
        return __cmpxchg_release(&(v->counter), o, n, size);            \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)       \
{                                                                       \
        return __cmpxchg(&(v->counter), o, n, size);                    \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS()                                                    \
        ATOMIC_OP( int,   , 4)
#else
#define ATOMIC_OPS()                                                    \
        ATOMIC_OP( int,   , 4)                                          \
        ATOMIC_OP(long, 64, 8)
#endif

ATOMIC_OPS()

#define atomic_xchg_relaxed atomic_xchg_relaxed
#define atomic_xchg_acquire atomic_xchg_acquire
#define atomic_xchg_release atomic_xchg_release
#define atomic_xchg atomic_xchg
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
#define atomic_cmpxchg_release atomic_cmpxchg_release
#define atomic_cmpxchg atomic_cmpxchg
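
/*
 * Illustrative use of the wrappers above: a lock-free "increment, but
 * saturate at INT_MAX" sketch built on the fully ordered atomic_cmpxchg()
 * (the _relaxed/_acquire/_release variants apply when weaker ordering is
 * sufficient):
 *
 *        int c, old;
 *
 *        c = atomic_read(v);
 *        while (c != INT_MAX) {
 *                old = atomic_cmpxchg(v, c, c + 1);
 *                if (old == c)
 *                        break;
 *                c = old;
 *        }
 */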

#undef ATOMIC_OPS
#undef ATOMIC_OP

static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
                "       sub      %[rc], %[p], %[o]\n"
                "       bltz     %[rc], 1f\n"
                "       sc.w.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)
                : "memory");
        return prev - offset;
}

#define atomic_dec_if_positive(v)       atomic_sub_if_positive(v, 1)
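
/*
 * Illustrative semantics: atomic_dec_if_positive() decrements only when the
 * result would stay >= 0, and returns (old value - 1) either way, so a
 * negative return value means the counter was left untouched:
 *
 *        if (atomic_dec_if_positive(&free_slots) < 0)
 *                return -EBUSY;
 *
 * (free_slots is a made-up example counter.)
 */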

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
{
        long prev, rc;

        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
                "       sub      %[rc], %[p], %[o]\n"
                "       bltz     %[rc], 1f\n"
                "       sc.d.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)
                : "memory");
        return prev - offset;
}

#define atomic64_dec_if_positive(v)     atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */

