
TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/atomic64_64.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)        { (i) }

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
        return READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: value to set @v to
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
        WRITE_ONCE(v->counter, i);
}

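/*
 * Illustrative sketch (not part of this header, guarded out): basic use of
 * the plain accessors above. Both compile to ordinary 64-bit loads and
 * stores via READ_ONCE()/WRITE_ONCE() and imply no memory barrier; the
 * variable and function names here are hypothetical. Note the read/set
 * pair below is not atomic as a whole; arch_atomic64_xchg() further down
 * would be needed for that.
 */
#if 0
static atomic64_t example_counter = ATOMIC64_INIT(0);

static inline s64 example_snapshot_and_reset(void)
{
        s64 old = arch_atomic64_read(&example_counter);  /* plain load  */

        arch_atomic64_set(&example_counter, 0);          /* plain store */
        return old;
}
#endif
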
/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "addq %1,%0"
                     : "=m" (v->counter)
                     : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "subq %1,%0"
                     : "=m" (v->counter)
                     : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

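/*
 * Illustrative sketch (not part of this header, guarded out): the
 * *_and_test helpers fold the arithmetic and the zero check into a single
 * locked instruction, so a caller can detect "reached zero" without a
 * separate racy read. The function and parameter names are hypothetical.
 */
#if 0
static inline bool example_drop_refs(atomic64_t *refs, s64 n)
{
        /* True only for the caller whose subtraction brings @refs to zero. */
        return arch_atomic64_sub_and_test(n, refs);
}
#endif
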
/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "incq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "decq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
        return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
        return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

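/*
 * Illustrative sketch (not part of this header, guarded out): the sign of
 * the result is tested straight from the flags left by the locked ADD, so
 * no separate read of the counter is needed. Names are hypothetical.
 */
#if 0
static inline bool example_charge_went_negative(atomic64_t *budget, s64 cost)
{
        /* True if the budget dropped below zero after charging @cost. */
        return arch_atomic64_add_negative(-cost, budget);
}
#endif
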
/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
        return i + xadd(&v->counter, i);
}

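/*
 * Illustrative sketch (not part of this header, guarded out): XADD hands
 * back the value the counter held before the addition, so
 * arch_atomic64_add_return() adds @i on top to yield the new value, while
 * arch_atomic64_fetch_add() below returns the old value as-is. The function
 * name is hypothetical.
 */
#if 0
static inline void example_return_conventions(atomic64_t *v)
{
        s64 new = arch_atomic64_add_return(1, v);  /* value after the add  */
        s64 old = arch_atomic64_fetch_add(1, v);   /* value before the add */

        (void)new;
        (void)old;
}
#endif
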
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
        return arch_atomic64_add_return(-i, v);
}

static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
        return xadd(&v->counter, i);
}

static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
        return xadd(&v->counter, -i);
}

static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
        return arch_cmpxchg(&v->counter, old, new);
}

#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
        return try_cmpxchg(&v->counter, old, new);
}

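/*
 * Illustrative sketch (not part of this header, guarded out): on failure,
 * arch_atomic64_try_cmpxchg() updates *old with the value currently in the
 * counter, which is what lets the fetch_and/fetch_or/fetch_xor loops below
 * retry without re-reading @v. Names are hypothetical.
 */
#if 0
static inline s64 example_saturating_inc(atomic64_t *v, s64 limit)
{
        s64 val = arch_atomic64_read(v);

        do {
                if (val >= limit)
                        break;  /* already saturated, leave @v untouched */
        } while (!arch_atomic64_try_cmpxchg(v, &val, val + 1));

        return val;  /* value observed before any update */
}
#endif
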
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
        return arch_xchg(&v->counter, new);
}

static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "andq %1,%0"
                        : "+m" (v->counter)
                        : "er" (i)
                        : "memory");
}

static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
        s64 val = arch_atomic64_read(v);

        do {
        } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
        return val;
}

static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "orq %1,%0"
                        : "+m" (v->counter)
                        : "er" (i)
                        : "memory");
}

static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
        s64 val = arch_atomic64_read(v);

        do {
        } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
        return val;
}

static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "xorq %1,%0"
                        : "+m" (v->counter)
                        : "er" (i)
                        : "memory");
}

static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
        s64 val = arch_atomic64_read(v);

        do {
        } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
        return val;
}

#endif /* _ASM_X86_ATOMIC64_64_H */
