TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/cmpxchg.h


#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
        __compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
        __compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
        __compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes.  On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B    1
#define __X86_CASE_W    2
#define __X86_CASE_L    4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q    8
#else
#define __X86_CASE_Q    -1              /* sizeof will never return -1 */
#endif

/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)                                   \
        ({                                                              \
                __typeof__ (*(ptr)) __ret = (arg);                      \
                switch (sizeof(*(ptr))) {                               \
                case __X86_CASE_B:                                      \
                        asm volatile (lock #op "b %b0, %1\n"            \
                                      : "+q" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                case __X86_CASE_W:                                      \
                        asm volatile (lock #op "w %w0, %1\n"            \
                                      : "+r" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                case __X86_CASE_L:                                      \
                        asm volatile (lock #op "l %0, %1\n"             \
                                      : "+r" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                case __X86_CASE_Q:                                      \
                        asm volatile (lock #op "q %q0, %1\n"            \
                                      : "+r" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                default:                                                \
                        __ ## op ## _wrong_size();                      \
                }                                                       \
                __ret;                                                  \
        })

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v)    __xchg_op((ptr), (v), xchg, "")

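/*
 * Editorial usage sketch (not part of the upstream header): xchg()
 * atomically stores the new value and hands back whatever was in
 * memory, which is enough for a crude test-and-set style lock.  The
 * helpers below are hypothetical; real spinlocks live elsewhere in
 * arch code.
 */
static inline void example_busy_lock(unsigned int *lock)
{
        /* Spin until the previous value was 0, i.e. we took ownership. */
        while (xchg(lock, 1) != 0)
                ;       /* spin (a real caller would use cpu_relax()) */
}

static inline void example_busy_unlock(unsigned int *lock)
{
        /* Storing 0 releases the lock; xchg is fully ordered. */
        (void)xchg(lock, 0);
}
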
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)                        \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        switch (size) {                                                 \
        case __X86_CASE_B:                                              \
        {                                                               \
                volatile u8 *__ptr = (volatile u8 *)(ptr);              \
                asm volatile(lock "cmpxchgb %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "q" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_W:                                              \
        {                                                               \
                volatile u16 *__ptr = (volatile u16 *)(ptr);            \
                asm volatile(lock "cmpxchgw %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_L:                                              \
        {                                                               \
                volatile u32 *__ptr = (volatile u32 *)(ptr);            \
                asm volatile(lock "cmpxchgl %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_Q:                                              \
        {                                                               \
                volatile u64 *__ptr = (volatile u64 *)(ptr);            \
                asm volatile(lock "cmpxchgq %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __ret;                                                          \
})

#define __cmpxchg(ptr, old, new, size)                                  \
        __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)                             \
        __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)                            \
        __raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define cmpxchg(ptr, old, new)                                          \
        __cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)                                     \
        __sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)                                    \
        __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))

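/*
 * Editorial usage sketch (not part of the upstream header): the usual
 * cmpxchg() pattern is a read/compute/retry loop.  Success is detected
 * by comparing the returned old memory value with the value the update
 * was based on.  The helper name is hypothetical.
 */
static inline void example_cmpxchg_add(unsigned int *ptr, unsigned int val)
{
        unsigned int old, seen;

        old = READ_ONCE(*ptr);
        for (;;) {
                /* cmpxchg() returns whatever was in memory before the attempt. */
                seen = cmpxchg(ptr, old, old + val);
                if (seen == old)
                        break;          /* compare matched, store happened */
                old = seen;             /* lost a race, retry with fresh value */
        }
}
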
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)  __xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)          __xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)     __xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)    __xadd((ptr), (inc), "")

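/*
 * Editorial usage sketch (not part of the upstream header): because
 * xadd() returns the pre-increment value, it is a natural fetch-and-add,
 * e.g. for handing out strictly increasing ticket numbers.  The helper
 * is hypothetical.
 */
static inline unsigned int example_take_ticket(unsigned int *next)
{
        /* Atomically bump *next and return the value it held before. */
        return xadd(next, 1);
}
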
#define __add(ptr, inc, lock)                                           \
        ({                                                              \
                __typeof__ (*(ptr)) __ret = (inc);                      \
                switch (sizeof(*(ptr))) {                               \
                case __X86_CASE_B:                                      \
                        asm volatile (lock "addb %b1, %0\n"             \
                                      : "+m" (*(ptr)) : "qi" (inc)      \
                                      : "memory", "cc");                \
                        break;                                          \
                case __X86_CASE_W:                                      \
                        asm volatile (lock "addw %w1, %0\n"             \
                                      : "+m" (*(ptr)) : "ri" (inc)      \
                                      : "memory", "cc");                \
                        break;                                          \
                case __X86_CASE_L:                                      \
                        asm volatile (lock "addl %1, %0\n"              \
                                      : "+m" (*(ptr)) : "ri" (inc)      \
                                      : "memory", "cc");                \
                        break;                                          \
                case __X86_CASE_Q:                                      \
                        asm volatile (lock "addq %1, %0\n"              \
                                      : "+m" (*(ptr)) : "ri" (inc)      \
                                      : "memory", "cc");                \
                        break;                                          \
                default:                                                \
                        __add_wrong_size();                             \
                }                                                       \
                __ret;                                                  \
        })

/*
 * add_*() adds "inc" to "*ptr"
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)       __add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)      __add((ptr), (inc), "lock; ")

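/*
 * Editorial usage sketch (not part of the upstream header): add_smp()
 * suits fire-and-forget counters where the old value is not needed;
 * unlike xadd() it does not return the previous contents of memory.
 * The counter helper below is hypothetical.
 */
static inline void example_bump_stat(unsigned long *counter, unsigned long n)
{
        /* Uses LOCK_PREFIX, so it is locked when multiple CPUs are online. */
        add_smp(counter, n);
}
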
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)                   \
({                                                                      \
        bool __ret;                                                     \
        __typeof__(*(p1)) __old1 = (o1), __new1 = (n1);                 \
        __typeof__(*(p2)) __old2 = (o2), __new2 = (n2);                 \
        BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));                    \
        BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));                    \
        VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));            \
        VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));    \
        asm volatile(pfx "cmpxchg%c4b %2; sete %0"                      \
                     : "=a" (__ret), "+d" (__old2),                     \
                       "+m" (*(p1)), "+m" (*(p2))                       \
                     : "i" (2 * sizeof(long)), "a" (__old1),            \
                       "b" (__new1), "c" (__new2));                     \
        __ret;                                                          \
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
        __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
        __cmpxchg_double(, p1, p2, o1, o2, n1, n2)

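/*
 * Editorial usage sketch (not part of the upstream header):
 * cmpxchg_double() swaps two adjacent longs (e.g. a pointer plus a
 * generation counter, as the SLUB allocator does) with a single
 * CMPXCHG8B/CMPXCHG16B instruction.  The pair must be aligned to
 * 2 * sizeof(long), as the VM_BUG_ON() checks above require, and
 * callers typically test system_has_cmpxchg_double() first.  The
 * structure and helper below are hypothetical.
 */
struct example_pair {
        void *ptr;
        unsigned long gen;
} __aligned(2 * sizeof(long));

static inline bool example_pair_update(struct example_pair *p,
                                       void *old_ptr, unsigned long old_gen,
                                       void *new_ptr, unsigned long new_gen)
{
        /* True only if both words still held the expected old values. */
        return cmpxchg_double(&p->ptr, &p->gen,
                              old_ptr, old_gen,
                              new_ptr, new_gen);
}
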
#endif  /* ASM_X86_CMPXCHG_H */
