TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/barrier.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
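
/*
 * Illustrative sketch (annotation, not part of the upstream header):
 * the "talking to devices" case mentioned above.  A driver typically
 * issues wmb() between filling a DMA descriptor and the MMIO doorbell
 * write that tells the device to consume it, so the device cannot
 * observe the doorbell before the descriptor contents.  The names
 * desc, dev->doorbell and RING_GO are hypothetical:
 *
 *	desc->addr = buf_dma;
 *	desc->len  = len;
 *	wmb();
 *	writel(RING_GO, dev->doorbell);
 */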

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
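
/*
 * Illustrative sketch (annotation, not part of the upstream header):
 * the mask is consumed by array_index_nospec() in linux/nospec.h,
 * which clamps a bounds-checked index to 0 under misspeculation so
 * the subsequent load cannot fetch out-of-bounds data speculatively.
 * The cmp/sbb pair computes (index < size) ? ~0UL : 0 without a
 * conditional branch, leaving nothing for the branch predictor to
 * mispredict.  Typical caller pattern (array, index and size are
 * placeholders):
 *
 *	if (index < size) {
 *		index = array_index_nospec(index, size);
 *		val = array[index];
 *	}
 */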

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
					   "lfence", X86_FEATURE_LFENCE_RDTSC)
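
/*
 * Illustrative sketch (annotation, not part of the upstream header):
 * one well-known user is rdtsc_ordered() in asm/msr.h, which places
 * barrier_nospec() before RDTSC so the timestamp read cannot be
 * speculatively hoisted above earlier loads:
 *
 *	static __always_inline unsigned long long rdtsc_ordered(void)
 *	{
 *		barrier_nospec();
 *		return rdtsc();
 *	}
 */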

#define dma_rmb()	barrier()
#define dma_wmb()	barrier()

#ifdef CONFIG_X86_32
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
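
/*
 * Annotation (not part of the upstream header): a LOCK-prefixed
 * read-modify-write is a full memory barrier on x86 and is generally
 * cheaper than MFENCE, which is why __smp_mb() above uses it.  Adding
 * 0 leaves memory unchanged, and the -4(%esp)/-4(%rsp) slot sits just
 * below the stack pointer, so the operation avoids touching live data
 * at the top of the stack.
 */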
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
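
/*
 * Illustrative sketch (annotation, not part of the upstream header):
 * the classic message-passing pattern these primitives support, via
 * the generic smp_store_release()/smp_load_acquire() wrappers; data
 * and ready are hypothetical shared variables:
 *
 *	CPU 0				CPU 1
 *	data = 42;			if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		BUG_ON(data != 42);
 *
 * On x86 (a TSO machine) both sides compile to plain accesses plus a
 * compiler barrier; the hardware already forbids the load/store
 * reorderings that would break this pattern.
 */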

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */
