Linux/arch/x86/include/asm/spinlock.h

#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
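/*
 * Worked example (illustrative; assumes 16-bit tickets, i.e. TICKET_SHIFT
 * == 16 -- the actual ticket width is chosen in asm/spinlock_types.h based
 * on NR_CPUS): with head == tail == 2 the lock word is 0x00020002.  A
 * locker xadds 1 << TICKET_SHIFT == 0x00010000, receives the old value
 * 0x00020002 (head == tail, so the lock is taken) and leaves 0x00030002.
 * A second locker's xadd returns head == 2, tail == 3, so it spins until
 * an unlock advances the head to 3.
 */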
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		if (inc.head == inc.tail)
			break;
		cpu_relax();
		inc.head = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();		/* make sure nothing creeps before the lock is taken */
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != old.tickets.tail)
		return 0;

	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
}
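
/*
 * For reference, a minimal userspace sketch of the same head/tail scheme,
 * written with C11 atomics instead of the kernel's xadd()/cmpxchg()
 * primitives.  All names below are hypothetical, for illustration only,
 * and not kernel API; it assumes 16-bit tickets packed into one 32-bit
 * word.
 */
#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock_sketch {
	_Atomic uint32_t head_tail;	/* head in low 16 bits, tail in high 16 */
};

static void ticket_lock_sketch_lock(struct ticket_lock_sketch *lk)
{
	/* grab a ticket: fetch_add returns the old packed value, and the
	 * tail lives in the high half so a carry out of it falls off the
	 * top of the word instead of corrupting the head */
	uint32_t old = atomic_fetch_add(&lk->head_tail, 1u << 16);
	uint16_t my_ticket = (uint16_t)(old >> 16);

	/* spin until the head reaches our ticket; the kernel version
	 * inserts cpu_relax() in this loop */
	while ((uint16_t)atomic_load(&lk->head_tail) != my_ticket)
		;
}

static void ticket_lock_sketch_unlock(struct ticket_lock_sketch *lk)
{
	uint32_t old = atomic_load(&lk->head_tail);
	uint32_t new;

	/* bump the head within its own 16 bits; portable C cannot do the
	 * kernel's plain 16-bit add on half of an atomic word, so a CAS
	 * loop masks the wraparound by hand */
	do {
		new = (old & 0xffff0000u) | (uint16_t)(old + 1);
	} while (!atomic_compare_exchange_weak(&lk->head_tail, &old, new));
}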

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > 1;
}
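
/*
 * Note the cast in __ticket_spin_is_contended(): tail - head is taken
 * modulo the ticket width, so the test stays correct across wraparound.
 * E.g. with 16-bit tickets, head == 0xffff and tail == 0x0001 gives
 * (__ticket_t)(tail - head) == 2: one holder plus one waiter, i.e.
 * contended.
 */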

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
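/*
 * Worked example (illustrative; the real RW_LOCK_BIAS is defined in
 * asm/rwlock.h and depends on CONFIG_NR_CPUS): with a bias of 0x00100000
 * the counter starts at the bias.  Each reader decrements it by 1, each
 * writer by the full bias.  A reader seeing a negative result (sign bit
 * set) knows a writer got in first; a writer seeing a non-zero result
 * after its subtraction knows readers or another writer still hold the
 * lock.
 */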

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}
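
/*
 * Again for reference, a minimal userspace sketch of the biased-counter
 * scheme using C11 atomics.  The names and the bias value here are
 * hypothetical; the kernel lock paths above additionally spin in the
 * out-of-line __read_lock_failed / __write_lock_failed helpers instead
 * of undoing and retrying inline as the trylock variants do.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define SKETCH_RW_BIAS 0x00100000	/* stand-in for RW_LOCK_BIAS */

struct rwlock_sketch {
	_Atomic int lock;		/* starts at SKETCH_RW_BIAS */
};

static bool rwlock_sketch_read_trylock(struct rwlock_sketch *rw)
{
	/* a reader takes one unit; a negative result means a writer
	 * already subtracted the whole bias */
	if (atomic_fetch_sub(&rw->lock, 1) - 1 >= 0)
		return true;
	atomic_fetch_add(&rw->lock, 1);	/* undo, like arch_read_trylock() */
	return false;
}

static bool rwlock_sketch_write_trylock(struct rwlock_sketch *rw)
{
	/* a writer takes the whole bias; reaching exactly zero means no
	 * readers and no other writer were present */
	if (atomic_fetch_sub(&rw->lock, SKETCH_RW_BIAS) - SKETCH_RW_BIAS == 0)
		return true;
	atomic_fetch_add(&rw->lock, SKETCH_RW_BIAS);
	return false;
}

static void rwlock_sketch_read_unlock(struct rwlock_sketch *rw)
{
	atomic_fetch_add(&rw->lock, 1);
}

static void rwlock_sketch_write_unlock(struct rwlock_sketch *rw)
{
	atomic_fetch_add(&rw->lock, SKETCH_RW_BIAS);
}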

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */