Linux/kernel/locking/qspinlock_paravirt.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/memblock.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */

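/*
 * Illustrative sketch (hypothetical, simplified; not part of this file):
 * the semantics the two hypercalls must provide. Real implementations
 * (e.g. KVM on x86) halt and wake the vCPU inside the hypervisor:
 *
 *	static void example_pv_wait(u8 *ptr, u8 val)
 *	{
 *		if (READ_ONCE(*ptr) != val)
 *			return;			// value changed; don't sleep
 *		hypervisor_halt_self();		// hypothetical; returns on kick
 *	}
 *
 *	static void example_pv_kick(int cpu)
 *	{
 *		hypervisor_wake_vcpu(cpu);	// hypothetical wakeup hypercall
 *	}
 */
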
#define _Q_SLOW_VAL     (3U << _Q_LOCKED_OFFSET)
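
/*
 * Note on the value above: _Q_SLOW_VAL replaces _Q_LOCKED_VAL (1) in the
 * locked byte once the queue head has hashed the lock and may halt.
 * Seeing it tells __pv_queued_spin_unlock() to take the slowpath: unhash
 * the lock and kick the waiting vCPU instead of simply storing 0.
 */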

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown that this aggressive wait-early mechanism
 * causes for non-overcommitted guests.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK      0xff
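
/*
 * Example: with PV_PREV_CHECK_MASK == 0xff, pv_wait_early() below samples
 * the previous node's state only on iterations where (loop & 0xff) == 0,
 * i.e. once every 256 spins.
 */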

/*
 * Queue node uses: vcpu_running & vcpu_halted.
 * Queue head uses: vcpu_running & vcpu_hashed.
 */
enum vcpu_state {
        vcpu_running = 0,
        vcpu_halted,            /* Used only in pv_wait_node */
        vcpu_hashed,            /* = pv_hash'ed + vcpu_halted */
};
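
/*
 * Summary of the state transitions implemented below: a queue node vCPU
 * moves vcpu_running -> vcpu_halted in pv_wait_node() before sleeping; the
 * lock holder may advance it vcpu_halted -> vcpu_hashed in pv_kick_node().
 * The queue head instead sets vcpu_hashed itself in pv_wait_head_or_lock()
 * after hashing the lock.
 */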

struct pv_node {
        struct mcs_spinlock     mcs;
        int                     cpu;
        u8                      state;
};

/*
 * Hybrid PV queued/unfair lock
 *
 * The regular queued_spin_trylock() is replaced with the function below,
 * which is called once when a lock waiter enters the PV slowpath, before
 * being queued.
 *
 * The pending bit is set by the queue head vCPU of the MCS wait queue in
 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
 * When that bit becomes visible to the incoming waiters, no lock stealing
 * is allowed. The function will return immediately to make the waiters
 * enter the MCS wait queue. So lock starvation shouldn't happen as long
 * as the queued mode vCPUs are actively running to set the pending bit
 * and hence disable lock stealing.
 *
 * When the pending bit isn't set, the lock waiters will stay in the unfair
 * mode spinning on the lock unless the MCS wait queue is empty. In this
 * case, the lock waiters will enter the queued mode slowpath trying to
 * become the queue head and set the pending bit.
 *
 * This hybrid PV queued/unfair lock combines the best attributes of a
 * queued lock (no lock starvation) and an unfair lock (good performance
 * on lightly contended locks).
 */
#define queued_spin_trylock(l)  pv_hybrid_queued_unfair_trylock(l)
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
        /*
         * Stay in unfair lock mode as long as queued mode waiters are
         * present in the MCS wait queue but the pending bit isn't set.
         */
        for (;;) {
                int val = atomic_read(&lock->val);

                if (!(val & _Q_LOCKED_PENDING_MASK) &&
                   (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
                        qstat_inc(qstat_pv_lock_stealing, true);
                        return true;
                }
                if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
                        break;

                cpu_relax();
        }

        return false;
}
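
/*
 * Worked example of the loop above (illustrative):
 *
 *   val == 0                    - not locked, not pending: try to steal
 *   val == _Q_LOCKED_VAL        - locked but queue empty: break and queue up
 *   val == locked + tail        - waiters queued, no pending: keep spinning
 *   val has _Q_PENDING_VAL set  - queue head is ready: break and queue up
 */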

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
        WRITE_ONCE(lock->pending, 1);
}

/*
 * The pending bit check in pv_hybrid_queued_unfair_trylock() isn't a
 * memory barrier. Therefore, an atomic cmpxchg_acquire() is used to
 * acquire the lock, to be sure that the queue head really gets it.
 */
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
        return !READ_ONCE(lock->locked) &&
               (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
                                _Q_LOCKED_VAL) == _Q_PENDING_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
        atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
        int val = atomic_read(&lock->val);

        for (;;) {
                int old, new;

                if (val & _Q_LOCKED_MASK)
                        break;

                /*
                 * Try to clear pending bit & set locked bit
                 */
                old = val;
                new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
                val = atomic_cmpxchg_acquire(&lock->val, old, new);

                if (val == old)
                        return 1;
        }
        return 0;
}
#endif /* _Q_PENDING_BITS == 8 */

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 */
struct pv_hash_entry {
        struct qspinlock *lock;
        struct pv_node   *node;
};

#define PV_HE_PER_LINE  (SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN       (PAGE_SIZE / sizeof(struct pv_hash_entry))
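
/*
 * Worked sizing example (assuming 4k pages, 64-byte cachelines and 64-bit
 * pointers): sizeof(struct pv_hash_entry) = 16 bytes, so PV_HE_PER_LINE = 4
 * and PV_HE_MIN = 4096 / 16 = 256. A guest with 64 possible CPUs requests
 * 4 * 64 = 256 entries, i.e. exactly one page of buckets; on 32-bit,
 * 8-byte entries give the 512-bucket minimum quoted above.
 */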

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
        int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

        if (pv_hash_size < PV_HE_MIN)
                pv_hash_size = PV_HE_MIN;

        /*
         * Allocate space from bootmem which should be page-size aligned
         * and hence cacheline aligned.
         */
        pv_lock_hash = alloc_large_system_hash("PV qspinlock",
                                               sizeof(struct pv_hash_entry),
                                               pv_hash_size, 0,
                                               HASH_EARLY | HASH_ZERO,
                                               &pv_lock_hash_bits, NULL,
                                               pv_hash_size, pv_hash_size);
}

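/*
 * Probing order: round @hash down to a cacheline-aligned bucket, then walk
 * linearly through all (1 << pv_lock_hash_bits) buckets, wrapping around at
 * the end of the table.
 */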
#define for_each_hash_entry(he, offset, hash)                                           \
        for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;       \
             offset < (1 << pv_lock_hash_bits);                                         \
             offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
        unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
        struct pv_hash_entry *he;
        int hopcnt = 0;

        for_each_hash_entry(he, offset, hash) {
                hopcnt++;
                if (!cmpxchg(&he->lock, NULL, lock)) {
                        WRITE_ONCE(he->node, node);
                        qstat_hop(hopcnt);
                        return &he->lock;
                }
        }
        /*
         * Hard assume there is a free entry for us.
         *
         * This is guaranteed by ensuring every blocked lock only ever consumes
         * a single entry, and since we only have 4 nesting levels per CPU
         * and allocated 4*num_possible_cpus(), this must be so.
         *
         * The single entry is guaranteed by having the lock owner unhash
         * before it releases.
         */
        BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
        unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
        struct pv_hash_entry *he;
        struct pv_node *node;

        for_each_hash_entry(he, offset, hash) {
                if (READ_ONCE(he->lock) == lock) {
                        node = READ_ONCE(he->node);
                        WRITE_ONCE(he->lock, NULL);
                        return node;
                }
        }
        /*
         * Hard assume we'll find an entry.
         *
         * This guarantees a limited lookup time and is itself guaranteed by
         * having the lock owner do the unhash -- IFF the unlock sees the
         * SLOW flag, there MUST be a hash entry.
         */
        BUG();
}

/*
 * Return true when it is time to check the previous node and that node's
 * vCPU is not running or has been preempted.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
        if ((loop & PV_PREV_CHECK_MASK) != 0)
                return false;

        return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
}

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;

        BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));

        pn->cpu = smp_processor_id();
        pn->state = vcpu_running;
}

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in hash table on its
 * behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct pv_node *pp = (struct pv_node *)prev;
        int loop;
        bool wait_early;

        for (;;) {
                for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
                        if (READ_ONCE(node->locked))
                                return;
                        if (pv_wait_early(pp, loop)) {
                                wait_early = true;
                                break;
                        }
                        cpu_relax();
                }

                /*
                 * Order pn->state vs pn->locked thusly:
                 *
                 * [S] pn->state = vcpu_halted    [S] next->locked = 1
                 *     MB                             MB
                 * [L] pn->locked               [RmW] pn->state = vcpu_hashed
                 *
                 * Matches the cmpxchg() from pv_kick_node().
                 */
                smp_store_mb(pn->state, vcpu_halted);

                if (!READ_ONCE(node->locked)) {
                        qstat_inc(qstat_pv_wait_node, true);
                        qstat_inc(qstat_pv_wait_early, wait_early);
                        pv_wait(&pn->state, vcpu_halted);
                }

                /*
                 * If pv_kick_node() changed us to vcpu_hashed, retain that
                 * value so that pv_wait_head_or_lock() knows to not also try
                 * to hash this lock.
                 */
                cmpxchg(&pn->state, vcpu_halted, vcpu_running);

                /*
                 * If the locked flag is still not set after wakeup, it is a
                 * spurious wakeup and the vCPU should wait again. However,
                 * there is a pretty high overhead for CPU halting and kicking.
                 * So it is better to spin for a while in the hope that the
                 * MCS lock will be released soon.
                 */
                qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
        }

        /*
         * By now our node->locked should be 1 and our caller will not actually
         * spin-wait for it. We do however rely on our caller to do a
         * load-acquire for us.
         */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
 * so that they end up waiting in pv_wait_head_or_lock(); this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;

        /*
         * If the vCPU is indeed halted, advance its state to match that of
         * pv_wait_node(). If OTOH this fails, the vCPU was running and will
         * observe its next->locked value and advance itself.
         *
         * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
         *
         * The write to next->locked in arch_mcs_spin_unlock_contended()
         * must be ordered before the read of pn->state in the cmpxchg()
         * below for the code to work correctly. To guarantee full ordering
         * irrespective of the success or failure of the cmpxchg(),
         * a relaxed version with explicit barrier is used. The control
         * dependency will order the reading of pn->state before any
         * subsequent writes.
         */
        smp_mb__before_atomic();
        if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
            != vcpu_halted)
                return;

        /*
         * Put the lock into the hash table and set the _Q_SLOW_VAL.
         *
         * As this is the same vCPU that will check the _Q_SLOW_VAL value and
         * the hash table later on at unlock time, no atomic instruction is
         * needed.
         */
        WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
        (void)pv_hash(lock, pn);
}

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct qspinlock **lp = NULL;
        int waitcnt = 0;
        int loop;

        /*
         * If pv_kick_node() already advanced our state, we don't need to
         * insert ourselves into the hash table anymore.
         */
        if (READ_ONCE(pn->state) == vcpu_hashed)
                lp = (struct qspinlock **)1;

        /*
         * Tracking # of slowpath locking operations
         */
        qstat_inc(qstat_lock_slowpath, true);

        for (;; waitcnt++) {
                /*
                 * Set correct vCPU state to be used by queue node wait-early
                 * mechanism.
                 */
                WRITE_ONCE(pn->state, vcpu_running);

                /*
                 * Set the pending bit in the active lock spinning loop to
                 * disable lock stealing before attempting to acquire the lock.
                 */
                set_pending(lock);
                for (loop = SPIN_THRESHOLD; loop; loop--) {
                        if (trylock_clear_pending(lock))
                                goto gotlock;
                        cpu_relax();
                }
                clear_pending(lock);

                if (!lp) { /* ONCE */
                        lp = pv_hash(lock, pn);

                        /*
                         * We must hash before setting _Q_SLOW_VAL, such that
                         * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
                         * we'll be sure to be able to observe our hash entry.
                         *
                         *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
                         *       MB                           RMB
                         * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
                         *
                         * Matches the smp_rmb() in __pv_queued_spin_unlock().
                         */
                        if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
                                /*
                                 * The lock was free and now we own the lock.
                                 * Change the lock value back to _Q_LOCKED_VAL
                                 * and unhash the lock.
                                 */
                                WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
                                WRITE_ONCE(*lp, NULL);
                                goto gotlock;
                        }
                }
                WRITE_ONCE(pn->state, vcpu_hashed);
                qstat_inc(qstat_pv_wait_head, true);
                qstat_inc(qstat_pv_wait_again, waitcnt);
                pv_wait(&lock->locked, _Q_SLOW_VAL);

                /*
                 * Because of lock stealing, the queue head vCPU may not be
                 * able to acquire the lock before it has to wait again.
                 */
        }

        /*
         * The cmpxchg() or xchg() call before coming here provides the
         * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
         * here is to indicate to the compiler that the value will always
         * be nonzero to enable better code optimization.
         */
gotlock:
        return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
        struct pv_node *node;

        if (unlikely(locked != _Q_SLOW_VAL)) {
                WARN(!debug_locks_silent,
                     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
                     (unsigned long)lock, atomic_read(&lock->val));
                return;
        }

        /*
         * A failed cmpxchg doesn't provide any memory-ordering guarantees,
         * so we need a barrier to order the read of the node data in
         * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
         *
         * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
         */
        smp_rmb();

        /*
         * Since the above failed to release, this must be the SLOW path.
         * Therefore start by looking up the blocked node and unhashing it.
         */
        node = pv_unhash(lock);

        /*
         * Now that we have a reference to the (likely) blocked pv_node,
         * release the lock.
         */
        smp_store_release(&lock->locked, 0);

        /*
         * At this point the memory pointed at by lock can be freed/reused,
         * however we can still use the pv_node to kick the CPU.
         * The other vCPU may not really be halted, but kicking an active
         * vCPU is harmless other than the additional latency in completing
         * the unlock.
         */
        qstat_inc(qstat_pv_kick_unlock, true);
        pv_kick(node->cpu);
}

/*
 * Include the architecture specific callee-save thunk of
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
 * function close to each other, sharing consecutive instruction cachelines.
 * Alternatively, an architecture specific version of __pv_queued_spin_unlock()
 * can be defined.
 */
#include <asm/qspinlock_paravirt.h>

#ifndef __pv_queued_spin_unlock
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
        u8 locked;

        /*
         * We must not unlock if SLOW, because in that case we must first
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
        locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
        if (likely(locked == _Q_LOCKED_VAL))
                return;

        __pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */
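
/*
 * Rough sketch of how an architecture wires these functions up (simplified
 * and hypothetical; see arch/x86/kernel/kvm.c for a real example, where
 * kvm_wait()/kvm_kick_cpu() issue the actual hypercalls):
 *
 *	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 *	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 *	pv_ops.lock.wait = kvm_wait;
 *	pv_ops.lock.kick = kvm_kick_cpu;
 */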