TOMOYO Linux Cross Reference
Linux/kernel/locking/osq_lock.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc.).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
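
/*
 * For reference (these types live in <linux/osq_lock.h>, not in this file),
 * the node and queue layouts look roughly like the sketch below: 'cpu' holds
 * the encoded (CPU # + 1) value and 'tail' holds the encoded CPU of the last
 * queued node, or OSQ_UNLOCKED_VAL (0) when the queue is empty:
 *
 *      struct optimistic_spin_node {
 *              struct optimistic_spin_node *next, *prev;
 *              int locked;     // 1 if lock acquired
 *              int cpu;        // encoded CPU number + 1
 *      };
 *
 *      struct optimistic_spin_queue {
 *              atomic_t tail;  // encoded CPU # of the tail node, or OSQ_UNLOCKED_VAL
 *      };
 */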
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
        return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
        return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
        int cpu_nr = encoded_cpu_val - 1;

        return per_cpu_ptr(&osq_node, cpu_nr);
}
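
/*
 * Illustration (not part of the upstream file) of how the helpers above
 * round-trip, assuming CPU 3 runs osq_lock() and stores the encoded value
 * into its per-cpu node:
 *
 *      int curr = encode_cpu(3);                          // 4
 *      struct optimistic_spin_node *n = decode_cpu(curr); // per_cpu_ptr(&osq_node, 3)
 *      n->cpu = curr;
 *      node_cpu(n);                                       // 3 again
 *
 * Because the encoding is "CPU # + 1", the value 0 (OSQ_UNLOCKED_VAL) can
 * never collide with a real CPU.
 */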

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
              struct optimistic_spin_node *node,
              struct optimistic_spin_node *prev)
{
        struct optimistic_spin_node *next = NULL;
        int curr = encode_cpu(smp_processor_id());
        int old;

        /*
         * If there is a prev node in the queue, then the 'old' value will be
         * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since, if
         * we're currently last in the queue, the queue will become empty.
         */
        old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

        for (;;) {
                if (atomic_read(&lock->tail) == curr &&
                    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
                        /*
                         * We were the last queued, we moved @lock back. @prev
                         * will now observe @lock and will complete its
                         * unlock()/unqueue().
                         */
                        break;
                }

                /*
                 * We must xchg() the @node->next value, because if we were to
                 * leave it in, a concurrent unlock()/unqueue() from
                 * @node->next might complete Step-A and think its @prev is
                 * still valid.
                 *
                 * If the concurrent unlock()/unqueue() wins the race, we'll
                 * wait for either @lock to point to us, through its Step-B, or
                 * wait for a new @node->next from its Step-C.
                 */
                if (node->next) {
                        next = xchg(&node->next, NULL);
                        if (next)
                                break;
                }

                cpu_relax();
        }

        return next;
}
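
/*
 * For illustration (not in the upstream file): osq_wait_next() has exactly
 * two callers in this file, and @prev distinguishes them:
 *
 *      next = osq_wait_next(lock, node, prev); // osq_lock() unqueue, Step-B
 *      next = osq_wait_next(lock, node, NULL); // osq_unlock() slow path
 *
 * In the unlock() case there is no predecessor, so the tail simply falls
 * back to OSQ_UNLOCKED_VAL when we were the last node in the queue.
 */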

bool osq_lock(struct optimistic_spin_queue *lock)
{
        struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
        struct optimistic_spin_node *prev, *next;
        int curr = encode_cpu(smp_processor_id());
        int old;

        node->locked = 0;
        node->next = NULL;
        node->cpu = curr;

        /*
         * We need both ACQUIRE (pairs with the corresponding RELEASE in the
         * uncontended unlock() fastpath) and RELEASE (to publish the node
         * fields we just initialised) semantics when updating the lock tail.
         */
        old = atomic_xchg(&lock->tail, curr);
        if (old == OSQ_UNLOCKED_VAL)
                return true;
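
        /*
         * Worked example (not in the upstream file), assuming CPU0 and then
         * CPU1 call osq_lock() on an initially unlocked queue:
         *
         *      CPU0: old = atomic_xchg(&lock->tail, 1); // tail: 0 -> 1, old == 0
         *            old == OSQ_UNLOCKED_VAL, so CPU0 returns true immediately.
         *      CPU1: old = atomic_xchg(&lock->tail, 2); // tail: 1 -> 2, old == 1
         *            old != OSQ_UNLOCKED_VAL, so CPU1 queues behind CPU0's node below.
         */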

        prev = decode_cpu(old);
        node->prev = prev;

        /*
         * osq_lock()                   unqueue
         *
         * node->prev = prev            osq_wait_next()
         * WMB                          MB
         * prev->next = node            next->prev = prev // unqueue-C
         *
         * Here 'node->prev' and 'next->prev' are the same variable and we need
         * to ensure these stores happen in-order to avoid corrupting the list.
         */
        smp_wmb();

        WRITE_ONCE(prev->next, node);

        /*
         * Normally @prev is untouchable after the above store: at that
         * moment unlock() can proceed and wipe the node element from the
         * stack.
         *
         * However, since our nodes are static per-cpu storage, we're
         * guaranteed their existence -- this allows us to apply
         * cmpxchg in an attempt to undo our queueing.
         */

        /*
         * Wait to acquire the lock, or for cancellation. Note that
         * need_resched() will come with an IPI, which will wake
         * smp_cond_load_relaxed() if it is implemented with a monitor-wait.
         * vcpu_is_preempted() relies on polling; be careful.
         */
        if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
                                  vcpu_is_preempted(node_cpu(node->prev))))
                return true;
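
        /*
         * Roughly (illustration only, not in the upstream file), the wait
         * above behaves like the following open-coded loop, where each
         * iteration re-reads node->locked into VAL:
         *
         *      while (!READ_ONCE(node->locked)) {
         *              if (need_resched() ||
         *                  vcpu_is_preempted(node_cpu(node->prev)))
         *                      goto unqueue;   // fall through to the unqueue path below
         *              cpu_relax();
         *      }
         *      return true;
         */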

        /* unqueue */
        /*
         * Step - A  -- stabilize @prev
         *
         * Undo our @prev->next assignment; this will make @prev's
         * unlock()/unqueue() wait for a next pointer since @lock points to us
         * (or later).
         */

        for (;;) {
                /*
                 * cpu_relax() below implies a compiler barrier which would
                 * prevent this comparison from being optimized away.
                 */
                if (data_race(prev->next) == node &&
                    cmpxchg(&prev->next, node, NULL) == node)
                        break;

                /*
                 * We can only fail the cmpxchg() racing against an unlock(),
                 * in which case we should observe @node->locked becoming
                 * true.
                 */
                if (smp_load_acquire(&node->locked))
                        return true;

                cpu_relax();

                /*
                 * Or we race against a concurrent unqueue()'s step-B, in which
                 * case its step-C will write us a new @node->prev pointer.
                 */
                prev = READ_ONCE(node->prev);
        }

        /*
         * Step - B -- stabilize @next
         *
         * Similar to unlock(), wait for @node->next or move @lock from @node
         * back to @prev.
         */

        next = osq_wait_next(lock, node, prev);
        if (!next)
                return false;

        /*
         * Step - C -- unlink
         *
         * @prev is stable because it's still waiting for a new @prev->next
         * pointer, @next is stable because our @node->next pointer is NULL and
         * it will wait in Step-A.
         */

        WRITE_ONCE(next->prev, prev);
        WRITE_ONCE(prev->next, next);

        return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
        struct optimistic_spin_node *node, *next;
        int curr = encode_cpu(smp_processor_id());

        /*
         * Fast path for the uncontended case.
         */
        if (likely(atomic_cmpxchg_release(&lock->tail, curr,
                                          OSQ_UNLOCKED_VAL) == curr))
                return;

        /*
         * Second most likely case.
         */
        node = this_cpu_ptr(&osq_node);
        next = xchg(&node->next, NULL);
        if (next) {
                WRITE_ONCE(next->locked, 1);
                return;
        }

        next = osq_wait_next(lock, node, NULL);
        if (next)
                WRITE_ONCE(next->locked, 1);
}
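
/*
 * Usage sketch (illustrative only, not part of this file): a sleeping lock's
 * optimistic-spin slowpath can use the OSQ as its queue of spinners, roughly
 * the way mutex_optimistic_spin() does.  'struct my_lock', 'my_trylock()' and
 * 'my_owner_running()' are hypothetical helpers, not kernel APIs:
 *
 *      static bool my_lock_optimistic_spin(struct my_lock *lock)
 *      {
 *              bool acquired = false;
 *
 *              preempt_disable();
 *              if (!osq_lock(&lock->osq))      // queue up; bail out if we should resched
 *                      goto out;
 *
 *              while (my_owner_running(lock) && !need_resched()) {
 *                      if (my_trylock(lock)) {
 *                              acquired = true;
 *                              break;
 *                      }
 *                      cpu_relax();
 *              }
 *
 *              osq_unlock(&lock->osq);         // hand the spin slot to the next queuer
 *      out:
 *              preempt_enable();
 *              return acquired;
 *      }
 */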