TOMOYO Linux Cross Reference
Linux/kernel/sched/wait.c

/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&wq_head->lock);
        lockdep_set_class_and_name(&wq_head->lock, key, name);
        INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);
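
/*
 * For illustration (a minimal sketch, not part of this file): callers
 * normally reach __init_waitqueue_head() through the init_waitqueue_head()
 * macro in <linux/wait.h>, which supplies the lockdep class key and name,
 * or declare a ready-initialized head statically:
 *
 *      wait_queue_head_t wq;
 *
 *      init_waitqueue_head(&wq);               // dynamic initialization
 *      static DECLARE_WAIT_QUEUE_HEAD(wq2);    // static initialization
 */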

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue_entry_tail(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        spin_lock_irqsave(&wq_head->lock, flags);
        __remove_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key,
                        wait_queue_entry_t *bookmark)
{
        wait_queue_entry_t *curr, *next;
        int cnt = 0;

        if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
                curr = list_next_entry(bookmark, entry);

                list_del(&bookmark->entry);
                bookmark->flags = 0;
        } else
                curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

        if (&curr->entry == &wq_head->head)
                return nr_exclusive;

        list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
                unsigned flags = curr->flags;
                int ret;

                if (flags & WQ_FLAG_BOOKMARK)
                        continue;

                ret = curr->func(curr, mode, wake_flags, key);
                if (ret < 0)
                        break;
                if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;

                if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
                                (&next->entry != &wq_head->head)) {
                        bookmark->flags = WQ_FLAG_BOOKMARK;
                        list_add_tail(&bookmark->entry, &next->entry);
                        break;
                }
        }

        return nr_exclusive;
}
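
/*
 * For illustration (a sketch; "wq" and the waiters are assumed): with
 * several exclusive waiters and one non-exclusive waiter queued on "wq",
 * the semantics above work out as follows, assuming each wakeup succeeds
 * (try_to_wake_up() returning zero makes the scan continue):
 *
 *      __wake_up(&wq, TASK_NORMAL, 1, NULL);   // wake_up(): wakes the
 *                                              // non-exclusive waiter plus
 *                                              // one exclusive waiter
 *      __wake_up(&wq, TASK_NORMAL, 0, NULL);   // wake_up_all(): wakes
 *                                              // every waiter
 */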

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        unsigned long flags;
        wait_queue_entry_t bookmark;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        spin_lock_irqsave(&wq_head->lock, flags);
        nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
        spin_unlock_irqrestore(&wq_head->lock, flags);

        while (bookmark.flags & WQ_FLAG_BOOKMARK) {
                spin_lock_irqsave(&wq_head->lock, flags);
                nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
                                                wake_flags, key, &bookmark);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
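
/*
 * For reference, the common wake_up*() macros in <linux/wait.h> map onto
 * __wake_up() roughly as follows (TASK_NORMAL is
 * TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE):
 *
 *      wake_up(&wq)                    __wake_up(&wq, TASK_NORMAL, 1, NULL)
 *      wake_up_nr(&wq, nr)             __wake_up(&wq, TASK_NORMAL, nr, NULL)
 *      wake_up_all(&wq)                __wake_up(&wq, TASK_NORMAL, 0, NULL)
 *      wake_up_interruptible(&wq)      __wake_up(&wq, TASK_INTERRUPTIBLE, 1, NULL)
 */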

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
        __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
        __wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
        __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        int wake_flags = WF_SYNC;

        if (unlikely(!wq_head))
                return;

        if (unlikely(nr_exclusive != 1))
                wake_flags = 0;

        __wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
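
/*
 * For reference, the usual entry points are the _sync macros in
 * <linux/wait.h>, e.g.:
 *
 *      #define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 */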

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see the waitqueue addition, _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
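
/*
 * For illustration, the classic open-coded wait loop these helpers are
 * built for (a minimal sketch; "condition" and "wq_head" are assumed to
 * be supplied by the caller):
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *              if (condition)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&wq_head, &wait);
 */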

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue_entry_tail(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
        wq_entry->flags = flags;
        wq_entry->private = current;
        wq_entry->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        long ret = 0;

        spin_lock_irqsave(&wq_head->lock, flags);
        if (unlikely(signal_pending_state(state, current))) {
                /*
                 * An exclusive waiter must not fail if it was selected by a
                 * wakeup; it should "consume" the condition we were waiting
                 * for.
                 *
                 * The caller will recheck the condition and return success if
                 * we were already woken up; we cannot miss the event because
                 * wakeup locks/unlocks the same wq_head->lock.
                 *
                 * But we need to ensure that a set-condition plus a wakeup
                 * after it cannot see us; it should wake up another exclusive
                 * waiter if we fail.
                 */
                list_del_init(&wq_entry->entry);
                ret = -ERESTARTSYS;
        } else {
                if (list_empty(&wq_entry->entry)) {
                        if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
                                __add_wait_queue_entry_tail(wq_head, wq_entry);
                        else
                                __add_wait_queue(wq_head, wq_entry);
                }
                set_current_state(state);
        }
        spin_unlock_irqrestore(&wq_head->lock, flags);

        return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
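
/*
 * For illustration, prepare_to_wait_event() is the workhorse of the
 * wait_event*() macro family; ___wait_event() in <linux/wait.h> drives
 * it roughly like this (a simplified sketch, details elided):
 *
 *      init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *      for (;;) {
 *              long __int = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *
 *              if (condition)
 *                      break;
 *              if (___wait_is_interruptible(state) && __int)
 *                      return __int;   // -ERESTARTSYS: a signal arrived
 *              schedule();
 *      }
 *      finish_wait(&wq_head, &wq_entry);
 */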

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wait queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock(&wq->lock);
        schedule();
        spin_lock(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock_irq(&wq->lock);
        schedule();
        spin_lock_irq(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
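
/*
 * For illustration, the intended calling pattern (a sketch; "cond" is an
 * assumed caller-side condition): take wq->lock around the condition
 * check and let do_wait_intr() drop it only across schedule():
 *
 *      DEFINE_WAIT(wait);
 *      int err = 0;
 *
 *      spin_lock(&wq.lock);
 *      while (!cond) {
 *              err = do_wait_intr(&wq, &wait);
 *              if (err)
 *                      break;          // -ERESTARTSYS on signal
 *      }
 *      spin_unlock(&wq.lock);
 *      finish_wait(&wq, &wait);        // takes the lock itself
 */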

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPUs that we haven't seen yet (and that might
         *    still change the stack area), and
         *  - all other users take the lock (ie we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wq_entry->entry)) {
                spin_lock_irqsave(&wq_head->lock, flags);
                list_del_init(&wq_entry->entry);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wq_entry, mode, sync, key);

        if (ret)
                list_del_init(&wq_entry->entry);

        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
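
/*
 * For reference, DEFINE_WAIT() in <linux/wait.h> installs this function
 * as the wakeup callback:
 *
 *      #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
 */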

static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // waiter                                // waker
 *
 *     p->state = mode;                         condition = true;
 *     smp_mb(); // A                           smp_wmb(); // C
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))  wq_entry->flags |= WQ_FLAG_WOKEN;
 *         schedule()                           try_to_wake_up();
 *     p->state = TASK_RUNNING;             ~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;       condition = true;
 *     smp_mb(); // B                           smp_wmb(); // C
 *                                              wq_entry->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq_head, &wait);
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
        set_current_state(mode); /* A */
        /*
         * The above implies an smp_mb(), which matches with the smp_wmb() from
         * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
         * also observe all state before the wakeup.
         */
        if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below implies an smp_mb(); it too pairs with the smp_wmb() from
         * woken_wake_function() such that we must either observe the wait
         * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
         * an event.
         */
        smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}
EXPORT_SYMBOL(wait_woken);
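
/*
 * For illustration, the skeleton above in a concrete, compilable shape
 * (a sketch; "condition" is an assumed caller-side test):
 *
 *      DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *      add_wait_queue(&wq_head, &wait);
 *      while (!condition && timeout)
 *              timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *      remove_wait_queue(&wq_head, &wait);
 */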

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        /*
         * Although this function is called under the waitqueue lock, LOCK
         * doesn't imply a write barrier, and users expect write-barrier
         * semantics from wakeup functions.  The following smp_wmb() is
         * equivalent to the smp_wmb() in try_to_wake_up() and is paired
         * with the smp_store_mb() in wait_woken().
         */
        smp_wmb(); /* C */
        wq_entry->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
