#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)					\
	do {							\
		static struct lock_class_key __key;		\
								\
		__init_waitqueue_head((q), #q, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					     wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
				       wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_signal_pending(state)					\
	((state == TASK_INTERRUPTIBLE && signal_pending(current)) ||	\
	 (state == TASK_KILLABLE && fatal_signal_pending(current)))

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	DEFINE_WAIT(__wait);						\
	long __ret = ret;						\
									\
	for (;;) {							\
		if (exclusive)						\
			prepare_to_wait_exclusive(&wq, &__wait, state);	\
		else							\
			prepare_to_wait(&wq, &__wait, state);		\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_signal_pending(state)) {			\
			__ret = -ERESTARTSYS;				\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})

#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

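/*
 * A minimal usage sketch (my_wq, my_done, my_consumer and my_producer are
 * hypothetical names, not part of this header): one side sleeps until a
 * flag is set, the other side sets the flag and wakes the queue.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_done;
 *
 *	static void my_consumer(void)
 *	{
 *		wait_event(my_wq, my_done);
 *	}
 *
 *	static void my_producer(void)
 *	{
 *		my_done = 1;
 *		wake_up(&my_wq);
 *	}
 */
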
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

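/*
 * A minimal usage sketch (my_wq and my_done are hypothetical): wait up to
 * one second and distinguish a timeout from success.
 *
 *	long left = wait_event_timeout(my_wq, my_done, HZ);
 *	if (!left)
 *		pr_debug("timed out waiting for my_done\n");
 */
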
#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})

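/*
 * A minimal usage sketch (my_wq and my_done are hypothetical): the return
 * value must be checked, since a signal aborts the wait with -ERESTARTSYS.
 *
 *	int err = wait_event_interruptible(my_wq, my_done);
 *	if (err)
 *		return err;
 */
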
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})

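/*
 * A minimal usage sketch (my_wq and my_done are hypothetical): wait up to
 * five seconds, handling signal, timeout and success separately.
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, my_done, 5 * HZ);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 */
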
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

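/*
 * A minimal usage sketch (my_wq and my_done are hypothetical): wait with a
 * 100ms high-resolution timeout; the result is 0 on success, -ETIME on
 * timeout or -ERESTARTSYS if a signal arrived.
 *
 *	ktime_t t = ktime_set(0, 100 * NSEC_PER_MSEC);
 *	int err = wait_event_interruptible_hrtimeout(my_wq, my_done, t);
 *	if (err)
 *		return err;
 */
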
#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})

#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})

/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

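/*
 * A minimal usage sketch (my_wq, my_done, my_waiter and my_waker are
 * hypothetical): the waiter holds wq.lock around the wait, and the waker
 * updates the condition and issues the wakeup under the same lock.
 *
 *	static int my_waiter(void)
 *	{
 *		int err;
 *
 *		spin_lock(&my_wq.lock);
 *		err = wait_event_interruptible_locked(my_wq, my_done);
 *		spin_unlock(&my_wq.lock);
 *		return err;
 *	}
 *
 *	static void my_waker(void)
 *	{
 *		spin_lock(&my_wq.lock);
 *		my_done = 1;
 *		wake_up_locked(&my_wq);
 *		spin_unlock(&my_wq.lock);
 *	}
 */
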
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if it is woken up, further exclusive waiters queued behind it
 * on the list are not woken.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if it is woken up, further exclusive waiters queued behind it
 * on the list are not woken.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))

#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})

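/*
 * A minimal usage sketch (my_wq and my_done are hypothetical): like
 * wait_event_interruptible(), but only a fatal signal interrupts the wait.
 *
 *	int err = wait_event_killable(my_wq, my_done);
 *	if (err)
 *		return err;
 */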

#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)

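/*
 * A minimal usage sketch (my_wq, my_lock and my_count are hypothetical):
 * the condition is re-checked with my_lock held; the macro drops and
 * reacquires the lock around schedule().
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, my_count > 0, my_lock);
 *	my_count--;
 *	spin_unlock_irq(&my_lock);
 */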

#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})

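/*
 * A minimal usage sketch (my_wq, my_lock, my_count and err are
 * hypothetical): the same pattern as wait_event_lock_irq(), but the sleep
 * is interruptible, so the result must be checked before consuming the
 * resource.
 *
 *	spin_lock_irq(&my_lock);
 *	err = wait_event_interruptible_lock_irq(my_wq, my_count > 0, my_lock);
 *	if (!err)
 *		my_count--;
 *	spin_unlock_irq(&my_lock);
 */
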
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1) if
 * the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})

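/*
 * A minimal usage sketch (my_wq, my_lock, my_count and ret are
 * hypothetical): wait under my_lock for up to one second; 0 means the
 * timeout elapsed, a negative value means a signal arrived, a positive
 * value is the number of jiffies left.
 *
 *	spin_lock_irq(&my_lock);
 *	ret = wait_event_interruptible_lock_irq_timeout(my_wq, my_count > 0,
 *							my_lock, HZ);
 *	if (ret > 0)
 *		my_count--;
 *	spin_unlock_irq(&my_lock);
 */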

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
			     signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			  unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
			      int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

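/*
 * A minimal usage sketch (my_flags, MY_FLAG_BUSY and my_bit_wait are
 * hypothetical): one thread sleeps until the bit is cleared; the thread
 * clearing it publishes the change with a barrier and then wakes the
 * hashed waitqueue.
 *
 *	static unsigned long my_flags;
 *	#define MY_FLAG_BUSY	0
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	waiter:
 *		wait_on_bit(&my_flags, MY_FLAG_BUSY, my_bit_wait,
 *			    TASK_UNINTERRUPTIBLE);
 *
 *	clearer:
 *		clear_bit(MY_FLAG_BUSY, &my_flags);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(&my_flags, MY_FLAG_BUSY);
 */
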
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				   int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}
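
/*
 * A minimal usage sketch (my_refs and my_wait_fn are hypothetical): wait
 * for a reference count to drop to zero; whoever drops the last reference
 * calls wake_up_atomic_t().
 *
 *	static atomic_t my_refs = ATOMIC_INIT(1);
 *
 *	static int my_wait_fn(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	waiter:
 *		wait_on_atomic_t(&my_refs, my_wait_fn, TASK_UNINTERRUPTIBLE);
 *
 *	releaser:
 *		if (atomic_dec_and_test(&my_refs))
 *			wake_up_atomic_t(&my_refs);
 */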

#endif