Diffstat (limited to 'kernel/sched/wait-simple.c')
-rw-r--r--  kernel/sched/wait-simple.c | 73 ++++++++++++++++++++++++++++---------
1 file changed, 64 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/wait-simple.c b/kernel/sched/wait-simple.c
index 040d7146e4df..2c856267445b 100644
--- a/kernel/sched/wait-simple.c
+++ b/kernel/sched/wait-simple.c
@@ -12,6 +12,28 @@
 #include <linux/sched.h>
 #include <linux/wait-simple.h>

+/* Adds w to head->list. Must be called with head->lock locked. */
+static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
+{
+	list_add(&w->node, &head->list);
+	/* The enqueue must be visible before the caller re-checks the condition */
+	smp_mb();
+}
+
+/* Removes w from head->list. Must be called with head->lock locked. */
+static inline void __swait_dequeue(struct swaiter *w)
+{
+	list_del_init(&w->node);
+}
+
+/* Check whether a head has waiters enqueued */
+static inline bool swait_head_has_waiters(struct swait_head *h)
+{
+	/* Make sure the condition is visible before checking list_empty() */
+	smp_mb();
+	return !list_empty(&h->list);
+}
+
 void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
 {
 	raw_spin_lock_init(&head->lock);
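
The two full barriers above pair across the waiter and the waker: __swait_enqueue() orders "add waiter to the list" before the caller's re-check of its wait condition, while swait_head_has_waiters() orders the waker's "set condition" before its lockless list_empty() check. A rough interleaving sketch (the "cond" flag stands in for whatever condition the caller waits on; it is not part of this file):

	/* waiter (via swait_prepare)        waker (via __swait_wake)
	 *
	 *   list_add(&w->node, ...);         cond = true;
	 *   smp_mb();                        smp_mb();
	 *   if (!cond) schedule();           if (!list_empty(...)) wake ...
	 *
	 * With both barriers in place, at least one side observes the
	 * other's store: either the waiter sees cond set and skips
	 * sleeping, or the waker sees the waiter on the list and wakes
	 * it. Drop either barrier and the wakeup can be lost.
	 */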
@@ -20,19 +42,31 @@ void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
 }
 EXPORT_SYMBOL(__init_swait_head);

+void swait_prepare_locked(struct swait_head *head, struct swaiter *w)
+{
+	w->task = current;
+	if (list_empty(&w->node))
+		__swait_enqueue(head, w);
+}
+
 void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
 {
 	unsigned long flags;

 	raw_spin_lock_irqsave(&head->lock, flags);
-	w->task = current;
-	if (list_empty(&w->node))
-		__swait_enqueue(head, w);
-	set_current_state(state);
+	swait_prepare_locked(head, w);
+	__set_current_state(state);
 	raw_spin_unlock_irqrestore(&head->lock, flags);
 }
 EXPORT_SYMBOL(swait_prepare);

+void swait_finish_locked(struct swait_head *head, struct swaiter *w)
+{
+	__set_current_state(TASK_RUNNING);
+	if (w->task)
+		__swait_dequeue(w);
+}
+
 void swait_finish(struct swait_head *head, struct swaiter *w)
 {
 	unsigned long flags;
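
The split into *_locked variants lets callers that already hold head->lock, or that must set the task state themselves, drive the enqueue and dequeue directly. For the common case, a minimal waiter-side sketch, assuming only the API visible in this file; the names wait_until_done, q and done are hypothetical:

#include <linux/sched.h>
#include <linux/wait-simple.h>

/* Hypothetical example, not part of the patch. */
static void wait_until_done(struct swait_head *q, bool *done)
{
	/* struct swaiter is assumed to have just the task and node
	 * members used in this file; node must start out empty so the
	 * list_empty() test in swait_prepare_locked() enqueues it. */
	struct swaiter w = { .task = NULL, .node = LIST_HEAD_INIT(w.node) };

	for (;;) {
		/* Enqueue (once) and set the task state under head->lock. */
		swait_prepare(q, &w, TASK_UNINTERRUPTIBLE);
		if (*done)
			break;
		schedule();
	}
	/* Back to TASK_RUNNING; dequeue unless the waker already did. */
	swait_finish(q, &w);
}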
@@ -46,22 +80,43 @@ void swait_finish(struct swait_head *head, struct swaiter *w)
 }
 EXPORT_SYMBOL(swait_finish);

-int __swait_wake(struct swait_head *head, unsigned int state)
+unsigned int
+__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num)
 {
 	struct swaiter *curr, *next;
-	unsigned long flags;
 	int woken = 0;

-	raw_spin_lock_irqsave(&head->lock, flags);
-
 	list_for_each_entry_safe(curr, next, &head->list, node) {
 		if (wake_up_state(curr->task, state)) {
 			__swait_dequeue(curr);
+			/*
+			 * The waiting task can free the waiter as
+			 * soon as curr->task = NULL is written,
+			 * without taking any locks. A memory barrier
+			 * is required here to prevent the following
+			 * store to curr->task from getting ahead of
+			 * the dequeue operation.
+			 */
+			smp_wmb();
 			curr->task = NULL;
-			woken++;
+			if (++woken == num)
+				break;
 		}
 	}

+	return woken;
+}
+
+unsigned int
+__swait_wake(struct swait_head *head, unsigned int state, unsigned int num)
+{
+	unsigned long flags;
+	int woken;
+
+	if (!swait_head_has_waiters(head))
+		return 0;
+
+	raw_spin_lock_irqsave(&head->lock, flags);
+	woken = __swait_wake_locked(head, state, num);
 	raw_spin_unlock_irqrestore(&head->lock, flags);
 	return woken;
 }
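
On the wake side, the condition store must happen before the call to __swait_wake() so the barrier pairing described above holds. A matching sketch, using the same hypothetical names as the waiter example:

/* Hypothetical example, not part of the patch. */
static void signal_done(struct swait_head *q, bool *done)
{
	/* Publish the condition first; the smp_mb() in
	 * swait_head_has_waiters() orders this store before the
	 * lockless list_empty() check on the fast path. */
	*done = true;
	__swait_wake(q, TASK_NORMAL, 1);
}

Passing num == 1 wakes at most one waiter. Passing 0 wakes them all: ++woken starts at 1 and can never equal 0, so the early break is never taken.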