From f7dd1cee9a4e2b1450e4a3732636dfbf28562ee4 Mon Sep 17 00:00:00 2001
From: Michel Lespinasse
Date: Tue, 7 May 2013 06:45:50 -0700
Subject: rwsem: shorter spinlocked section in rwsem_down_failed_common()

This change reduces the size of the spinlocked and TASK_UNINTERRUPTIBLE
sections in rwsem_down_failed_common():

- We only need the sem->wait_lock to insert ourselves on the wait_list;
  the waiter node can be prepared outside of the wait_lock.

- The task state only needs to be set to TASK_UNINTERRUPTIBLE immediately
  before checking if we actually need to sleep; it doesn't need to protect
  the entire function.

Signed-off-by: Michel Lespinasse
Reviewed-by: Rik van Riel
Reviewed-by: Peter Hurley
Acked-by: Davidlohr Bueso
Signed-off-by: Linus Torvalds
---
 lib/rwsem.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/lib/rwsem.c b/lib/rwsem.c
index 672eb33218a..40636454cf3 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -188,14 +188,12 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	struct task_struct *tsk = current;
 	signed long count;
 
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
 	/* set up my own style of waitqueue */
-	raw_spin_lock_irq(&sem->wait_lock);
 	waiter.task = tsk;
 	waiter.type = type;
 	get_task_struct(tsk);
 
+	raw_spin_lock_irq(&sem->wait_lock);
 	if (list_empty(&sem->wait_list))
 		adjustment += RWSEM_WAITING_BIAS;
 	list_add_tail(&waiter.list, &sem->wait_list);
@@ -218,7 +216,8 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	raw_spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
-	for (;;) {
+	while (true) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		if (!waiter.task)
 			break;
 
@@ -231,7 +230,6 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 		}
 		raw_spin_unlock_irq(&sem->wait_lock);
 		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	}
 
 	tsk->state = TASK_RUNNING;
--
cgit v1.2.3
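
[Editor's illustrative sketch, not part of the patch and not kernel code.]
The same pattern can be sketched in userspace C with pthreads; the names
below (struct waiter, wait_for_wakeup(), wake_all_waiters()) are invented
for the example. The waiter node is prepared before taking the lock because
it stays thread-private until it is linked into the shared list, the mutex
is held only for the insertion and the sleep/wake handshake, and the wakeup
condition is re-checked each time around the loop before sleeping;
pthread_cond_wait() stands in for the set_task_state() + schedule() pairing
used in the kernel slow path.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct waiter {
        struct waiter *next;
        bool woken;
};

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cond = PTHREAD_COND_INITIALIZER;
static struct waiter *wait_list;

static void *wait_for_wakeup(void *arg)
{
        (void)arg;

        /* Prepare the waiter node outside the lock: it is private to this
         * thread until it is linked into the shared list. */
        struct waiter self = { .next = NULL, .woken = false };

        /* Hold the lock only for the list insertion. */
        pthread_mutex_lock(&wait_lock);
        self.next = wait_list;
        wait_list = &self;

        /* Re-check the wakeup condition on every iteration before sleeping;
         * pthread_cond_wait() performs the "mark as sleeping, then sleep"
         * handshake that set_task_state() + schedule() form in the kernel. */
        while (!self.woken)
                pthread_cond_wait(&wait_cond, &wait_lock);
        pthread_mutex_unlock(&wait_lock);

        printf("waiter: woken up\n");
        return NULL;
}

static void wake_all_waiters(void)
{
        pthread_mutex_lock(&wait_lock);
        for (struct waiter *w = wait_list; w; w = w->next)
                w->woken = true;
        wait_list = NULL;
        pthread_cond_broadcast(&wait_cond);
        pthread_mutex_unlock(&wait_lock);
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, wait_for_wakeup, NULL);
        sleep(1);               /* let the waiter queue itself */
        wake_all_waiters();
        pthread_join(tid, NULL);
        return 0;
}

Built with cc -pthread, the main thread queues a waiter, pauses briefly, then
wakes it; on the waiter side the critical section is just the two pointer
assignments plus the condition re-check, mirroring the shortened wait_lock
section in the patch above.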