author     Mike Galbraith <umgwanakikbuti@gmail.com>   2015-02-26 09:02:05 +0100
committer  Anders Roxell <anders.roxell@linaro.org>    2015-03-23 09:31:18 +0100
commit     74928f45fdb4242eeb9d4bc0fa29a1b47118b6a6
tree       a1d0d0e73a704829c9a34a8010e4688dd05658e3
parent     c78b7df36137356ee55e8a1285ed9da50f10b1a3
locking: ww_mutex: fix ww_mutex vs self-deadlock
If the caller already holds the mutex, task_blocks_on_rt_mutex()
returns -EDEADLK and we proceed directly to rt_mutex_handle_deadlock(),
where it's instant game over.

Instead, let ww_mutexes return EDEADLK/EALREADY as they want to.
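
To illustrate the failure mode, a minimal sketch assuming the standard
ww_mutex API; demo_class and obj are hypothetical names, not from this
patch. A relock within one acquire context is defined to return
-EALREADY to the caller, but the -rt slowpath turned it into a fatal
deadlock report:

	static DEFINE_WW_CLASS(demo_class);	/* hypothetical class */

	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &demo_class);
	ww_mutex_lock(&obj->lock, &ctx);	/* first lock: succeeds */
	err = ww_mutex_lock(&obj->lock, &ctx);	/* same lock, same ctx */
	/*
	 * Expected: err == -EALREADY, and the caller recovers.
	 * On -rt before this fix: task_blocks_on_rt_mutex() returns
	 * -EDEADLK and rt_mutex_handle_deadlock() kills the task.
	 */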
Cc: stable-rt@vger.kernel.org
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
 kernel/locking/rtmutex.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 27a1993111f9..ee4e7e747e06 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1694,13 +1694,20 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	if (likely(!ret))
 		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
 					  ww_ctx);
+	else if (ww_ctx) {
+		/* ww_mutex received EDEADLK, let it become EALREADY */
+		ret = __mutex_lock_check_stamp(lock, ww_ctx);
+		BUG_ON(!ret);
+	}
 
 	set_current_state(TASK_RUNNING);
 
 	if (unlikely(ret)) {
 		if (rt_mutex_has_waiters(lock))
 			remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+		/* ww_mutex want to report EDEADLK/EALREADY, let them */
+		if (!ww_ctx)
+			rt_mutex_handle_deadlock(ret, chwalk, &waiter);
 	} else if (ww_ctx) {
 		ww_mutex_account_lock(lock, ww_ctx);
 	}
@@ -2239,8 +2246,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_c
 	might_sleep();
 
 	mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
-	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL,
-				RT_MUTEX_FULL_CHAINWALK, ww_ctx);
+	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
 	if (ret)
 		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
 	else if (!ret && ww_ctx->acquired > 1)
@@ -2258,8 +2264,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
 	might_sleep();
 
 	mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
-	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL,
-				RT_MUTEX_FULL_CHAINWALK, ww_ctx);
+	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
 	if (ret)
 		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
 	else if (!ret && ww_ctx->acquired > 1)
@@ -2271,11 +2276,13 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock);
 
 void __sched ww_mutex_unlock(struct ww_mutex *lock)
 {
+	int nest = !!lock->ctx;
+
 	/*
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
 	 */
-	if (lock->ctx) {
+	if (nest) {
 #ifdef CONFIG_DEBUG_MUTEXES
 		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
 #endif
@@ -2284,7 +2291,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 		lock->ctx = NULL;
 	}
 
-	mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+	mutex_release(&lock->base.dep_map, nest, _RET_IP_);
 	rt_mutex_unlock(&lock->base.lock);
 }
 EXPORT_SYMBOL(ww_mutex_unlock);
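
As a usage note, not part of the commit: the reason EDEADLK/EALREADY
must reach the caller is the documented ww_mutex back-off protocol
(Documentation/ww-mutex-design.txt). A sketch with hypothetical names
demo_class, obj_a and obj_b, and with error handling abbreviated:

	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &demo_class);
retry:
	ww_mutex_lock(&obj_a->lock, &ctx);
	err = ww_mutex_lock(&obj_b->lock, &ctx);
	if (err == -EDEADLK) {
		/*
		 * Lost the acquire-stamp tie-break: drop what we hold,
		 * sleep on the contended lock, then retake both. This
		 * is the branch a task never reached while the -rt
		 * slowpath killed it in rt_mutex_handle_deadlock().
		 */
		ww_mutex_unlock(&obj_a->lock);
		ww_mutex_lock_slow(&obj_b->lock, &ctx);
		goto retry;
	}
	/* on the retry pass err is -EALREADY: obj_b is already ours */
	ww_acquire_done(&ctx);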