Diffstat (limited to 'kernel')
-rw-r--r--  kernel/locking/rtmutex.c     | 13
-rw-r--r--  kernel/sched/core.c          |  1
-rw-r--r--  kernel/workqueue_internal.h  |  3
3 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 78a6c4a223c1..b73cd7c87551 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -22,6 +22,7 @@
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/ww_mutex.h>
+#include <linux/blkdev.h>
#include "rtmutex_common.h"
@@ -1968,6 +1969,15 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
+ /*
+ * If the rt_mutex blocks, sched_submit_work() will not call
+ * blk_schedule_flush_plug() because tsk_is_pi_blocked() is true.
+ * Flush the block plug here instead; otherwise a deadlock in
+ * device mapper may happen.
+ */
+ if (unlikely(blk_needs_flush_plug(current)))
+ blk_schedule_flush_plug(current);
+
return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
}
@@ -1985,6 +1995,9 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
+ if (unlikely(blk_needs_flush_plug(current)))
+ blk_schedule_flush_plug(current);
+
return slowfn(lock, state, timeout, chwalk, ww_ctx);
}
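
Both hunks above exist because of the tsk_is_pi_blocked() early return in sched_submit_work(): once a task blocks on a PI lock, the scheduler no longer flushes its block plug on its behalf, so the rt_mutex fast paths have to flush it themselves before entering the slow path. A rough sketch of that scheduler-side check, paraphrased from kernels of this vintage rather than copied from the tree this diff applies to:

static inline void sched_submit_work(struct task_struct *tsk)
{
	/* Nothing to do for runnable tasks or tasks blocked on a PI lock. */
	if (!tsk->state || tsk_is_pi_blocked(tsk))
		return;

	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}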
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5dce33351ae6..034a738f1bf7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8310,6 +8310,7 @@ void sched_move_task(struct task_struct *tsk)
struct rq *rq;
rq = task_rq_lock(tsk, &rf);
+ update_rq_clock(rq);
running = task_current(rq, tsk);
queued = task_on_rq_queued(tsk);
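
The single added line follows the usual rule for this path: once the rq lock is held and the task may be dequeued and re-enqueued, refresh the rq clock first so the dequeue/enqueue accounting does not run against a stale rq->clock (kernels that carry the clock-update debug check would otherwise warn here). A hedged sketch of the ordering, using a hypothetical helper rather than the real sched_move_task() body:

/*
 * Hypothetical helper, not the actual sched_move_task() code: illustrates
 * the lock -> update_rq_clock() -> dequeue/enqueue ordering the hunk
 * above establishes.
 */
static void requeue_task_locked(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_held(&rq->lock);

	update_rq_clock(rq);			/* refresh rq->clock before use */
	dequeue_task(rq, p, DEQUEUE_SAVE);
	enqueue_task(rq, p, ENQUEUE_RESTORE);
}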
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index f000c4d6917e..42d1e3974554 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
+#include <linux/preempt.h>
struct worker_pool;
@@ -60,7 +61,7 @@ struct worker {
*/
static inline struct worker *current_wq_worker(void)
{
- if (current->flags & PF_WQ_WORKER)
+ if (in_task() && (current->flags & PF_WQ_WORKER))
return kthread_data(current);
return NULL;
}
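
The in_task() guard keeps current_wq_worker() from claiming a worker when the caller has merely interrupted one: in hard or soft interrupt context, kthread_data(current) would still return the interrupted worker's private data even though no work item is actually running there. That is also why <linux/preempt.h> is now included. For reference, in_task() is defined along these lines (a sketch of the <linux/preempt.h> macro; check the tree this diff applies to for the exact form):

/*
 * True only in plain process context, i.e. not in NMI, hard interrupt
 * or softirq-serving context.
 */
#define in_task()	(!(preempt_count() & \
			   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))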