author    Tejun Heo <tj@kernel.org>    2012-12-18 10:35:02 -0800
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-03-04 06:03:38 +0800
commit    e50e7d633e76caf48ca5341a9679fcfdd1318d80 (patch)
tree      3b21091c3ae0f0e6d68193d6e3a72f7c1b85022d /kernel
parent    c205ae0e166fa0fdfdfb2cb55a4fc597b0dd68be (diff)
workqueue: consider work function when searching for busy work items
commit a2c1c57be8d9fd5b716113c8991d3d702eeacf77 upstream.

To avoid executing the same work item concurrently, workqueue hashes
currently busy workers according to their current work items and looks up
the table when it wants to execute a new work item. If there already is a
worker which is executing the new work item, the new item is queued to the
found worker so that it gets executed only after the current execution
finishes.

Unfortunately, a work item may be freed while being executed and thus
recycled for different purposes. If it gets recycled for a different work
item and queued while the previous execution is still in progress,
workqueue may make the new work item wait for the old one although the two
aren't really related in any way. In extreme cases, this false dependency
may lead to deadlock, although that is extremely unlikely given that there
aren't many self-freeing work item users and they usually don't wait for
other work items.

To alleviate the problem, record the current work function in each busy
worker and match it together with the work item address in
find_worker_executing_work(). While this isn't complete, it ensures that
unrelated work items don't interact with each other, and in the very
unlikely case where a twisted wq user triggers it, the dependency is
always onto itself, making the culprit easy to spot.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Andrey Isakov <andy51@gmx.ru>
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=51701
Cc: stable@vger.kernel.org
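To make the recycling scenario concrete, below is a minimal sketch (not part
of this patch) of the self-freeing work item pattern the message describes:
the handler kfree()s its own work struct, so the allocator may hand the same
address out again while the first execution is still marked as running. The
my_work/my_handler/my_queue names are illustrative only.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_work {			/* hypothetical caller-owned work item */
	struct work_struct	work;
	int			payload;
};

static void my_handler(struct work_struct *work)
{
	struct my_work *mw = container_of(work, struct my_work, work);

	/* ... consume mw->payload ... */

	/*
	 * Freeing the work item from its own handler is legal, but from this
	 * point the allocator may recycle the address while the workqueue
	 * still considers @work to be executing.
	 */
	kfree(mw);
}

static int my_queue(int payload)
{
	struct my_work *mw = kzalloc(sizeof(*mw), GFP_KERNEL);

	if (!mw)
		return -ENOMEM;
	mw->payload = payload;
	INIT_WORK(&mw->work, my_handler);
	schedule_work(&mw->work);
	return 0;
}

If the freed address is reused for an unrelated work item queued with a
different handler, the pre-patch lookup, which keyed only on the work item
address, would treat it as already executing and serialize it behind
my_handler; matching the work function as well breaks that false dependency.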
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/workqueue.c    36
1 file changed, 29 insertions, 7 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 033ad5b8c1b..3a3a98f0c17 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -138,6 +138,7 @@ struct worker {
};
struct work_struct *current_work; /* L: work being processed */
+ work_func_t current_func; /* L: current_work's fn */
struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
struct list_head scheduled; /* L: scheduled works */
struct task_struct *task; /* I: worker task */
@@ -910,7 +911,8 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
struct hlist_node *tmp;
hlist_for_each_entry(worker, tmp, bwh, hentry)
- if (worker->current_work == work)
+ if (worker->current_work == work &&
+ worker->current_func == work->func)
return worker;
return NULL;
}
@@ -920,9 +922,27 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
* @gcwq: gcwq of interest
* @work: work to find worker for
*
- * Find a worker which is executing @work on @gcwq. This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
+ * Find a worker which is executing @work on @gcwq by searching
+ * @gcwq->busy_hash which is keyed by the address of @work. For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function. This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while still
+ * being executed.
+ *
+ * This is a bit tricky. A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item. If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address, work function and workqueue
+ * to avoid false positives. Note that this isn't complete as one may
+ * construct a work function which can introduce dependency onto itself
+ * through a recycled work item. Well, if somebody wants to shoot oneself
+ * in the foot that badly, there's only so much we can do, and if such
+ * deadlock actually occurs, it should be easy to locate the culprit work
+ * function.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
@@ -2168,7 +2188,6 @@ __acquires(&gcwq->lock)
struct global_cwq *gcwq = pool->gcwq;
struct hlist_head *bwh = busy_worker_head(gcwq, work);
bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
- work_func_t f = work->func;
int work_color;
struct worker *collision;
#ifdef CONFIG_LOCKDEP
@@ -2208,6 +2227,7 @@ __acquires(&gcwq->lock)
debug_work_deactivate(work);
hlist_add_head(&worker->hentry, bwh);
worker->current_work = work;
+ worker->current_func = work->func;
worker->current_cwq = cwq;
work_color = get_work_color(work);
@@ -2240,7 +2260,7 @@ __acquires(&gcwq->lock)
lock_map_acquire_read(&cwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
trace_workqueue_execute_start(work);
- f(work);
+ worker->current_func(work);
/*
* While we must be careful to not use "work" after this, the trace
* point will only record its address.
@@ -2252,7 +2272,8 @@ __acquires(&gcwq->lock)
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
" last function: %pf\n",
- current->comm, preempt_count(), task_pid_nr(current), f);
+ current->comm, preempt_count(), task_pid_nr(current),
+ worker->current_func);
debug_show_held_locks(current);
dump_stack();
}
@@ -2266,6 +2287,7 @@ __acquires(&gcwq->lock)
/* we're done with it, release */
hlist_del_init(&worker->hentry);
worker->current_work = NULL;
+ worker->current_func = NULL;
worker->current_cwq = NULL;
cwq_dec_nr_in_flight(cwq, work_color);
}
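For reference, here is a condensed sketch of the lookup rule that the new
comment in the diff describes: a busy worker only counts as already
executing a work item when both the item's address and its work function
match. Field and function names (busy_worker, find_busy_worker) are
illustrative, not the kernel's; the loop uses the older four-argument
hlist_for_each_entry() form that matches this tree.

#include <linux/list.h>
#include <linux/workqueue.h>

struct busy_worker {
	struct hlist_node	hentry;		/* linked into a busy-worker hash bucket */
	struct work_struct	*current_work;	/* work item being executed */
	work_func_t		current_func;	/* that item's handler at execution start */
};

static struct busy_worker *find_busy_worker(struct hlist_head *bwh,
					    struct work_struct *work)
{
	struct busy_worker *w;
	struct hlist_node *tmp;

	hlist_for_each_entry(w, tmp, bwh, hentry)
		if (w->current_work == work &&		/* same address ...     */
		    w->current_func == work->func)	/* ... and same handler */
			return w;
	return NULL;
}

Keying the busy hash by address alone is what allowed a recycled allocation
to be mistaken for the still-running item; adding the function comparison
narrows any remaining false match to a work item reusing the same function,
which, as the comment notes, can only create a dependency onto itself.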