From e42986de481238204f6e0b0f4434da428895c20b Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Wed, 15 Aug 2012 23:25:38 +0900
Subject: workqueue: change value of lcpu in __queue_delayed_work_on()

We assign a cpu id to the work struct's data field in
__queue_delayed_work_on().  In the current implementation, when a work
item is queued for the first time, the id of the currently running cpu
is assigned.  If we call __queue_delayed_work_on() for CPU A while
running on CPU B, __queue_work() invoked from delayed_work_timer_fn()
goes into the following sub-optimal path when WQ_NON_REENTRANT is set.

	gcwq = get_gcwq(cpu);
	if (wq->flags & WQ_NON_REENTRANT &&
	    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {

Set lcpu to @cpu, and fall back to the local cpu only if lcpu is
WORK_CPU_UNBOUND.  This is sufficient to avoid the sub-optimal path.

tj: Slightly rephrased the comment.

Signed-off-by: Joonsoo Kim
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c29f2dc0f4f..99ee9b93926 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1356,9 +1356,15 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 	if (!(wq->flags & WQ_UNBOUND)) {
 		struct global_cwq *gcwq = get_work_gcwq(work);
 
-		if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
+		/*
+		 * If we cannot get the last gcwq from @work directly,
+		 * select the last CPU such that it avoids unnecessarily
+		 * triggering non-reentrancy check in __queue_work().
+		 */
+		lcpu = cpu;
+		if (gcwq)
 			lcpu = gcwq->cpu;
-		else
+		if (lcpu == WORK_CPU_UNBOUND)
 			lcpu = raw_smp_processor_id();
 	} else {
 		lcpu = WORK_CPU_UNBOUND;
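
For illustration, a minimal userspace sketch of the lcpu selection before and
after this change.  It is not kernel code: the gcwq struct, the CPU numbers and
the WORK_CPU_UNBOUND value below are simplified stand-ins, and only the control
flow mirrors the hunk above.

/*
 * Illustrative userspace sketch (not kernel code) of how the patch changes
 * lcpu selection in __queue_delayed_work().  The gcwq struct, the CPU
 * numbers and the WORK_CPU_UNBOUND value are simplified stand-ins.
 */
#include <stdio.h>

#define WORK_CPU_UNBOUND	(-1)

struct gcwq {
	int cpu;		/* CPU the work item last ran on */
};

/* Old selection: the requested @cpu is never considered. */
static int lcpu_old(int cpu, const struct gcwq *last_gcwq, int local_cpu)
{
	(void)cpu;		/* old code ignored the requested CPU */

	if (last_gcwq && last_gcwq->cpu != WORK_CPU_UNBOUND)
		return last_gcwq->cpu;
	return local_cpu;	/* first queueing records the *local* CPU */
}

/* New selection: last gcwq if known, else the requested @cpu, else local. */
static int lcpu_new(int cpu, const struct gcwq *last_gcwq, int local_cpu)
{
	int lcpu = cpu;

	if (last_gcwq)
		lcpu = last_gcwq->cpu;
	if (lcpu == WORK_CPU_UNBOUND)
		lcpu = local_cpu;
	return lcpu;
}

int main(void)
{
	/*
	 * queue_delayed_work_on() asked for CPU 0 ("CPU A") but runs on
	 * CPU 1 ("CPU B"); the work item has no last gcwq yet.
	 */
	int requested_cpu = 0, local_cpu = 1;

	printf("old lcpu = %d (differs from the requested CPU)\n",
	       lcpu_old(requested_cpu, NULL, local_cpu));
	printf("new lcpu = %d (matches the requested CPU)\n",
	       lcpu_new(requested_cpu, NULL, local_cpu));
	return 0;
}

With the old helper, a work item queued for CPU A for the first time records
the local CPU B in its data field, so when the timer fires __queue_work() sees
last_gcwq != gcwq and takes the WQ_NON_REENTRANT slow path quoted in the
changelog; with the new helper the recorded CPU matches the requested one.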