author    Oleg Nesterov <oleg@tv-sign.ru>    2008-04-29 01:00:27 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-04-29 08:06:11 -0700
commit    00dfcaf748f46de89efe41baa298b5cf9adda67e
tree      4420dbfac9ba213e1604320cf9615a505ca909fd
parent    786083667e0ced85ce17c4c0b6c57a9f47c5b9f2
workqueues: shrink cpu_populated_map when CPU dies
When cpu_populated_map was introduced, it was assumed that cwq->thread could survive after CPU_DEAD; that is why we never shrink cpu_populated_map.

This is not very nice; we can safely remove the already dead CPU from the map. The only required change is that destroy_workqueue() must hold the hotplug lock until it has destroyed all cwq->thread's, to protect cpu_populated_map. We could make a local copy of the cpu mask and drop the lock, but sizeof(cpumask_t) may be very large.

Also, fix the comment near queue_work(). Unless _cpu_down() happens, we do guarantee the cpu-affinity of the work_struct, and we have users which rely on this.

[akpm@linux-foundation.org: repair comment]
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  kernel/workqueue.c  13
1 file changed, 9 insertions(+), 4 deletions(-)
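
For readability, here is a simplified sketch of destroy_workqueue() after this change, reconstructed from the hunks below. The surrounding code is paraphrased from the kernel of that era (e.g. wq_cpu_map() and the earlier get_online_cpus() call are assumed from context), so treat it as illustrative rather than the exact source:

void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);	/* assumed helper of that era */
	struct cpu_workqueue_struct *cwq;
	int cpu;

	get_online_cpus();	/* assumed: matches the put_online_cpus() moved below */
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	/*
	 * Keep the hotplug lock held while the per-cpu threads are
	 * destroyed, so cpu_populated_map (and thus *cpu_map) cannot
	 * be shrunk by CPU_DEAD while we iterate it.
	 */
	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}
	put_online_cpus();	/* dropped only after the loop */

	free_percpu(wq->cpu_wq);
	kfree(wq);
}

The point of the reordering is that the cleanup loop iterates cpu_populated_map; dropping the hotplug lock before the loop, as the old code did, would let a concurrent CPU_DEAD shrink the map underneath it once this patch starts clearing dead CPUs from it.
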
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 00ff4d08e37..1ad0ee489cd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -158,8 +158,8 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*
- * We queue the work to the CPU it was submitted, but there is no
- * guarantee that it will be processed by that CPU.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
*/
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
@@ -815,12 +815,12 @@ void destroy_workqueue(struct workqueue_struct *wq)
spin_lock(&workqueue_lock);
list_del(&wq->list);
spin_unlock(&workqueue_lock);
- put_online_cpus();
for_each_cpu_mask(cpu, *cpu_map) {
cwq = per_cpu_ptr(wq->cpu_wq, cpu);
cleanup_workqueue_thread(cwq, cpu);
}
+ put_online_cpus();
free_percpu(wq->cpu_wq);
kfree(wq);
@@ -838,7 +838,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
action &= ~CPU_TASKS_FROZEN;
switch (action) {
-
case CPU_UP_PREPARE:
cpu_set(cpu, cpu_populated_map);
}
@@ -866,6 +865,12 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
}
}
+ switch (action) {
+ case CPU_UP_CANCELED:
+ case CPU_DEAD:
+ cpu_clear(cpu, cpu_populated_map);
+ }
+
return NOTIFY_OK;
}