path: root/kernel/cpu.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 19:36:53 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 19:36:53 -0700
commit	7f9dce38378f0a4a298e885553d6bb7121376376 (patch)
tree	5bfd688c9f356f7216bbc3cef3b4c10153de334b	/kernel/cpu.c
parent	26dcce0fabbef75ae426461edf21b5030bad60f3 (diff)
parent	ba42059fbd0aa1ac91b582412b5fedb1258f241f (diff)
Merge branch 'sched/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: hrtick_enabled() should use cpu_active()
  sched, x86: clean up hrtick implementation
  sched: fix build error, provide partition_sched_domains() unconditionally
  sched: fix warning in inc_rt_tasks() to not declare variable 'rq' if it's not needed
  cpu hotplug: Make cpu_active_map synchronization dependency clear
  cpu hotplug, sched: Introduce cpu_active_map and redo sched domain managment (take 2)
  sched: rework of "prioritize non-migratable tasks over migratable ones"
  sched: reduce stack size in isolated_cpu_setup()
  Revert parts of "ftrace: do not trace scheduler functions"

Fixed up conflicts in include/asm-x86/thread_info.h (due to the TIF_SINGLESTEP
unification vs TIF_HRTICK_RESCHED removal) and kernel/sched_fair.c (due to
cpu_active_map vs for_each_cpu_mask_nr() introduction).
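The series this merge pulls in has the scheduler consult the new cpu_active_map (through the cpu_active() test named in the first entry above) rather than cpu_online_map when deciding where tasks may still be placed, so a CPU on its way down stops receiving work before it actually goes offline. A minimal sketch of that kind of check using the cpumask helpers of this kernel generation follows; the function name is illustrative and not part of this merge, and in this series cpu_active(cpu) is essentially the same bit test.

#include <linux/cpumask.h>

/*
 * Illustrative sketch only: report whether a CPU may still receive new
 * work.  A CPU being taken down is cleared from cpu_active_map by
 * cpu_down() (see the diff below) while it is still marked online, so
 * testing the active mask rather than the online mask keeps new tasks
 * off it during the teardown window.
 */
static inline int cpu_may_take_new_tasks(int cpu)
{
	return cpu_isset(cpu, cpu_active_map);
}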
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--	kernel/cpu.c	40
1 files changed, 34 insertions, 6 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d26d0b095b3..2cc409ce0a8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -64,6 +64,8 @@ void __init cpu_hotplug_init(void)
 	cpu_hotplug.refcount = 0;
 }
 
+cpumask_t cpu_active_map;
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 void get_online_cpus(void)
@@ -291,11 +293,30 @@ int __ref cpu_down(unsigned int cpu)
 	int err = 0;
 
 	cpu_maps_update_begin();
-	if (cpu_hotplug_disabled)
+
+	if (cpu_hotplug_disabled) {
 		err = -EBUSY;
-	else
-		err = _cpu_down(cpu, 0);
+		goto out;
+	}
+
+	cpu_clear(cpu, cpu_active_map);
+
+	/*
+	 * Make sure all cpus have gone through a reschedule and are no
+	 * longer using a stale version of the cpu_active_map.
+	 * This is not strictly necessary because the stop_machine() call
+	 * we run further down the line already provides the required
+	 * synchronization. But it's really a side effect and we do not
+	 * want to depend on the innards of stop_machine() here.
+	 */
+	synchronize_sched();
+
+	err = _cpu_down(cpu, 0);
+
+	if (cpu_online(cpu))
+		cpu_set(cpu, cpu_active_map);
 
+out:
 	cpu_maps_update_done();
 	return err;
 }
@@ -355,11 +376,18 @@ int __cpuinit cpu_up(unsigned int cpu)
 	}
 
 	cpu_maps_update_begin();
-	if (cpu_hotplug_disabled)
+
+	if (cpu_hotplug_disabled) {
 		err = -EBUSY;
-	else
-		err = _cpu_up(cpu, 0);
+		goto out;
+	}
+
+	err = _cpu_up(cpu, 0);
+
+	if (cpu_online(cpu))
+		cpu_set(cpu, cpu_active_map);
 
+out:
 	cpu_maps_update_done();
 	return err;
 }
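The comment added to cpu_down() above captures the synchronization dependency the series calls out: clear the CPU's bit in cpu_active_map first, then wait in synchronize_sched() until every CPU has passed through a quiescent state, so that nothing can still be acting on a stale copy of the mask by the time _cpu_down() starts the actual teardown. Below is a self-contained sketch of that reader/writer pairing, under the assumption that readers sample the mask from a non-preemptible section (which counts as an RCU-sched read-side critical section); the function names are illustrative, while cpumask_t, cpu_isset(), cpu_clear(), preempt_disable()/preempt_enable() and synchronize_sched() are the real kernel APIs of this era.

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

/*
 * Reader side (illustrative): test cpu_active_map and act on the result
 * without re-enabling preemption in between.  cpu_down() cannot return
 * from synchronize_sched() until this section has completed.
 */
static int dispatch_if_active(int cpu, void (*fn)(int))
{
	int dispatched = 0;

	preempt_disable();
	if (cpu_isset(cpu, cpu_active_map)) {
		fn(cpu);
		dispatched = 1;
	}
	preempt_enable();

	return dispatched;
}

/*
 * Writer side (illustrative): mirrors the sequence in cpu_down() above.
 * Once synchronize_sched() returns, every reader that might have seen
 * the old mask value has left its non-preemptible section.
 */
static void retire_cpu_from_active_map(int cpu)
{
	cpu_clear(cpu, cpu_active_map);
	synchronize_sched();
	/* safe to proceed with the real teardown, cf. _cpu_down() */
}

This is also why the in-diff comment calls stop_machine()'s own synchronization merely a side effect: the explicit synchronize_sched() makes the dependency visible in cpu_down() itself instead of relying on the internals of _cpu_down().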