author     Mark Brown <broonie@linaro.org>  2014-03-09 07:33:18 +0000
committer  Mark Brown <broonie@linaro.org>  2014-03-09 07:33:18 +0000
commit     561ba47707bf63cac3d97143ab5928353d47459e (patch)
tree       1cdcdc0c6deef63e1822768e4283ce82330c7aa3 /kernel
parent     0a92210a812d913cfb99cb959ec75f27473af664 (diff)
parent     6969595f011b46b49c3f1b9e0bd7da27768c1fd9 (diff)
Merge tag 'v3.10.33' into linux-linaro-lsk
This is the 3.10.33 stable release
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c  12
-rw-r--r--  kernel/workqueue.c     7
2 files changed, 13 insertions, 6 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e76e4959908c..f8eb2b154bdb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7421,14 +7421,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
static void __perf_event_exit_context(void *__info)
{
struct perf_event_context *ctx = __info;
- struct perf_event *event, *tmp;
+ struct perf_event *event;
perf_pmu_rotate_stop(ctx->pmu);
- list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
- __perf_remove_from_context(event);
- list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
+ rcu_read_lock();
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
__perf_remove_from_context(event);
+ rcu_read_unlock();
}
static void perf_event_exit_cpu_context(int cpu)
@@ -7452,11 +7452,11 @@ static void perf_event_exit_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
+ perf_event_exit_cpu_context(cpu);
+
mutex_lock(&swhash->hlist_mutex);
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
-
- perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
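
Note (not part of the patch): a minimal standalone sketch of the RCU-protected list walk the events/core.c hunk above switches to. The names here (my_item, walk_items) are hypothetical and only illustrate the read-side pattern: iterate with list_for_each_entry_rcu() inside an rcu_read_lock()/rcu_read_unlock() section.

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

/* Hypothetical list element; not a structure from this patch. */
struct my_item {
	struct list_head node;
	int data;
};

/* Walk an RCU-protected list without blocking concurrent updaters. */
static void walk_items(struct list_head *head)
{
	struct my_item *item;

	rcu_read_lock();		/* enter read-side critical section */
	list_for_each_entry_rcu(item, head, node)
		pr_info("item data %d\n", item->data);
	rcu_read_unlock();		/* leave read-side critical section */
}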
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 917fbdea97f7..6188aafe2594 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1836,6 +1836,12 @@ static void destroy_worker(struct worker *worker)
if (worker->flags & WORKER_IDLE)
pool->nr_idle--;
+ /*
+ * Once WORKER_DIE is set, the kworker may destroy itself at any
+ * point. Pin to ensure the task stays until we're done with it.
+ */
+ get_task_struct(worker->task);
+
list_del_init(&worker->entry);
worker->flags |= WORKER_DIE;
@@ -1844,6 +1850,7 @@ static void destroy_worker(struct worker *worker)
spin_unlock_irq(&pool->lock);
kthread_stop(worker->task);
+ put_task_struct(worker->task);
kfree(worker);
spin_lock_irq(&pool->lock);
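
Note (not part of the patch): a minimal sketch of the task-pinning pattern destroy_worker() applies above, with hypothetical names (my_worker, stop_my_worker). Taking a reference with get_task_struct() keeps the task_struct valid across kthread_stop() even if the kthread exits on its own first; put_task_struct() drops that reference afterwards.

#include <linux/kthread.h>
#include <linux/sched.h>	/* get_task_struct()/put_task_struct() on v3.10-era kernels */
#include <linux/slab.h>

/* Hypothetical wrapper around a kernel thread; not a structure from this patch. */
struct my_worker {
	struct task_struct *task;
};

static void stop_my_worker(struct my_worker *w)
{
	get_task_struct(w->task);	/* pin: the thread may exit before kthread_stop() runs */
	kthread_stop(w->task);		/* ask the thread to stop and wait for it */
	put_task_struct(w->task);	/* drop our reference; the task_struct may now be freed */
	kfree(w);
}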