author     Alex Shi <alex.shi@linaro.org>  2016-05-24 12:17:43 +0800
committer  Alex Shi <alex.shi@linaro.org>  2016-05-24 12:17:43 +0800
commit     6c1be3bf831c5b1f2ba51425315287df2c350633 (patch)
tree       fd07371829b4b5ea7db114502ac12fd989aa4e2a /kernel
parent     ada3625ee9203002b2cac60be802dae952ae3e22 (diff)
parent     3b6aa07b936b09d38c1bfcee1e06845b968df475 (diff)
download   linux-linaro-stable-6c1be3bf831c5b1f2ba51425315287df2c350633.tar.gz
Merge tag 'v3.18.34' into linux-linaro-lsk-v3.18  (tag: lsk-v3.18-16.05)
This is the 3.18.34 stable release
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/time/timer.c          20
-rw-r--r--  kernel/trace/trace_events.c   9
-rw-r--r--  kernel/workqueue.c           40
3 files changed, 64 insertions(+), 5 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 3260ffdb368f..3c4e3116cdb1 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -956,13 +956,27 @@ EXPORT_SYMBOL(add_timer);
*/
void add_timer_on(struct timer_list *timer, int cpu)
{
- struct tvec_base *base = per_cpu(tvec_bases, cpu);
+ struct tvec_base *new_base = per_cpu(tvec_bases, cpu);
+ struct tvec_base *base;
unsigned long flags;
timer_stats_timer_set_start_info(timer);
BUG_ON(timer_pending(timer) || !timer->function);
- spin_lock_irqsave(&base->lock, flags);
- timer_set_base(timer, base);
+
+ /*
+ * If @timer was on a different CPU, it should be migrated with the
+ * old base locked to prevent other operations from proceeding with
+ * the wrong base locked. See lock_timer_base().
+ */
+ base = lock_timer_base(timer, &flags);
+ if (base != new_base) {
+ timer_set_base(timer, NULL);
+ spin_unlock(&base->lock);
+ base = new_base;
+ spin_lock(&base->lock);
+ timer_set_base(timer, base);
+ }
+
debug_activate(timer, timer->expires);
internal_add_timer(base, timer);
spin_unlock_irqrestore(&base->lock, flags);
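
For context, the migration above leans on lock_timer_base(), which resolves the race between locking a timer's base and the timer migrating away: lock the base the timer currently points at, then re-check that the timer did not move while the lock was being acquired. A simplified sketch of that retry loop (the real 3.18 helper also masks a deferrable flag out of the low bits of timer->base, so this is a paraphrase, not the exact source):

static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
{
	struct tvec_base *base;

	for (;;) {
		base = timer->base;	/* simplified: flag bits masked off in real code */
		if (base != NULL) {
			spin_lock_irqsave(&base->lock, *flags);
			/* Re-check: the timer may have migrated while we
			 * waited for the lock above. */
			if (base == timer->base)
				return base;
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();	/* base is NULL: a migration is in flight */
	}
}

This is also why the hunk sets the base to NULL (timer_set_base(timer, NULL)) before dropping the old lock: a concurrent lock_timer_base() caller spins until the new base has been published under the new base's lock.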
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index b6cd1b653e21..51c47bc832d4 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1584,8 +1584,13 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
trace_create_file("filter", 0644, file->dir, file,
&ftrace_event_filter_fops);
- trace_create_file("trigger", 0644, file->dir, file,
- &event_trigger_fops);
+ /*
+ * Only event directories that can be enabled should have
+ * triggers.
+ */
+ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+ trace_create_file("trigger", 0644, file->dir, file,
+ &event_trigger_fops);
trace_create_file("format", 0444, file->dir, call,
&ftrace_event_format_fops);
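
For comparison, 3.18's event_create_dir() already applied the same TRACE_EVENT_FL_IGNORE_ENABLE guard to the "enable" control file; the hunk above brings "trigger" in line with it. Roughly (paraphrased from the surrounding 3.18 code, not part of this diff):

	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  &ftrace_enable_fops);

An event carrying this flag (the internal ftrace events) has no working registration path, so exposing a trigger file for it invites an oops when a trigger is written.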
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2273f534b01a..0963b7fbc9a6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -623,6 +623,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
*/
smp_wmb();
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+ /*
+ * The following mb guarantees that the previous clear of a PENDING
+ * bit will not be reordered with any speculative LOADS or STORES
+ * from work->current_func, which is executed afterwards. This
+ * possible reordering can lead to a missed execution on an attempt
+ * to queue the same @work. E.g. consider this case:
+ *
+ *   CPU#0                         CPU#1
+ *   ----------------------------  --------------------------------
+ *
+ * 1  STORE event_indicated
+ * 2  queue_work_on() {
+ * 3    test_and_set_bit(PENDING)
+ * 4 }                             set_..._and_clear_pending() {
+ * 5                                 set_work_data() # clear bit
+ * 6                                 smp_mb()
+ * 7                               work->current_func() {
+ * 8                                  LOAD event_indicated
+ *                                 }
+ *
+ * Without an explicit full barrier, the speculative LOAD on line 8
+ * can be executed before CPU#0 does the STORE on line 1. If that
+ * happens, CPU#0 observes that the PENDING bit is still set and the
+ * new execution of @work is not queued, in the hope that CPU#1 will
+ * eventually finish the queued @work. Meanwhile CPU#1 does not see
+ * that event_indicated is set, because the speculative LOAD was
+ * executed before the actual STORE.
+ */
+ smp_mb();
}
static void clear_work_data(struct work_struct *work)
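
The race described in the comment can be reproduced in miniature with C11 atomics. A hypothetical user-space analogue (every name below is illustrative; none of this is kernel code): the seq_cst exchange stands in for test_and_set_bit(), which implies a full barrier, and the fence stands in for the smp_mb() added above.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool pending;		/* analogue of WORK_STRUCT_PENDING */
static atomic_int event_indicated;	/* data the work function inspects */

extern void queue_the_work(void);	/* hypothetical */
extern void handle_event(void);		/* hypothetical */

/* CPU#0: publish the event, then try to queue the work. */
static void producer(void)
{
	atomic_store_explicit(&event_indicated, 1, memory_order_relaxed);
	if (atomic_exchange(&pending, true))	/* seq_cst, like test_and_set_bit() */
		return;	/* already pending: trust the running worker to see it */
	queue_the_work();
}

/* CPU#1: clear PENDING, then run the work function. */
static void consumer(void)
{
	atomic_store_explicit(&pending, false, memory_order_relaxed);
	/* Without this full fence (the analogue of the smp_mb() above),
	 * the load below could be satisfied before the store of 'pending'
	 * becomes visible, and the event would be missed by both sides. */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&event_indicated, memory_order_relaxed))
		handle_event();
}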
@@ -4513,6 +4542,17 @@ static void rebind_workers(struct worker_pool *pool)
pool->attrs->cpumask) < 0);
spin_lock_irq(&pool->lock);
+
+ /*
+ * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
+ * without a preceding DOWN_PREPARE. Work around it. CPU hotplug
+ * is being reworked and this can go away in time.
+ */
+ if (!(pool->flags & POOL_DISASSOCIATED)) {
+ spin_unlock_irq(&pool->lock);
+ return;
+ }
+
pool->flags &= ~POOL_DISASSOCIATED;
for_each_pool_worker(worker, pool) {
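
The quirk this guard tolerates comes from the old notifier-based hotplug callbacks, which could deliver CPU_DOWN_FAILED without a preceding CPU_DOWN_PREPARE. A hypothetical sketch of the defensive pattern against the 3.18-era notifier API (the callback and its cases are illustrative, not the actual workqueue notifier):

#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_hotplug_callback(struct notifier_block *nb,
				    unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/* tear down per-CPU state and mark it disassociated */
		break;
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		/* May arrive without a matching DOWN_PREPARE: check the
		 * "disassociated" mark first and return early when there
		 * is nothing to undo, as the POOL_DISASSOCIATED test in
		 * rebind_workers() above does. */
		break;
	}
	return NOTIFY_OK;
}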