 kernel/smp.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 9910744f085..aaeee20c563 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -491,14 +491,15 @@ void smp_call_function_many(const struct cpumask *mask,
 	cpumask_clear_cpu(this_cpu, data->cpumask);
 
 	/*
-	 * To ensure the interrupt handler gets an complete view
-	 * we order the cpumask and refs writes and order the read
-	 * of them in the interrupt handler. In addition we may
-	 * only clear our own cpu bit from the mask.
+	 * We reuse the call function data without waiting for any grace
+	 * period after some other cpu removes it from the global queue.
+	 * This means a cpu might find our data block as it is written.
+	 * The interrupt handler waits until it sees refs filled out
+	 * while its cpu mask bit is set; here we may only clear our
+	 * own cpu mask bit, and must wait to set refs until we are sure
+	 * previous writes are complete and we have obtained the lock to
+	 * add the element to the queue.
 	 */
-	smp_wmb();
-
-	atomic_set(&data->refs, cpumask_weight(data->cpumask));
-
 	raw_spin_lock_irqsave(&call_function.lock, flags);
 
 	/*
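
The protocol the new comment describes is easiest to see in isolation. Below is a minimal user-space sketch, not the kernel code: struct cfd, publish_cfd() and handle_ipi() are names invented here, and C11 release/acquire atomics stand in for the kernel's smp_wmb()/smp_rmb() pairing. It shows why a handler that finds its mask bit set must still skip the block until refs reads non-zero.

/* Minimal sketch of the comment's ordering protocol.  All names are
 * invented for illustration; this is not kernel code. */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

struct cfd {
        void (*func)(void *);
        void *info;
        atomic_ulong cpumask;   /* one bit per cpu (toy-sized)  */
        atomic_int refs;        /* 0 == "entry not ready yet"   */
};

static struct cfd slot;         /* reused without a grace period */

/* Sender: fill the block, then publish by writing refs last.  The
 * release store plays the role of the wmb() inside list_add_rcu():
 * every earlier write is visible before refs reads as non-zero.
 * (__builtin_popcountl is a GCC/Clang builtin.) */
static void publish_cfd(void (*f)(void *), void *i, unsigned long mask)
{
        slot.func = f;
        slot.info = i;
        atomic_store_explicit(&slot.cpumask, mask, memory_order_relaxed);
        atomic_store_explicit(&slot.refs, __builtin_popcountl(mask),
                              memory_order_release);
}

/* Receiver: if our bit is set but refs is still 0, the block is in
 * the middle of being written and must be skipped; once the acquire
 * load sees refs != 0, func and info are guaranteed complete. */
static void *handle_ipi(void *arg)
{
        unsigned long my_bit = 1UL << (long)arg;

        for (;;) {
                if (!(atomic_load_explicit(&slot.cpumask,
                                           memory_order_relaxed) & my_bit))
                        continue;       /* not addressed to us (yet) */
                if (atomic_load_explicit(&slot.refs,
                                         memory_order_acquire) == 0)
                        continue;       /* still being written: skip */
                slot.func(slot.info);
                atomic_fetch_sub(&slot.refs, 1);
                return NULL;
        }
}

static void say(void *msg) { puts(msg); }

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, handle_ipi, (void *)1L);
        publish_cfd(say, "called on cpu 1", 1UL << 1);
        pthread_join(t, NULL);
        return 0;
}

The release store on refs is the single publication point: everything written before it is visible to whoever observes it with an acquire load, which is the same guarantee the patch gets for free from the barrier inside list_add_rcu().
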
@@ -507,6 +508,11 @@ void smp_call_function_many(const struct cpumask *mask,
 	 * will not miss any other list entries:
 	 */
 	list_add_rcu(&data->csd.list, &call_function.queue);
+	/*
+	 * We rely on the wmb() in list_add_rcu to order the writes
+	 * to func, data, and cpumask before this write to refs.
+	 */
+	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
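
The wmb() the second hunk's comment leans on lives inside list_add_rcu(), not in smp.c itself. Below is a toy, compilable approximation of that helper's shape: the list type is a stand-in and smp_wmb() is mapped to a C11 release fence, so treat it as a sketch of where the barrier sits, not as the kernel's implementation.

#include <stdatomic.h>

struct list_head { struct list_head *next, *prev; };

/* Stand-in for the kernel barrier, for illustration only. */
#define smp_wmb() atomic_thread_fence(memory_order_release)

/* Approximate shape of the kernel's __list_add_rcu(): the barrier
 * fires before the new entry becomes reachable from the list. */
static inline void __list_add_rcu(struct list_head *new,
                                  struct list_head *prev,
                                  struct list_head *next)
{
        new->next = next;
        new->prev = prev;
        smp_wmb();              /* the wmb() the comment refers to */
        prev->next = new;       /* publication of the entry        */
        next->prev = new;
}

Because that barrier fires before the entry becomes reachable, it also separates the earlier writes to func, data, and cpumask from everything the caller does afterwards, including the relocated atomic_set(&data->refs, ...). That is how the patch can delete the explicit smp_wmb() while also guaranteeing refs is only set once the entry is on the queue and the lock is held.
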