-rw-r--r--  arch/arm64/include/asm/alternative.h |  1 +
-rw-r--r--  arch/arm64/kernel/alternative.c      | 36 +++++++++++++++++++++++++++++++++---
-rw-r--r--  arch/arm64/kernel/smp.c              |  6 ++++++
3 files changed, 40 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 6e1cb8c5af4d..01792d5c95b6 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -19,6 +19,7 @@ struct alt_instr {
u8 alt_len; /* size of new instruction(s), <= orig_len */
};
+void __init apply_alternatives_early(void);
void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 06d650f61da7..27a50cf02356 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -28,6 +28,18 @@
#include <asm/sections.h>
#include <linux/stop_machine.h>
+/*
+ * early-apply features can be detected using only the boot CPU (i.e.
+ * no need to check capability of any secondary CPUs) and, even then,
+ * should only include features where we must patch the kernel very
+ * early in the boot process.
+ *
+ * Note that the cpufeature logic *must* be made aware of early-apply
+ * features to ensure they are reported as enabled without waiting
+ * for other CPUs to boot.
+ */
+#define EARLY_APPLY_FEATURE_MASK BIT(ARM64_HAS_SYSREG_GIC_CPUIF)
+
#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
@@ -105,7 +117,7 @@ static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
return insn;
}
-static void __apply_alternatives(void *alt_region)
+static void __apply_alternatives(void *alt_region, unsigned long feature_mask)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
@@ -115,6 +127,9 @@ static void __apply_alternatives(void *alt_region)
u32 insn;
int i, nr_inst;
+ if ((BIT(alt->cpufeature) & feature_mask) == 0)
+ continue;
+
if (!cpus_have_cap(alt->cpufeature))
continue;
@@ -137,6 +152,21 @@ static void __apply_alternatives(void *alt_region)
}
/*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+void apply_alternatives_early(void)
+{
+ struct alt_region region = {
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
+ };
+
+ __apply_alternatives(&region, EARLY_APPLY_FEATURE_MASK);
+}
+
+/*
* We might be patching the stop_machine state machine, so implement a
* really simple polling protocol here.
*/
@@ -155,7 +185,7 @@ static int __apply_alternatives_multi_stop(void *unused)
isb();
} else {
BUG_ON(patched);
- __apply_alternatives(&region);
+ __apply_alternatives(&region, ~EARLY_APPLY_FEATURE_MASK);
/* Barriers provided by the cache flushing */
WRITE_ONCE(patched, 1);
}
@@ -176,5 +206,5 @@ void apply_alternatives(void *start, size_t length)
.end = start + length,
};
- __apply_alternatives(&region);
+ __apply_alternatives(&region, -1);
}
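
The effect of the new feature_mask argument can be illustrated with a minimal userspace sketch (not part of the patch; the feature numbers below are invented for the example). An entry is patched only when its cpufeature bit is set in the mask, so the early pass selects exactly EARLY_APPLY_FEATURE_MASK, the stop_machine pass selects its complement, and module patching via apply_alternatives() passes -1 to select everything:

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Hypothetical feature numbers, for illustration only. */
enum { FEAT_GIC_CPUIF = 3, FEAT_OTHER = 5 };

#define EARLY_APPLY_FEATURE_MASK BIT(FEAT_GIC_CPUIF)

/* Mirrors the new filter in __apply_alternatives(): skip an entry
 * unless its feature bit is present in feature_mask. */
static int selected(unsigned int cpufeature, unsigned long feature_mask)
{
	return (BIT(cpufeature) & feature_mask) != 0;
}

int main(void)
{
	unsigned int feats[] = { FEAT_GIC_CPUIF, FEAT_OTHER };

	for (int i = 0; i < 2; i++)
		printf("feature %u: early=%d stop_machine=%d module=%d\n",
		       feats[i],
		       selected(feats[i], EARLY_APPLY_FEATURE_MASK),
		       selected(feats[i], ~EARLY_APPLY_FEATURE_MASK),
		       selected(feats[i], -1UL));
	return 0;
}

Running this shows the early-apply feature being handled by the early and module passes but skipped by the stop_machine pass, while every other feature is deferred to stop_machine; no entry is ever patched twice.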
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ef1caae02110..c20dba928c58 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -452,6 +452,12 @@ void __init smp_prepare_boot_cpu(void)
* cpuinfo_store_boot_cpu() above.
*/
update_cpu_errata_workarounds();
+ /*
+ * We now know enough about the boot CPU to apply the
+ * alternatives that cannot wait until interrupt handling
+ * and/or scheduling is enabled.
+ */
+ apply_alternatives_early();
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
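
The polling protocol in __apply_alternatives_multi_stop() above can also be sketched in userspace. This is a loose stand-in, assuming pthreads model CPUs and acquire/release atomics model the ordering the kernel actually gets from cache flushing and isb(): the boot CPU patches while every secondary spins until patched is published.

#include <pthread.h>
#include <stdio.h>

static int patched;	/* mirrors the 'patched' flag in the kernel code */

static void *cpu_thread(void *arg)
{
	long is_boot_cpu = (long)arg;

	if (!is_boot_cpu) {
		/* Secondary CPUs poll until patching is complete
		 * (the kernel uses cpu_relax() and a trailing isb()). */
		while (!__atomic_load_n(&patched, __ATOMIC_ACQUIRE))
			;
		printf("secondary: observed patched=1\n");
	} else {
		/* The boot CPU patches the non-early alternatives,
		 * then publishes completion. */
		printf("boot: applying ~EARLY_APPLY_FEATURE_MASK entries\n");
		__atomic_store_n(&patched, 1, __ATOMIC_RELEASE);
	}
	return NULL;
}

int main(void)
{
	pthread_t cpus[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&cpus[i], NULL, cpu_thread,
			       (void *)(long)(i == 0));
	for (int i = 0; i < 4; i++)
		pthread_join(cpus[i], NULL);
	return 0;
}

Keeping the secondaries spinning inside stop_machine() rather than taking a lock avoids relying on synchronization code that may itself be under patching, which is why the comment in the patch warns that the stop_machine state machine might be patched and settles for a really simple polling protocol.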