author	Marc Zyngier <marc.zyngier@arm.com>	2018-02-06 17:56:15 +0000
committer	Alex Shi <alex.shi@linaro.org>	2018-03-07 13:15:36 +0800
commit	59e728fb3d1a33275e9fddf7b2c2b94ea7f1ac10 (patch)
tree	6b7c161e2682fcd7f02b8fae148434d72e339c55
parent	6afb0cb416917414dd0ea4160bcacc3b5a6a858e (diff)
arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
commit f72af90c3783 upstream.

We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible. So
let's intercept it as early as we can by testing for the function call
number as soon as we've identified a HVC call coming from the guest.

Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Alex Shi <alex.shi@linaro.org>
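For context, a guest that has negotiated SMCCC v1.1 invokes this
workaround with an HVC carrying the function ID
ARM_SMCCC_ARCH_WORKAROUND_1 (0x80008000) in w0; that is the call the
fast path below intercepts. A minimal sketch of the guest side,
modelled on the branch-predictor hardening callbacks in
arch/arm64/kernel/cpu_errata.c (the helper name here is illustrative):

	#include <linux/arm-smccc.h>

	/* Hypothetical guest-side helper, in the style of
	 * call_hvc_arch_workaround_1(). */
	static void invoke_workaround_hvc(void)
	{
		/*
		 * Traps to EL2 as an HVC exception; the hypervisor's
		 * el1_hvc_guest path below returns without a full exit.
		 */
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
	}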
-rw-r--r--	arch/arm64/kvm/hyp/hyp-entry.S	| 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 4e92399f7105..4e9d50c3e658 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
@@ -79,10 +80,11 @@ alternative_endif
 	lsr	x0, x1, #ESR_ELx_EC_SHIFT
 
 	cmp	x0, #ESR_ELx_EC_HVC64
+	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap
 
-	mrs	x1, vttbr_el2		// If vttbr is valid, the 64bit guest
-	cbnz	x1, el1_trap		// called HVC
+	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
+	cbnz	x1, el1_hvc_guest	// called HVC
 
 	/* Here, we're pretty sure the host called HVC. */
 	ldp	x0, x1, [sp], #16
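The new ccmp folds the 32-bit check into the existing 64-bit one
without a second branch: when the preceding cmp already matched, ccmp
forces the flags to #4 (Z set, i.e. "equal"); otherwise it compares x0
against ESR_ELx_EC_HVC32. A rough C equivalent of the cmp/ccmp/b.ne
triple (illustrative only, with ec standing in for x0):

	/* Take the trap path unless the exception class is an HVC. */
	if (ec != ESR_ELx_EC_HVC64 && ec != ESR_ELx_EC_HVC32)
		goto el1_trap;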
@@ -101,6 +103,20 @@ alternative_endif
 
 2:	eret
 
+el1_hvc_guest:
+	/*
+	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
+	 * The workaround has already been applied on the host,
+	 * so let's quickly get back to the guest. We don't bother
+	 * restoring x1, as it can be clobbered anyway.
+	 */
+	ldr	x1, [sp]				// Guest's x0
+	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+	cbnz	w1, el1_trap
+	mov	x0, x1
+	add	sp, sp, #16
+	eret
+
 el1_trap:
 	/*
 	 * x0: ESR_EC
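In C terms, the added el1_hvc_guest block does roughly the following (a
sketch; it assumes, as the surrounding code does, that the guest's x0
and x1 were pushed on the stack by the exception vector, and regs and
return_to_guest are illustrative names, not kernel APIs):

	u32 func_id = *(u32 *)sp;	/* reload the guest's saved x0 */
	if (func_id != ARM_SMCCC_ARCH_WORKAROUND_1)
		goto el1_trap;		/* not the workaround call */
	regs->x0 = SMCCC_RET_SUCCESS;	/* eor left w1 == 0 on a match */
	sp += 16;			/* drop the saved x0/x1 pair */
	return_to_guest();		/* eret */

The mov x0, x1 looks odd at first sight, but at that point the eor has
left x1 at zero precisely because the function ID matched, so the guest
gets SMCCC_RET_SUCCESS (0) back without reloading any register.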