From 32f139873e89672cbd5b28f5da944201e86fdb92 Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov
Date: Mon, 10 Mar 2014 15:56:51 -0700
Subject: x86: bpf_jit: support negative offsets

commit fdfaf64e75397567257e1051931f9a3377360665 upstream.

Commit a998d4342337 claimed to introduce negative offset support to x86 jit,
but it couldn't be working, since at the time of the execution
of LD+ABS or LD+IND instructions via call into
bpf_internal_load_pointer_neg_helper() the %edx (3rd argument of this func)
had junk value instead of access size in bytes (1 or 2 or 4).

Store size into %edx instead of %ecx (what original commit intended to do)

Fixes: a998d4342337 ("bpf jit: Let the x86 jit handle negative offsets")
Signed-off-by: Alexei Starovoitov
Cc: Jan Seiffert
Cc: Eric Dumazet
Acked-by: Eric Dumazet
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
---
 arch/x86/net/bpf_jit.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 877b9a1b215..01495755701 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
 	push	%r9;					\
 	push	SKBDATA;				\
 /* rsi already has offset */				\
-	mov	$SIZE,%ecx;	/* size */		\
+	mov	$SIZE,%edx;	/* size */		\
 	call	bpf_internal_load_pointer_neg_helper;	\
 	test	%rax,%rax;				\
 	pop	SKBDATA;				\
--
cgit v1.2.3
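
Note on the one-byte fix above: it is a calling-convention bug. In the
x86-64 SysV ABI the first three integer arguments are passed in %rdi,
%rsi and %rdx; %rcx would carry a fourth argument, so a size stored in
%ecx never reached the helper's size parameter. A sketch for
orientation, assuming the helper's prototype from net/core/filter.c of
this era:

	/* net/core/filter.c (3.x): the JIT slow path calls this helper
	 * for negative offsets.  Per the ABI the registers map to
	 *   %rdi = skb, %rsi = k (offset), %edx = size,
	 * so the access size (1, 2 or 4 bytes) must be placed in %edx;
	 * with the old "mov $SIZE,%ecx" the helper saw junk in %edx.
	 */
	void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
						   int k, unsigned int size);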

From c46696c9e2235a4f00aa424509fd3c12ebcfc475 Mon Sep 17 00:00:00 2001
From: Rob Herring
Date: Sat, 17 Aug 2013 20:12:57 -0500
Subject: ARM: move outer_cache declaration out of ifdef

commit 0b53c11d533a8f6688d73fad0baf67dd08ec1b90 upstream.

Move the outer_cache declaration out of the CONFIG_OUTER_CACHE ifdef so
that outer_cache can be used inside an IS_ENABLED condition.

Signed-off-by: Rob Herring
Cc: Russell King
Signed-off-by: Ian Campbell
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/include/asm/outercache.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 12f71a19042..f94784f0e3a 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -37,10 +37,10 @@ struct outer_cache_fns {
 	void (*resume)(void);
 };
 
-#ifdef CONFIG_OUTER_CACHE
-
 extern struct outer_cache_fns outer_cache;
 
+#ifdef CONFIG_OUTER_CACHE
+
 static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
 {
 	if (outer_cache.inv_range)
--
cgit v1.2.3

From b56a587c3780bc27a6ca0f751c59f9cff9e4be43 Mon Sep 17 00:00:00 2001
From: Rob Herring
Date: Sat, 17 Aug 2013 20:10:28 -0500
Subject: ARM: highbank: avoid L2 cache smc calls when PL310 is not present

commit a56a5cf1f2ec895599eace0ac6eba1e4a489e4bf upstream.

While Midway firmware handles L2 smc calls as nops, the custom smc
calls present a problem when running a virtualized Midway guest. They
aren't needed, so just avoid calling them. In the process, clean up the
L2X0 ifdefs and use IS_ENABLED instead.

Signed-off-by: Rob Herring
Signed-off-by: Ian Campbell
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/mach-highbank/highbank.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 5ed19e88874..35d1029d7c9 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -65,14 +65,12 @@ void highbank_set_cpu_jump(int cpu, void *jump_addr)
 			  HB_JUMP_TABLE_PHYS(cpu) + 15);
 }
 
-#ifdef CONFIG_CACHE_L2X0
 static void highbank_l2x0_disable(void)
 {
 	outer_flush_all();
 	/* Disable PL310 L2 Cache controller */
 	highbank_smc1(0x102, 0x0);
 }
-#endif
 
 static void __init highbank_init_irq(void)
 {
@@ -81,12 +79,13 @@ static void __init highbank_init_irq(void)
 	if (of_find_compatible_node(NULL, NULL, "arm,cortex-a9"))
 		highbank_scu_map_io();
 
-#ifdef CONFIG_CACHE_L2X0
 	/* Enable PL310 L2 Cache controller */
-	highbank_smc1(0x102, 0x1);
-	l2x0_of_init(0, ~0UL);
-	outer_cache.disable = highbank_l2x0_disable;
-#endif
+	if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
+	    of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) {
+		highbank_smc1(0x102, 0x1);
+		l2x0_of_init(0, ~0UL);
+		outer_cache.disable = highbank_l2x0_disable;
+	}
 }
 
 static void __init highbank_timer_init(void)
--
cgit v1.2.3

From 9bf49602a4129a4bd97526802c34b5e4e76dbc2e Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti
Date: Thu, 19 Dec 2013 15:28:51 -0200
Subject: KVM: MMU: handle invalid root_hpa at __direct_map

commit 989c6b34f6a9480e397b170cc62237e89bf4fdb9 upstream.

It is possible for __direct_map to be called on invalid root_hpa (-1),
two examples:

1) try_async_pf -> can_do_async_pf -> vmx_interrupt_allowed ->
   nested_vmx_vmexit
2) vmx_handle_exit -> vmx_interrupt_allowed -> nested_vmx_vmexit

Then to load_vmcs12_host_state and kvm_mmu_reset_context.

Check for this possibility, let fault exception be regenerated.

BZ: https://bugzilla.redhat.com/show_bug.cgi?id=924916

Signed-off-by: Marcelo Tosatti
Signed-off-by: Paolo Bonzini
Cc: Josh Boyer
Signed-off-by: Greg Kroah-Hartman
---
 arch/x86/kvm/mmu.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch')

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 004cc87b781..9a4605454af 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2585,6 +2585,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 	int emulate = 0;
 	gfn_t pseudo_gfn;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return 0;
+
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 		if (iterator.level == level) {
 			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
--
cgit v1.2.3
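
Note on the guard added here (and extended by the following patch):
VALID_PAGE() is KVM's existing check against the INVALID_PAGE sentinel,
which is the root_hpa value of -1 named in the commit message. Roughly,
from the KVM headers of this era (a sketch for orientation, not a
quoted hunk):

	/* arch/x86: root_hpa holds INVALID_PAGE, i.e. (hpa_t)-1, when
	 * no shadow root is currently loaded. */
	#define INVALID_PAGE	(~(hpa_t)0)
	#define VALID_PAGE(x)	((x) != INVALID_PAGE)

	/* Hence the added check: bail out before walking shadow page
	 * tables rooted at a non-existent page; the fault is simply
	 * regenerated once a valid root has been (re)loaded. */
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return 0;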

From 0cb2501e5f4bf2d5409fae7bef5f8e7b7a0d015a Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti
Date: Fri, 3 Jan 2014 17:09:32 -0200
Subject: KVM: x86: handle invalid root_hpa everywhere

commit 37f6a4e237303549c8676dfe1fd1991ceab512eb upstream.

Rom Freiman notes other code paths vulnerable to bug fixed by
989c6b34f6a9480e397b.

Signed-off-by: Marcelo Tosatti
Cc: Josh Boyer
Signed-off-by: Greg Kroah-Hartman
---
 arch/x86/kvm/mmu.c         | 9 +++++++++
 arch/x86/kvm/paging_tmpl.h | 8 ++++++++
 2 files changed, 17 insertions(+)

(limited to 'arch')

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9a4605454af..711c649f80b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2751,6 +2751,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	bool ret = false;
 	u64 spte = 0ull;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return false;
+
 	if (!page_fault_can_be_fast(vcpu, error_code))
 		return false;
 
@@ -3142,6 +3145,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
 	struct kvm_shadow_walk_iterator iterator;
 	u64 spte = 0ull;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return spte;
+
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 		if (!is_shadow_present_pte(spte))
@@ -4332,6 +4338,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 	u64 spte;
 	int nr_sptes = 0;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return nr_sptes;
+
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
 		sptes[iterator.level-1] = spte;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index da20860b457..7e6090e1323 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -423,6 +423,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (FNAME(gpte_changed)(vcpu, gw, top_level))
 		goto out_gpte_changed;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		goto out_gpte_changed;
+
 	for (shadow_walk_init(&it, vcpu, addr);
 	     shadow_walk_okay(&it) && it.level > gw->level;
 	     shadow_walk_next(&it)) {
@@ -671,6 +674,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	 */
 	mmu_topup_memory_caches(vcpu);
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+		WARN_ON(1);
+		return;
+	}
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	for_each_shadow_entry(vcpu, gva, iterator) {
 		level = iterator.level;
--
cgit v1.2.3

From 8705bd42c90cde02da72650c6a2ed87b65f2179a Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti
Date: Fri, 3 Jan 2014 17:00:51 -0200
Subject: KVM: VMX: fix use after free of vmx->loaded_vmcs

commit 26a865f4aa8e66a6d94958de7656f7f1b03c6c56 upstream.

After free_loaded_vmcs executes, the "loaded_vmcs" structure is kfreed,
and now vmx->loaded_vmcs points to a kfreed area. Subsequent
free_loaded_vmcs then attempts to manipulate vmx->loaded_vmcs.

Switch the order to avoid the problem.

https://bugzilla.redhat.com/show_bug.cgi?id=1047892

Reviewed-by: Jan Kiszka
Signed-off-by: Marcelo Tosatti
Cc: Josh Boyer
Signed-off-by: Greg Kroah-Hartman
---
 arch/x86/kvm/vmx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5402c94ab76..7cdafb6dc70 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7133,8 +7133,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	free_vpid(vmx);
-	free_nested(vmx);
 	free_loaded_vmcs(vmx->loaded_vmcs);
+	free_nested(vmx);
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
--
cgit v1.2.3
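
Note on the ordering fix above: free_nested() tears down the nested
(vmcs02) state and kfrees the structures holding it, and
vmx->loaded_vmcs can still point into that state when a vcpu is
destroyed while running a nested guest; manipulating it afterwards is
the use after free. A minimal C model of the hazard, with hypothetical
names (a sketch of the pattern, not the kernel's code):

	#include <stdlib.h>

	struct vmcs_like { int loaded; };

	struct vcpu_like {
		struct vmcs_like *active;  /* may point at the nested state */
		struct vmcs_like *nested;  /* freed wholesale on teardown */
	};

	static void unload(struct vmcs_like *v)
	{
		v->loaded = 0;             /* writes through the pointer */
	}

	static void free_nested_like(struct vcpu_like *c)
	{
		free(c->nested);           /* may free what active points at */
		c->nested = NULL;
	}

	static void destroy_buggy(struct vcpu_like *c)
	{
		free_nested_like(c);       /* nested state is gone... */
		unload(c->active);         /* ...use after free if active aliased it */
	}

	static void destroy_fixed(struct vcpu_like *c)
	{
		unload(c->active);         /* touch the pointer while still valid */
		c->active = NULL;
		free_nested_like(c);       /* then drop the nested state */
	}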