Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 11
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c  | 21
-rw-r--r--  arch/powerpc/mm/slb_low.S     | 10
-rw-r--r--  arch/powerpc/mm/slice.c       |  2
4 files changed, 43 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7c053f281406..1138fec3dd65 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -514,6 +514,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 #endif
 #endif
 
+	/*
+	 * jprobes use jprobe_return() which skips the normal return
+	 * path of the function, and this messes up the accounting of the
+	 * function graph tracer.
+	 *
+	 * Pause function graph tracing while performing the jprobe function.
+	 */
+	pause_graph_tracing();
+
 	return 1;
 }
 
@@ -536,6 +545,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	 * saved regs...
 	 */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* It's OK to start function graph tracing again */
+	unpause_graph_tracing();
 	preempt_enable_no_resched();
 	return 1;
 }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3c3a367b6e59..396dc44e783b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2693,6 +2693,27 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+
 	kvmppc_core_prepare_to_enter(vcpu);
 
 	/* No need to go into the guest when all we'll do is come back out */
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 4c48b487698c..0b48ce40d351 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -179,6 +179,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
+	/*
+	 * It's possible the bad EA is too large to fit in the SLB cache, which
+	 * would mean we'd fail to invalidate it on context switch. So mark the
+	 * SLB cache as full so we force a full flush. We also set cr7+eq to
+	 * mark the address as a kernel address, so slb_finish_load() skips
+	 * trying to insert it into the SLB cache.
+	 */
+	li	r9,SLB_CACHE_ENTRIES + 1
+	sth	r9,PACASLBCACHEPTR(r13)
+	crset	4*cr7+eq
 	li	r10,0			/* BAD_VSID */
 	li	r9,0			/* BAD_VSID */
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 0f432a702870..6ad12b244770 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
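For context on the kprobes.c hunk above: pause_graph_tracing() and unpause_graph_tracing() are small helpers from include/linux/ftrace.h. As a sketch rather than a verbatim quote of this tree, they bump a per-task pause counter that the function graph tracer consults before recording an entry:

/*
 * Sketch of the helpers used in the kprobes.c hunk. The function
 * graph tracer skips any task whose pause count is non-zero, so
 * bracketing the jprobe with these calls prevents the non-standard
 * return path taken by jprobe_return() from corrupting the tracer's
 * call-stack accounting.
 */
static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}

Because the counter is per-task and atomic, each pause must be matched by exactly one unpause, which is why longjmp_break_handler() - the guaranteed exit path of the jprobe - is the natural place for the latter.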
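The book3s_hv.c hunk keys off the MSR transaction-state (TS) bits. Paraphrasing the predicates from arch/powerpc/include/asm/reg.h (an approximation from memory, not a verbatim copy):

/*
 * The two MSR[TS] bits encode the transaction state. MSR_TM_ACTIVE()
 * is true in both the transactional and the suspended state; either
 * would be destroyed across guest entry/exit, hence the
 * KVM_EXIT_FAIL_ENTRY bail-out above when a transaction is active.
 */
#define MSR_TM_TRANSACTIONAL(x)	(((x) & MSR_TS_MASK) == MSR_TS_T)
#define MSR_TM_SUSPENDED(x)	(((x) & MSR_TS_MASK) == MSR_TS_S)
#define MSR_TM_ACTIVE(x)	(((x) & MSR_TS_MASK) != 0)

When no transaction is active but MSR_TM is set, the hunk saves TFHAR, TFIAR and TEXASR and clears MSR_TM, so the next TM use raises a TM-unavailable interrupt, where (per the comment in the hunk) the saved SPRs get restored.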
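The slb_low.S hunk works by overstating how full the SLB cache is. A heavily condensed sketch of the consumer side in arch/powerpc/mm/slb.c follows; this is an assumption about the shape of that code, and slbie_data_for() is a hypothetical stand-in for the real EA/segment-size packing:

/*
 * Condensed sketch of the context-switch logic: if the cache pointer
 * is within bounds, only the user segments recorded since the last
 * switch are invalidated individually; a pointer past
 * SLB_CACHE_ENTRIES means the cache overflowed, so the whole SLB is
 * flushed and the bolted kernel entries re-inserted. Storing
 * SLB_CACHE_ENTRIES + 1 from the bad-EA path therefore guarantees
 * the faulting entry cannot survive the next switch.
 */
if (get_paca()->slb_cache_ptr <= SLB_CACHE_ENTRIES) {
	int i;

	for (i = 0; i < get_paca()->slb_cache_ptr; i++)
		asm volatile("slbie %0" : : "r" (slbie_data_for(i)));
} else {
	slb_flush_and_rebolt();	/* full flush + re-bolt kernel entries */
}
get_paca()->slb_cache_ptr = 0;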
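The slice.c change mirrors the generic stack guard gap rework: vm_start_gap() returns where a VMA effectively begins once the enforced gap below a grows-down stack is accounted for, so a candidate area that merely abuts the guard region is no longer reported as free. Its definition in include/linux/mm.h looks approximately like this (quoted from memory, treat as a reference sketch):

/*
 * Effective start of a VMA for overlap checks: for a grows-down
 * stack, back off by stack_guard_gap, clamping at zero on underflow.
 */
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}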