author     Paul Mackerras <paulus@ozlabs.org>      2017-08-30 14:12:39 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>   2017-09-01 16:42:41 +1000
commit     a53d5182e24c22986ad0e99e52f8fe343ee7d7ac (patch)
tree       83ef99241cf94c354d014bc343c3a8e3b493cf57 /arch
parent     d955189ae42796621fb439e5e778ccaeebc2a1e7 (diff)
powerpc: Separate out load/store emulation into its own function
This moves the parts of emulate_step() that deal with emulating load
and store instructions into a new function called emulate_loadstore().
This is to make it possible to reuse this code in the alignment handler.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
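[Editor's note: a minimal sketch of the intended reuse. The caller below is
hypothetical and not part of this patch; it only shows how an alignment
handler could feed the faulting instruction through analyse_instr() and hand
any load or store to the new helper.]

    /*
     * Hypothetical caller, sketched for illustration only.
     */
    static int fix_alignment_sketch(struct pt_regs *regs)
    {
    	struct instruction_op op;
    	unsigned int instr;
    	int r;

    	/* Fetch the instruction that took the alignment interrupt. */
    	if (__get_user(instr, (unsigned int __user *)regs->nip))
    		return -EFAULT;

    	r = analyse_instr(&op, regs, instr);
    	if (r != 0)
    		return -EINVAL;		/* not a plain load/store */
    	if (!OP_IS_LOAD_STORE(op.type & INSTR_TYPE_MASK))
    		return -EINVAL;

    	/* On success the caller still has to advance regs->nip. */
    	return emulate_loadstore(regs, &op);
    }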
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/sstep.h |   9
-rw-r--r--  arch/powerpc/lib/sstep.c         | 258
2 files changed, 154 insertions(+), 113 deletions(-)
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index 958c2c55bcfe..309d1c5de143 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -152,6 +152,15 @@ void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
*/
extern int emulate_step(struct pt_regs *regs, unsigned int instr);
+/*
+ * Emulate a load or store instruction by reading/writing the
+ * memory of the current process. FP/VMX/VSX registers are assumed
+ * to hold live values if the appropriate enable bit in regs->msr is
+ * set; otherwise this will use the saved values in the thread struct
+ * for user-mode accesses.
+ */
+extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
+
extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
const void *mem, bool cross_endian);
extern void emulate_vsx_store(struct instruction_op *op,
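[Editor's note: the error codes documented for emulate_loadstore() in sstep.c
below map naturally onto fault handling. The function and the signal policy
sketched here are illustrative assumptions, not part of this patch.]

    /*
     * Illustrative sketch only: one way a caller might act on
     * emulate_loadstore()'s return values.
     */
    static int act_on_emulation(struct pt_regs *regs, struct instruction_op *op)
    {
    	int err = emulate_loadstore(regs, op);

    	switch (err) {
    	case 0:
    		/* Emulated successfully: step past the instruction. */
    		regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
    		return 0;
    	case -EFAULT:
    		/* Address out of range or faulted; regs->dar holds it. */
    		return err;	/* e.g. take the page-fault path */
    	case -EACCES:
    		/* Misaligned access the emulator cannot handle. */
    		return err;	/* e.g. deliver SIGBUS */
    	default:
    		/* -EINVAL: unknown operation in *op. */
    		return err;
    	}
    }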
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5d8284938898..423815599063 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -2667,76 +2667,35 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
}
/*
- * Emulate instructions that cause a transfer of control,
- * loads and stores, and a few other instructions.
- * Returns 1 if the step was emulated, 0 if not,
- * or -1 if the instruction is one that should not be stepped,
- * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ * Emulate a previously-analysed load or store instruction.
+ * Return values are:
+ * 0 = instruction emulated successfully
+ * -EFAULT = address out of range or access faulted (regs->dar
+ * contains the faulting address)
+ * -EACCES = misaligned access, instruction requires alignment
+ * -EINVAL = unknown operation in *op
*/
-int emulate_step(struct pt_regs *regs, unsigned int instr)
+int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
{
- struct instruction_op op;
- int r, err, size, type;
- unsigned long val;
- unsigned int cr;
+ int err, size, type;
int i, rd, nb;
+ unsigned int cr;
+ unsigned long val;
unsigned long ea;
bool cross_endian;
- r = analyse_instr(&op, regs, instr);
- if (r < 0)
- return r;
- if (r > 0) {
- emulate_update_regs(regs, &op);
- return 1;
- }
-
err = 0;
- size = GETSIZE(op.type);
- type = op.type & INSTR_TYPE_MASK;
+ size = GETSIZE(op->type);
+ type = op->type & INSTR_TYPE_MASK;
cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
-
- ea = op.ea;
- if (OP_IS_LOAD_STORE(type) || type == CACHEOP)
- ea = truncate_if_32bit(regs->msr, op.ea);
+ ea = truncate_if_32bit(regs->msr, op->ea);
switch (type) {
- case CACHEOP:
- if (!address_ok(regs, ea, 8))
- return 0;
- switch (op.type & CACHEOP_MASK) {
- case DCBST:
- __cacheop_user_asmx(ea, err, "dcbst");
- break;
- case DCBF:
- __cacheop_user_asmx(ea, err, "dcbf");
- break;
- case DCBTST:
- if (op.reg == 0)
- prefetchw((void *) ea);
- break;
- case DCBT:
- if (op.reg == 0)
- prefetch((void *) ea);
- break;
- case ICBI:
- __cacheop_user_asmx(ea, err, "icbi");
- break;
- case DCBZ:
- err = emulate_dcbz(ea, regs);
- break;
- }
- if (err) {
- regs->dar = ea;
- return 0;
- }
- goto instr_done;
-
case LARX:
if (ea & (size - 1))
- break; /* can't handle misaligned */
+ return -EACCES; /* can't handle misaligned */
if (!address_ok(regs, ea, size))
- return 0;
+ return -EFAULT;
err = 0;
switch (size) {
#ifdef __powerpc64__
@@ -2755,49 +2714,49 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
__get_user_asmx(val, ea, err, "ldarx");
break;
case 16:
- err = do_lqarx(ea, &regs->gpr[op.reg]);
+ err = do_lqarx(ea, &regs->gpr[op->reg]);
break;
#endif
default:
- return 0;
+ return -EINVAL;
}
if (err) {
regs->dar = ea;
- return 0;
+ break;
}
if (size < 16)
- regs->gpr[op.reg] = val;
- goto ldst_done;
+ regs->gpr[op->reg] = val;
+ break;
case STCX:
if (ea & (size - 1))
- break; /* can't handle misaligned */
+ return -EACCES; /* can't handle misaligned */
if (!address_ok(regs, ea, size))
- return 0;
+ return -EFAULT;
err = 0;
switch (size) {
#ifdef __powerpc64__
case 1:
- __put_user_asmx(op.val, ea, err, "stbcx.", cr);
+ __put_user_asmx(op->val, ea, err, "stbcx.", cr);
break;
case 2:
- __put_user_asmx(op.val, ea, err, "stbcx.", cr);
+ __put_user_asmx(op->val, ea, err, "stbcx.", cr);
break;
#endif
case 4:
- __put_user_asmx(op.val, ea, err, "stwcx.", cr);
+ __put_user_asmx(op->val, ea, err, "stwcx.", cr);
break;
#ifdef __powerpc64__
case 8:
- __put_user_asmx(op.val, ea, err, "stdcx.", cr);
+ __put_user_asmx(op->val, ea, err, "stdcx.", cr);
break;
case 16:
- err = do_stqcx(ea, regs->gpr[op.reg],
- regs->gpr[op.reg + 1], &cr);
+ err = do_stqcx(ea, regs->gpr[op->reg],
+ regs->gpr[op->reg + 1], &cr);
break;
#endif
default:
- return 0;
+ return -EINVAL;
}
if (!err)
regs->ccr = (regs->ccr & 0x0fffffff) |
@@ -2805,23 +2764,23 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
((regs->xer >> 3) & 0x10000000);
else
regs->dar = ea;
- goto ldst_done;
+ break;
case LOAD:
#ifdef __powerpc64__
if (size == 16) {
- err = emulate_lq(regs, ea, op.reg, cross_endian);
- goto ldst_done;
+ err = emulate_lq(regs, ea, op->reg, cross_endian);
+ break;
}
#endif
- err = read_mem(&regs->gpr[op.reg], ea, size, regs);
+ err = read_mem(&regs->gpr[op->reg], ea, size, regs);
if (!err) {
- if (op.type & SIGNEXT)
- do_signext(&regs->gpr[op.reg], size);
- if ((op.type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
- do_byterev(&regs->gpr[op.reg], size);
+ if (op->type & SIGNEXT)
+ do_signext(&regs->gpr[op->reg], size);
+ if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
+ do_byterev(&regs->gpr[op->reg], size);
}
- goto ldst_done;
+ break;
#ifdef CONFIG_PPC_FPU
case LOAD_FP:
@@ -2833,15 +2792,15 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
*/
if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
return 0;
- err = do_fp_load(op.reg, ea, size, regs, cross_endian);
- goto ldst_done;
+ err = do_fp_load(op->reg, ea, size, regs, cross_endian);
+ break;
#endif
#ifdef CONFIG_ALTIVEC
case LOAD_VMX:
if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
return 0;
- err = do_vec_load(op.reg, ea, size, regs, cross_endian);
- goto ldst_done;
+ err = do_vec_load(op->reg, ea, size, regs, cross_endian);
+ break;
#endif
#ifdef CONFIG_VSX
case LOAD_VSX: {
@@ -2851,18 +2810,18 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
* Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
* when the target of the instruction is a vector register.
*/
- if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
+ if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
msrbit = MSR_VEC;
if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
return 0;
- err = do_vsx_load(&op, ea, regs, cross_endian);
- goto ldst_done;
+ err = do_vsx_load(op, ea, regs, cross_endian);
+ break;
}
#endif
case LOAD_MULTI:
if (!address_ok(regs, ea, size))
return -EFAULT;
- rd = op.reg;
+ rd = op->reg;
for (i = 0; i < size; i += 4) {
unsigned int v32 = 0;
@@ -2871,47 +2830,47 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
nb = 4;
err = copy_mem_in((u8 *) &v32, ea, nb, regs);
if (err)
- return 0;
+ break;
if (unlikely(cross_endian))
v32 = byterev_4(v32);
regs->gpr[rd] = v32;
ea += 4;
++rd;
}
- goto instr_done;
+ break;
case STORE:
#ifdef __powerpc64__
if (size == 16) {
- err = emulate_stq(regs, ea, op.reg, cross_endian);
- goto ldst_done;
+ err = emulate_stq(regs, ea, op->reg, cross_endian);
+ break;
}
#endif
- if ((op.type & UPDATE) && size == sizeof(long) &&
- op.reg == 1 && op.update_reg == 1 &&
+ if ((op->type & UPDATE) && size == sizeof(long) &&
+ op->reg == 1 && op->update_reg == 1 &&
!(regs->msr & MSR_PR) &&
ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
err = handle_stack_update(ea, regs);
- goto ldst_done;
+ break;
}
if (unlikely(cross_endian))
- do_byterev(&op.val, size);
- err = write_mem(op.val, ea, size, regs);
- goto ldst_done;
+ do_byterev(&op->val, size);
+ err = write_mem(op->val, ea, size, regs);
+ break;
#ifdef CONFIG_PPC_FPU
case STORE_FP:
if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
return 0;
- err = do_fp_store(op.reg, ea, size, regs, cross_endian);
- goto ldst_done;
+ err = do_fp_store(op->reg, ea, size, regs, cross_endian);
+ break;
#endif
#ifdef CONFIG_ALTIVEC
case STORE_VMX:
if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
return 0;
- err = do_vec_store(op.reg, ea, size, regs, cross_endian);
- goto ldst_done;
+ err = do_vec_store(op->reg, ea, size, regs, cross_endian);
+ break;
#endif
#ifdef CONFIG_VSX
case STORE_VSX: {
@@ -2921,18 +2880,18 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
* Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
* when the target of the instruction is a vector register.
*/
- if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
+ if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
msrbit = MSR_VEC;
if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
return 0;
- err = do_vsx_store(&op, ea, regs, cross_endian);
- goto ldst_done;
+ err = do_vsx_store(op, ea, regs, cross_endian);
+ break;
}
#endif
case STORE_MULTI:
if (!address_ok(regs, ea, size))
return -EFAULT;
- rd = op.reg;
+ rd = op->reg;
for (i = 0; i < size; i += 4) {
unsigned int v32 = regs->gpr[rd];
@@ -2943,10 +2902,89 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
v32 = byterev_4(v32);
err = copy_mem_out((u8 *) &v32, ea, nb, regs);
if (err)
- return 0;
+ break;
ea += 4;
++rd;
}
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (err)
+ return err;
+
+ if (op->type & UPDATE)
+ regs->gpr[op->update_reg] = op->ea;
+
+ return 0;
+}
+NOKPROBE_SYMBOL(emulate_loadstore);
+
+/*
+ * Emulate instructions that cause a transfer of control,
+ * loads and stores, and a few other instructions.
+ * Returns 1 if the step was emulated, 0 if not,
+ * or -1 if the instruction is one that should not be stepped,
+ * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ */
+int emulate_step(struct pt_regs *regs, unsigned int instr)
+{
+ struct instruction_op op;
+ int r, err, type;
+ unsigned long val;
+ unsigned long ea;
+
+ r = analyse_instr(&op, regs, instr);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+ emulate_update_regs(regs, &op);
+ return 1;
+ }
+
+ err = 0;
+ type = op.type & INSTR_TYPE_MASK;
+
+ if (OP_IS_LOAD_STORE(type)) {
+ err = emulate_loadstore(regs, &op);
+ if (err)
+ return 0;
+ goto instr_done;
+ }
+
+ switch (type) {
+ case CACHEOP:
+ ea = truncate_if_32bit(regs->msr, op.ea);
+ if (!address_ok(regs, ea, 8))
+ return 0;
+ switch (op.type & CACHEOP_MASK) {
+ case DCBST:
+ __cacheop_user_asmx(ea, err, "dcbst");
+ break;
+ case DCBF:
+ __cacheop_user_asmx(ea, err, "dcbf");
+ break;
+ case DCBTST:
+ if (op.reg == 0)
+ prefetchw((void *) ea);
+ break;
+ case DCBT:
+ if (op.reg == 0)
+ prefetch((void *) ea);
+ break;
+ case ICBI:
+ __cacheop_user_asmx(ea, err, "icbi");
+ break;
+ case DCBZ:
+ err = emulate_dcbz(ea, regs);
+ break;
+ }
+ if (err) {
+ regs->dar = ea;
+ return 0;
+ }
goto instr_done;
case MFMSR:
@@ -2989,12 +3027,6 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
}
return 0;
- ldst_done:
- if (err)
- return 0;
- if (op.type & UPDATE)
- regs->gpr[op.update_reg] = op.ea;
-
instr_done:
regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
return 1;
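
[Editor's note: taken together, the hunks above leave emulate_step() with a
single load/store dispatch. The fragment below is condensed from the diff to
show the resulting flow; it is a paraphrase, not new behaviour.]

    /* Condensed from the hunks above: the resulting emulate_step() flow. */
    r = analyse_instr(&op, regs, instr);
    if (r < 0)
    	return r;		/* e.g. rfid: must not be stepped */
    if (r > 0) {
    	emulate_update_regs(regs, &op);
    	return 1;		/* registers already updated */
    }
    type = op.type & INSTR_TYPE_MASK;
    if (OP_IS_LOAD_STORE(type)) {
    	if (emulate_loadstore(regs, &op))
    		return 0;	/* map errno back to the old 0-on-failure */
    	goto instr_done;	/* advance regs->nip and return 1 */
    }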