author     Jacob Bramley <jacob.bramley@arm.com>    2018-08-23 17:45:37 +0100
committer  Jacob Bramley <jacob.bramley@arm.com>    2018-10-12 09:51:36 +0100
commit     8f36e7f6aa3fcb4ae765ab527904e32af2224c38 (patch)
tree       f98df6fb9074f50f1cea2c2cc1b65bdb96634da2 /src
parent     385eb9037db9b478295ca22e2cf2de2e74c32aec (diff)
Add support for the FHM extension.
This includes all forms of FMLAL, FMLAL2, FMLSL and FMLSL2.

Simulator tests are included, but the traces were generated with the simulator itself. Later, we'll test these on hardware to verify that they are correct. In the meantime, some basic Assembler tests are included as a sanity check.

Change-Id: If41a8c7041a9995fbe268051896a2439de5f841d
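For orientation, here is a minimal usage sketch of the MacroAssembler entry points added by this patch. It is not part of the commit: the register-format helpers (V2S(), V2H(), H(), ...) and the GetCPUFeatures()->Combine() call are assumed from the surrounding VIXL API, and only the operand constraints are taken from the assembler checks in the diff below (2S<-2H*2H or 4S<-4H*4H vector forms; for by-element forms, an H-sized Vm in v0-v15 and an index of 0-7).

// A minimal usage sketch (not part of this patch). It assumes the usual VIXL
// setup and the Fmlal/Fmlal2/Fmlsl/Fmlsl2 macros added below.
#include "aarch64/macro-assembler-aarch64.h"

using namespace vixl::aarch64;

void EmitFhmExamples(MacroAssembler* masm) {
  // FMLAL{2}/FMLSL{2} require NEON, FP, FP16 (NEONHalf) and FHM.
  masm->GetCPUFeatures()->Combine(CPUFeatures::kNEON,
                                  CPUFeatures::kFP,
                                  CPUFeatures::kNEONHalf,
                                  CPUFeatures::kFHM);

  // Vector forms: half-precision products accumulated into single-precision
  // lanes. Only 2S <- 2H * 2H and 4S <- 4H * 4H are accepted.
  masm->Fmlal(v0.V2S(), v1.V2H(), v2.V2H());
  masm->Fmlsl2(v3.V4S(), v4.V4H(), v5.V4H());

  // By-element forms: Vm is an H-sized register in v0-v15, with an
  // index of 0-7 (encoded in H:L:M).
  masm->Fmlal2(v6.V4S(), v7.V4H(), v8.H(), 3);
  masm->Fmlsl(v9.V2S(), v10.V2H(), v15.H(), 7);

  masm->FinalizeCode();
}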
Diffstat (limited to 'src')
-rw-r--r--  src/aarch64/assembler-aarch64.cc             |  53
-rw-r--r--  src/aarch64/assembler-aarch64.h              |  36
-rw-r--r--  src/aarch64/constants-aarch64.h              |  43
-rw-r--r--  src/aarch64/cpu-features-auditor-aarch64.cc  |  24
-rw-r--r--  src/aarch64/disasm-aarch64.cc                | 170
-rw-r--r--  src/aarch64/instructions-aarch64.h           |   3
-rw-r--r--  src/aarch64/logic-aarch64.cc                 | 144
-rw-r--r--  src/aarch64/macro-assembler-aarch64.h        |   8
-rw-r--r--  src/aarch64/simulator-aarch64.cc             |  48
-rw-r--r--  src/aarch64/simulator-aarch64.h              |  37
10 files changed, 494 insertions(+), 72 deletions(-)
diff --git a/src/aarch64/assembler-aarch64.cc b/src/aarch64/assembler-aarch64.cc
index f2a07785..53f3c112 100644
--- a/src/aarch64/assembler-aarch64.cc
+++ b/src/aarch64/assembler-aarch64.cc
@@ -3556,6 +3556,30 @@ NEON_FP3SAME_OP_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC
+// clang-format off
+#define NEON_FHM_LIST(V) \
+ V(fmlal, NEON_FMLAL) \
+ V(fmlal2, NEON_FMLAL2) \
+ V(fmlsl, NEON_FMLSL) \
+ V(fmlsl2, NEON_FMLSL2)
+// clang-format on
+
+#define DEFINE_ASM_FUNC(FN, VEC_OP) \
+ void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, \
+ CPUFeatures::kFP, \
+ CPUFeatures::kNEONHalf, \
+ CPUFeatures::kFHM)); \
+ VIXL_ASSERT((vd.Is2S() && vn.Is2H() && vm.Is2H()) || \
+ (vd.Is4S() && vn.Is4H() && vm.Is4H())); \
+ Emit(FPFormat(vd) | VEC_OP | Rm(vm) | Rn(vn) | Rd(vd)); \
+ }
+NEON_FHM_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
void Assembler::addp(const VRegister& vd, const VRegister& vn) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT((vd.Is1D() && vn.Is2D()));
@@ -4034,6 +4058,35 @@ NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC
+// clang-format off
+#define NEON_BYELEMENT_FHM_LIST(V) \
+ V(fmlal, NEON_FMLAL_H_byelement) \
+ V(fmlal2, NEON_FMLAL2_H_byelement) \
+ V(fmlsl, NEON_FMLSL_H_byelement) \
+ V(fmlsl2, NEON_FMLSL2_H_byelement)
+// clang-format on
+
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+ void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index) { \
+ VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, \
+ CPUFeatures::kFP, \
+ CPUFeatures::kNEONHalf, \
+ CPUFeatures::kFHM)); \
+ VIXL_ASSERT((vd.Is2S() && vn.Is2H()) || (vd.Is4S() && vn.Is4H())); \
+ VIXL_ASSERT(vm.IsH()); \
+ VIXL_ASSERT((vm_index >= 0) && (vm_index < 8)); \
+ /* Vm itself can only be in the bottom 16 registers. */ \
+ VIXL_ASSERT(vm.GetCode() < 16); \
+ Emit(FPFormat(vd) | OP | Rd(vd) | Rn(vn) | Rm(vm) | \
+ ImmNEONHLM(vm_index, 3)); \
+ }
+NEON_BYELEMENT_FHM_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
void Assembler::suqadd(const VRegister& vd, const VRegister& vn) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
NEON2RegMisc(vd, vn, NEON_SUQADD);
diff --git a/src/aarch64/assembler-aarch64.h b/src/aarch64/assembler-aarch64.h
index 7d954665..b583089f 100644
--- a/src/aarch64/assembler-aarch64.h
+++ b/src/aarch64/assembler-aarch64.h
@@ -3381,9 +3381,45 @@ class Assembler : public vixl::internal::AssemblerBase {
// FP vector multiply accumulate.
void fmla(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+ // FP fused multiply-add long to accumulator.
+ void fmlal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP fused multiply-add long to accumulator (second part).
+ void fmlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP fused multiply-add long to accumulator by element.
+ void fmlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP fused multiply-add long to accumulator by element (second part).
+ void fmlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
// FP vector multiply subtract.
void fmls(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+ // FP fused multiply-subtract long to accumulator.
+ void fmlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP fused multiply-subtract long to accumulator (second part).
+ void fmlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP fused multiply-subtract long to accumulator by element.
+ void fmlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP fused multiply-subtract long to accumulator by element (second part).
+ void fmlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
// FP vector multiply extended.
void fmulx(const VRegister& vd, const VRegister& vn, const VRegister& vm);
diff --git a/src/aarch64/constants-aarch64.h b/src/aarch64/constants-aarch64.h
index 29fe9d86..63c06bb3 100644
--- a/src/aarch64/constants-aarch64.h
+++ b/src/aarch64/constants-aarch64.h
@@ -51,13 +51,14 @@ const int kFirstCalleeSavedFPRegisterIndex = 8;
#define INSTRUCTION_FIELDS_LIST(V_) \
/* Register fields */ \
-V_(Rd, 4, 0, ExtractBits) /* Destination register. */ \
-V_(Rn, 9, 5, ExtractBits) /* First source register. */ \
-V_(Rm, 20, 16, ExtractBits) /* Second source register. */ \
-V_(Ra, 14, 10, ExtractBits) /* Third source register. */ \
-V_(Rt, 4, 0, ExtractBits) /* Load/store register. */ \
-V_(Rt2, 14, 10, ExtractBits) /* Load/store second register. */ \
-V_(Rs, 20, 16, ExtractBits) /* Exclusive access status. */ \
+V_(Rd, 4, 0, ExtractBits) /* Destination register. */ \
+V_(Rn, 9, 5, ExtractBits) /* First source register. */ \
+V_(Rm, 20, 16, ExtractBits) /* Second source register. */ \
+V_(RmLow16, 19, 16, ExtractBits) /* Second source register (code 0-15). */ \
+V_(Ra, 14, 10, ExtractBits) /* Third source register. */ \
+V_(Rt, 4, 0, ExtractBits) /* Load/store register. */ \
+V_(Rt2, 14, 10, ExtractBits) /* Load/store second register. */ \
+V_(Rs, 20, 16, ExtractBits) /* Exclusive access status. */ \
\
/* Common bits */ \
V_(SixtyFourBits, 31, 31, ExtractBits) \
@@ -1807,7 +1808,14 @@ enum NEON3SameOp {
NEON_BIC = NEON3SameLogicalFixed | 0x00400000,
NEON_BIF = NEON3SameLogicalFixed | 0x20C00000,
NEON_BIT = NEON3SameLogicalFixed | 0x20800000,
- NEON_BSL = NEON3SameLogicalFixed | 0x20400000
+ NEON_BSL = NEON3SameLogicalFixed | 0x20400000,
+
+ // FHM (FMLAL-like) instructions have an oddball encoding scheme under 3Same.
+ NEON3SameFHMMask = 0xBFE0FC00, // U size opcode
+ NEON_FMLAL = NEON3SameFixed | 0x0000E800, // 0 00 11101
+ NEON_FMLAL2 = NEON3SameFixed | 0x2000C800, // 1 00 11001
+ NEON_FMLSL = NEON3SameFixed | 0x0080E800, // 0 10 11101
+ NEON_FMLSL2 = NEON3SameFixed | 0x2080C800 // 1 10 11001
};
@@ -1979,6 +1987,7 @@ enum NEONByIndexedElementOp {
NEON_SQRDMLAH_byelement = NEONByIndexedElementFixed | 0x2000D000,
NEON_UDOT_byelement = NEONByIndexedElementFixed | 0x2000E000,
NEON_SQRDMLSH_byelement = NEONByIndexedElementFixed | 0x2000F000,
+
NEON_FMLA_H_byelement = NEONByIndexedElementFixed | 0x00001000,
NEON_FMLS_H_byelement = NEONByIndexedElementFixed | 0x00005000,
NEON_FMUL_H_byelement = NEONByIndexedElementFixed | 0x00009000,
@@ -1991,10 +2000,22 @@ enum NEONByIndexedElementOp {
NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000,
NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000,
NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000,
- NEON_FCMLA_byelement = NEONByIndexedElementFixed | 0x20001000,
- // Complex instruction(s) this is necessary because 'rot' encoding moves into the NEONByIndex..Mask space
- NEONByIndexedElementFPComplexMask = 0xBF009400
+ // FMLAL-like instructions.
+ // For all cases: U = x, size = 10, opcode = xx00
+ NEONByIndexedElementFPLongFixed = NEONByIndexedElementFixed | 0x00800000,
+ NEONByIndexedElementFPLongFMask = NEONByIndexedElementFMask | 0x00C03000,
+ NEONByIndexedElementFPLongMask = 0xBFC0F400,
+ NEON_FMLAL_H_byelement = NEONByIndexedElementFixed | 0x00800000,
+ NEON_FMLAL2_H_byelement = NEONByIndexedElementFixed | 0x20808000,
+ NEON_FMLSL_H_byelement = NEONByIndexedElementFixed | 0x00804000,
+ NEON_FMLSL2_H_byelement = NEONByIndexedElementFixed | 0x2080C000,
+
+ // Complex instruction(s).
+ // This is necessary because the 'rot' encoding moves into the
+ // NEONByIndex..Mask space.
+ NEONByIndexedElementFPComplexMask = 0xBF009400,
+ NEON_FCMLA_byelement = NEONByIndexedElementFixed | 0x20001000
};
// NEON register copy.
diff --git a/src/aarch64/cpu-features-auditor-aarch64.cc b/src/aarch64/cpu-features-auditor-aarch64.cc
index 45512835..726676fc 100644
--- a/src/aarch64/cpu-features-auditor-aarch64.cc
+++ b/src/aarch64/cpu-features-auditor-aarch64.cc
@@ -638,6 +638,17 @@ void CPUFeaturesAuditor::VisitNEON3Same(const Instruction* instr) {
if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
scope.Record(CPUFeatures::kFP);
}
+ switch (instr->Mask(NEON3SameFHMMask)) {
+ case NEON_FMLAL:
+ case NEON_FMLAL2:
+ case NEON_FMLSL:
+ case NEON_FMLSL2:
+ scope.Record(CPUFeatures::kFP, CPUFeatures::kNEONHalf, CPUFeatures::kFHM);
+ return;
+ default:
+ // No additional features.
+ return;
+ }
}
void CPUFeaturesAuditor::VisitNEON3SameExtra(const Instruction* instr) {
@@ -699,7 +710,18 @@ void CPUFeaturesAuditor::VisitNEONByIndexedElement(const Instruction* instr) {
scope.Record(CPUFeatures::kRDM);
return;
default:
- // Fall through to check other FP instructions.
+ // Fall through to check other instructions.
+ break;
+ }
+ switch (instr->Mask(NEONByIndexedElementFPLongMask)) {
+ case NEON_FMLAL_H_byelement:
+ case NEON_FMLAL2_H_byelement:
+ case NEON_FMLSL_H_byelement:
+ case NEON_FMLSL2_H_byelement:
+ scope.Record(CPUFeatures::kFP, CPUFeatures::kNEONHalf, CPUFeatures::kFHM);
+ return;
+ default:
+ // Fall through to check other instructions.
break;
}
switch (instr->Mask(NEONByIndexedElementFPMask)) {
diff --git a/src/aarch64/disasm-aarch64.cc b/src/aarch64/disasm-aarch64.cc
index 16f7b37f..209f668c 100644
--- a/src/aarch64/disasm-aarch64.cc
+++ b/src/aarch64/disasm-aarch64.cc
@@ -2572,6 +2572,7 @@ void Disassembler::VisitNEON3Same(const Instruction *instr) {
}
nfd.SetFormatMaps(nfd.LogicalFormatMap());
} else {
+ static const char kUnknown[] = "unallocated";
static const char *mnemonics[] = {"shadd",
"uhadd",
"shadd",
@@ -2666,32 +2667,32 @@ void Disassembler::VisitNEON3Same(const Instruction *instr) {
"sqdmulh",
"sqrdmulh",
"addp",
- "unallocated",
+ kUnknown,
"addp",
- "unallocated",
+ kUnknown,
"fmaxnm",
"fmaxnmp",
"fminnm",
"fminnmp",
"fmla",
- "unallocated",
+ kUnknown, // FMLAL2 or unallocated
"fmls",
- "unallocated",
+ kUnknown, // FMLSL2 or unallocated
"fadd",
"faddp",
"fsub",
"fabd",
"fmulx",
"fmul",
- "unallocated",
- "unallocated",
+ kUnknown,
+ kUnknown,
"fcmeq",
"fcmge",
- "unallocated",
+ kUnknown,
"fcmgt",
- "unallocated",
+ kUnknown, // FMLAL or unallocated
"facge",
- "unallocated",
+ kUnknown, // FMLSL or unallocated
"facgt",
"fmax",
"fmaxp",
@@ -2700,7 +2701,7 @@ void Disassembler::VisitNEON3Same(const Instruction *instr) {
"frecps",
"fdiv",
"frsqrts",
- "unallocated"};
+ kUnknown};
// Operation is determined by the opcode bits (15-11), the top bit of
// size (23) and the U bit (29).
@@ -2712,6 +2713,37 @@ void Disassembler::VisitNEON3Same(const Instruction *instr) {
// instructions.
VIXL_ASSERT(mnemonic != NULL);
+ if (mnemonic == kUnknown) {
+ // Catch special cases where we need to check more bits than we have in
+ // the table index. Anything not matched here is unallocated.
+
+ const char *fhm_form = (instr->Mask(NEON_Q) == 0)
+ ? "'Vd.2s, 'Vn.2h, 'Vm.2h"
+ : "'Vd.4s, 'Vn.4h, 'Vm.4h";
+ switch (instr->Mask(NEON3SameFHMMask)) {
+ case NEON_FMLAL:
+ mnemonic = "fmlal";
+ form = fhm_form;
+ break;
+ case NEON_FMLAL2:
+ mnemonic = "fmlal2";
+ form = fhm_form;
+ break;
+ case NEON_FMLSL:
+ mnemonic = "fmlsl";
+ form = fhm_form;
+ break;
+ case NEON_FMLSL2:
+ mnemonic = "fmlsl2";
+ form = fhm_form;
+ break;
+ default:
+ VIXL_ASSERT(strcmp(mnemonic, "unallocated") == 0);
+ form = "(NEON3Same)";
+ break;
+ }
+ }
+
if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
nfd.SetFormatMaps(nfd.FPFormatMap());
}
@@ -3007,6 +3039,7 @@ void Disassembler::VisitNEONByIndexedElement(const Instruction *instr) {
bool fp_instr = false;
bool cn_instr = false;
bool half_instr = false;
+ bool fhm_instr = false; // FMLAL{2}, FMLSL{2}
const char *form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]";
@@ -3090,52 +3123,78 @@ void Disassembler::VisitNEONByIndexedElement(const Instruction *instr) {
case NEON_SQRDMLSH_byelement:
mnemonic = "sqrdmlsh";
break;
- default:
- switch (instr->Mask(NEONByIndexedElementFPMask)) {
- case NEON_FMUL_byelement:
- mnemonic = "fmul";
- fp_instr = true;
- break;
- case NEON_FMLA_byelement:
- mnemonic = "fmla";
- fp_instr = true;
- break;
- case NEON_FMLS_byelement:
- mnemonic = "fmls";
- fp_instr = true;
- break;
- case NEON_FMULX_byelement:
- mnemonic = "fmulx";
- fp_instr = true;
- break;
- case NEON_FMLA_H_byelement:
- mnemonic = "fmla";
- half_instr = true;
+ default: {
+ switch (instr->Mask(NEONByIndexedElementFPLongMask)) {
+ case NEON_FMLAL_H_byelement:
+ mnemonic = "fmlal";
+ fhm_instr = true;
break;
- case NEON_FMLS_H_byelement:
- mnemonic = "fmls";
- half_instr = true;
+ case NEON_FMLAL2_H_byelement:
+ mnemonic = "fmlal2";
+ fhm_instr = true;
break;
- case NEON_FMUL_H_byelement:
- mnemonic = "fmul";
- half_instr = true;
+ case NEON_FMLSL_H_byelement:
+ mnemonic = "fmlsl";
+ fhm_instr = true;
break;
- case NEON_FMULX_H_byelement:
- mnemonic = "fmulx";
- half_instr = true;
+ case NEON_FMLSL2_H_byelement:
+ mnemonic = "fmlsl2";
+ fhm_instr = true;
break;
default:
- switch (instr->Mask(NEONByIndexedElementFPComplexMask)) {
- case NEON_FCMLA_byelement:
- mnemonic = "fcmla";
- cn_instr = true;
- form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndexRot], 'ILFCNR";
+ switch (instr->Mask(NEONByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement:
+ mnemonic = "fmul";
+ fp_instr = true;
+ break;
+ case NEON_FMLA_byelement:
+ mnemonic = "fmla";
+ fp_instr = true;
+ break;
+ case NEON_FMLS_byelement:
+ mnemonic = "fmls";
+ fp_instr = true;
+ break;
+ case NEON_FMULX_byelement:
+ mnemonic = "fmulx";
+ fp_instr = true;
+ break;
+ case NEON_FMLA_H_byelement:
+ mnemonic = "fmla";
+ half_instr = true;
+ break;
+ case NEON_FMLS_H_byelement:
+ mnemonic = "fmls";
+ half_instr = true;
+ break;
+ case NEON_FMUL_H_byelement:
+ mnemonic = "fmul";
+ half_instr = true;
break;
+ case NEON_FMULX_H_byelement:
+ mnemonic = "fmulx";
+ half_instr = true;
+ break;
+ default:
+ switch (instr->Mask(NEONByIndexedElementFPComplexMask)) {
+ case NEON_FCMLA_byelement:
+ mnemonic = "fcmla";
+ cn_instr = true;
+ form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndexRot], 'ILFCNR";
+ break;
+ }
}
}
+ }
}
- if (half_instr) {
+ if (fhm_instr) {
+ // These are oddballs. Set the format manually.
+ form = (instr->Mask(NEON_Q) == 0)
+ ? "'Vd.2s, 'Vn.2h, 'Ve.h['IVByElemIndexFHM]"
+ : "'Vd.4s, 'Vn.4h, 'Ve.h['IVByElemIndexFHM]";
+ Format(instr, mnemonic, nfd.Substitute(form));
+ } else if (half_instr) {
form = "'Vd.%s, 'Vn.%s, 'Ve.h['IVByElemIndex]";
nfd.SetFormatMaps(&map_half, &map_half);
Format(instr, mnemonic, nfd.Substitute(form));
@@ -4961,7 +5020,7 @@ int Disassembler::SubstituteRegisterField(const Instruction *instr,
case 'e':
// This is register Rm, but using a 4-bit specifier. Used in NEON
// by-element instructions.
- reg_num = (instr->GetRm() & 0xf);
+ reg_num = instr->GetRmLow16();
break;
case 'a':
reg_num = instr->GetRa();
@@ -5262,20 +5321,23 @@ int Disassembler::SubstituteImmediateField(const Instruction *instr,
case 'B': { // IVByElemIndex.
int ret = strlen("IVByElemIndex");
int vm_index = (instr->GetNEONH() << 1) | instr->GetNEONL();
- if ((strncmp(format,
- "IVByElemIndexRot",
- strlen("IVByElemIndexRot")) == 0)) {
+ static const char *format_rot = "IVByElemIndexRot";
+ static const char *format_fhm = "IVByElemIndexFHM";
+ bool is_fhm = strncmp(format, format_fhm, strlen(format_fhm)) == 0;
+ if (strncmp(format, format_rot, strlen(format_rot)) == 0) {
// FCMLA uses 'H' bit index when SIZE is 2, else H:L
if (instr->GetNEONSize() == 2) {
vm_index = instr->GetNEONH();
}
- ret += 3;
- } else if (instr->GetNEONSize() == 1) {
- vm_index = (vm_index << 1) | instr->GetNEONM();
- } else if (instr->GetNEONSize() == 0) {
+ ret = static_cast<int>(strlen(format_rot));
+ } else if (is_fhm || (instr->GetNEONSize() == 0)) {
// Half-precision FP ops use H:L:M bit index
+ // Widening operations with H-sized operands also use H:L:M.
vm_index = (instr->GetNEONH() << 2) | (instr->GetNEONL() << 1) |
instr->GetNEONM();
+ if (is_fhm) ret = static_cast<int>(strlen(format_fhm));
+ } else if (instr->GetNEONSize() == 1) {
+ vm_index = (vm_index << 1) | instr->GetNEONM();
}
AppendToOutput("%d", vm_index);
return ret;
diff --git a/src/aarch64/instructions-aarch64.h b/src/aarch64/instructions-aarch64.h
index 4e6bce75..c6964039 100644
--- a/src/aarch64/instructions-aarch64.h
+++ b/src/aarch64/instructions-aarch64.h
@@ -527,7 +527,8 @@ enum VectorFormat {
kFormatS = NEON_S | NEONScalar,
kFormatD = NEON_D | NEONScalar,
- // A value invented solely for FP16 scalar pairwise simulator trace tests.
+ // An artificial value, used by simulator trace tests and a few oddball
+ // instructions (such as FMLAL).
kFormat2H = 0xfffffffe
};
diff --git a/src/aarch64/logic-aarch64.cc b/src/aarch64/logic-aarch64.cc
index aebd2270..022e22f6 100644
--- a/src/aarch64/logic-aarch64.cc
+++ b/src/aarch64/logic-aarch64.cc
@@ -4426,6 +4426,150 @@ LogicVRegister Simulator::fmls(VectorFormat vform,
}
+LogicVRegister Simulator::fmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op1 = FPToFloat(src1.Float<SimFloat16>(i), kIgnoreDefaultNaN);
+ float op2 = FPToFloat(src2.Float<SimFloat16>(i), kIgnoreDefaultNaN);
+ float acc = dst.Float<float>(i);
+ float result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int src = i + LaneCountFromFormat(vform);
+ float op1 = FPToFloat(src1.Float<SimFloat16>(src), kIgnoreDefaultNaN);
+ float op2 = FPToFloat(src2.Float<SimFloat16>(src), kIgnoreDefaultNaN);
+ float acc = dst.Float<float>(i);
+ float result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op1 = -FPToFloat(src1.Float<SimFloat16>(i), kIgnoreDefaultNaN);
+ float op2 = FPToFloat(src2.Float<SimFloat16>(i), kIgnoreDefaultNaN);
+ float acc = dst.Float<float>(i);
+ float result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int src = i + LaneCountFromFormat(vform);
+ float op1 = -FPToFloat(src1.Float<SimFloat16>(src), kIgnoreDefaultNaN);
+ float op2 = FPToFloat(src2.Float<SimFloat16>(src), kIgnoreDefaultNaN);
+ float acc = dst.Float<float>(i);
+ float result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ dst.ClearForWrite(vform);
+ float op2 = FPToFloat(src2.Float<SimFloat16>(index), kIgnoreDefaultNaN);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op1 = FPToFloat(src1.Float<SimFloat16>(i), kIgnoreDefaultNaN);
+ float acc = dst.Float<float>(i);
+ float result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ dst.ClearForWrite(vform);
+ float op2 = FPToFloat(src2.Float<SimFloat16>(index), kIgnoreDefaultNaN);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int src = i + LaneCountFromFormat(vform);
+ float op1 = FPToFloat(src1.Float<SimFloat16>(src), kIgnoreDefaultNaN);
+ float acc = dst.Float<float>(i);
+ float result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ dst.ClearForWrite(vform);
+ float op2 = FPToFloat(src2.Float<SimFloat16>(index), kIgnoreDefaultNaN);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op1 = -FPToFloat(src1.Float<SimFloat16>(i), kIgnoreDefaultNaN);
+ float acc = dst.Float<float>(i);
+ float result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ dst.ClearForWrite(vform);
+ float op2 = FPToFloat(src2.Float<SimFloat16>(index), kIgnoreDefaultNaN);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int src = i + LaneCountFromFormat(vform);
+ float op1 = -FPToFloat(src1.Float<SimFloat16>(src), kIgnoreDefaultNaN);
+ float acc = dst.Float<float>(i);
+ float result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
template <typename T>
LogicVRegister Simulator::fneg(VectorFormat vform,
LogicVRegister dst,
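For reference, the widening semantics implemented in logic-aarch64.cc above boil down to the following standalone sketch. It is not VIXL code: _Float16 (a compiler extension) and std::fma stand in for SimFloat16 and FPMulAdd, which additionally handle VIXL's NaN and default-NaN behaviour. The "2" variants read the upper half of the half-precision source vectors, the FMLSL forms negate the first operand before the fused multiply-add, and the by-element forms differ only in broadcasting a single element of the second source.

// Standalone sketch of the vector FMLAL/FMLSL family, assuming _Float16 as a
// half-precision type convertible to float.
#include <cmath>
#include <cstddef>

// dst holds `lanes` single-precision accumulators; src1 and src2 hold
// 2 * `lanes` half-precision elements each.
void FmlalFamilySketch(float* dst, const _Float16* src1, const _Float16* src2,
                       size_t lanes, bool second_half, bool subtract) {
  const size_t offset = second_half ? lanes : 0;  // FMLAL2 / FMLSL2
  for (size_t i = 0; i < lanes; i++) {
    float op1 = static_cast<float>(src1[i + offset]);
    float op2 = static_cast<float>(src2[i + offset]);
    if (subtract) op1 = -op1;             // FMLSL / FMLSL2
    dst[i] = std::fma(op1, op2, dst[i]);  // acc + op1 * op2, fused
  }
}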
diff --git a/src/aarch64/macro-assembler-aarch64.h b/src/aarch64/macro-assembler-aarch64.h
index 88ed5577..8c0be9fe 100644
--- a/src/aarch64/macro-assembler-aarch64.h
+++ b/src/aarch64/macro-assembler-aarch64.h
@@ -2557,7 +2557,11 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
V(fminnmp, Fminnmp) \
V(fminp, Fminp) \
V(fmla, Fmla) \
+ V(fmlal, Fmlal) \
+ V(fmlal2, Fmlal2) \
V(fmls, Fmls) \
+ V(fmlsl, Fmlsl) \
+ V(fmlsl2, Fmlsl2) \
V(fmulx, Fmulx) \
V(frecps, Frecps) \
V(frsqrts, Frsqrts) \
@@ -2775,7 +2779,11 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
#define NEON_BYELEMENT_MACRO_LIST(V) \
V(fmul, Fmul) \
V(fmla, Fmla) \
+ V(fmlal, Fmlal) \
+ V(fmlal2, Fmlal2) \
V(fmls, Fmls) \
+ V(fmlsl, Fmlsl) \
+ V(fmlsl2, Fmlsl2) \
V(fmulx, Fmulx) \
V(mul, Mul) \
V(mla, Mla) \
diff --git a/src/aarch64/simulator-aarch64.cc b/src/aarch64/simulator-aarch64.cc
index 4a31a91f..01130bc9 100644
--- a/src/aarch64/simulator-aarch64.cc
+++ b/src/aarch64/simulator-aarch64.cc
@@ -4264,7 +4264,23 @@ void Simulator::VisitNEON3Same(const Instruction* instr) {
fminnmp(vf, rd, rn, rm);
break;
default:
- VIXL_UNIMPLEMENTED();
+ // FMLAL{2} and FMLSL{2} have special-case encodings.
+ switch (instr->Mask(NEON3SameFHMMask)) {
+ case NEON_FMLAL:
+ fmlal(vf, rd, rn, rm);
+ break;
+ case NEON_FMLAL2:
+ fmlal2(vf, rd, rn, rm);
+ break;
+ case NEON_FMLSL:
+ fmlsl(vf, rd, rn, rm);
+ break;
+ case NEON_FMLSL2:
+ fmlsl2(vf, rd, rn, rm);
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
}
} else {
VectorFormat vf = nfd.GetVectorFormat();
@@ -4762,10 +4778,31 @@ void Simulator::VisitNEONByIndexedElement(const Instruction* instr) {
ByElementOp Op = NULL;
int rm_reg = instr->GetRm();
+ int rm_low_reg = instr->GetRmLow16();
int index = (instr->GetNEONH() << 1) | instr->GetNEONL();
+ int index_hlm = (index << 1) | instr->GetNEONM();
+
+ switch (instr->Mask(NEONByIndexedElementFPLongMask)) {
+ // These are oddballs and are best handled as special cases.
+ // - Rm is encoded with only 4 bits (and must be in the lower 16 registers).
+ // - The index is always H:L:M.
+ case NEON_FMLAL_H_byelement:
+ fmlal(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm);
+ return;
+ case NEON_FMLAL2_H_byelement:
+ fmlal2(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm);
+ return;
+ case NEON_FMLSL_H_byelement:
+ fmlsl(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm);
+ return;
+ case NEON_FMLSL2_H_byelement:
+ fmlsl2(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm);
+ return;
+ }
+
if (instr->GetNEONSize() == 1) {
- rm_reg &= 0xf;
- index = (index << 1) | instr->GetNEONM();
+ rm_reg = rm_low_reg;
+ index = index_hlm;
}
switch (instr->Mask(NEONByIndexedElementMask)) {
@@ -4905,10 +4942,11 @@ void Simulator::VisitNEONByIndexedElement(const Instruction* instr) {
Op = &Simulator::fmulx;
break;
default:
- if (instr->GetNEONSize() == 2)
+ if (instr->GetNEONSize() == 2) {
index = instr->GetNEONH();
- else
+ } else {
index = (instr->GetNEONH() << 1) | instr->GetNEONL();
+ }
switch (instr->Mask(NEONByIndexedElementFPComplexMask)) {
case NEON_FCMLA_byelement:
vf = vf_r;
diff --git a/src/aarch64/simulator-aarch64.h b/src/aarch64/simulator-aarch64.h
index a4117877..5f1f6325 100644
--- a/src/aarch64/simulator-aarch64.h
+++ b/src/aarch64/simulator-aarch64.h
@@ -1998,11 +1998,31 @@ class Simulator : public DecoderVisitor {
const LogicVRegister& src1,
const LogicVRegister& src2,
int index);
+ LogicVRegister fmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
LogicVRegister fmls(VectorFormat vform,
LogicVRegister dst,
const LogicVRegister& src1,
const LogicVRegister& src2,
int index);
+ LogicVRegister fmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
LogicVRegister fmulx(VectorFormat vform,
LogicVRegister dst,
const LogicVRegister& src1,
@@ -2872,6 +2892,23 @@ class Simulator : public DecoderVisitor {
const LogicVRegister& src1,
const LogicVRegister& src2);
+ LogicVRegister fmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+
template <typename T>
LogicVRegister fcmp(VectorFormat vform,
LogicVRegister dst,