author     Anthony Liguori <aliguori@us.ibm.com>  2012-08-11 19:49:03 -0500
committer  Anthony Liguori <aliguori@us.ibm.com>  2012-08-11 19:49:03 -0500
commit     346fe0c4c0b88f11a3d0c01c34d9a170d73429cc (patch)
tree       699be308892b0a9c1f31d8025c092a99f3418b00
parent     53810bab3acd73b9844807e53f02d867c1ad1d2a (diff)
parent     b90372ad2a69a9cdad2a40766eb46f0a89d98535 (diff)
Merge remote-tracking branch 'stefanha/trivial-patches' into staging
* stefanha/trivial-patches:
  target-arm: Fix typos in comments
  arm: translate: comment typo - s/middel/middle/
  vl.c: Exit QEMU early if no machine is found
-rw-r--r--  target-arm/arm-semi.c      2
-rw-r--r--  target-arm/cpu.h           2
-rw-r--r--  target-arm/helper.c        6
-rw-r--r--  target-arm/neon_helper.c  26
-rw-r--r--  target-arm/op_helper.c     2
-rw-r--r--  target-arm/translate.c    12
-rw-r--r--  vl.c                      10
7 files changed, 30 insertions, 30 deletions
diff --git a/target-arm/arm-semi.c b/target-arm/arm-semi.c
index 88ca9bb1b7..24952061cf 100644
--- a/target-arm/arm-semi.c
+++ b/target-arm/arm-semi.c
@@ -281,7 +281,7 @@ uint32_t do_arm_semihosting(CPUARMState *env)
return len - ret;
}
case TARGET_SYS_READC:
- /* XXX: Read from debug cosole. Not implemented. */
+ /* XXX: Read from debug console. Not implemented. */
return 0;
case TARGET_SYS_ISTTY:
if (use_gdb_syscalls()) {
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 191895cca8..d7f93d98f0 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -79,7 +79,7 @@ struct arm_boot_info;
typedef struct CPUARMState {
/* Regs for current mode. */
uint32_t regs[16];
- /* Frequently accessed CPSR bits are stored separately for efficiently.
+ /* Frequently accessed CPSR bits are stored separately for efficiency.
This contains all the other bits. Use cpsr_{read,write} to access
the whole CPSR. */
uint32_t uncached_cpsr;
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 5727da296c..dceaa95c80 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -988,7 +988,7 @@ static void ttbr164_reset(CPUARMState *env, const ARMCPRegInfo *ri)
}
static const ARMCPRegInfo lpae_cp_reginfo[] = {
- /* NOP AMAIR0/1: the override is because these clash with tha rather
+ /* NOP AMAIR0/1: the override is because these clash with the rather
* broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
*/
{ .name = "AMAIR0", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
@@ -2899,8 +2899,8 @@ uint32_t HELPER(logicq_cc)(uint64_t val)
return (val >> 32) | (val != 0);
}
-/* VFP support. We follow the convention used for VFP instrunctions:
- Single precition routines have a "s" suffix, double precision a
+/* VFP support. We follow the convention used for VFP instructions:
+ Single precision routines have a "s" suffix, double precision a
"d" suffix. */
/* Convert host exception flags to vfp form. */
diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
index e0b9dbf67e..8bb5129d6a 100644
--- a/target-arm/neon_helper.c
+++ b/target-arm/neon_helper.c
@@ -530,7 +530,7 @@ NEON_VOP(rshl_s16, neon_s16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bits accumulator. */
+ * intermediate 64 bit accumulator. */
uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
{
int32_t dest;
@@ -547,8 +547,8 @@ uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
return dest;
}
-/* Handling addition overflow with 64 bits inputs values is more
- * tricky than with 32 bits values. */
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
@@ -590,7 +590,7 @@ NEON_VOP(rshl_u16, neon_u16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bits accumulator. */
+ * intermediate 64 bit accumulator. */
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
{
uint32_t dest;
@@ -608,8 +608,8 @@ uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
return dest;
}
-/* Handling addition overflow with 64 bits inputs values is more
- * tricky than with 32 bits values. */
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
{
int8_t shift = (uint8_t)shiftop;
@@ -817,7 +817,7 @@ NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bits accumulator. */
+ * intermediate 64 bit accumulator. */
uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
{
uint32_t dest;
@@ -846,8 +846,8 @@ uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop
return dest;
}
-/* Handling addition overflow with 64 bits inputs values is more
- * tricky than with 32 bits values. */
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
@@ -914,7 +914,7 @@ NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bits accumulator. */
+ * intermediate 64 bit accumulator. */
uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
{
int32_t dest;
@@ -942,8 +942,8 @@ uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shift
return dest;
}
-/* Handling addition overflow with 64 bits inputs values is more
- * tricky than with 32 bits values. */
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
int8_t shift = (uint8_t)shiftop;
@@ -1671,7 +1671,7 @@ uint64_t HELPER(neon_negl_u64)(uint64_t x)
return -x;
}
-/* Saturnating sign manuipulation. */
+/* Saturating sign manipulation. */
/* ??? Make these use NEON_VOP1 */
#define DO_QABS8(x) do { \
if (x == (int8_t)0x80) { \
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 490111c22f..d77bfab771 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -99,7 +99,7 @@ void tlb_fill(CPUARMState *env1, target_ulong addr, int is_write, int mmu_idx,
}
#endif
-/* FIXME: Pass an axplicit pointer to QF to CPUARMState, and move saturating
+/* FIXME: Pass an explicit pointer to QF to CPUARMState, and move saturating
instructions into helper.c */
uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
{
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 29008a4b34..edef79a2cf 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -53,7 +53,7 @@ typedef struct DisasContext {
int condjmp;
/* The label that will be jumped to when the instruction is skipped. */
int condlabel;
- /* Thumb-2 condtional execution bits. */
+ /* Thumb-2 conditional execution bits. */
int condexec_mask;
int condexec_cond;
struct TranslationBlock *tb;
@@ -77,7 +77,7 @@ static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#endif
/* These instructions trap after executing, so defer them until after the
- conditional executions state has been updated. */
+ conditional execution state has been updated. */
#define DISAS_WFI 4
#define DISAS_SWI 5
@@ -155,7 +155,7 @@ static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
if (reg == 15) {
uint32_t addr;
- /* normaly, since we updated PC, we need only to add one insn */
+ /* normally, since we updated PC, we need only to add one insn */
if (s->thumb)
addr = (long)s->pc + 2;
else
@@ -4897,7 +4897,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
size--;
}
shift = (insn >> 16) & ((1 << (3 + size)) - 1);
- /* To avoid excessive dumplication of ops we implement shift
+ /* To avoid excessive duplication of ops we implement shift
by immediate using the variable shift operations. */
if (op < 8) {
/* Shift by immediate:
@@ -6402,7 +6402,7 @@ static void gen_logicq_cc(TCGv_i64 val)
/* Load/Store exclusive instructions are implemented by remembering
the value/address loaded, and seeing if these are the same
- when the store is performed. This should be is sufficient to implement
+ when the store is performed. This should be sufficient to implement
the architecturally mandated semantics, and avoids having to monitor
regular stores.
@@ -9892,7 +9892,7 @@ static inline void gen_intermediate_code_internal(CPUARMState *env,
} else {
/* While branches must always occur at the end of an IT block,
there are a few other things that can cause us to terminate
- the TB in the middel of an IT block:
+ the TB in the middle of an IT block:
- Exception generating instructions (bkpt, swi, undefined).
- Page boundaries.
- Hardware watchpoints.
diff --git a/vl.c b/vl.c
index ad9b03602b..91076f0e7c 100644
--- a/vl.c
+++ b/vl.c
@@ -3209,6 +3209,11 @@ int main(int argc, char **argv, char **envp)
}
loc_set_none();
+ if (machine == NULL) {
+ fprintf(stderr, "No machine found.\n");
+ exit(1);
+ }
+
if (machine->hw_version) {
qemu_set_version(machine->hw_version);
}
@@ -3251,11 +3256,6 @@ int main(int argc, char **argv, char **envp)
data_dir = CONFIG_QEMU_DATADIR;
}
- if (machine == NULL) {
- fprintf(stderr, "No machine found.\n");
- exit(1);
- }
-
/*
* Default to max_cpus = smp_cpus, in case the user doesn't
* specify a max_cpus value.