Diffstat (limited to 'big-little/virtualisor')
-rw-r--r--  big-little/virtualisor/cache_geom.c          |   9
-rw-r--r--  big-little/virtualisor/cpus/a15/a15.c        |  17
-rw-r--r--  big-little/virtualisor/cpus/a7/a7.c          |   2
-rw-r--r--  big-little/virtualisor/include/cache_geom.h  |  10
-rw-r--r--  big-little/virtualisor/kfscb_trap_handler.c  |   2
-rw-r--r--  big-little/virtualisor/pmu_trap_handler.c    | 240
-rw-r--r--  big-little/virtualisor/vgic_trap_handler.c   |   3
-rw-r--r--  big-little/virtualisor/virt_context.c        |   3
-rw-r--r--  big-little/virtualisor/virt_handle.c         |  28
9 files changed, 140 insertions, 174 deletions
diff --git a/big-little/virtualisor/cache_geom.c b/big-little/virtualisor/cache_geom.c
index 7bcb42b..1f24882 100644
--- a/big-little/virtualisor/cache_geom.c
+++ b/big-little/virtualisor/cache_geom.c
@@ -61,14 +61,12 @@ void find_cache_geometry(cache_geometry * cg_ptr)
write_csselr(ctr << 1);
isb();
cg_ptr->ccsidr[ctr][CIND_DATA] = read_ccsidr();
- }
- else if (cache_type == 0x04) {
+ } else if (cache_type == 0x04) {
/* unified cache */
write_csselr(ctr << 1);
isb();
cg_ptr->ccsidr[ctr][CIND_UNIF] = read_ccsidr();
- }
- else {
+ } else {
/*
* Stop scanning at the first invalid/unsupported cache
* level
@@ -233,8 +231,7 @@ unsigned map_cache_geometries(cache_geometry * hcg_ptr,
void handle_cm_op(unsigned reg,
void (*op_handler) (unsigned),
cache_geometry * hcg_ptr,
- cache_geometry * tcg_ptr,
- cache_diff * cd_ptr)
+ cache_geometry * tcg_ptr, cache_diff * cd_ptr)
{
unsigned clvl = 0, cpu_id = read_cpuid();
unsigned tc_assoc = 0, tc_numsets = 0, tc_linesz = 0;
diff --git a/big-little/virtualisor/cpus/a15/a15.c b/big-little/virtualisor/cpus/a15/a15.c
index 213ae67..1b244c4 100644
--- a/big-little/virtualisor/cpus/a15/a15.c
+++ b/big-little/virtualisor/cpus/a15/a15.c
@@ -54,7 +54,7 @@ unsigned a15_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
ind = get_cache_ind(csselr);
if (CRn == CRN_C0 && Op1 == 1 && CRm == 0 && Op2 == CCSIDR &&
- level == 0 && ind == CIND_INST) {
+ level == 0 && ind == CIND_INST) {
unsigned cpu_id = read_cpuid();
/*
@@ -64,9 +64,9 @@ unsigned a15_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
* [BC=x, TC=A7, HC=A15]
*/
- cache_geometry * cg_ptr = (IS_TGT_CLUSTER) ?
- &host_cache_geometry[cpu_id] :
- &target_cache_geometry[cpu_id];
+ cache_geometry *cg_ptr = (IS_TGT_CLUSTER) ?
+ &host_cache_geometry[cpu_id] :
+ &target_cache_geometry[cpu_id];
regs->r[Rt] = cg_ptr->ccsidr[level][ind];
}
@@ -126,13 +126,12 @@ unsigned a15_trap_setup(unsigned first_cpu, unsigned sibling_cpu)
* configuration [BC=x, TC=A7, HC=A15].
*/
- cache_geometry * cg_ptr = (IS_TGT_CLUSTER) ?
- &host_cache_geometry[cpu_id] :
- &target_cache_geometry[cpu_id];
-
+ cache_geometry *cg_ptr = (IS_TGT_CLUSTER) ?
+ &host_cache_geometry[cpu_id] :
+ &target_cache_geometry[cpu_id];
cg_ptr->ccsidr[0][CIND_INST] =
- CACHE_A7_L1_INST_CCSIDR;
+ CACHE_A7_L1_INST_CCSIDR;
}
/*
diff --git a/big-little/virtualisor/cpus/a7/a7.c b/big-little/virtualisor/cpus/a7/a7.c
index d39f857..4aff69d 100644
--- a/big-little/virtualisor/cpus/a7/a7.c
+++ b/big-little/virtualisor/cpus/a7/a7.c
@@ -54,7 +54,7 @@ unsigned a7_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
ind = get_cache_ind(csselr);
if (CRn == CRN_C0 && Op1 == 1 && CRm == 0 && Op2 == CCSIDR &&
- level == 0 && ind == CIND_INST)
+ level == 0 && ind == CIND_INST)
regs->r[Rt] = read_ccsidr();
}
diff --git a/big-little/virtualisor/include/cache_geom.h b/big-little/virtualisor/include/cache_geom.h
index f162337..23db57b 100644
--- a/big-little/virtualisor/include/cache_geom.h
+++ b/big-little/virtualisor/include/cache_geom.h
@@ -31,9 +31,9 @@
#define TCSZ_BIG 0x2
/* Cache Instruction not Data bit, CSSELR[0] */
-#define CIND_DATA 0x0 /* Data cache */
-#define CIND_UNIF CIND_DATA /* Unified cache */
-#define CIND_INST 0x1 /* Instruction cache */
+#define CIND_DATA 0x0 /* Data cache */
+#define CIND_UNIF CIND_DATA /* Unified cache */
+#define CIND_INST 0x1 /* Instruction cache */
/* A7 L1 instruction cache CCSIDR value */
#define CACHE_A7_L1_INST_CCSIDR 0x203FE009
@@ -101,11 +101,11 @@ typedef struct cache_stats {
} cache_stats;
extern unsigned map_cache_geometries(cache_geometry *, cache_geometry *,
- cache_diff *);
+ cache_diff *);
extern void find_cache_geometry(cache_geometry *);
extern void find_cache_diff(cache_geometry *, cache_geometry *, cache_diff *);
extern void handle_cm_op(unsigned, void (*)(unsigned), cache_geometry *,
- cache_geometry *, cache_diff *);
+ cache_geometry *, cache_diff *);
//extern void stat_init_host_cache_geometry(cache_geometry *);
#endif /* __CACHE_GEOM_H__ */
diff --git a/big-little/virtualisor/kfscb_trap_handler.c b/big-little/virtualisor/kfscb_trap_handler.c
index 7813cce..6c621e7 100644
--- a/big-little/virtualisor/kfscb_trap_handler.c
+++ b/big-little/virtualisor/kfscb_trap_handler.c
@@ -43,7 +43,7 @@ void handle_kfscb_abort(unsigned pa, unsigned *data, unsigned write)
switch (reg_offset) {
- /* Access to KFSCB registers */
+ /* Access to KFSCB registers */
case (RST_HOLD0):
if (write) {
/* Entry */
diff --git a/big-little/virtualisor/pmu_trap_handler.c b/big-little/virtualisor/pmu_trap_handler.c
index bf3f3b6..37eddf4 100644
--- a/big-little/virtualisor/pmu_trap_handler.c
+++ b/big-little/virtualisor/pmu_trap_handler.c
@@ -69,25 +69,25 @@ struct descriptor {
enum {
PMU_CLUSTER_A15 = 0x00,
- PMU_CLUSTER_A7 = 0x01,
+ PMU_CLUSTER_A7 = 0x01,
};
enum {
- PMU_CNT_CYCLE_COUNTER = 0x00,
- PMU_CNT_OVERFLOW_FLAG = 0x01,
- PMU_CNT_EVENT_COUNTER_0 = 0x02,
- PMU_CNT_EVENT_COUNTER_1 = 0x03,
- PMU_CNT_EVENT_COUNTER_2 = 0x04,
- PMU_CNT_EVENT_COUNTER_3 = 0x05,
- PMU_CNT_EVENT_COUNTER_4 = 0x06,
- PMU_CNT_EVENT_COUNTER_5 = 0x07,
+ PMU_CNT_CYCLE_COUNTER = 0x00,
+ PMU_CNT_OVERFLOW_FLAG = 0x01,
+ PMU_CNT_EVENT_COUNTER_0 = 0x02,
+ PMU_CNT_EVENT_COUNTER_1 = 0x03,
+ PMU_CNT_EVENT_COUNTER_2 = 0x04,
+ PMU_CNT_EVENT_COUNTER_3 = 0x05,
+ PMU_CNT_EVENT_COUNTER_4 = 0x06,
+ PMU_CNT_EVENT_COUNTER_5 = 0x07,
};
enum {
- PMU_REQ_DISABLE_COUNTER = 0x01,
- PMU_REQ_CONF_COUNTER = 0x02,
+ PMU_REQ_DISABLE_COUNTER = 0x01,
+ PMU_REQ_CONF_COUNTER = 0x02,
PMU_REQ_CONF_RESET_COUNTER = 0x03,
- PMU_REQ_READ_COUNTER = 0x04,
+ PMU_REQ_READ_COUNTER = 0x04,
PMU_REQ_READ_RESET_COUNTER = 0x05,
};
@@ -138,8 +138,7 @@ void set_pmu_state(unsigned new)
}
static void handle_desc(struct descriptor *desc,
- unsigned cluster_id,
- unsigned cpu_id)
+ unsigned cluster_id, unsigned cpu_id)
{
unsigned entry_cluster = desc->u.counter.cluster_id;
unsigned selected_counter = desc->u.counter.selected_counter;
@@ -171,10 +170,9 @@ static void handle_desc(struct descriptor *desc,
} else {
switch (selected_counter) {
case PMU_CNT_CYCLE_COUNTER:
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] &= 0x7FFFFFFF;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] &= 0x7FFFFFFF;
break;
case PMU_CNT_OVERFLOW_FLAG:
/* Can't disable overflow flags. */
@@ -184,11 +182,10 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_2:
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] &=
- ~(1 << selected_counter);
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] &=
+ ~(1 << selected_counter);
break;
default:
break;
@@ -223,14 +220,13 @@ static void handle_desc(struct descriptor *desc,
};
} else {
clusters_ctx[entry_cluster][cpu_id][PMCR_IDX] |=
- (pmu_counters << 11) | 1;
+ (pmu_counters << 11) | 1;
switch (selected_counter) {
case PMU_CNT_CYCLE_COUNTER:
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] |= 0x80000000;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] |= 0x80000000;
break;
case PMU_CNT_OVERFLOW_FLAG:
/* Can't configure overflow flags. */
@@ -240,17 +236,13 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_2:
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVTYPE0_IDX +
- (selected_counter * 2)] =
- event_type;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] |=
- (1 << selected_counter);
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVTYPE0_IDX +
+ (selected_counter * 2)] = event_type;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] |= (1 << selected_counter);
break;
default:
break;
@@ -287,19 +279,16 @@ static void handle_desc(struct descriptor *desc,
};
} else {
clusters_ctx[entry_cluster][cpu_id][PMCR_IDX] |=
- (pmu_counters << 11) | 1;
+ (pmu_counters << 11) | 1;
switch (selected_counter) {
case PMU_CNT_CYCLE_COUNTER:
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCCNTR_IDX] = reset_value;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] |=
- 0x80000000;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCCNTR_IDX] = reset_value;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] |= 0x80000000;
break;
case PMU_CNT_OVERFLOW_FLAG:
break;
@@ -308,21 +297,17 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_2:
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVTYPE0_IDX +
- (selected_counter * 2)] = event_type;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVCNT0_IDX +
- (selected_counter * 2)] = reset_value;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] |=
- (1 << selected_counter);
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVTYPE0_IDX +
+ (selected_counter * 2)] = event_type;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVCNT0_IDX +
+ (selected_counter * 2)] = reset_value;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] |= (1 << selected_counter);
break;
default:
break;
@@ -344,8 +329,9 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
write_pmselr(selected_counter);
- desc->u.counter.event_type=read_pmxevtyper();
- desc->u.counter.counter_value=read_pmxevcntr();
+ desc->u.counter.event_type = read_pmxevtyper();
+ desc->u.counter.counter_value =
+ read_pmxevcntr();
break;
default:
break;
@@ -354,17 +340,15 @@ static void handle_desc(struct descriptor *desc,
switch (selected_counter) {
case PMU_CNT_CYCLE_COUNTER:
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCCNTR_IDX];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCCNTR_IDX];
break;
case PMU_CNT_OVERFLOW_FLAG:
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMOVSR_IDX];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMOVSR_IDX];
break;
case PMU_CNT_EVENT_COUNTER_0:
case PMU_CNT_EVENT_COUNTER_1:
@@ -372,17 +356,13 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
desc->u.counter.event_type =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVTYPE0_IDX +
- (selected_counter * 2)];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVTYPE0_IDX + (selected_counter * 2)];
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVCNT0_IDX +
- (selected_counter * 2)];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVCNT0_IDX + (selected_counter * 2)];
break;
default:
break;
@@ -406,8 +386,9 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
write_pmselr(selected_counter);
- desc->u.counter.event_type=read_pmxevtyper();
- desc->u.counter.counter_value=read_pmxevcntr();
+ desc->u.counter.event_type = read_pmxevtyper();
+ desc->u.counter.counter_value =
+ read_pmxevcntr();
write_pmxevcntr(reset_value);
break;
default:
@@ -417,24 +398,20 @@ static void handle_desc(struct descriptor *desc,
switch (selected_counter) {
case PMU_CNT_CYCLE_COUNTER:
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCCNTR_IDX];
- clusters_ctx
- [cluster_id]
- [cpu_id]
- [PMCCNTR_IDX] = reset_value;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCCNTR_IDX];
+ clusters_ctx[cluster_id]
+ [cpu_id]
+ [PMCCNTR_IDX] = reset_value;
case PMU_CNT_OVERFLOW_FLAG:
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMOVSR_IDX];
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMOVSR_IDX] = reset_value;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMOVSR_IDX];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMOVSR_IDX] = reset_value;
break;
case PMU_CNT_EVENT_COUNTER_0:
case PMU_CNT_EVENT_COUNTER_1:
@@ -442,22 +419,17 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
desc->u.counter.event_type =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVTYPE0_IDX +
- (selected_counter * 2)];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVTYPE0_IDX + (selected_counter * 2)];
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVCNT0_IDX +
- (selected_counter * 2)];
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVCNT0_IDX +
- (selected_counter * 2)] = reset_value;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVCNT0_IDX + (selected_counter * 2)];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVCNT0_IDX +
+ (selected_counter * 2)] = reset_value;
break;
default:
break;
@@ -508,10 +480,9 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
ret = read_pmxevtyper();
} else {
tmp = clusters_ctx[first][cpu_id][PMSELR_IDX];
- ret = clusters_ctx
- [first]
- [cpu_id]
- [PMXEVTYPE0_IDX + (tmp * 2)];
+ ret = clusters_ctx[first]
+ [cpu_id]
+ [PMXEVTYPE0_IDX + (tmp * 2)];
}
break;
case HVC_PMU_PMXEVTYPER_WRITE:
@@ -519,10 +490,9 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
write_pmxevtyper(second);
} else {
tmp = clusters_ctx[first][cpu_id][PMSELR_IDX];
- clusters_ctx
- [first]
- [cpu_id]
- [PMXEVTYPE0_IDX + (tmp * 2)] = second;
+ clusters_ctx[first]
+ [cpu_id]
+ [PMXEVTYPE0_IDX + (tmp * 2)] = second;
}
break;
case HVC_PMU_PMCNTENSET_READ:
@@ -578,10 +548,9 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
ret = read_pmxevcntr();
} else {
tmp = clusters_ctx[first][cpu_id][PMSELR_IDX];
- ret = clusters_ctx
- [first]
- [cpu_id]
- [PMXEVCNT0_IDX + (tmp * 2)];
+ ret = clusters_ctx[first]
+ [cpu_id]
+ [PMXEVCNT0_IDX + (tmp * 2)];
}
break;
case HVC_PMU_PMXEVCNTR_WRITE:
@@ -589,10 +558,9 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
write_pmxevcntr(second);
} else {
tmp = clusters_ctx[first][cpu_id][PMSELR_IDX];
- clusters_ctx
- [first]
- [cpu_id]
- [PMXEVCNT0_IDX + (tmp * 2)] = second;
+ clusters_ctx[first]
+ [cpu_id]
+ [PMXEVCNT0_IDX + (tmp * 2)] = second;
}
break;
case HVC_PMU_PMINTENSET_READ:
@@ -629,7 +597,7 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
ret = sizeof(struct descriptor) * ENTRIES;
break;
case HVC_PMU_SYNC_PMU_COUNTERS:
- {
+ {
int i;
int entries;
unsigned int *pentries;
@@ -641,8 +609,8 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
for (i = 0, desc++; i < entries; i++, desc++) {
handle_desc(desc, cluster_id, cpu_id);
}
- }
- break;
+ }
+ break;
}
return ret;
diff --git a/big-little/virtualisor/vgic_trap_handler.c b/big-little/virtualisor/vgic_trap_handler.c
index cd06578..75e1030 100644
--- a/big-little/virtualisor/vgic_trap_handler.c
+++ b/big-little/virtualisor/vgic_trap_handler.c
@@ -60,7 +60,8 @@ void handle_vgic_distif_abort(unsigned pa, unsigned *data, unsigned write)
* in progress.
*/
if (FALSE == async_switchover)
- start_virq_migration(icdiptr_orig, icdiptr_curr, reg_offset - GICD_CPUS);
+ start_virq_migration(icdiptr_orig, icdiptr_curr,
+ reg_offset - GICD_CPUS);
} else {
value = read32(pa);
diff --git a/big-little/virtualisor/virt_context.c b/big-little/virtualisor/virt_context.c
index def3551..65dacb6 100644
--- a/big-little/virtualisor/virt_context.c
+++ b/big-little/virtualisor/virt_context.c
@@ -173,7 +173,8 @@ void RestoreVirtualisor(unsigned first_cpu)
&((unsigned long long
*)((unsigned)((&s2_td[ctr].table)
[0])))[s2_td
- [ctr].index];
+ [ctr].
+ index];
s2_td[ctr].prev_desc = *cd_ptr;
*cd_ptr = s2_td[ctr].cur_desc;
periph_addr = (unsigned *)cd_ptr;
diff --git a/big-little/virtualisor/virt_handle.c b/big-little/virtualisor/virt_handle.c
index abdf604..ca3cb8e 100644
--- a/big-little/virtualisor/virt_handle.c
+++ b/big-little/virtualisor/virt_handle.c
@@ -186,7 +186,7 @@ void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs)
switch (CRm) {
case 0:
switch (Op2) {
- unsigned csselr, level, ind;
+ unsigned csselr, level, ind;
case CCSIDR:
if (write)
goto error;
@@ -198,20 +198,20 @@ void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs)
* The error is later corrected in the
* A7 or A15 specific trap function.
*/
- csselr = target_cache_geometry[cpu_id].
- csselr;
+ csselr =
+ target_cache_geometry
+ [cpu_id].csselr;
level = get_cache_level(csselr);
ind = get_cache_ind(csselr);
regs->r[Rt] =
- target_cache_geometry[cpu_id].
- ccsidr[level][ind];
+ target_cache_geometry[cpu_id].ccsidr
+ [level][ind];
break;
case CLIDR:
if (write)
goto error;
regs->r[Rt] =
- target_cache_geometry[cpu_id].
- clidr;
+ target_cache_geometry[cpu_id].clidr;
break;
case AIDR:
if (write)
@@ -233,13 +233,13 @@ void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs)
case CSSELR:
if (write) {
target_cache_geometry
- [cpu_id].csselr = regs->r[Rt];
+ [cpu_id].csselr =
+ regs->r[Rt];
write_csselr(regs->r[Rt]);
- }
- else
+ } else
regs->r[Rt] =
- target_cache_geometry
- [cpu_id].csselr;
+ target_cache_geometry
+ [cpu_id].csselr;
break;
default:
goto error;
@@ -575,7 +575,7 @@ void trap_dabort_handle(unsigned hsr, gp_regs * regs)
return;
}
-void trap_hvc_handle(unsigned hsr, gp_regs *regs)
+void trap_hvc_handle(unsigned hsr, gp_regs * regs)
{
unsigned opcode = regs->r[0];
@@ -587,7 +587,7 @@ void trap_hvc_handle(unsigned hsr, gp_regs *regs)
*/
case HVC_SWITCHER_CLUSTER_SWITCH:
/* Do not switch till previous one has completed */
- while (FALSE == cluster_reset_status(!read_clusterid()));
+ while (FALSE == cluster_reset_status(!read_clusterid())) ;
signal_switchover();
break;