-rw-r--r--  acsr/v7_c.c                                      230
-rw-r--r--  big-little/common/pagetable_setup.c               24
-rw-r--r--  big-little/common/vgiclib.c                       44
-rw-r--r--  big-little/include/events.h                        1
-rw-r--r--  big-little/include/hyp_vmmap.h                    12
-rw-r--r--  big-little/lib/ipi.c                             126
-rw-r--r--  big-little/secure_world/secure_resets.c           20
-rw-r--r--  big-little/switcher/context/ns_context.c          44
-rw-r--r--  big-little/switcher/trigger/async_switchover.c   185
-rw-r--r--  big-little/virtualisor/cache_geom.c                9
-rw-r--r--  big-little/virtualisor/cpus/a15/a15.c             17
-rw-r--r--  big-little/virtualisor/cpus/a7/a7.c                2
-rw-r--r--  big-little/virtualisor/include/cache_geom.h       10
-rw-r--r--  big-little/virtualisor/kfscb_trap_handler.c        2
-rw-r--r--  big-little/virtualisor/pmu_trap_handler.c        240
-rw-r--r--  big-little/virtualisor/vgic_trap_handler.c         3
-rw-r--r--  big-little/virtualisor/virt_context.c              3
-rw-r--r--  big-little/virtualisor/virt_handle.c              28
-rw-r--r--  bootwrapper/bootwrapper.h                          2
-rw-r--r--  bootwrapper/c_start.c                              4
20 files changed, 497 insertions(+), 509 deletions(-)
diff --git a/acsr/v7_c.c b/acsr/v7_c.c
index 5417a7d..f9bb23d 100644
--- a/acsr/v7_c.c
+++ b/acsr/v7_c.c
@@ -113,8 +113,8 @@ typedef volatile struct { /* Registers Save?
} debug_registers_t;
typedef struct {
- unsigned (*read)(void);
- void (*write)(unsigned);
+ unsigned (*read) (void);
+ void (*write) (unsigned);
} rw_ops;
typedef struct {
@@ -127,117 +127,117 @@ typedef struct {
dbgreg_rw_ops dbgreg_rw_handlers[] = {
{
- {read_dbg_bvr0, write_dbg_bvr0,},
- {read_dbg_bcr0, write_dbg_bcr0,},
- {read_dbg_wvr0, write_dbg_wvr0,},
- {read_dbg_wcr0, write_dbg_wcr0,},
- {read_dbg_bxvr0, write_dbg_bxvr0,},
- },
+ {read_dbg_bvr0, write_dbg_bvr0,},
+ {read_dbg_bcr0, write_dbg_bcr0,},
+ {read_dbg_wvr0, write_dbg_wvr0,},
+ {read_dbg_wcr0, write_dbg_wcr0,},
+ {read_dbg_bxvr0, write_dbg_bxvr0,},
+ },
{
- {read_dbg_bvr1, write_dbg_bvr1,},
- {read_dbg_bcr1, write_dbg_bcr1,},
- {read_dbg_wvr1, write_dbg_wvr1,},
- {read_dbg_wcr1, write_dbg_wcr1,},
- {read_dbg_bxvr1, write_dbg_bxvr1,},
- },
+ {read_dbg_bvr1, write_dbg_bvr1,},
+ {read_dbg_bcr1, write_dbg_bcr1,},
+ {read_dbg_wvr1, write_dbg_wvr1,},
+ {read_dbg_wcr1, write_dbg_wcr1,},
+ {read_dbg_bxvr1, write_dbg_bxvr1,},
+ },
{
- {read_dbg_bvr2, write_dbg_bvr2,},
- {read_dbg_bcr2, write_dbg_bcr2,},
- {read_dbg_wvr2, write_dbg_wvr2,},
- {read_dbg_wcr2, write_dbg_wcr2,},
- {read_dbg_bxvr2, write_dbg_bxvr2,},
- },
+ {read_dbg_bvr2, write_dbg_bvr2,},
+ {read_dbg_bcr2, write_dbg_bcr2,},
+ {read_dbg_wvr2, write_dbg_wvr2,},
+ {read_dbg_wcr2, write_dbg_wcr2,},
+ {read_dbg_bxvr2, write_dbg_bxvr2,},
+ },
{
- {read_dbg_bvr3, write_dbg_bvr3,},
- {read_dbg_bcr3, write_dbg_bcr3,},
- {read_dbg_wvr3, write_dbg_wvr3,},
- {read_dbg_wcr3, write_dbg_wcr3,},
- {read_dbg_bxvr3, write_dbg_bxvr3,},
- },
+ {read_dbg_bvr3, write_dbg_bvr3,},
+ {read_dbg_bcr3, write_dbg_bcr3,},
+ {read_dbg_wvr3, write_dbg_wvr3,},
+ {read_dbg_wcr3, write_dbg_wcr3,},
+ {read_dbg_bxvr3, write_dbg_bxvr3,},
+ },
{
- {read_dbg_bvr4, write_dbg_bvr4,},
- {read_dbg_bcr4, write_dbg_bcr4,},
- {read_dbg_wvr4, write_dbg_wvr4,},
- {read_dbg_wcr4, write_dbg_wcr4,},
- {read_dbg_bxvr4, write_dbg_bxvr4,},
- },
+ {read_dbg_bvr4, write_dbg_bvr4,},
+ {read_dbg_bcr4, write_dbg_bcr4,},
+ {read_dbg_wvr4, write_dbg_wvr4,},
+ {read_dbg_wcr4, write_dbg_wcr4,},
+ {read_dbg_bxvr4, write_dbg_bxvr4,},
+ },
{
- {read_dbg_bvr5, write_dbg_bvr5,},
- {read_dbg_bcr5, write_dbg_bcr5,},
- {read_dbg_wvr5, write_dbg_wvr5,},
- {read_dbg_wcr5, write_dbg_wcr5,},
- {read_dbg_bxvr5, write_dbg_bxvr5,},
- },
+ {read_dbg_bvr5, write_dbg_bvr5,},
+ {read_dbg_bcr5, write_dbg_bcr5,},
+ {read_dbg_wvr5, write_dbg_wvr5,},
+ {read_dbg_wcr5, write_dbg_wcr5,},
+ {read_dbg_bxvr5, write_dbg_bxvr5,},
+ },
{
- {read_dbg_bvr6, write_dbg_bvr6,},
- {read_dbg_bcr6, write_dbg_bcr6,},
- {read_dbg_wvr6, write_dbg_wvr6,},
- {read_dbg_wcr6, write_dbg_wcr6,},
- {read_dbg_bxvr6, write_dbg_bxvr6,},
- },
+ {read_dbg_bvr6, write_dbg_bvr6,},
+ {read_dbg_bcr6, write_dbg_bcr6,},
+ {read_dbg_wvr6, write_dbg_wvr6,},
+ {read_dbg_wcr6, write_dbg_wcr6,},
+ {read_dbg_bxvr6, write_dbg_bxvr6,},
+ },
{
- {read_dbg_bvr7, write_dbg_bvr7,},
- {read_dbg_bcr7, write_dbg_bcr7,},
- {read_dbg_wvr7, write_dbg_wvr7,},
- {read_dbg_wcr7, write_dbg_wcr7,},
- {read_dbg_bxvr7, write_dbg_bxvr7,},
- },
+ {read_dbg_bvr7, write_dbg_bvr7,},
+ {read_dbg_bcr7, write_dbg_bcr7,},
+ {read_dbg_wvr7, write_dbg_wvr7,},
+ {read_dbg_wcr7, write_dbg_wcr7,},
+ {read_dbg_bxvr7, write_dbg_bxvr7,},
+ },
{
- {read_dbg_bvr8, write_dbg_bvr8,},
- {read_dbg_bcr8, write_dbg_bcr8,},
- {read_dbg_wvr8, write_dbg_wvr8,},
- {read_dbg_wcr8, write_dbg_wcr8,},
- {read_dbg_bxvr8, write_dbg_bxvr8,},
- },
+ {read_dbg_bvr8, write_dbg_bvr8,},
+ {read_dbg_bcr8, write_dbg_bcr8,},
+ {read_dbg_wvr8, write_dbg_wvr8,},
+ {read_dbg_wcr8, write_dbg_wcr8,},
+ {read_dbg_bxvr8, write_dbg_bxvr8,},
+ },
{
- {read_dbg_bvr9, write_dbg_bvr9,},
- {read_dbg_bcr9, write_dbg_bcr9,},
- {read_dbg_wvr9, write_dbg_wvr9,},
- {read_dbg_wcr9, write_dbg_wcr9,},
- {read_dbg_bxvr9, write_dbg_bxvr9,},
- },
+ {read_dbg_bvr9, write_dbg_bvr9,},
+ {read_dbg_bcr9, write_dbg_bcr9,},
+ {read_dbg_wvr9, write_dbg_wvr9,},
+ {read_dbg_wcr9, write_dbg_wcr9,},
+ {read_dbg_bxvr9, write_dbg_bxvr9,},
+ },
{
- {read_dbg_bvr10, write_dbg_bvr10,},
- {read_dbg_bcr10, write_dbg_bcr10,},
- {read_dbg_wvr10, write_dbg_wvr10,},
- {read_dbg_wcr10, write_dbg_wcr10,},
- {read_dbg_bxvr10, write_dbg_bxvr10,},
- },
+ {read_dbg_bvr10, write_dbg_bvr10,},
+ {read_dbg_bcr10, write_dbg_bcr10,},
+ {read_dbg_wvr10, write_dbg_wvr10,},
+ {read_dbg_wcr10, write_dbg_wcr10,},
+ {read_dbg_bxvr10, write_dbg_bxvr10,},
+ },
{
- {read_dbg_bvr11, write_dbg_bvr11,},
- {read_dbg_bcr11, write_dbg_bcr11,},
- {read_dbg_wvr11, write_dbg_wvr11,},
- {read_dbg_wcr11, write_dbg_wcr11,},
- {read_dbg_bxvr11, write_dbg_bxvr11,},
- },
+ {read_dbg_bvr11, write_dbg_bvr11,},
+ {read_dbg_bcr11, write_dbg_bcr11,},
+ {read_dbg_wvr11, write_dbg_wvr11,},
+ {read_dbg_wcr11, write_dbg_wcr11,},
+ {read_dbg_bxvr11, write_dbg_bxvr11,},
+ },
{
- {read_dbg_bvr12, write_dbg_bvr12,},
- {read_dbg_bcr12, write_dbg_bcr12,},
- {read_dbg_wvr12, write_dbg_wvr12,},
- {read_dbg_wcr12, write_dbg_wcr12,},
- {read_dbg_bxvr12, write_dbg_bxvr12,},
- },
+ {read_dbg_bvr12, write_dbg_bvr12,},
+ {read_dbg_bcr12, write_dbg_bcr12,},
+ {read_dbg_wvr12, write_dbg_wvr12,},
+ {read_dbg_wcr12, write_dbg_wcr12,},
+ {read_dbg_bxvr12, write_dbg_bxvr12,},
+ },
{
- {read_dbg_bvr13, write_dbg_bvr13,},
- {read_dbg_bcr13, write_dbg_bcr13,},
- {read_dbg_wvr13, write_dbg_wvr13,},
- {read_dbg_wcr13, write_dbg_wcr13,},
- {read_dbg_bxvr13, write_dbg_bxvr13,},
- },
+ {read_dbg_bvr13, write_dbg_bvr13,},
+ {read_dbg_bcr13, write_dbg_bcr13,},
+ {read_dbg_wvr13, write_dbg_wvr13,},
+ {read_dbg_wcr13, write_dbg_wcr13,},
+ {read_dbg_bxvr13, write_dbg_bxvr13,},
+ },
{
- {read_dbg_bvr14, write_dbg_bvr14,},
- {read_dbg_bcr14, write_dbg_bcr14,},
- {read_dbg_wvr14, write_dbg_wvr14,},
- {read_dbg_wcr14, write_dbg_wcr14,},
- {read_dbg_bxvr14, write_dbg_bxvr14,},
- },
+ {read_dbg_bvr14, write_dbg_bvr14,},
+ {read_dbg_bcr14, write_dbg_bcr14,},
+ {read_dbg_wvr14, write_dbg_wvr14,},
+ {read_dbg_wcr14, write_dbg_wcr14,},
+ {read_dbg_bxvr14, write_dbg_bxvr14,},
+ },
{
- {read_dbg_bvr15, write_dbg_bvr15,},
- {read_dbg_bcr15, write_dbg_bcr15,},
- {read_dbg_wvr15, write_dbg_wvr15,},
- {read_dbg_wcr15, write_dbg_wcr15,},
- {read_dbg_bxvr15, write_dbg_bxvr15,},
- },
+ {read_dbg_bvr15, write_dbg_bvr15,},
+ {read_dbg_bcr15, write_dbg_bcr15,},
+ {read_dbg_wvr15, write_dbg_wvr15,},
+ {read_dbg_wcr15, write_dbg_wcr15,},
+ {read_dbg_bxvr15, write_dbg_bxvr15,},
+ },
};
static void restore_bp_reg(debug_context_t *dbg, unsigned index, unsigned type)
@@ -323,7 +323,7 @@ static void sr_bp_context(debug_context_t *dbg, unsigned bp_type, unsigned op)
return;
}
-static void save_v71_debug_cp14(unsigned * context)
+static void save_v71_debug_cp14(unsigned *context)
{
debug_context_t *dbg = (void *) context;
unsigned virtext_present;
@@ -341,11 +341,11 @@ static void save_v71_debug_cp14(unsigned * context)
*/
write_dbg_oslar(OSLAR_LOCKED);
- dbg->dtrrx_e = read_dbg_dtrrxext();
- dbg->dtrtx_e = read_dbg_dtrtxext();
- dbg->dscr_e = read_dbg_dscrext();
- dbg->wfar = read_dbg_wfar();
- dbg->vcr = read_dbg_vcr();
+ dbg->dtrrx_e = read_dbg_dtrrxext();
+ dbg->dtrtx_e = read_dbg_dtrtxext();
+ dbg->dscr_e = read_dbg_dscrext();
+ dbg->wfar = read_dbg_wfar();
+ dbg->vcr = read_dbg_vcr();
dbg->claimclr = read_dbg_claimclr();
if (virtext_present)
@@ -361,7 +361,7 @@ static void save_v71_debug_cp14(unsigned * context)
return;
}
-static void restore_v71_debug_cp14(unsigned * context)
+static void restore_v71_debug_cp14(unsigned *context)
{
debug_context_t *dbg = (void *) context;
unsigned virtext_present;
@@ -426,7 +426,7 @@ debug_registers_t *read_debug_address(void)
* - OSDLR is NOT locked, or the debugger would not work properly
*/
-static void save_v7_debug_mmapped(unsigned * context)
+static void save_v7_debug_mmapped(unsigned *context)
{
debug_registers_t *dbg = (void *)read_debug_address();
debug_context_t *ctx = (void *)context;
@@ -479,7 +479,7 @@ static void save_v7_debug_mmapped(unsigned * context)
}
}
-static void restore_v7_debug_mmapped(unsigned * context)
+static void restore_v7_debug_mmapped(unsigned *context)
{
debug_registers_t *dbg = (void *)read_debug_address();
debug_context_t *ctx = (void *)context;
@@ -545,18 +545,18 @@ static void restore_v7_debug_mmapped(unsigned * context)
dbg->lar = LAR_LOCKED;
}
-void save_v7_debug(unsigned * context)
+void save_v7_debug(unsigned *context)
{
unsigned v71 = 0, didr = read_dbg_didr();
- v71 =(((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) ==
- DIDR_VERSION_7_1);
+ v71 = (((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) ==
+ DIDR_VERSION_7_1);
/*
* TODO: Code for saving the v7.0 Debug context through the
* cp14 interface has not been implemented as yet.
*/
- if(v71)
+ if (v71)
save_v71_debug_cp14(context);
else
save_v7_debug_mmapped(context);
@@ -564,18 +564,18 @@ void save_v7_debug(unsigned * context)
return;
}
-void restore_v7_debug(unsigned * context)
+void restore_v7_debug(unsigned *context)
{
unsigned v71 = 0, didr = read_dbg_didr();
- v71 =(((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) ==
- DIDR_VERSION_7_1);
+ v71 = (((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) ==
+ DIDR_VERSION_7_1);
/*
* TODO: Code for restoring the v7.0 Debug context through the
* cp14 interface has not been implemented as yet.
*/
- if(v71)
+ if (v71)
restore_v71_debug_cp14(context);
else
restore_v7_debug_mmapped(context);
diff --git a/big-little/common/pagetable_setup.c b/big-little/common/pagetable_setup.c
index 431e047..e394b15 100644
--- a/big-little/common/pagetable_setup.c
+++ b/big-little/common/pagetable_setup.c
@@ -201,8 +201,7 @@ static void CreateL3PageTable(four_kb_pt_desc * l3_mapping, unsigned level,
/*
* Replace the existing descriptor with new mapping and attributes
*/
- l3_pt_addr[pa_4k_index] =
- l3_mapping->va | l3_mapping->attrs;
+ l3_pt_addr[pa_4k_index] = l3_mapping->va | l3_mapping->attrs;
return;
}
@@ -218,8 +217,7 @@ static void Add4KMapping(four_kb_pt_desc * l3_mapping)
/*
* Replace the existing descriptor with new mapping and attributes
*/
- l3_pt_addr[pa_4k_index] =
- l3_mapping->va | l3_mapping->attrs;
+ l3_pt_addr[pa_4k_index] = l3_mapping->va | l3_mapping->attrs;
return;
}
@@ -283,7 +281,7 @@ void CreateHypModePageTables(void)
l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
l3_desc.attrs = ACCESS_FLAG | HMAIR0_DEVICE_MEM_ATTR_IDX |
- SHAREABILITY(0x3) | AP(KERN_RW) | VALID_MAPPING;
+ SHAREABILITY(0x3) | AP(KERN_RW) | VALID_MAPPING;
l3_desc.pt_addr = hyp_l3_so_pt;
CreateL3PageTable(&l3_desc, LEVEL1,
(unsigned long long *)hyp_l1_pagetable);
@@ -366,7 +364,7 @@ void Create2ndStagePageTables(void)
l3_desc.va = VGIC_VM_PHY_BASE;
l3_desc.pa = GIC_IC_PHY_BASE;
l3_desc.attrs = ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) |
- MEM_ATTR(0x1) | VALID_MAPPING;
+ MEM_ATTR(0x1) | VALID_MAPPING;
l3_desc.pt_addr = stage2_l3_cpuif_pt;
CreateL3PageTable(&l3_desc, LEVEL2,
(unsigned long long *)stage2_l2_pagetable);
@@ -375,7 +373,7 @@ void Create2ndStagePageTables(void)
l3_desc.va = VGIC_VM_PHY_BASE + 0x1000;
l3_desc.pa = GIC_IC_PHY_BASE + 0x1000;
l3_desc.attrs = ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) |
- MEM_ATTR(0x1) | VALID_MAPPING;
+ MEM_ATTR(0x1) | VALID_MAPPING;
l3_desc.pt_addr = stage2_l3_cpuif_pt;
Add4KMapping(&l3_desc);
@@ -383,7 +381,7 @@ void Create2ndStagePageTables(void)
l3_desc.va = GIC_ID_PHY_BASE;
l3_desc.pa = GIC_ID_PHY_BASE;
l3_desc.attrs = ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) |
- MEM_ATTR(0x1);
+ MEM_ATTR(0x1);
l3_desc.pt_addr = stage2_l3_cpuif_pt;
Add4KMapping(&l3_desc);
@@ -395,7 +393,7 @@ void Create2ndStagePageTables(void)
l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
l3_desc.attrs = ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) |
- MEM_ATTR(0x1) | VALID_MAPPING;
+ MEM_ATTR(0x1) | VALID_MAPPING;
l3_desc.pt_addr = stage2_l3_so_pt;
CreateL3PageTable(&l3_desc, LEVEL2,
(unsigned long long *)stage2_l2_pagetable);
@@ -404,10 +402,10 @@ void Create2ndStagePageTables(void)
l3_desc.va = KFSCB_BASE;
l3_desc.pa = KFSCB_BASE;
l3_desc.attrs = ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) |
- MEM_ATTR(0x1);
+ MEM_ATTR(0x1);
l3_desc.pt_addr = stage2_l3_kfscb_pt;
CreateL3PageTable(&l3_desc, LEVEL2,
- (unsigned long long *)stage2_l2_pagetable);
+ (unsigned long long *)stage2_l2_pagetable);
return;
}
@@ -484,7 +482,9 @@ void SetupVirtExtPageTables(void)
Create2ndStagePageTables();
/* Send the event to all the cpus on this cluster */
- num_cpus = CLUSTER_CPU_COUNT(cluster_id) + CLUSTER_CPU_COUNT(!cluster_id);
+ num_cpus =
+ CLUSTER_CPU_COUNT(cluster_id) +
+ CLUSTER_CPU_COUNT(!cluster_id);
cpu_mask = (1 << num_cpus) - 1;
set_events(VIRT_PGT_DONE, cpu_mask);
}
diff --git a/big-little/common/vgiclib.c b/big-little/common/vgiclib.c
index d048e91..9acee57 100644
--- a/big-little/common/vgiclib.c
+++ b/big-little/common/vgiclib.c
@@ -39,7 +39,7 @@ static struct overflowint theoverflowints[NUM_CPUS][MAXOVERFLOWINTS];
static struct gic_cpuif cpuifs[NUM_CPUS];
static unsigned hv_lr_count[NUM_CPUS] = { 0 };
-static mig_irq_info migrated_irqs[NUM_CPUS][MAX_MIG_IRQS] = {0};
+static mig_irq_info migrated_irqs[NUM_CPUS][MAX_MIG_IRQS] = { 0 };
static void free_overflowint(struct overflowint *p, unsigned cpuid)
{
@@ -87,7 +87,6 @@ static unsigned get_elrsr_active_bits(unsigned index, unsigned cpuid,
return elrsr;
}
-
/*
* For a given interrupt and cpu id, this function will
* check whether its virq is not inactive and return the
@@ -98,16 +97,20 @@ static unsigned dequeue_virq(unsigned irq, unsigned cpu_id)
unsigned list_reg = 0, max_index = elrsr_max_index(cpu_id), ctr = 0;
unsigned cur_elrsr = 0, i = 0;
struct gic_cpuif *cpuif = &cpuifs[cpu_id];
- struct overflowint *ovflow = cpuif->overflow, *ovflowp = cpuif->overflow;
+ struct overflowint *ovflow = cpuif->overflow, *ovflowp =
+ cpuif->overflow;
/* First check the hw list registers */
for (ctr = 0; ctr <= max_index; ctr++) {
cur_elrsr = get_elrsr_active_bits(ctr, cpu_id, max_index);
- for (i = bitindex(cur_elrsr); ((int)i) >= 0; i = bitindex(cur_elrsr)) {
+ for (i = bitindex(cur_elrsr); ((int)i) >= 0;
+ i = bitindex(cur_elrsr)) {
unsigned int_id = 0;
- list_reg = read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2));
+ list_reg =
+ read32(VGIC_HV_PHY_BASE + GICH_LR_BASE +
+ ((1 << 7) * ctr) + (i << 2));
int_id = (list_reg >> 10) & 0x3ff;
/* Clear the current bit */
@@ -120,7 +123,9 @@ static unsigned dequeue_virq(unsigned irq, unsigned cpu_id)
/*
* Invalidate the list register entry if the ids match and return
*/
- write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2), list_reg & ~(0x3 << 28));
+ write32(VGIC_HV_PHY_BASE + GICH_LR_BASE +
+ ((1 << 7) * ctr) + (i << 2),
+ list_reg & ~(0x3 << 28));
return list_reg;
}
@@ -129,7 +134,7 @@ static unsigned dequeue_virq(unsigned irq, unsigned cpu_id)
/* Check the sw linked list for the presence of this interrupt */
while (ovflow) {
- unsigned int_id = (ovflow->value >> 10 )& 0x3ff;
+ unsigned int_id = (ovflow->value >> 10) & 0x3ff;
unsigned type = ovflow->value & HW_IRQ;
if ((type == HW_IRQ) && (int_id == irq)) {
@@ -161,7 +166,8 @@ static unsigned dequeue_virq(unsigned irq, unsigned cpu_id)
* interrupt and return a cpu mask for asking other cpus to
* complete the migration.
*/
-static unsigned set_mig_irq_info(unsigned orig, unsigned curr, unsigned icdiptr_offset)
+static unsigned set_mig_irq_info(unsigned orig, unsigned curr,
+ unsigned icdiptr_offset)
{
unsigned ctr, diff = orig ^ curr, cpu_mask = 0;
unsigned desc = 0, cpu_id = read_cpuid();
@@ -169,12 +175,16 @@ static unsigned set_mig_irq_info(unsigned orig, unsigned curr, unsigned icdiptr_
for (ctr = 0; ctr < MAX_MIG_IRQS; ctr++) {
if ((diff >> (ctr << 3)) & 0xff) {
migrated_irqs[cpu_id][ctr].id = icdiptr_offset + ctr;
- migrated_irqs[cpu_id][ctr].src_cpuif = bitindex((orig >> (ctr << 3)) & 0xff);
- migrated_irqs[cpu_id][ctr].dest_cpuif = bitindex((curr >> (ctr << 3)) & 0xff);
- desc = dequeue_virq(migrated_irqs[cpu_id][ctr].id, cpu_id);
+ migrated_irqs[cpu_id][ctr].src_cpuif =
+ bitindex((orig >> (ctr << 3)) & 0xff);
+ migrated_irqs[cpu_id][ctr].dest_cpuif =
+ bitindex((curr >> (ctr << 3)) & 0xff);
+ desc =
+ dequeue_virq(migrated_irqs[cpu_id][ctr].id, cpu_id);
if (desc) {
migrated_irqs[cpu_id][ctr].desc = desc;
- cpu_mask |= 1 << migrated_irqs[cpu_id][ctr].dest_cpuif;
+ cpu_mask |=
+ 1 << migrated_irqs[cpu_id][ctr].dest_cpuif;
}
}
}
@@ -187,7 +197,8 @@ static unsigned set_mig_irq_info(unsigned orig, unsigned curr, unsigned icdiptr_
* that have just been migrated. Save this information and ask the target cpu to
* enqueue them.
*/
-unsigned start_virq_migration(unsigned orig, unsigned curr, unsigned icdiptr_offset)
+unsigned start_virq_migration(unsigned orig, unsigned curr,
+ unsigned icdiptr_offset)
{
unsigned virq_mig_mask = 0;
@@ -221,9 +232,11 @@ void complete_virq_migration(unsigned src_cpuid)
* Compare the cpu id instead of the cpu interface id in case
* a switch took place before the virq migration ipi was recieved.
*/
- dest_cpuid = get_cpuinfo(migrated_irqs[src_cpuid][ctr].dest_cpuif) & 0xf;
+ dest_cpuid =
+ get_cpuinfo(migrated_irqs[src_cpuid][ctr].dest_cpuif) & 0xf;
if (migrated_irqs[src_cpuid][ctr].desc && dest_cpuid == cpu_id) {
- enqueue_interrupt(migrated_irqs[src_cpuid][ctr].desc, cpu_id);
+ enqueue_interrupt(migrated_irqs[src_cpuid][ctr].desc,
+ cpu_id);
migrated_irqs[src_cpuid][ctr].desc = 0;
}
}
@@ -232,7 +245,6 @@ void complete_virq_migration(unsigned src_cpuid)
return;
}
-
void dump_vgic_state()
{
unsigned int i;
diff --git a/big-little/include/events.h b/big-little/include/events.h
index b31df03..4adc856 100644
--- a/big-little/include/events.h
+++ b/big-little/include/events.h
@@ -68,7 +68,6 @@
#define FLUSH_L2 1
#define SETUP_RST 2
-
extern void set_event(unsigned, unsigned);
extern void set_events(unsigned, unsigned);
extern unsigned get_event(unsigned, unsigned);
diff --git a/big-little/include/hyp_vmmap.h b/big-little/include/hyp_vmmap.h
index 348a6e5..aeeceae 100644
--- a/big-little/include/hyp_vmmap.h
+++ b/big-little/include/hyp_vmmap.h
@@ -30,14 +30,14 @@
* d e f i n e s
* --------------------------------------------------------------------------*/
-#define GIC_ID_PHY_BASE 0x2C001000 /* Physical Distributor */
-#define GIC_IC_PHY_BASE 0x2C002000 /* Physical CPU interface */
+#define GIC_ID_PHY_BASE 0x2C001000 /* Physical Distributor */
+#define GIC_IC_PHY_BASE 0x2C002000 /* Physical CPU interface */
-#define VGIC_HV_PHY_BASE 0x2C004000 /* Hypervisor's VIew */
-#define VGIC_HV_ALIAS_BASE 0x2C005000 /* Hypervisor's VIew of other interfaces */
-#define VGIC_VM_PHY_BASE 0x2C006000 /* Virtual Machine view */
+#define VGIC_HV_PHY_BASE 0x2C004000 /* Hypervisor's VIew */
+#define VGIC_HV_ALIAS_BASE 0x2C005000 /* Hypervisor's VIew of other interfaces */
+#define VGIC_VM_PHY_BASE 0x2C006000 /* Virtual Machine view */
#define UART0_PHY_BASE 0x1C090000
#define UART1_PHY_BASE 0x1C0A0000
-#endif /* __HYP_VMMAP_H__ */
+#endif /* __HYP_VMMAP_H__ */
diff --git a/big-little/lib/ipi.c b/big-little/lib/ipi.c
index 936cda8..5b49d20 100644
--- a/big-little/lib/ipi.c
+++ b/big-little/lib/ipi.c
@@ -49,25 +49,25 @@ static unsigned hyp_ipi_check[16];
*/
static unsigned get_free_ipi(void)
{
- unsigned shift, cpu_if_bit, cpu_id = read_cpuid(), cluster_id =
- read_clusterid();
- int ctr;
+ unsigned shift, cpu_if_bit, cpu_id = read_cpuid(), cluster_id =
+ read_clusterid();
+ int ctr;
- cpu_if_bit = 1 << get_cpuif(cluster_id, cpu_id);
+ cpu_if_bit = 1 << get_cpuif(cluster_id, cpu_id);
- /* Find the register offset */
- for (ctr = 3; ctr >= 0; ctr--)
- /* Check whether IPI<shift> has already been generated by us */
- for (shift = 0; shift < 4; shift++) {
- if (read32
- (GIC_ID_PHY_BASE + GICD_SPENDSGIR +
- (ctr << 2)) & (cpu_if_bit << (shift << 3)))
- continue;
+ /* Find the register offset */
+ for (ctr = 3; ctr >= 0; ctr--)
+ /* Check whether IPI<shift> has already been generated by us */
+ for (shift = 0; shift < 4; shift++) {
+ if (read32
+ (GIC_ID_PHY_BASE + GICD_SPENDSGIR +
+ (ctr << 2)) & (cpu_if_bit << (shift << 3)))
+ continue;
- return (ctr << 2) + shift;
- }
+ return (ctr << 2) + shift;
+ }
- return MAX_IPI;
+ return MAX_IPI;
}
/*
@@ -76,38 +76,38 @@ static unsigned get_free_ipi(void)
*/
unsigned send_hyp_ipi(unsigned cpuif_mask, unsigned type)
{
- unsigned rc = TRUE;
- unsigned ipi_no = 0;
-
- /*
- * First choose a non-pending IPI to avoid a clash with the OS.
- */
- ipi_no = get_free_ipi();
- if (ipi_no == MAX_IPI) {
- rc = FALSE;
- return rc;
- }
-
- /*
- * For this IPI set the mask in our global variable. We do it, payload software
- * does not. But, first check whether any earlier IPIs have already been acked
- */
- while (TRUE) {
- spin_lock(&lock_ipi_check);
- if (hyp_ipi_check[ipi_no] & 0xff) {
- spin_unlock(&lock_ipi_check);
- } else {
- hyp_ipi_check[ipi_no] = (type << 8) | cpuif_mask;
- dsb();
- spin_unlock(&lock_ipi_check);
- break;
- }
- };
-
- /* Send the IPI to the cpu_mask */
- gic_send_ipi(cpuif_mask, ipi_no);
-
- return rc;
+ unsigned rc = TRUE;
+ unsigned ipi_no = 0;
+
+ /*
+ * First choose a non-pending IPI to avoid a clash with the OS.
+ */
+ ipi_no = get_free_ipi();
+ if (ipi_no == MAX_IPI) {
+ rc = FALSE;
+ return rc;
+ }
+
+ /*
+ * For this IPI set the mask in our global variable. We do it, payload software
+ * does not. But, first check whether any earlier IPIs have already been acked
+ */
+ while (TRUE) {
+ spin_lock(&lock_ipi_check);
+ if (hyp_ipi_check[ipi_no] & 0xff) {
+ spin_unlock(&lock_ipi_check);
+ } else {
+ hyp_ipi_check[ipi_no] = (type << 8) | cpuif_mask;
+ dsb();
+ spin_unlock(&lock_ipi_check);
+ break;
+ }
+ };
+
+ /* Send the IPI to the cpu_mask */
+ gic_send_ipi(cpuif_mask, ipi_no);
+
+ return rc;
}
/*
@@ -116,20 +116,20 @@ unsigned send_hyp_ipi(unsigned cpuif_mask, unsigned type)
*/
unsigned get_hyp_ipi(unsigned cpu_if, unsigned ipi_no)
{
- unsigned type = 0;
-
- spin_lock(&lock_ipi_check);
- /*
- * If this IPI was sent by the big-little code then our cpu_if bit must have
- * been set in the ipi_check flag. Reset the bit and indicate that its an
- * internal IPI.
- */
- if (hyp_ipi_check[ipi_no] & (1 << cpu_if)) {
- type = hyp_ipi_check[ipi_no] >> 8;
- hyp_ipi_check[ipi_no] &= ~(1 << cpu_if);
- dsb();
- }
- spin_unlock(&lock_ipi_check);
-
- return type;
+ unsigned type = 0;
+
+ spin_lock(&lock_ipi_check);
+ /*
+ * If this IPI was sent by the big-little code then our cpu_if bit must have
+ * been set in the ipi_check flag. Reset the bit and indicate that its an
+ * internal IPI.
+ */
+ if (hyp_ipi_check[ipi_no] & (1 << cpu_if)) {
+ type = hyp_ipi_check[ipi_no] >> 8;
+ hyp_ipi_check[ipi_no] &= ~(1 << cpu_if);
+ dsb();
+ }
+ spin_unlock(&lock_ipi_check);
+
+ return type;
}
diff --git a/big-little/secure_world/secure_resets.c b/big-little/secure_world/secure_resets.c
index 079c114..a734838 100644
--- a/big-little/secure_world/secure_resets.c
+++ b/big-little/secure_world/secure_resets.c
@@ -72,7 +72,8 @@ static unsigned lock_ib_kfscb;
* KFSCB. It will always be used when the MMU is off.
* Each cluster will anyways use it sequentially
*/
-static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = {0};
+static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = {
+0};
/*
* Small stacks for after we have turned our caches off.
@@ -80,14 +81,12 @@ static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = {0};
static unsigned long long powerdown_stacks[NUM_CPUS][32]
__attribute__ ((section("BL_SEC_DV_PAGE")));
-
unsigned long long *get_powerdown_stack(unsigned cpu_id)
{
return &powerdown_stacks[cpu_id + 1][0];
}
-static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id)) (void)
-{
+static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id)) (void) {
#if FM_BETA
return (void (*)(void))ve_reset_type[cpu_id];
#else
@@ -102,17 +101,19 @@ static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id)) (void)
* new one. Instead of dealing with a function pointer, they manipulate a
* variable.
*/
-void set_reset_handler(unsigned cluster_id, unsigned cpu_id, void (*handler)(void))
+void set_reset_handler(unsigned cluster_id, unsigned cpu_id,
+ void (*handler) (void))
{
- void (*prev_reset_handler)(void) = get_reset_handler(cluster_id, cpu_id);
+ void (*prev_reset_handler) (void) =
+ get_reset_handler(cluster_id, cpu_id);
if (prev_reset_handler != handler) {
#if FM_BETA
ve_reset_type[cpu_id]++;
cln_dcache_mva_poc(&ve_reset_type[cpu_id]);
#else
- write32(KFSCB_BASE + RST_HANDLER0 + ((cpu_id + (cluster_id << 2)) << 3),
- (unsigned) handler);
+ write32(KFSCB_BASE + RST_HANDLER0 +
+ ((cpu_id + (cluster_id << 2)) << 3), (unsigned)handler);
dsb();
#endif
}
@@ -296,7 +297,8 @@ void do_power_op(unsigned cpu_mask, unsigned op_type)
switch (op_type) {
case (OP_TYPE_HP):
get_bakery_spinlock(cpu_id, &lock_ob_kfscb);
- write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), cpu_mask);
+ write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2),
+ cpu_mask);
release_bakery_spinlock(cpu_id, &lock_ob_kfscb);
break;
case (OP_TYPE_SWITCH):
diff --git a/big-little/switcher/context/ns_context.c b/big-little/switcher/context/ns_context.c
index ecaadda..65bee37 100644
--- a/big-little/switcher/context/ns_context.c
+++ b/big-little/switcher/context/ns_context.c
@@ -69,7 +69,7 @@ unsigned find_restore_op_type(void)
clus_rst_status = rst_status & (1 << 8);
rst_status &= (mask | (mask << 4));
- return rst_status | clus_rst_status ? OP_TYPE_HP: OP_TYPE_SWITCH;
+ return rst_status | clus_rst_status ? OP_TYPE_HP : OP_TYPE_SWITCH;
}
void stop_generic_timer(generic_timer_context * ctr_ctx)
@@ -112,15 +112,16 @@ void save_context(unsigned first_cpu, unsigned op_type)
switch (op_type) {
case OP_TYPE_SWITCH:
- write_trace(&lock_tube0, NS_TUBE0, "Switch Start", read_cntpct(),
- 0x0, 0x0);
+ write_trace(&lock_tube0, NS_TUBE0, "Switch Start",
+ read_cntpct(), 0x0, 0x0);
break;
case OP_TYPE_HP:
- write_trace(&lock_tube0, NS_TUBE0, "Hotplug Start", read_cntpct(),
- 0x0, 0x0);
+ write_trace(&lock_tube0, NS_TUBE0, "Hotplug Start",
+ read_cntpct(), 0x0, 0x0);
break;
default:
- printf("%s: Unsupported operation : 0x%x \n", __FUNCTION__, op_type);
+ printf("%s: Unsupported operation : 0x%x \n", __FUNCTION__,
+ op_type);
panic();
}
@@ -164,7 +165,8 @@ void save_context(unsigned first_cpu, unsigned op_type)
save_vfp(vfp_context);
/* Save vGIC virtual cpu interface (cpu view) context */
- save_gic_interface(gic_pvt_context->gic_cpu_if_regs, VGIC_VM_PHY_BASE);
+ save_gic_interface(gic_pvt_context->gic_cpu_if_regs,
+ VGIC_VM_PHY_BASE);
/*
* TODO:
@@ -174,10 +176,11 @@ void save_context(unsigned first_cpu, unsigned op_type)
* who can change it. Still have to consider cases e.g
* SGIs/Localtimers becoming pending.
*/
- save_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
+ save_gic_distributor_private(gic_pvt_context->
+ gic_dist_if_pvt_regs,
GIC_ID_PHY_BASE);
- save_v7_debug((unsigned *) debug_context);
+ save_v7_debug((unsigned *)debug_context);
}
/*
@@ -255,8 +258,8 @@ void restore_context(unsigned first_cpu, unsigned op_type)
* restore & cache invalidation has been done. Now wait for the
* outbound to provide the context.
*/
- write_trace(&lock_tube1, NS_TUBE1, "Wait for context", read_cntpct(),
- 0x0, 0x0);
+ write_trace(&lock_tube1, NS_TUBE1, "Wait for context",
+ read_cntpct(), 0x0, 0x0);
wait_for_event(OB_CONTEXT_DONE, cpu_id);
reset_event(OB_CONTEXT_DONE, cpu_id);
}
@@ -270,9 +273,11 @@ void restore_context(unsigned first_cpu, unsigned op_type)
* care of their own.
*/
if (cpu_id == first_cpu)
- restore_gic_distributor_shared(gbl_context->gic_dist_if_regs,
+ restore_gic_distributor_shared(gbl_context->
+ gic_dist_if_regs,
GIC_ID_PHY_BASE);
- restore_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
+ restore_gic_distributor_private(gic_pvt_context->
+ gic_dist_if_pvt_regs,
GIC_ID_PHY_BASE);
/* Restore NS VGIC context */
@@ -300,7 +305,7 @@ void restore_context(unsigned first_cpu, unsigned op_type)
restore_pmu_context(cluster_id, cpu_id);
restore_generic_timer((unsigned *)cp15_timer_ctx, 0x1);
- restore_v7_debug((unsigned *) debug_context);
+ restore_v7_debug((unsigned *)debug_context);
}
vgic_loadstate(cpu_id);
@@ -347,8 +352,8 @@ void restore_context(unsigned first_cpu, unsigned op_type)
dest->usr_lr = src->usr_lr;
dest->elr_hyp = src->elr_hyp;
dest->spsr = src->spsr;
- write_trace(&lock_tube1, NS_TUBE1, "Context Restore End", read_cntpct(),
- 0x0, 0x0);
+ write_trace(&lock_tube1, NS_TUBE1, "Context Restore End",
+ read_cntpct(), 0x0, 0x0);
set_event(IB_CONTEXT_DONE, cpu_id);
if (async_switchover && cpu_id == first_cpu)
enable_trigger(read_cntfrq());
@@ -366,13 +371,14 @@ void restore_context(unsigned first_cpu, unsigned op_type)
dest->spsr &= ~0x1f;
/* Re-entry into Linux should be with interrupts disabled and in SVC mode */
dest->spsr |= (0x3 << 6 | 0x13);
- write_trace(&lock_tube1, NS_TUBE1, "Context Restore End", read_cntpct(),
- 0x0, 0x0);
+ write_trace(&lock_tube1, NS_TUBE1, "Context Restore End",
+ read_cntpct(), 0x0, 0x0);
write_trace(&lock_tube0, NS_TUBE0, "Hotplug End", read_cntpct(),
0x0, 0x0);
break;
default:
- printf("%s: Unsupported operation : 0x%x \n", __FUNCTION__, op_type);
+ printf("%s: Unsupported operation : 0x%x \n", __FUNCTION__,
+ op_type);
panic();
}
diff --git a/big-little/switcher/trigger/async_switchover.c b/big-little/switcher/trigger/async_switchover.c
index e5931d7..b8585e7 100644
--- a/big-little/switcher/trigger/async_switchover.c
+++ b/big-little/switcher/trigger/async_switchover.c
@@ -47,18 +47,18 @@ unsigned switchable_cpus_mask = 0;
static void ack_trigger(void)
{
- unsigned ctl = 0;
-
- ctl = read_cnthp_ctl();
- if (ctl & TIMER_IRQ_STAT) {
- /* Disable timer and mask interrupt */
- write_cnthp_ctl(TIMER_MASK_IRQ);
- } else {
- printf("Spurious HYP timer irq \n");
- panic();
- }
-
- return;
+ unsigned ctl = 0;
+
+ ctl = read_cnthp_ctl();
+ if (ctl & TIMER_IRQ_STAT) {
+ /* Disable timer and mask interrupt */
+ write_cnthp_ctl(TIMER_MASK_IRQ);
+ } else {
+ printf("Spurious HYP timer irq \n");
+ panic();
+ }
+
+ return;
}
/*
@@ -91,7 +91,8 @@ void signal_switchover(void)
* will change it during a switch and always after the previous
* switch has completed.
*/
- switchable_cpus_mask = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
+ switchable_cpus_mask =
+ read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
switchable_cpus_mask = (switchable_cpus_mask >> 4) & 0xf;
switchable_cpus_mask ^= (1 << CLUSTER_CPU_COUNT(cluster_id)) - 1;
@@ -101,24 +102,24 @@ void signal_switchover(void)
*/
cpuif_mask = get_cpuif_mask(switchable_cpus_mask);
- /*
- * Send an ipi to all the online cpus in the cluster including ourselves
- * to start a switch to the inbound cluster.
- */
- send_hyp_ipi(cpuif_mask, IPI_CLUSTER_SWITCH);
+ /*
+ * Send an ipi to all the online cpus in the cluster including ourselves
+ * to start a switch to the inbound cluster.
+ */
+ send_hyp_ipi(cpuif_mask, IPI_CLUSTER_SWITCH);
return;
}
unsigned check_switchover_ipi(unsigned cpu_if, unsigned ipi_no)
{
- unsigned type = 0;
+ unsigned type = 0;
- type = get_hyp_ipi(cpu_if, ipi_no);
- if (type == IPI_CLUSTER_SWITCH)
- return TRUE;
- else
- return FALSE;
+ type = get_hyp_ipi(cpu_if, ipi_no);
+ if (type == IPI_CLUSTER_SWITCH)
+ return TRUE;
+ else
+ return FALSE;
}
@@ -160,7 +161,7 @@ unsigned check_trigger(unsigned int_id, unsigned int_ack)
printf("%d", cluster_id);
/* Do not switch till previous one has not completed */
- while (FALSE == cluster_reset_status(!cluster_id));
+ while (FALSE == cluster_reset_status(!cluster_id)) ;
/*
* Send an IPI to all the cores in this cluster to start
@@ -190,75 +191,75 @@ unsigned check_trigger(unsigned int_id, unsigned int_ack)
void keep_trigger_alive(void)
{
- /*
- * The OS might have disabled the HYP timer interrupt
- * while setting up its view of the vGIC. So enable
- * it if disabled upon receiving any other interrupt.
- * Better than virtualising vGIC accesses on the TARGET
- * CPU.
- */
- if (hyp_timer_trigger)
- if (!
- (read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
- (1 << HYP_TIMER_IRQ)))
- gic_enable_int(HYP_TIMER_IRQ);
-
- return;
+ /*
+ * The OS might have disabled the HYP timer interrupt
+ * while setting up its view of the vGIC. So enable
+ * it if disabled upon receiving any other interrupt.
+ * Better than virtualising vGIC accesses on the TARGET
+ * CPU.
+ */
+ if (hyp_timer_trigger)
+ if (!
+ (read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
+ (1 << HYP_TIMER_IRQ)))
+ gic_enable_int(HYP_TIMER_IRQ);
+
+ return;
}
void enable_trigger(unsigned tval)
{
- unsigned ctl = TIMER_ENABLE;
- unsigned platform = read32((KFSCB_BASE + KFS_ID) >> 20) & 0xf;
-
- /*
- * No need to lock this as its accessed by only one cpu
- * per cluster and that too one at a time.
- */
- static unsigned int rand_no = 0xdeadbeef;
- static struct _rand_state buffer;
-
- /*
- * Nothing needs to be done if physical local timers
- * are being used for doing a switchover.
- */
- if (hyp_timer_trigger == TRUE) {
- if (rand_async_switches) {
- _srand_r(&buffer, rand_no);
- rand_no = (unsigned)_rand_r(&buffer);
- }
-
- /* Enable timer and unmask interrupt */
- write_cnthp_ctl(ctl);
-
- if (rand_async_switches) {
- unsigned interval;
-
- /*
- * TODO: Assuming that the tval is always 12000000
- * Increment or decrement the timer value randomly
- * but never by more than a factor of 10
- */
- if (rand_no % 2)
- interval = tval * (rand_no % 10);
- else
- interval = tval / (rand_no % 10);
-
- write_cnthp_tval(interval);
-
- } else {
- /*
- * Program the timer to fire every 12000000 instructions
- * on the FastModel while 1500000 cycles on the Emulator
- */
- if (platform == 0x1)
- write_cnthp_tval(tval);
- else
- write_cnthp_tval(tval >> 3);
- }
-
- gic_enable_int(HYP_TIMER_IRQ);
- }
-
- return;
+ unsigned ctl = TIMER_ENABLE;
+ unsigned platform = read32((KFSCB_BASE + KFS_ID) >> 20) & 0xf;
+
+ /*
+ * No need to lock this as its accessed by only one cpu
+ * per cluster and that too one at a time.
+ */
+ static unsigned int rand_no = 0xdeadbeef;
+ static struct _rand_state buffer;
+
+ /*
+ * Nothing needs to be done if physical local timers
+ * are being used for doing a switchover.
+ */
+ if (hyp_timer_trigger == TRUE) {
+ if (rand_async_switches) {
+ _srand_r(&buffer, rand_no);
+ rand_no = (unsigned)_rand_r(&buffer);
+ }
+
+ /* Enable timer and unmask interrupt */
+ write_cnthp_ctl(ctl);
+
+ if (rand_async_switches) {
+ unsigned interval;
+
+ /*
+ * TODO: Assuming that the tval is always 12000000
+ * Increment or decrement the timer value randomly
+ * but never by more than a factor of 10
+ */
+ if (rand_no % 2)
+ interval = tval * (rand_no % 10);
+ else
+ interval = tval / (rand_no % 10);
+
+ write_cnthp_tval(interval);
+
+ } else {
+ /*
+ * Program the timer to fire every 12000000 instructions
+ * on the FastModel while 1500000 cycles on the Emulator
+ */
+ if (platform == 0x1)
+ write_cnthp_tval(tval);
+ else
+ write_cnthp_tval(tval >> 3);
+ }
+
+ gic_enable_int(HYP_TIMER_IRQ);
+ }
+
+ return;
}
diff --git a/big-little/virtualisor/cache_geom.c b/big-little/virtualisor/cache_geom.c
index 7bcb42b..1f24882 100644
--- a/big-little/virtualisor/cache_geom.c
+++ b/big-little/virtualisor/cache_geom.c
@@ -61,14 +61,12 @@ void find_cache_geometry(cache_geometry * cg_ptr)
write_csselr(ctr << 1);
isb();
cg_ptr->ccsidr[ctr][CIND_DATA] = read_ccsidr();
- }
- else if (cache_type == 0x04) {
+ } else if (cache_type == 0x04) {
/* unified cache */
write_csselr(ctr << 1);
isb();
cg_ptr->ccsidr[ctr][CIND_UNIF] = read_ccsidr();
- }
- else {
+ } else {
/*
* Stop scanning at the first invalid/unsupported cache
* level
@@ -233,8 +231,7 @@ unsigned map_cache_geometries(cache_geometry * hcg_ptr,
void handle_cm_op(unsigned reg,
void (*op_handler) (unsigned),
cache_geometry * hcg_ptr,
- cache_geometry * tcg_ptr,
- cache_diff * cd_ptr)
+ cache_geometry * tcg_ptr, cache_diff * cd_ptr)
{
unsigned clvl = 0, cpu_id = read_cpuid();
unsigned tc_assoc = 0, tc_numsets = 0, tc_linesz = 0;
diff --git a/big-little/virtualisor/cpus/a15/a15.c b/big-little/virtualisor/cpus/a15/a15.c
index 213ae67..1b244c4 100644
--- a/big-little/virtualisor/cpus/a15/a15.c
+++ b/big-little/virtualisor/cpus/a15/a15.c
@@ -54,7 +54,7 @@ unsigned a15_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
ind = get_cache_ind(csselr);
if (CRn == CRN_C0 && Op1 == 1 && CRm == 0 && Op2 == CCSIDR &&
- level == 0 && ind == CIND_INST) {
+ level == 0 && ind == CIND_INST) {
unsigned cpu_id = read_cpuid();
/*
@@ -64,9 +64,9 @@ unsigned a15_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
* [BC=x, TC=A7, HC=A15]
*/
- cache_geometry * cg_ptr = (IS_TGT_CLUSTER) ?
- &host_cache_geometry[cpu_id] :
- &target_cache_geometry[cpu_id];
+ cache_geometry *cg_ptr = (IS_TGT_CLUSTER) ?
+ &host_cache_geometry[cpu_id] :
+ &target_cache_geometry[cpu_id];
regs->r[Rt] = cg_ptr->ccsidr[level][ind];
}
@@ -126,13 +126,12 @@ unsigned a15_trap_setup(unsigned first_cpu, unsigned sibling_cpu)
* configuration [BC=x, TC=A7, HC=A15].
*/
- cache_geometry * cg_ptr = (IS_TGT_CLUSTER) ?
- &host_cache_geometry[cpu_id] :
- &target_cache_geometry[cpu_id];
-
+ cache_geometry *cg_ptr = (IS_TGT_CLUSTER) ?
+ &host_cache_geometry[cpu_id] :
+ &target_cache_geometry[cpu_id];
cg_ptr->ccsidr[0][CIND_INST] =
- CACHE_A7_L1_INST_CCSIDR;
+ CACHE_A7_L1_INST_CCSIDR;
}
/*
diff --git a/big-little/virtualisor/cpus/a7/a7.c b/big-little/virtualisor/cpus/a7/a7.c
index d39f857..4aff69d 100644
--- a/big-little/virtualisor/cpus/a7/a7.c
+++ b/big-little/virtualisor/cpus/a7/a7.c
@@ -54,7 +54,7 @@ unsigned a7_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
ind = get_cache_ind(csselr);
if (CRn == CRN_C0 && Op1 == 1 && CRm == 0 && Op2 == CCSIDR &&
- level == 0 && ind == CIND_INST)
+ level == 0 && ind == CIND_INST)
regs->r[Rt] = read_ccsidr();
}
diff --git a/big-little/virtualisor/include/cache_geom.h b/big-little/virtualisor/include/cache_geom.h
index f162337..23db57b 100644
--- a/big-little/virtualisor/include/cache_geom.h
+++ b/big-little/virtualisor/include/cache_geom.h
@@ -31,9 +31,9 @@
#define TCSZ_BIG 0x2
/* Cache Instruction not Data bit, CSSELR[0] */
-#define CIND_DATA 0x0 /* Data cache */
-#define CIND_UNIF CIND_DATA /* Unified cache */
-#define CIND_INST 0x1 /* Instruction cache */
+#define CIND_DATA 0x0 /* Data cache */
+#define CIND_UNIF CIND_DATA /* Unified cache */
+#define CIND_INST 0x1 /* Instruction cache */
/* A7 L1 instruction cache CCSIDR value */
#define CACHE_A7_L1_INST_CCSIDR 0x203FE009
@@ -101,11 +101,11 @@ typedef struct cache_stats {
} cache_stats;
extern unsigned map_cache_geometries(cache_geometry *, cache_geometry *,
- cache_diff *);
+ cache_diff *);
extern void find_cache_geometry(cache_geometry *);
extern void find_cache_diff(cache_geometry *, cache_geometry *, cache_diff *);
extern void handle_cm_op(unsigned, void (*)(unsigned), cache_geometry *,
- cache_geometry *, cache_diff *);
+ cache_geometry *, cache_diff *);
//extern void stat_init_host_cache_geometry(cache_geometry *);
#endif /* __CACHE_GEOM_H__ */
diff --git a/big-little/virtualisor/kfscb_trap_handler.c b/big-little/virtualisor/kfscb_trap_handler.c
index 7813cce..6c621e7 100644
--- a/big-little/virtualisor/kfscb_trap_handler.c
+++ b/big-little/virtualisor/kfscb_trap_handler.c
@@ -43,7 +43,7 @@ void handle_kfscb_abort(unsigned pa, unsigned *data, unsigned write)
switch (reg_offset) {
- /* Access to KFSCB registers */
+ /* Access to KFSCB registers */
case (RST_HOLD0):
if (write) {
/* Entry */
diff --git a/big-little/virtualisor/pmu_trap_handler.c b/big-little/virtualisor/pmu_trap_handler.c
index bf3f3b6..37eddf4 100644
--- a/big-little/virtualisor/pmu_trap_handler.c
+++ b/big-little/virtualisor/pmu_trap_handler.c
@@ -69,25 +69,25 @@ struct descriptor {
enum {
PMU_CLUSTER_A15 = 0x00,
- PMU_CLUSTER_A7 = 0x01,
+ PMU_CLUSTER_A7 = 0x01,
};
enum {
- PMU_CNT_CYCLE_COUNTER = 0x00,
- PMU_CNT_OVERFLOW_FLAG = 0x01,
- PMU_CNT_EVENT_COUNTER_0 = 0x02,
- PMU_CNT_EVENT_COUNTER_1 = 0x03,
- PMU_CNT_EVENT_COUNTER_2 = 0x04,
- PMU_CNT_EVENT_COUNTER_3 = 0x05,
- PMU_CNT_EVENT_COUNTER_4 = 0x06,
- PMU_CNT_EVENT_COUNTER_5 = 0x07,
+ PMU_CNT_CYCLE_COUNTER = 0x00,
+ PMU_CNT_OVERFLOW_FLAG = 0x01,
+ PMU_CNT_EVENT_COUNTER_0 = 0x02,
+ PMU_CNT_EVENT_COUNTER_1 = 0x03,
+ PMU_CNT_EVENT_COUNTER_2 = 0x04,
+ PMU_CNT_EVENT_COUNTER_3 = 0x05,
+ PMU_CNT_EVENT_COUNTER_4 = 0x06,
+ PMU_CNT_EVENT_COUNTER_5 = 0x07,
};
enum {
- PMU_REQ_DISABLE_COUNTER = 0x01,
- PMU_REQ_CONF_COUNTER = 0x02,
+ PMU_REQ_DISABLE_COUNTER = 0x01,
+ PMU_REQ_CONF_COUNTER = 0x02,
PMU_REQ_CONF_RESET_COUNTER = 0x03,
- PMU_REQ_READ_COUNTER = 0x04,
+ PMU_REQ_READ_COUNTER = 0x04,
PMU_REQ_READ_RESET_COUNTER = 0x05,
};
@@ -138,8 +138,7 @@ void set_pmu_state(unsigned new)
}
static void handle_desc(struct descriptor *desc,
- unsigned cluster_id,
- unsigned cpu_id)
+ unsigned cluster_id, unsigned cpu_id)
{
unsigned entry_cluster = desc->u.counter.cluster_id;
unsigned selected_counter = desc->u.counter.selected_counter;
@@ -171,10 +170,9 @@ static void handle_desc(struct descriptor *desc,
} else {
switch (selected_counter) {
case PMU_CNT_CYCLE_COUNTER:
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] &= 0x7FFFFFFF;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] &= 0x7FFFFFFF;
break;
case PMU_CNT_OVERFLOW_FLAG:
/* Can't disable overflow flags. */
@@ -184,11 +182,10 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_2:
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] &=
- ~(1 << selected_counter);
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] &=
+ ~(1 << selected_counter);
break;
default:
break;
@@ -223,14 +220,13 @@ static void handle_desc(struct descriptor *desc,
};
} else {
clusters_ctx[entry_cluster][cpu_id][PMCR_IDX] |=
- (pmu_counters << 11) | 1;
+ (pmu_counters << 11) | 1;
switch (selected_counter) {
case PMU_CNT_CYCLE_COUNTER:
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] |= 0x80000000;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] |= 0x80000000;
break;
case PMU_CNT_OVERFLOW_FLAG:
/* Can't configure overflow flags. */
@@ -240,17 +236,13 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_2:
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVTYPE0_IDX +
- (selected_counter * 2)] =
- event_type;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] |=
- (1 << selected_counter);
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVTYPE0_IDX +
+ (selected_counter * 2)] = event_type;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] |= (1 << selected_counter);
break;
default:
break;
@@ -287,19 +279,16 @@ static void handle_desc(struct descriptor *desc,
};
} else {
clusters_ctx[entry_cluster][cpu_id][PMCR_IDX] |=
- (pmu_counters << 11) | 1;
+ (pmu_counters << 11) | 1;
switch (selected_counter) {
case PMU_CNT_CYCLE_COUNTER:
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCCNTR_IDX] = reset_value;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] |=
- 0x80000000;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCCNTR_IDX] = reset_value;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] |= 0x80000000;
break;
case PMU_CNT_OVERFLOW_FLAG:
break;
@@ -308,21 +297,17 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_2:
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVTYPE0_IDX +
- (selected_counter * 2)] = event_type;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVCNT0_IDX +
- (selected_counter * 2)] = reset_value;
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCNTENSET_IDX] |=
- (1 << selected_counter);
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVTYPE0_IDX +
+ (selected_counter * 2)] = event_type;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVCNT0_IDX +
+ (selected_counter * 2)] = reset_value;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCNTENSET_IDX] |= (1 << selected_counter);
break;
default:
break;
@@ -344,8 +329,9 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
write_pmselr(selected_counter);
- desc->u.counter.event_type=read_pmxevtyper();
- desc->u.counter.counter_value=read_pmxevcntr();
+ desc->u.counter.event_type = read_pmxevtyper();
+ desc->u.counter.counter_value =
+ read_pmxevcntr();
break;
default:
break;
@@ -354,17 +340,15 @@ static void handle_desc(struct descriptor *desc,
switch (selected_counter) {
case PMU_CNT_CYCLE_COUNTER:
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCCNTR_IDX];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCCNTR_IDX];
break;
case PMU_CNT_OVERFLOW_FLAG:
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMOVSR_IDX];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMOVSR_IDX];
break;
case PMU_CNT_EVENT_COUNTER_0:
case PMU_CNT_EVENT_COUNTER_1:
@@ -372,17 +356,13 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
desc->u.counter.event_type =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVTYPE0_IDX +
- (selected_counter * 2)];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVTYPE0_IDX + (selected_counter * 2)];
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVCNT0_IDX +
- (selected_counter * 2)];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVCNT0_IDX + (selected_counter * 2)];
break;
default:
break;
@@ -406,8 +386,9 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
write_pmselr(selected_counter);
- desc->u.counter.event_type=read_pmxevtyper();
- desc->u.counter.counter_value=read_pmxevcntr();
+ desc->u.counter.event_type = read_pmxevtyper();
+ desc->u.counter.counter_value =
+ read_pmxevcntr();
write_pmxevcntr(reset_value);
break;
default:
@@ -417,24 +398,20 @@ static void handle_desc(struct descriptor *desc,
switch (selected_counter) {
case PMU_CNT_CYCLE_COUNTER:
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMCCNTR_IDX];
- clusters_ctx
- [cluster_id]
- [cpu_id]
- [PMCCNTR_IDX] = reset_value;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMCCNTR_IDX];
+ clusters_ctx[cluster_id]
+ [cpu_id]
+ [PMCCNTR_IDX] = reset_value;
case PMU_CNT_OVERFLOW_FLAG:
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMOVSR_IDX];
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMOVSR_IDX] = reset_value;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMOVSR_IDX];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMOVSR_IDX] = reset_value;
break;
case PMU_CNT_EVENT_COUNTER_0:
case PMU_CNT_EVENT_COUNTER_1:
@@ -442,22 +419,17 @@ static void handle_desc(struct descriptor *desc,
case PMU_CNT_EVENT_COUNTER_3:
selected_counter -= PMU_CNT_EVENT_COUNTER_0;
desc->u.counter.event_type =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVTYPE0_IDX +
- (selected_counter * 2)];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVTYPE0_IDX + (selected_counter * 2)];
desc->u.counter.counter_value =
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVCNT0_IDX +
- (selected_counter * 2)];
- clusters_ctx
- [entry_cluster]
- [cpu_id]
- [PMXEVCNT0_IDX +
- (selected_counter * 2)] = reset_value;
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVCNT0_IDX + (selected_counter * 2)];
+ clusters_ctx[entry_cluster]
+ [cpu_id]
+ [PMXEVCNT0_IDX +
+ (selected_counter * 2)] = reset_value;
break;
default:
break;
@@ -508,10 +480,9 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
ret = read_pmxevtyper();
} else {
tmp = clusters_ctx[first][cpu_id][PMSELR_IDX];
- ret = clusters_ctx
- [first]
- [cpu_id]
- [PMXEVTYPE0_IDX + (tmp * 2)];
+ ret = clusters_ctx[first]
+ [cpu_id]
+ [PMXEVTYPE0_IDX + (tmp * 2)];
}
break;
case HVC_PMU_PMXEVTYPER_WRITE:
@@ -519,10 +490,9 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
write_pmxevtyper(second);
} else {
tmp = clusters_ctx[first][cpu_id][PMSELR_IDX];
- clusters_ctx
- [first]
- [cpu_id]
- [PMXEVTYPE0_IDX + (tmp * 2)] = second;
+ clusters_ctx[first]
+ [cpu_id]
+ [PMXEVTYPE0_IDX + (tmp * 2)] = second;
}
break;
case HVC_PMU_PMCNTENSET_READ:
@@ -578,10 +548,9 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
ret = read_pmxevcntr();
} else {
tmp = clusters_ctx[first][cpu_id][PMSELR_IDX];
- ret = clusters_ctx
- [first]
- [cpu_id]
- [PMXEVCNT0_IDX + (tmp * 2)];
+ ret = clusters_ctx[first]
+ [cpu_id]
+ [PMXEVCNT0_IDX + (tmp * 2)];
}
break;
case HVC_PMU_PMXEVCNTR_WRITE:
@@ -589,10 +558,9 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
write_pmxevcntr(second);
} else {
tmp = clusters_ctx[first][cpu_id][PMSELR_IDX];
- clusters_ctx
- [first]
- [cpu_id]
- [PMXEVCNT0_IDX + (tmp * 2)] = second;
+ clusters_ctx[first]
+ [cpu_id]
+ [PMXEVCNT0_IDX + (tmp * 2)] = second;
}
break;
case HVC_PMU_PMINTENSET_READ:
@@ -629,7 +597,7 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
ret = sizeof(struct descriptor) * ENTRIES;
break;
case HVC_PMU_SYNC_PMU_COUNTERS:
- {
+ {
int i;
int entries;
unsigned int *pentries;
@@ -641,8 +609,8 @@ unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second)
for (i = 0, desc++; i < entries; i++, desc++) {
handle_desc(desc, cluster_id, cpu_id);
}
- }
- break;
+ }
+ break;
}
return ret;
diff --git a/big-little/virtualisor/vgic_trap_handler.c b/big-little/virtualisor/vgic_trap_handler.c
index cd06578..75e1030 100644
--- a/big-little/virtualisor/vgic_trap_handler.c
+++ b/big-little/virtualisor/vgic_trap_handler.c
@@ -60,7 +60,8 @@ void handle_vgic_distif_abort(unsigned pa, unsigned *data, unsigned write)
* in progress.
*/
if (FALSE == async_switchover)
- start_virq_migration(icdiptr_orig, icdiptr_curr, reg_offset - GICD_CPUS);
+ start_virq_migration(icdiptr_orig, icdiptr_curr,
+ reg_offset - GICD_CPUS);
} else {
value = read32(pa);
diff --git a/big-little/virtualisor/virt_context.c b/big-little/virtualisor/virt_context.c
index def3551..65dacb6 100644
--- a/big-little/virtualisor/virt_context.c
+++ b/big-little/virtualisor/virt_context.c
@@ -173,7 +173,8 @@ void RestoreVirtualisor(unsigned first_cpu)
&((unsigned long long
*)((unsigned)((&s2_td[ctr].table)
[0])))[s2_td
- [ctr].index];
+ [ctr].
+ index];
s2_td[ctr].prev_desc = *cd_ptr;
*cd_ptr = s2_td[ctr].cur_desc;
periph_addr = (unsigned *)cd_ptr;
diff --git a/big-little/virtualisor/virt_handle.c b/big-little/virtualisor/virt_handle.c
index abdf604..ca3cb8e 100644
--- a/big-little/virtualisor/virt_handle.c
+++ b/big-little/virtualisor/virt_handle.c
@@ -186,7 +186,7 @@ void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs)
switch (CRm) {
case 0:
switch (Op2) {
- unsigned csselr, level, ind;
+ unsigned csselr, level, ind;
case CCSIDR:
if (write)
goto error;
@@ -198,20 +198,20 @@ void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs)
* The error is later corrected in the
* A7 or A15 specific trap function.
*/
- csselr = target_cache_geometry[cpu_id].
- csselr;
+ csselr =
+ target_cache_geometry
+ [cpu_id].csselr;
level = get_cache_level(csselr);
ind = get_cache_ind(csselr);
regs->r[Rt] =
- target_cache_geometry[cpu_id].
- ccsidr[level][ind];
+ target_cache_geometry[cpu_id].ccsidr
+ [level][ind];
break;
case CLIDR:
if (write)
goto error;
regs->r[Rt] =
- target_cache_geometry[cpu_id].
- clidr;
+ target_cache_geometry[cpu_id].clidr;
break;
case AIDR:
if (write)
@@ -233,13 +233,13 @@ void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs)
case CSSELR:
if (write) {
target_cache_geometry
- [cpu_id].csselr = regs->r[Rt];
+ [cpu_id].csselr =
+ regs->r[Rt];
write_csselr(regs->r[Rt]);
- }
- else
+ } else
regs->r[Rt] =
- target_cache_geometry
- [cpu_id].csselr;
+ target_cache_geometry
+ [cpu_id].csselr;
break;
default:
goto error;
@@ -575,7 +575,7 @@ void trap_dabort_handle(unsigned hsr, gp_regs * regs)
return;
}
-void trap_hvc_handle(unsigned hsr, gp_regs *regs)
+void trap_hvc_handle(unsigned hsr, gp_regs * regs)
{
unsigned opcode = regs->r[0];
@@ -587,7 +587,7 @@ void trap_hvc_handle(unsigned hsr, gp_regs *regs)
*/
case HVC_SWITCHER_CLUSTER_SWITCH:
/* Do not switch till previous one has completed */
- while (FALSE == cluster_reset_status(!read_clusterid()));
+ while (FALSE == cluster_reset_status(!read_clusterid())) ;
signal_switchover();
break;
diff --git a/bootwrapper/bootwrapper.h b/bootwrapper/bootwrapper.h
index 5bab412..bc2671b 100644
--- a/bootwrapper/bootwrapper.h
+++ b/bootwrapper/bootwrapper.h
@@ -48,7 +48,7 @@
#define FLAGS_SET 0x30
#define FLAGS_CLR 0x34
-#define VE_KFSCB_BASE 0x60000000 /* Kingfisher System Configuration Block */
+#define VE_KFSCB_BASE 0x60000000 /* Kingfisher System Configuration Block */
#define KFS_ID_OFFSET 0xFFC /* Kingfisher System Platform ID register offset (KFS_ID) */
#define KFS_ID_ARCH_MASK 0x000F0000 /* Mask for extracting KFS architecture */
#define KFS_ID_ARCH_SHIFT 16 /* Shift for extracting KFS architecture */
diff --git a/bootwrapper/c_start.c b/bootwrapper/c_start.c
index 0678db3..a4f1729 100644
--- a/bootwrapper/c_start.c
+++ b/bootwrapper/c_start.c
@@ -72,7 +72,9 @@ void setup_gic_nonsecure(unsigned cluster_id, unsigned cpu_id)
*/
void kick(unsigned cpu_id, unsigned cluster_id, int secondary_cpus)
{
- int cpu_mask = (((1 << (secondary_cpus + 1)) - 1) & ~(1 << cpu_id)) << (cluster_id << 2);
+ int cpu_mask =
+ (((1 << (secondary_cpus + 1)) -
+ 1) & ~(1 << cpu_id)) << (cluster_id << 2);
write32(VE_SYS_BASE + FLAGS_CLR, 0xffffffff); // clear the flags register
write32(VE_SYS_BASE + FLAGS_SET, (unsigned)start); // set the start address in the flags register