Diffstat (limited to 'big-little')
-rw-r--r--  big-little/common/cci.c                         |   38
-rw-r--r--  big-little/common/hyp_setup.c                   |   94
-rwxr-xr-x  big-little/common/pagetable_setup.c             |  585
-rw-r--r--  big-little/common/vgic_handle.c                 |  263
-rw-r--r--  big-little/common/vgic_setup.c                  |   50
-rw-r--r--  big-little/common/vgiclib.c                     |  687
-rw-r--r--  big-little/include/arm.h                        |    4
-rw-r--r--  big-little/include/bakery.h                     |    8
-rw-r--r--  big-little/include/bl.h                         |   16
-rw-r--r--  big-little/include/context.h                    |  104
-rw-r--r--  big-little/include/events.h                     |    4
-rw-r--r--  big-little/include/gic_registers.h              |    6
-rw-r--r--  big-little/include/handler.h                    |    4
-rw-r--r--  big-little/include/hvc.h                        |    4
-rwxr-xr-x  big-little/include/hyp_types.h                  |    4
-rw-r--r--  big-little/include/hyp_vmmap.h                  |   12
-rw-r--r--  big-little/include/int_master.h                 |    6
-rw-r--r--  big-little/include/misc.h                       |   40
-rwxr-xr-x  big-little/include/traps.h                      |   12
-rw-r--r--  big-little/include/vgiclib.h                    |   24
-rw-r--r--  big-little/include/virt_helpers.h               |   18
-rw-r--r--  big-little/lib/bakery.c                         |   52
-rwxr-xr-x  big-little/lib/tube.c                           |   53
-rw-r--r--  big-little/lib/uart.c                           |   68
-rw-r--r--  big-little/lib/virt_events.c                    |   88
-rw-r--r--  big-little/secure_world/events.c                |   70
-rw-r--r--  big-little/secure_world/secure_context.c        |  293
-rw-r--r--  big-little/secure_world/secure_resets.c         |  301
-rw-r--r--  big-little/secure_world/secure_world.h          |   28
-rw-r--r--  big-little/switcher/context/gic.c               |  369
-rw-r--r--  big-little/switcher/context/ns_context.c        |  491
-rw-r--r--  big-little/switcher/context/sh_vgic.c           |  170
-rw-r--r--  big-little/switcher/trigger/async_switchover.c  |  400
-rw-r--r--  big-little/switcher/trigger/sync_switchover.c   |   48
-rw-r--r--  big-little/virtualisor/cache_geom.c             |  757
-rw-r--r--  big-little/virtualisor/cpus/a15/a15.c           |   46
-rw-r--r--  big-little/virtualisor/cpus/a15/include/a15.h   |    4
-rw-r--r--  big-little/virtualisor/cpus/a7/a7.c             |   42
-rw-r--r--  big-little/virtualisor/cpus/a7/include/a7.h     |    4
-rw-r--r--  big-little/virtualisor/include/cache_geom.h     |   81
-rw-r--r--  big-little/virtualisor/include/mem_trap.h       |   30
-rw-r--r--  big-little/virtualisor/include/virtualisor.h    |   42
-rw-r--r--  big-little/virtualisor/mem_trap.c               |  203
-rw-r--r--  big-little/virtualisor/vgic_trap_handler.c      |   91
-rw-r--r--  big-little/virtualisor/virt_context.c           |  355
-rw-r--r--  big-little/virtualisor/virt_handle.c            | 1073
-rw-r--r--  big-little/virtualisor/virt_setup.c             |  371
47 files changed, 3776 insertions, 3737 deletions
diff --git a/big-little/common/cci.c b/big-little/common/cci.c
index c07ccb5..3c47e19 100644
--- a/big-little/common/cci.c
+++ b/big-little/common/cci.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "misc.h"
#include "virt_helpers.h"
@@ -26,32 +26,32 @@
void enable_cci_snoops(unsigned cluster_id)
{
- /* Turn off CCI snoops & DVM Messages */
- if (cluster_id)
- write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
- else
- write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+ /* Turn off CCI snoops & DVM Messages */
+ if (cluster_id)
+ write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+ else
+ write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
- dsb();
+ dsb();
- /* Wait for the dust to settle down */
- while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
+ /* Wait for the dust to settle down */
+ while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
- return;
+ return;
}
void disable_cci_snoops(unsigned cluster_id)
{
- /* Turn off CCI snoops & DVM messages */
- if (cluster_id)
- write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
- else
- write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
+ /* Turn off CCI snoops & DVM messages */
+ if (cluster_id)
+ write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
+ else
+ write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
- dsb();
+ dsb();
- /* Wait for the dust to settle down */
- while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
+ /* Wait for the dust to settle down */
+ while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
- return;
+ return;
}
diff --git a/big-little/common/hyp_setup.c b/big-little/common/hyp_setup.c
index 9c90e26..b0cda90 100644
--- a/big-little/common/hyp_setup.c
+++ b/big-little/common/hyp_setup.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "context.h"
#include "misc.h"
@@ -55,53 +55,53 @@ unsigned event[NUM_CPUS][MAX_EVENTS] __attribute__ ((section("BL_DV_PAGE")));
*/
void bl_rest_init(void)
{
- unsigned first_cpu = find_first_cpu();
- unsigned cpu_id = read_cpuid();
- unsigned cluster_id = read_clusterid();
- unsigned warm_reset = 0;
+ unsigned first_cpu = find_first_cpu();
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned warm_reset = 0;
+
+ /* HYP mode initialisation performed after every reset */
+ write_hvbar((unsigned)&vectors);
+ Enable2ndStagePageTables();
+
+ /* Initialise a per cpu UART */
+ config_uart();
+
+ if (switcher) {
+ /*
+ * Ask the secure world to initialise its context.
+ * Not required when "always on"
+ */
+ smc(SMC_SEC_INIT, 0);
+
+ /*
+ * Since we are using the shared vgic, we need to map
+ * the cpuids to the cpu interfaces as there is no
+ * longer a 1:1 mapping
+ */
+ map_cpuif(cluster_id, cpu_id);
+
+ if (async_switchover && first_cpu == cpu_id)
+ enable_trigger(read_cntfrq());
+ } else {
+
+ /*
+ * Only one cpu should enable the CCI while the other
+ * cpus wait.
+ */
+ if (first_cpu == cpu_id && cluster_id == host_cluster) {
+ write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+ write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+ dsb();
+ }
- /* HYP mode initialisation performed after every reset */
- write_hvbar((unsigned)&vectors);
- Enable2ndStagePageTables();
+ /* Wait for the dust to settle down */
+ while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
+ }
- /* Initialise a per cpu UART */
- config_uart();
-
- if (switcher) {
- /*
- * Ask the secure world to initialise its context.
- * Not required when "always on"
- */
- smc(SMC_SEC_INIT, 0);
-
- /*
- * Since we are using the shared vgic, we need to map
- * the cpuids to the cpu interfaces as there is no
- * longer a 1:1 mapping
- */
- map_cpuif(cluster_id, cpu_id);
-
- if (async_switchover && first_cpu == cpu_id)
- enable_trigger(read_cntfrq());
- } else {
-
- /*
- * Only one cpu should enable the CCI while the other
- * cpus wait.
- */
- if (first_cpu == cpu_id && cluster_id == host_cluster) {
- write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
- write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
- dsb();
- }
-
- /* Wait for the dust to settle down */
- while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
- }
-
- /* Initialise the Virtual GIC and the Virtualizer */
- SetupVGIC(warm_reset);
- SetupVirtualisor(first_cpu);
+ /* Initialise the Virtual GIC and the Virtualizer */
+ SetupVGIC(warm_reset);
+ SetupVirtualisor(first_cpu);
- return;
+ return;
}
diff --git a/big-little/common/pagetable_setup.c b/big-little/common/pagetable_setup.c
index fa4e6fd..0390153 100755
--- a/big-little/common/pagetable_setup.c
+++ b/big-little/common/pagetable_setup.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
/* ----------------------------------------------------------------------------
* i n c l u d e s
@@ -30,10 +30,10 @@
#include "virt_helpers.h"
typedef struct {
- unsigned va;
- unsigned pa;
- unsigned long long attrs;
- unsigned long long *pt_addr;
+ unsigned va;
+ unsigned pa;
+ unsigned long long attrs;
+ unsigned long long *pt_addr;
} four_kb_pt_desc;
/* ----------------------------------------------------------------------------
@@ -42,7 +42,7 @@ typedef struct {
#define LEVEL1 0x1
#define LEVEL2 0x2
-#define HYP_PA_START 0x00000000 /* Flat mapping */
+#define HYP_PA_START 0x00000000 /* Flat mapping */
#define HYP_PA_END 0xFFFFFFFF
#define HYP_VA_START HYP_PA_START
#define HYP_VA_END HYP_PA_END
@@ -147,58 +147,58 @@ unsigned long long stage2_l3_so_pt[512] __attribute__ ((aligned(4096)));
* cpu interface for OS use.
*/
static void CreateL3PageTable(four_kb_pt_desc * l3_mapping, unsigned level,
- unsigned long long *base_pt_addr)
+ unsigned long long *base_pt_addr)
{
- unsigned one_gb_index = l3_mapping->pa >> 30;
- unsigned two_mb_index = l3_mapping->pa >> 21;
- unsigned four_kb_index = 0;
- unsigned pa_4k_index = 0;
- unsigned long long l1_desc = 0;
- unsigned long long *l2_desc = 0;
- unsigned long long old_attrs = 0;
- unsigned long long *l1_pt_addr = 0;
- unsigned long long *l2_pt_addr = 0;
- unsigned long long *l3_pt_addr = l3_mapping->pt_addr;
-
- /*
- * Indices calculated above are relative to the GB or MB they
- * belong to rather than an offset of 0x0. e.g. for the 2mb index
- * index = (address >> 21) - (<number of 2MBs in 1GB> x <this GB index>)
- */
-
- /* Calculate the level 2 page table descriptor */
- if (level == 1) {
- l1_pt_addr = base_pt_addr;
- l1_desc = l1_pt_addr[one_gb_index];
- l2_pt_addr =
- (unsigned long long
- *)((unsigned)((&l1_desc)[0] & 0xfffff000UL));
- l2_desc = &l2_pt_addr[two_mb_index - (512 * one_gb_index)];
- } else {
- l2_pt_addr = &base_pt_addr[one_gb_index << 9];
- l2_desc = &base_pt_addr[two_mb_index - (512 * one_gb_index)];
- }
-
- /* Preserve the old attributes */
- old_attrs = *l2_desc & 0xfff0000000000fffULL;
- /* Replace block mapping with table mapping */
- *l2_desc = (unsigned long long)l3_pt_addr | TABLE_MAPPING;
-
- /* Create a flat mapping for all 4k descriptors to begin with */
- for (four_kb_index = 0; four_kb_index < 512; four_kb_index++) {
- l3_pt_addr[four_kb_index] =
- (((two_mb_index << 9) +
- four_kb_index) << 12) | old_attrs | VALID_MAPPING;
- }
- pa_4k_index = ((l3_mapping->pa << 11) >> 11) >> 12;
-
- /*
- * Replace the existing descriptor with new mapping and attributes
- */
- l3_pt_addr[pa_4k_index] =
- l3_mapping->va | l3_mapping->attrs | VALID_MAPPING;
-
- return;
+ unsigned one_gb_index = l3_mapping->pa >> 30;
+ unsigned two_mb_index = l3_mapping->pa >> 21;
+ unsigned four_kb_index = 0;
+ unsigned pa_4k_index = 0;
+ unsigned long long l1_desc = 0;
+ unsigned long long *l2_desc = 0;
+ unsigned long long old_attrs = 0;
+ unsigned long long *l1_pt_addr = 0;
+ unsigned long long *l2_pt_addr = 0;
+ unsigned long long *l3_pt_addr = l3_mapping->pt_addr;
+
+ /*
+ * Indices calculated above are relative to the GB or MB they
+ * belong to rather than an offset of 0x0. e.g. for the 2mb index
+ * index = (address >> 21) - (<number of 2MBs in 1GB> x <this GB index>)
+ */
+
+ /* Calculate the level 2 page table descriptor */
+ if (level == 1) {
+ l1_pt_addr = base_pt_addr;
+ l1_desc = l1_pt_addr[one_gb_index];
+ l2_pt_addr =
+ (unsigned long long
+ *)((unsigned)((&l1_desc)[0] & 0xfffff000UL));
+ l2_desc = &l2_pt_addr[two_mb_index - (512 * one_gb_index)];
+ } else {
+ l2_pt_addr = &base_pt_addr[one_gb_index << 9];
+ l2_desc = &base_pt_addr[two_mb_index - (512 * one_gb_index)];
+ }
+
+ /* Preserve the old attributes */
+ old_attrs = *l2_desc & 0xfff0000000000fffULL;
+ /* Replace block mapping with table mapping */
+ *l2_desc = (unsigned long long)l3_pt_addr | TABLE_MAPPING;
+
+ /* Create a flat mapping for all 4k descriptors to begin with */
+ for (four_kb_index = 0; four_kb_index < 512; four_kb_index++) {
+ l3_pt_addr[four_kb_index] =
+ (((two_mb_index << 9) +
+ four_kb_index) << 12) | old_attrs | VALID_MAPPING;
+ }
+ pa_4k_index = ((l3_mapping->pa << 11) >> 11) >> 12;
+
+ /*
+ * Replace the existing descriptor with new mapping and attributes
+ */
+ l3_pt_addr[pa_4k_index] =
+ l3_mapping->va | l3_mapping->attrs | VALID_MAPPING;
+
+ return;
}
/*
@@ -206,262 +206,261 @@ static void CreateL3PageTable(four_kb_pt_desc * l3_mapping, unsigned level,
*/
static void Add4KMapping(four_kb_pt_desc * l3_mapping)
{
- unsigned pa_4k_index = ((l3_mapping->pa << 11) >> 11) >> 12;
- unsigned long long *l3_pt_addr = l3_mapping->pt_addr;
+ unsigned pa_4k_index = ((l3_mapping->pa << 11) >> 11) >> 12;
+ unsigned long long *l3_pt_addr = l3_mapping->pt_addr;
- /*
- * Replace the existing descriptor with new mapping and attributes
- */
- l3_pt_addr[pa_4k_index] =
- l3_mapping->va | l3_mapping->attrs | VALID_MAPPING;
+ /*
+ * Replace the existing descriptor with new mapping and attributes
+ */
+ l3_pt_addr[pa_4k_index] =
+ l3_mapping->va | l3_mapping->attrs | VALID_MAPPING;
- return;
+ return;
}
void CreateHypModePageTables(void)
{
- unsigned num_l1_descs = 0, num_l2_descs = 0;
- unsigned l1_index, l2_index;
- unsigned long long l2_attrs = 0;
- four_kb_pt_desc l3_desc;
-
- /* Create the pagetables */
- num_l1_descs = ((HYP_PA_END - HYP_PA_START) >> 30) + 1;
- num_l2_descs = ((HYP_PA_END - HYP_PA_START) >> 21) + 1;
-
- /* Only the first 4GB are valid translations */
- for (l1_index = 0; l1_index < num_l1_descs; l1_index++) {
- hyp_l1_pagetable[l1_index] =
- (unsigned long long)&hyp_l2_pagetable[l1_index][0] |
- TABLE_MAPPING;
- for (l2_index = 0; l2_index < num_l2_descs / num_l1_descs;
- l2_index++) {
-
- if ((l2_index + (l1_index << 9)) < 32) {
- /* 0-64M(Secure ROM/NOR Flash):Block mapping with RO access, Inner shareable, Inner/Outer WBWA */
- l2_attrs =
- BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
- NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
- AP(KERN_RO);
- ((unsigned *) &l2_attrs)[1] |= XN;
- }
- else if ((l2_index + (l1_index << 9)) < 64)
- /* 64-128M(Secure RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
- l2_attrs =
- BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
- NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
- AP(KERN_RW);
- else if ((l2_index + (l1_index << 9)) < 1024) {
- /* 128-2048M (Peripherals) : Block mapping of Device memory */
- l2_attrs =
- BLOCK_MAPPING | HMAIR0_DEVICE_MEM_ATTR_IDX |
- NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
- AP(KERN_RW);
- ((unsigned *) &l2_attrs)[1] |= XN;
- }
- else
- /* 2-4GB (RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
- l2_attrs =
- BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
- NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
- AP(KERN_RW);
-
- hyp_l2_pagetable[l1_index][l2_index] =
- ((l2_index + (l1_index << 9)) << 21) | l2_attrs;
- }
- }
-
- /*
- * Create a mapping for a device page to be used
- * for Locks, Events & anything that is shared when both
- * the clusters are executing at the same time.
- */
- l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
- l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
- l3_desc.attrs =
- ACCESS_FLAG | HMAIR0_DEVICE_MEM_ATTR_IDX | SHAREABILITY(0x3) |
- AP(KERN_RW);
- l3_desc.pt_addr = hyp_l3_so_pt;
- CreateL3PageTable(&l3_desc, LEVEL1, (unsigned long long *)hyp_l1_pagetable);
-
- return;
+ unsigned num_l1_descs = 0, num_l2_descs = 0;
+ unsigned l1_index, l2_index;
+ unsigned long long l2_attrs = 0;
+ four_kb_pt_desc l3_desc;
+
+ /* Create the pagetables */
+ num_l1_descs = ((HYP_PA_END - HYP_PA_START) >> 30) + 1;
+ num_l2_descs = ((HYP_PA_END - HYP_PA_START) >> 21) + 1;
+
+ /* Only the first 4GB are valid translations */
+ for (l1_index = 0; l1_index < num_l1_descs; l1_index++) {
+ hyp_l1_pagetable[l1_index] =
+ (unsigned long long)&hyp_l2_pagetable[l1_index][0] |
+ TABLE_MAPPING;
+ for (l2_index = 0; l2_index < num_l2_descs / num_l1_descs;
+ l2_index++) {
+
+ if ((l2_index + (l1_index << 9)) < 32) {
+ /* 0-64M(Secure ROM/NOR Flash):Block mapping with RO access, Inner shareable, Inner/Outer WBWA */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RO);
+ ((unsigned *)&l2_attrs)[1] |= XN;
+ } else if ((l2_index + (l1_index << 9)) < 64)
+ /* 64-128M(Secure RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RW);
+ else if ((l2_index + (l1_index << 9)) < 1024) {
+ /* 128-2048M (Peripherals) : Block mapping of Device memory */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_DEVICE_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RW);
+ ((unsigned *)&l2_attrs)[1] |= XN;
+ } else
+ /* 2-4GB (RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RW);
+
+ hyp_l2_pagetable[l1_index][l2_index] =
+ ((l2_index + (l1_index << 9)) << 21) | l2_attrs;
+ }
+ }
+
+ /*
+ * Create a mapping for a device page to be used
+ * for Locks, Events & anything that is shared when both
+ * the clusters are executing at the same time.
+ */
+ l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.attrs =
+ ACCESS_FLAG | HMAIR0_DEVICE_MEM_ATTR_IDX | SHAREABILITY(0x3) |
+ AP(KERN_RW);
+ l3_desc.pt_addr = hyp_l3_so_pt;
+ CreateL3PageTable(&l3_desc, LEVEL1,
+ (unsigned long long *)hyp_l1_pagetable);
+
+ return;
}
void EnableHypModePageTables(void)
{
- /* Update the HTTBR */
- write_httbr((unsigned long long)hyp_l1_pagetable);
-
- /*
- * Setup the HMAIR0 register.
- * [7:0] = Device memory
- * [15:8] = Normal memory, Inner and outer cacheable, WBWA
- */
- write_hmair0(IDX2(HMAIR_SO_MEM) |
- IDX1(HMAIR_INNER_WB_RWA_MEM | HMAIR_OUTER_WB_RWA_MEM) |
- IDX0(HMAIR_DEVICE_MEM));
-
- /*
- * Set the HTCR.
- * Pagetables are Normal memory, Inner/Outer shareable, Inner/Outer WBWA
- */
- write_htcr(EAE(ENABLE) | SH0(CR_INNER_SH) | ORGN0(CR_C_WBWA) |
- IRGN0(CR_C_WBWA) | T0SZ(CR_ADDR_SPC_4GB));
-
- /* Enable the Hyp MMU */
- write_hsctlr(ICACHE(ENABLE) | DCACHE(ENABLE) | ALIGNMENT(ENABLE) |
- MMU(ENABLE));
-
- return;
+ /* Update the HTTBR */
+ write_httbr((unsigned long long)hyp_l1_pagetable);
+
+ /*
+ * Setup the HMAIR0 register.
+ * [7:0] = Device memory
+ * [15:8] = Normal memory, Inner and outer cacheable, WBWA
+ */
+ write_hmair0(IDX2(HMAIR_SO_MEM) |
+ IDX1(HMAIR_INNER_WB_RWA_MEM | HMAIR_OUTER_WB_RWA_MEM) |
+ IDX0(HMAIR_DEVICE_MEM));
+
+ /*
+ * Set the HTCR.
+ * Pagetables are Normal memory, Inner/Outer shareable, Inner/Outer WBWA
+ */
+ write_htcr(EAE(ENABLE) | SH0(CR_INNER_SH) | ORGN0(CR_C_WBWA) |
+ IRGN0(CR_C_WBWA) | T0SZ(CR_ADDR_SPC_4GB));
+
+ /* Enable the Hyp MMU */
+ write_hsctlr(ICACHE(ENABLE) | DCACHE(ENABLE) | ALIGNMENT(ENABLE) |
+ MMU(ENABLE));
+
+ return;
}
void Create2ndStagePageTables(void)
{
- unsigned two_mb_index = 0;
- unsigned one_gb_index = 0;
- unsigned long long level2_desc = 0;
- four_kb_pt_desc l3_desc = { 0 };
-
- /*
- * Create the flat mapped 2nd stage page tables.
- * This should be done only once. The remaining
- * cpus can share the mappings and wait while
- * this is being done.
- */
- for (one_gb_index = 0; one_gb_index < 4; one_gb_index++)
- for (two_mb_index = 0; two_mb_index < 512; two_mb_index++) {
-
- if ((two_mb_index + (one_gb_index << 9)) < 32)
- /* 0-64M (Secure ROM/NOR Flash) : Block mapping with RO access, Inner shareable, Inner/Outer WBWA */
- level2_desc =
- ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RO)
- | MEM_ATTR(0xf) | BLOCK_MAPPING;
- else if ((two_mb_index + (one_gb_index << 9)) < 64)
- /* 64-128M (Secure RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
- level2_desc =
- ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
- | MEM_ATTR(0xf) | BLOCK_MAPPING;
- else if ((two_mb_index + (one_gb_index << 9)) < 1024)
- /* 128-2048M (Peripherals) : Block mapping of Device memory */
- level2_desc =
- ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
- | MEM_ATTR(0x1) | BLOCK_MAPPING;
- else
- /* 2-4GB (RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
- level2_desc =
- ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
- | MEM_ATTR(0xf) | BLOCK_MAPPING;
-
- stage2_l2_pagetable[one_gb_index][two_mb_index] =
- (two_mb_index +
- (512 * one_gb_index) << 21) | level2_desc;
-
- }
-
- /* First 4KB Mapping PCPUIF to the VCPUIF for the payload software */
- l3_desc.va = VGIC_VM_PHY_BASE;
- l3_desc.pa = GIC_IC_PHY_BASE;
- l3_desc.attrs =
- ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
- l3_desc.pt_addr = stage2_l3_cpuif_pt;
- CreateL3PageTable(&l3_desc, LEVEL2,
- (unsigned long long *)stage2_l2_pagetable);
-
- /* Second 4KB Mapping PCPUIF to the VCPUIF for the payload software */
- l3_desc.va = VGIC_VM_PHY_BASE + 0x1000;
- l3_desc.pa = GIC_IC_PHY_BASE + 0x1000;
- l3_desc.attrs =
- ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
- l3_desc.pt_addr = stage2_l3_cpuif_pt;
- Add4KMapping(&l3_desc);
-
- /*
- * Create a mapping for a device page to be used
- * for Locks, Events & anything that is shared when both
- * the clusters are executing at the same time.
- */
- l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
- l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
- l3_desc.attrs =
- ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
- l3_desc.pt_addr = stage2_l3_so_pt;
- CreateL3PageTable(&l3_desc, LEVEL2,
- (unsigned long long *)stage2_l2_pagetable);
-
- return;
+ unsigned two_mb_index = 0;
+ unsigned one_gb_index = 0;
+ unsigned long long level2_desc = 0;
+ four_kb_pt_desc l3_desc = { 0 };
+
+ /*
+ * Create the flat mapped 2nd stage page tables.
+ * This should be done only once. The remaining
+ * cpus can share the mappings and wait while
+ * this is being done.
+ */
+ for (one_gb_index = 0; one_gb_index < 4; one_gb_index++)
+ for (two_mb_index = 0; two_mb_index < 512; two_mb_index++) {
+
+ if ((two_mb_index + (one_gb_index << 9)) < 32)
+ /* 0-64M (Secure ROM/NOR Flash) : Block mapping with RO access, Inner shareable, Inner/Outer WBWA */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RO)
+ | MEM_ATTR(0xf) | BLOCK_MAPPING;
+ else if ((two_mb_index + (one_gb_index << 9)) < 64)
+ /* 64-128M (Secure RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
+ | MEM_ATTR(0xf) | BLOCK_MAPPING;
+ else if ((two_mb_index + (one_gb_index << 9)) < 1024)
+ /* 128-2048M (Peripherals) : Block mapping of Device memory */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
+ | MEM_ATTR(0x1) | BLOCK_MAPPING;
+ else
+ /* 2-4GB (RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
+ | MEM_ATTR(0xf) | BLOCK_MAPPING;
+
+ stage2_l2_pagetable[one_gb_index][two_mb_index] =
+ (two_mb_index +
+ (512 * one_gb_index) << 21) | level2_desc;
+
+ }
+
+ /* First 4KB Mapping PCPUIF to the VCPUIF for the payload software */
+ l3_desc.va = VGIC_VM_PHY_BASE;
+ l3_desc.pa = GIC_IC_PHY_BASE;
+ l3_desc.attrs =
+ ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
+ l3_desc.pt_addr = stage2_l3_cpuif_pt;
+ CreateL3PageTable(&l3_desc, LEVEL2,
+ (unsigned long long *)stage2_l2_pagetable);
+
+ /* Second 4KB Mapping PCPUIF to the VCPUIF for the payload software */
+ l3_desc.va = VGIC_VM_PHY_BASE + 0x1000;
+ l3_desc.pa = GIC_IC_PHY_BASE + 0x1000;
+ l3_desc.attrs =
+ ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
+ l3_desc.pt_addr = stage2_l3_cpuif_pt;
+ Add4KMapping(&l3_desc);
+
+ /*
+ * Create a mapping for a device page to be used
+ * for Locks, Events & anything that is shared when both
+ * the clusters are executing at the same time.
+ */
+ l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.attrs =
+ ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
+ l3_desc.pt_addr = stage2_l3_so_pt;
+ CreateL3PageTable(&l3_desc, LEVEL2,
+ (unsigned long long *)stage2_l2_pagetable);
+
+ return;
}
void Enable2ndStagePageTables(void)
{
- /*
- * Set the VTCR to:
- * Normal memory outer shareable, Device memory shareable
- * Outer and Inner WBWA
- * Start at level 2
- * Size of addressed region is 4GB (16k worth of page tables)
- */
- write_vtcr(SH0(CR_INNER_SH) | ORGN0(CR_C_WBWA) | IRGN0(CR_C_WBWA));
-
- /* Address is already aligned to 16k or 2*14 */
- write_vttbr((unsigned long long)stage2_l2_pagetable);
-
- write_hcr(read_hcr() | HCR_VM);
-
- /*
- * TODO: We do not need a synchronization barrier here as we
- * are not yet executing out of NS PL0 & PL1 and there will be
- * a barrier at some point before that.
- */
- return;
+ /*
+ * Set the VTCR to:
+ * Normal memory outer shareable, Device memory shareable
+ * Outer and Inner WBWA
+ * Start at level 2
+ * Size of addressed region is 4GB (16k worth of page tables)
+ */
+ write_vtcr(SH0(CR_INNER_SH) | ORGN0(CR_C_WBWA) | IRGN0(CR_C_WBWA));
+
+ /* Address is already aligned to 16k or 2*14 */
+ write_vttbr((unsigned long long)stage2_l2_pagetable);
+
+ write_hcr(read_hcr() | HCR_VM);
+
+ /*
+ * TODO: We do not need a synchronization barrier here as we
+ * are not yet executing out of NS PL0 & PL1 and there will be
+ * a barrier at some point before that.
+ */
+ return;
}
void SetupVirtExtPageTables(void)
{
- unsigned cpu_id = read_cpuid();
- unsigned first_cpu = find_first_cpu();
- unsigned cluster_id = read_clusterid();
- unsigned abs_cpuid = 0;
-
- if (!switcher)
- abs_cpuid = abs_cpuid(cpu_id, cluster_id);
-
- /*
- * First cpu creates the pagetables after
- * a cold reset. Reused by all cpus across
- * warm resets.
- */
- if (switcher ) {
-
- /*
- * While switching its possible that the host cluster
- * is brought out of reset first. Hence, the first
- * cpu of whichever cluster reaches here does the
- * pagetable setup
- */
- if (cpu_id == first_cpu) {
- CreateHypModePageTables();
- Create2ndStagePageTables();
- set_events(VIRT_PGT_DONE);
- }
-
- wait_for_event(VIRT_PGT_DONE, cpu_id);
- reset_event(VIRT_PGT_DONE, cpu_id);
-
- } else {
-
- /*
- * Any cluster can do the initialisation as long as
- * only one of them does it.
- */
- if (cpu_id == first_cpu && cluster_id == host_cluster) {
- CreateHypModePageTables();
- Create2ndStagePageTables();
- set_events(VIRT_PGT_DONE);
- }
-
- wait_for_event(VIRT_PGT_DONE, abs_cpuid);
- reset_event(VIRT_PGT_DONE, abs_cpuid);
- }
-
- return;
+ unsigned cpu_id = read_cpuid();
+ unsigned first_cpu = find_first_cpu();
+ unsigned cluster_id = read_clusterid();
+ unsigned abs_cpuid = 0;
+
+ if (!switcher)
+ abs_cpuid = abs_cpuid(cpu_id, cluster_id);
+
+ /*
+ * First cpu creates the pagetables after
+ * a cold reset. Reused by all cpus across
+ * warm resets.
+ */
+ if (switcher) {
+
+ /*
+ * While switching its possible that the host cluster
+ * is brought out of reset first. Hence, the first
+ * cpu of whichever cluster reaches here does the
+ * pagetable setup
+ */
+ if (cpu_id == first_cpu) {
+ CreateHypModePageTables();
+ Create2ndStagePageTables();
+ set_events(VIRT_PGT_DONE);
+ }
+
+ wait_for_event(VIRT_PGT_DONE, cpu_id);
+ reset_event(VIRT_PGT_DONE, cpu_id);
+
+ } else {
+
+ /*
+ * Any cluster can do the initialisation as long as
+ * only one of them does it.
+ */
+ if (cpu_id == first_cpu && cluster_id == host_cluster) {
+ CreateHypModePageTables();
+ Create2ndStagePageTables();
+ set_events(VIRT_PGT_DONE);
+ }
+
+ wait_for_event(VIRT_PGT_DONE, abs_cpuid);
+ reset_event(VIRT_PGT_DONE, abs_cpuid);
+ }
+
+ return;
}
diff --git a/big-little/common/vgic_handle.c b/big-little/common/vgic_handle.c
index 0280c38..b6bc29c 100644
--- a/big-little/common/vgic_handle.c
+++ b/big-little/common/vgic_handle.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "int_master.h"
#include "gic_registers.h"
@@ -42,49 +42,49 @@ unsigned async_switchover = ASYNC_SWITCH;
void gic_send_ipi(unsigned cpu_mask, unsigned ipi_num)
{
- write32(GIC_ID_PHY_BASE + GICD_SW,
- ((cpu_mask & 0xff) << 16) | (ipi_num & 0xf));
+ write32(GIC_ID_PHY_BASE + GICD_SW,
+ ((cpu_mask & 0xff) << 16) | (ipi_num & 0xf));
}
void gic_enable_int(unsigned num)
{
- unsigned int regbase;
+ unsigned int regbase;
- regbase = GIC_ID_PHY_BASE + GICD_ENABLESET + ((num >> 5) << 2);
- write32(regbase, 1 << (num & 0x1F));
+ regbase = GIC_ID_PHY_BASE + GICD_ENABLESET + ((num >> 5) << 2);
+ write32(regbase, 1 << (num & 0x1F));
}
void gic_disable_int(unsigned num)
{
- unsigned int regbase;
+ unsigned int regbase;
- regbase = GIC_ID_PHY_BASE + GICD_ENABLECLEAR + ((num >> 5) << 2);
- write32(regbase, 1 << (num & 0x1F));
+ regbase = GIC_ID_PHY_BASE + GICD_ENABLECLEAR + ((num >> 5) << 2);
+ write32(regbase, 1 << (num & 0x1F));
}
void gic_deactivate_int(unsigned num)
{
- write32(GIC_IC_PHY_BASE + GICC_DEACTIVATE, num);
+ write32(GIC_IC_PHY_BASE + GICC_DEACTIVATE, num);
}
void gic_eoi_int(unsigned num)
{
- write32(GIC_IC_PHY_BASE + GICC_EOI, num);
+ write32(GIC_IC_PHY_BASE + GICC_EOI, num);
}
unsigned gic_ack_int(void)
{
- return read32(GIC_IC_PHY_BASE + GICC_INTACK);
+ return read32(GIC_IC_PHY_BASE + GICC_INTACK);
}
unsigned gic_int_num(void)
{
- unsigned intcount = 0;
+ unsigned intcount = 0;
- intcount = read32(GIC_ID_PHY_BASE + GICD_CTR);
- intcount = ((intcount & 0x1F) + 1) * 32;
+ intcount = read32(GIC_ID_PHY_BASE + GICD_CTR);
+ intcount = ((intcount & 0x1F) + 1) * 32;
- return intcount;
+ return intcount;
}
/*
@@ -92,119 +92,120 @@ unsigned gic_int_num(void)
*/
vm_context *handle_interrupt(vm_context * context)
{
- unsigned int status, i, src_cpu = 0;
- unsigned cpuid = read_cpuid();
- unsigned cluster_id = read_clusterid();
- unsigned list_desc = 0;
- unsigned int_pri = 0;
- unsigned cpu_if = get_cpuif(cluster_id, cpuid);
- vm_context *ret_ctx = context;
- unsigned do_switch = 0, first_cpu = find_first_cpu();
-
- /*
- * Get the interrupt #
- */
- status = gic_ack_int();
- i = status & 0x3FF;
-
- /*
- * Stop if there are no more interrupts
- */
- if (i == 1023) {
- printf("Spurious interrupt %d \n", i);
- return ret_ctx;
- }
-
- if (async_switchover && cpuid == first_cpu)
- keep_trigger_alive();
-
- /*
- * Special case IPIs, since we need the source CPU ID
- */
- if (i < 16) {
- src_cpu = (status >> 10) & INTACK_CPUID_MASK;
-
- /* Check whether we have been requested to switchover */
- do_switch = check_switchover_ipi(cpu_if, i);
-
- /*
- * SGI Ack actually returns the source cpu interface
- * which needs to be mapped to the apt cpuid.
- */
- src_cpu = get_cpuinfo(src_cpu) & 0xf;
-
- /*
- * IPI handling:
- * If Split EOI is not enabled, then writing to the EOI
- * register drops the priority and deactivates the IPI
- * together. Otherwise, we need to do it seperately.
- * Note that in either case, the OS cannot deactivate the
- * IPI as writing to the virtual EOI register will not
- * bring about a state change in the physical distributor
- * state machine.
- */
- gic_eoi_int(status);
- if (read32(GIC_IC_PHY_BASE + GICC_CTL) & 0x200)
- gic_deactivate_int(status);
-
- if (do_switch) {
- /*
- * switch_cluster() takes the first_cpu as its arg. Since
- * all the cores are expected to power down, its reasonable
- * to assume cpu0 is the first cpu and will take care of
- * saving all the global context.
- */
- switch_cluster(first_cpu);
- return ret_ctx;
- }
- }
-
- /*
- * Check if this interrupt is meant to trigger to switch to the
- * other cluster. If so, then we do not forward the interrupt
- * to the payload software.
- */
- if (async_switchover && check_trigger(i, status))
- return ret_ctx;
-
- /*
- * TODO: Further optimizations can be done later when there are
- * more interrupts apart from timer & uart.
- */
- /*
- * vGIC 11.0 onwards split EOI functionality has to be used for
- * all interrupts. EOIing the interrupt from the VCPUIF will only
- * deactivate the interrupt (clear the active bit) and not clear
- * the active priority at the PCPUIF.
- * Do this only for non SGIs as their priority has already been
- * dropped.
- */
- if (i >= 16)
- write32(GIC_IC_PHY_BASE + GICC_PRIODROP, i);
-
- /*
- * Priority reg = (interrupt no. / 4) x 4 bytes.
- * Priority index = interrupt no. % 4 x 8 bits (8 bits for each priority value)
- * Prioriity value = Priority reg >> Priority index
- */
- int_pri =
- read32(GIC_ID_PHY_BASE + GICD_PRI +
- ((i >> 2) << 2)) >> ((i & 0x3) << 3);
-
- /*
- * Signal interrupts as secure to the VCPUIF since the OS will write to the EnableS
- * bit of the VCPUIF through the 2nd stage translations.
- * TODO: Priority is being read as a 8 bit value from the distributor registers
- * and passed as a 5 bit value. Need to check if this will cause problems.
- */
- if (i < 16)
- list_desc =
- STATE(PENDING) | (int_pri >> 3) << 23 | src_cpu << 10 | i;
- else
- list_desc =
- HW_IRQ | STATE(PENDING) | (int_pri >> 3) << 23 | i << 10 | i;
-
- enqueue_interrupt(list_desc, cpuid);
-
- return ret_ctx;
+ unsigned int status, i, src_cpu = 0;
+ unsigned cpuid = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned list_desc = 0;
+ unsigned int_pri = 0;
+ unsigned cpu_if = get_cpuif(cluster_id, cpuid);
+ vm_context *ret_ctx = context;
+ unsigned do_switch = 0, first_cpu = find_first_cpu();
+
+ /*
+ * Get the interrupt #
+ */
+ status = gic_ack_int();
+ i = status & 0x3FF;
+
+ /*
+ * Stop if there are no more interrupts
+ */
+ if (i == 1023) {
+ printf("Spurious interrupt %d \n", i);
+ return ret_ctx;
+ }
+
+ if (async_switchover && cpuid == first_cpu)
+ keep_trigger_alive();
+
+ /*
+ * Special case IPIs, since we need the source CPU ID
+ */
+ if (i < 16) {
+ src_cpu = (status >> 10) & INTACK_CPUID_MASK;
+
+ /* Check whether we have been requested to switchover */
+ do_switch = check_switchover_ipi(cpu_if, i);
+
+ /*
+ * SGI Ack actually returns the source cpu interface
+ * which needs to be mapped to the apt cpuid.
+ */
+ src_cpu = get_cpuinfo(src_cpu) & 0xf;
+
+ /*
+ * IPI handling:
+ * If Split EOI is not enabled, then writing to the EOI
+ * register drops the priority and deactivates the IPI
+ * together. Otherwise, we need to do it seperately.
+ * Note that in either case, the OS cannot deactivate the
+ * IPI as writing to the virtual EOI register will not
+ * bring about a state change in the physical distributor
+ * state machine.
+ */
+ gic_eoi_int(status);
+ if (read32(GIC_IC_PHY_BASE + GICC_CTL) & 0x200)
+ gic_deactivate_int(status);
+
+ if (do_switch) {
+ /*
+ * switch_cluster() takes the first_cpu as its arg. Since
+ * all the cores are expected to power down, its reasonable
+ * to assume cpu0 is the first cpu and will take care of
+ * saving all the global context.
+ */
+ switch_cluster(first_cpu);
+ return ret_ctx;
+ }
+ }
+
+ /*
+ * Check if this interrupt is meant to trigger to switch to the
+ * other cluster. If so, then we do not forward the interrupt
+ * to the payload software.
+ */
+ if (async_switchover && check_trigger(i, status))
+ return ret_ctx;
+
+ /*
+ * TODO: Further optimizations can be done later when there are
+ * more interrupts apart from timer & uart.
+ */
+ /*
+ * vGIC 11.0 onwards split EOI functionality has to be used for
+ * all interrupts. EOIing the interrupt from the VCPUIF will only
+ * deactivate the interrupt (clear the active bit) and not clear
+ * the active priority at the PCPUIF.
+ * Do this only for non SGIs as their priority has already been
+ * dropped.
+ */
+ if (i >= 16)
+ write32(GIC_IC_PHY_BASE + GICC_PRIODROP, i);
+
+ /*
+ * Priority reg = (interrupt no. / 4) x 4 bytes.
+ * Priority index = interrupt no. % 4 x 8 bits (8 bits for each priority value)
+ * Prioriity value = Priority reg >> Priority index
+ */
+ int_pri =
+ read32(GIC_ID_PHY_BASE + GICD_PRI +
+ ((i >> 2) << 2)) >> ((i & 0x3) << 3);
+
+ /*
+ * Signal interrupts as secure to the VCPUIF since the OS will write to the EnableS
+ * bit of the VCPUIF through the 2nd stage translations.
+ * TODO: Priority is being read as a 8 bit value from the distributor registers
+ * and passed as a 5 bit value. Need to check if this will cause problems.
+ */
+ if (i < 16)
+ list_desc =
+ STATE(PENDING) | (int_pri >> 3) << 23 | src_cpu << 10 | i;
+ else
+ list_desc =
+ HW_IRQ | STATE(PENDING) | (int_pri >> 3) << 23 | i << 10 |
+ i;
+
+ enqueue_interrupt(list_desc, cpuid);
+
+ return ret_ctx;
}
diff --git a/big-little/common/vgic_setup.c b/big-little/common/vgic_setup.c
index 8c839eb..15625cf 100644
--- a/big-little/common/vgic_setup.c
+++ b/big-little/common/vgic_setup.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "int_master.h"
#include "gic_registers.h"
@@ -48,35 +48,35 @@
*/
static void gic_cpuinit()
{
- /* Disable the PCPUIF before configuring it. */
- write32(GIC_IC_PHY_BASE + GICC_CTL, 0x0);
- write32(GIC_IC_PHY_BASE + GICC_BP, 0x0);
- write32(GIC_IC_PHY_BASE + GICC_PRIMASK, 0xFF);
- /* Enable split EOI & Non-secure PCPUIF */
- write32(GIC_IC_PHY_BASE + GICC_CTL, 0x201);
+ /* Disable the PCPUIF before configuring it. */
+ write32(GIC_IC_PHY_BASE + GICC_CTL, 0x0);
+ write32(GIC_IC_PHY_BASE + GICC_BP, 0x0);
+ write32(GIC_IC_PHY_BASE + GICC_PRIMASK, 0xFF);
+ /* Enable split EOI & Non-secure PCPUIF */
+ write32(GIC_IC_PHY_BASE + GICC_CTL, 0x201);
}
void SetupVGIC(unsigned warm_reset)
{
- /*
- * Initialise the HYP view Virtual CPU interface after
- * a cold reset
- */
- if (!warm_reset)
- vgic_init();
+ /*
+ * Initialise the HYP view Virtual CPU interface after
+ * a cold reset
+ */
+ if (!warm_reset)
+ vgic_init();
- /* Initialise the Physical cpu interface */
- gic_cpuinit();
+ /* Initialise the Physical cpu interface */
+ gic_cpuinit();
- /*
- * Enable Virtual exceptions
- */
- write_hcr(read_hcr() | HCR_AMO | HCR_IMO | HCR_FMO);
+ /*
+ * Enable Virtual exceptions
+ */
+ write_hcr(read_hcr() | HCR_AMO | HCR_IMO | HCR_FMO);
- /*
- * TODO: Barriers not needed here as there will surely
- * be others further down the line before virtual
- * exceptions are used.
- */
- return;
+ /*
+ * TODO: Barriers not needed here as there will surely
+ * be others further down the line before virtual
+ * exceptions are used.
+ */
+ return;
}
diff --git a/big-little/common/vgiclib.c b/big-little/common/vgiclib.c
index ad2a55d..2a27088 100644
--- a/big-little/common/vgiclib.c
+++ b/big-little/common/vgiclib.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "vgiclib.h"
#include "misc.h"
@@ -34,64 +34,65 @@
static struct overflowint *freeoverflows[NUM_CPUS];
static struct overflowint theoverflowints[NUM_CPUS][MAXOVERFLOWINTS];
static struct gic_cpuif cpuifs[NUM_CPUS];
-static unsigned hv_lr_count[NUM_CPUS] = {0};
+static unsigned hv_lr_count[NUM_CPUS] = { 0 };
void dump_vgic_state()
{
- unsigned int i;
-
- printf("VGIC state:\n");
- printf(" Control : 0x%x \n", read32(VGIC_HV_PHY_BASE + GICH_CTL));
- printf(" ActivePri: 0x%x \n", read32(VGIC_HV_PHY_BASE + GICH_APR0));
- for (i = 0; i < 4; i++) {
- printf(" List : 0x%x \n", read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (i * 4)));
- }
+ unsigned int i;
+
+ printf("VGIC state:\n");
+ printf(" Control : 0x%x \n", read32(VGIC_HV_PHY_BASE + GICH_CTL));
+ printf(" ActivePri: 0x%x \n", read32(VGIC_HV_PHY_BASE + GICH_APR0));
+ for (i = 0; i < 4; i++) {
+ printf(" List : 0x%x \n",
+ read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (i * 4)));
+ }
}
static struct overflowint *get_overflowint(unsigned cpuid)
{
- struct overflowint *p = freeoverflows[cpuid];
+ struct overflowint *p = freeoverflows[cpuid];
- if (!p) {
- printf("Panic: Out of overflow interrupt slots.\n");
- printf("Recompile with larger MAXOVERFLOWINTS.\n");
- panic();
- }
+ if (!p) {
+ printf("Panic: Out of overflow interrupt slots.\n");
+ printf("Recompile with larger MAXOVERFLOWINTS.\n");
+ panic();
+ }
- freeoverflows[cpuid] = p->next;
+ freeoverflows[cpuid] = p->next;
- return p;
+ return p;
}
static void free_overflowint(struct overflowint *p, unsigned cpuid)
{
- p->next = freeoverflows[cpuid];
- freeoverflows[cpuid] = p;
+ p->next = freeoverflows[cpuid];
+ freeoverflows[cpuid] = p;
}
void vgic_init(void)
{
- unsigned int i;
- unsigned cpuid = read_cpuid();
+ unsigned int i;
+ unsigned cpuid = read_cpuid();
- freeoverflows[cpuid] = 0x0;
+ freeoverflows[cpuid] = 0x0;
- for (i = 0; i < MAXOVERFLOWINTS; i++) {
- free_overflowint(&(theoverflowints[cpuid][i]), cpuid);
- }
+ for (i = 0; i < MAXOVERFLOWINTS; i++) {
+ free_overflowint(&(theoverflowints[cpuid][i]), cpuid);
+ }
- /*
- * Find the number of List registers
- * TODO: Will not work if individual cpus can have different number
- * of list registers across clusters. Needs to be detected for each
- * access then.
- */
- hv_lr_count[cpuid] = (read32(VGIC_HV_PHY_BASE + GICH_VTR) & 0x3f) + 1;
+ /*
+ * Find the number of List registers
+ * TODO: Will not work if individual cpus can have different number
+ * of list registers across clusters. Needs to be detected for each
+ * access then.
+ */
+ hv_lr_count[cpuid] = (read32(VGIC_HV_PHY_BASE + GICH_VTR) & 0x3f) + 1;
- /* Enable virtual interrupts & if required, maintenance interrupts */
- write32(VGIC_HV_PHY_BASE + GICH_CTL, VGICH_HCR_EN);
+ /* Enable virtual interrupts & if required, maintenance interrupts */
+ write32(VGIC_HV_PHY_BASE + GICH_CTL, VGICH_HCR_EN);
- return;
+ return;
}
/*
@@ -99,12 +100,12 @@ void vgic_init(void)
*/
static void set_vgic_entry(unsigned int descr, unsigned int slot)
{
- write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (slot * 4), descr);
+ write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (slot * 4), descr);
}
static unsigned int get_vgic_entry(unsigned int slot)
{
- return read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (slot * 4));
+ return read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (slot * 4));
}
/*
@@ -112,12 +113,12 @@ static unsigned int get_vgic_entry(unsigned int slot)
*/
static void set_vgic_status(unsigned int status)
{
- write32(VGIC_HV_PHY_BASE + GICH_CTL, status);
+ write32(VGIC_HV_PHY_BASE + GICH_CTL, status);
}
static unsigned int get_vgic_status(void)
{
- return read32(VGIC_HV_PHY_BASE + GICH_CTL);
+ return read32(VGIC_HV_PHY_BASE + GICH_CTL);
}
/*
@@ -128,28 +129,28 @@ static unsigned int get_vgic_status(void)
*/
static void set_vgic_queue_entry(struct gic_cpuif *cpuif, unsigned int descr)
{
- unsigned int pri = (descr >> 20) & 0xFF;
- struct overflowint **oflowh, *oflowp;
- unsigned cpuid = read_cpuid();
-
- /*
- * If we are queuing something and there is currently no queue, set the interrupt bit
- */
- if (!(cpuif->overflow))
- set_vgic_status(get_vgic_status() | 0x2);
-
- /*
- * Determine insertion point, might be the end of the list
- */
- for (oflowh = &(cpuif->overflow); *oflowh; oflowh = &((*oflowh)->next))
- if ((*oflowh)->priority > pri)
- break;
-
- oflowp = get_overflowint(cpuid);
- oflowp->priority = pri;
- oflowp->value = descr;
- oflowp->next = *oflowh;
- *oflowh = oflowp;
+ unsigned int pri = (descr >> 20) & 0xFF;
+ struct overflowint **oflowh, *oflowp;
+ unsigned cpuid = read_cpuid();
+
+ /*
+ * If we are queuing something and there is currently no queue, set the interrupt bit
+ */
+ if (!(cpuif->overflow))
+ set_vgic_status(get_vgic_status() | 0x2);
+
+ /*
+ * Determine insertion point, might be the end of the list
+ */
+ for (oflowh = &(cpuif->overflow); *oflowh; oflowh = &((*oflowh)->next))
+ if ((*oflowh)->priority > pri)
+ break;
+
+ oflowp = get_overflowint(cpuid);
+ oflowp->priority = pri;
+ oflowp->value = descr;
+ oflowp->next = *oflowh;
+ *oflowh = oflowp;
}
/*
@@ -159,7 +160,7 @@ static void set_vgic_queue_entry(struct gic_cpuif *cpuif, unsigned int descr)
*/
static inline unsigned elrsr_max_index(unsigned cpuid)
{
- return (hv_lr_count[cpuid] - 1) >> 5;
+ return (hv_lr_count[cpuid] - 1) >> 5;
}
/*
@@ -169,107 +170,117 @@ static inline unsigned elrsr_max_index(unsigned cpuid)
* inverse of what the elrsr returns while taking into account unimplemented
* interrupts.
*/
-static unsigned get_elrsr_active_bits(unsigned index, unsigned cpuid, unsigned max_index)
+static unsigned get_elrsr_active_bits(unsigned index, unsigned cpuid,
+ unsigned max_index)
{
- unsigned elrsr = ~(read32(VGIC_HV_PHY_BASE + GICH_ELRSR0 + (index << 2)));
-
- if (index == max_index) {
- /*
- * Get the remainder, shift 1 times remainder and subtract 1
- * from it to form the mask.
- */
- elrsr &= (1 << (hv_lr_count[cpuid] - (32 * max_index))) - 1;
- } else if (index > max_index) {
- /*
- * There can never be active virqs when the list registers
- * do not exist.
- */
- elrsr = 0;
- }
-
- return elrsr;
+ unsigned elrsr =
+ ~(read32(VGIC_HV_PHY_BASE + GICH_ELRSR0 + (index << 2)));
+
+ if (index == max_index) {
+ /*
+ * Get the remainder, shift 1 times remainder and subtract 1
+ * from it to form the mask.
+ */
+ elrsr &= (1 << (hv_lr_count[cpuid] - (32 * max_index))) - 1;
+ } else if (index > max_index) {
+ /*
+ * There can never be active virqs when the list registers
+ * do not exist.
+ */
+ elrsr = 0;
+ }
+
+ return elrsr;
}
void vgic_savestate(unsigned int cpu)
{
- struct gic_cpuif *cpuif = &(cpuifs[cpu]);
- unsigned int i, ctr = 0, cur_elrsr = 0;
- unsigned max_index = elrsr_max_index(cpu);
-
- for(ctr = 0; ctr <= max_index; ctr++) {
- /* Negate read value so that set bit corresponds to a !inactive register */
- cur_elrsr = get_elrsr_active_bits(ctr, cpu, max_index);
- cpuif->elrsr[ctr] = cur_elrsr;
-
- for(i = bitindex(cur_elrsr); ((int) i) >= 0; i = bitindex(cur_elrsr)) {
- unsigned list_reg =
- read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2));
- unsigned int_id = (list_reg >> 10) & 0x3ff;
-
- /* Clear the saved bit index */
- cur_elrsr &= ~(1 << i);
-
- /*
- * Invalidate the pending/active virtual interrupt. Since its a shared vGIC
- * this irq will persist till the next switch and hence create a duplicate.
- */
- write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2), list_reg & ~(0x3 << 28));
-
- /*
- * While saving queued IPI context, ensure that the requesting cpu
- * interface is mapped to it counterpart on the inbound cluster
- */
- if (int_id < 16) {
- unsigned ob_cpuid = int_id & 0x7;
- unsigned ob_clusterid = read_clusterid();
- unsigned ib_cpuif = 0;
-
- ib_cpuif = get_cpuif(!ob_clusterid, ob_cpuid);
- /* Clear the cpu interface bits and place inbound cpu interface instead */
- list_reg = (list_reg & ~(0x7 << 10)) | (ib_cpuif << 10);
- } else if (int_id < 32) {
- /*
- * Pending Private peripheral interrupts will be recreated from scratch
- * so no need to save them.
- */
- cpuif->elrsr[ctr] &= ~(1 << i);
- continue;
- }
-
- cpuif->ints[i] = list_reg;
-
- }
- }
-
- cpuif->status = read32(VGIC_HV_PHY_BASE + GICH_CTL);
- cpuif->activepris = read32(VGIC_HV_PHY_BASE + GICH_APR0);
-
- write32(VGIC_HV_PHY_BASE + GICH_CTL, 0); /* SMP */
-
- return;
+ struct gic_cpuif *cpuif = &(cpuifs[cpu]);
+ unsigned int i, ctr = 0, cur_elrsr = 0;
+ unsigned max_index = elrsr_max_index(cpu);
+
+ for (ctr = 0; ctr <= max_index; ctr++) {
+ /* Negate read value so that set bit corresponds to a !inactive register */
+ cur_elrsr = get_elrsr_active_bits(ctr, cpu, max_index);
+ cpuif->elrsr[ctr] = cur_elrsr;
+
+ for (i = bitindex(cur_elrsr); ((int)i) >= 0;
+ i = bitindex(cur_elrsr)) {
+ unsigned list_reg =
+ read32(VGIC_HV_PHY_BASE + GICH_LR_BASE +
+ ((1 << 7) * ctr) + (i << 2));
+ unsigned int_id = (list_reg >> 10) & 0x3ff;
+
+ /* Clear the saved bit index */
+ cur_elrsr &= ~(1 << i);
+
+ /*
+ * Invalidate the pending/active virtual interrupt. Since its a shared vGIC
+ * this irq will persist till the next switch and hence create a duplicate.
+ */
+ write32(VGIC_HV_PHY_BASE + GICH_LR_BASE +
+ ((1 << 7) * ctr) + (i << 2),
+ list_reg & ~(0x3 << 28));
+
+ /*
+ * While saving queued IPI context, ensure that the requesting cpu
+ * interface is mapped to it counterpart on the inbound cluster
+ */
+ if (int_id < 16) {
+ unsigned ob_cpuid = int_id & 0x7;
+ unsigned ob_clusterid = read_clusterid();
+ unsigned ib_cpuif = 0;
+
+ ib_cpuif = get_cpuif(!ob_clusterid, ob_cpuid);
+ /* Clear the cpu interface bits and place inbound cpu interface instead */
+ list_reg =
+ (list_reg & ~(0x7 << 10)) | (ib_cpuif <<
+ 10);
+ } else if (int_id < 32) {
+ /*
+ * Pending Private peripheral interrupts will be recreated from scratch
+ * so no need to save them.
+ */
+ cpuif->elrsr[ctr] &= ~(1 << i);
+ continue;
+ }
+
+ cpuif->ints[i] = list_reg;
+
+ }
+ }
+
+ cpuif->status = read32(VGIC_HV_PHY_BASE + GICH_CTL);
+ cpuif->activepris = read32(VGIC_HV_PHY_BASE + GICH_APR0);
+
+ write32(VGIC_HV_PHY_BASE + GICH_CTL, 0); /* SMP */
+
+ return;
}
void vgic_loadstate(unsigned int cpu)
{
- struct gic_cpuif *cpuif = &(cpuifs[cpu]);
- unsigned int i, ctr = 0, cur_elrsr = 0;
- unsigned max_index = elrsr_max_index(cpu);
+ struct gic_cpuif *cpuif = &(cpuifs[cpu]);
+ unsigned int i, ctr = 0, cur_elrsr = 0;
+ unsigned max_index = elrsr_max_index(cpu);
+
+ for (ctr = 0; ctr <= max_index; ctr++) {
+ cur_elrsr = cpuif->elrsr[ctr];
- for(ctr = 0; ctr <= max_index; ctr++) {
- cur_elrsr = cpuif->elrsr[ctr];
+ for (i = bitindex(cur_elrsr); ((int)i) >= 0;
+ i = bitindex(cur_elrsr)) {
+ write32(VGIC_HV_PHY_BASE + GICH_LR_BASE +
+ ((1 << 7) * ctr) + (i << 2), cpuif->ints[i]);
- for(i = bitindex(cur_elrsr); ((int) i) >= 0; i = bitindex(cur_elrsr)) {
- write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2), cpuif->ints[i]);
-
- /* Clear the restored bit index */
- cur_elrsr &= ~(1 << i);
- }
- }
+ /* Clear the restored bit index */
+ cur_elrsr &= ~(1 << i);
+ }
+ }
- write32(VGIC_HV_PHY_BASE + GICH_CTL, cpuif->status);
- write32(VGIC_HV_PHY_BASE + GICH_APR0, cpuif->activepris);
+ write32(VGIC_HV_PHY_BASE + GICH_CTL, cpuif->status);
+ write32(VGIC_HV_PHY_BASE + GICH_APR0, cpuif->activepris);
- return;
+ return;
}
/*
@@ -296,130 +307,130 @@ void vgic_loadstate(unsigned int cpu)
*/
void vgic_refresh(unsigned int cpu)
{
- struct gic_cpuif *cpuif = &(cpuifs[cpu]);
- unsigned int i, value, status, newstatus;
- struct overflowint **oflowh, *oflowp;
-
- /*
- * Grab a copy of the status.
- */
- status = get_vgic_status();
-
- /*
- * "newstatus" is the value to be written back if needed. Whatever
- * * happens, we will clear the slipped EOI count by the time we are done
- */
- newstatus = status & 0x07FFFFFF;
-
- /*
- * See if there are any "slipped" EOIs
- */
- i = (status >> 27) & 0x1F;
-
- if (i) {
- /*
- * If there are, let's deal with them.
- * *
- * * We will walk through the list of queued interrupts, deactivating the
- * * ACTIVE ones as needed until we either have no more slipped EOI's to
- * * do or run out of queued interrupts. If we run out of queued
- * * interrupts first, that's UNPREDICTABLE behaviour (and the fault of
- * * the VM). In this case we will just ignore the surplus EOIs.
- * *
- * * After EOI'ing, we delete the entry if it was just ACTIVE or set it
- * * to PENDING if it was PENDING+ACTIVE.
- * *
- * * Use a handle to point to the list entries to avoid the need for
- * * special cases in the loop.
- */
- oflowh = &(cpuif->overflow);
-
- while (i && *oflowh) {
- value = (*oflowh)->value;
- if (value & VGIC_ENTRY_ACTIVE) {
- /*
- * It's ACTIVE (or PENDING+ACTIVE)
- */
- i--;
-
- if (value & VGIC_ENTRY_HW) {
- /*
- * HW bit set, so we need to pass on an EOI. This doesn't ever happen
- * * for IPIs, so just pass on the 10-bit "Hardware ID"
- */
- gic_deactivate_int((value >> 10) &
- 0x3FF);
- }
-
- if (value & VGIC_ENTRY_PENDING) {
- /*
- * It was PENDING+ACTIVE, clear the ACTIVE bit and move on
- */
- (*oflowh)->value &= ~VGIC_ENTRY_ACTIVE;
- } else {
- /*
- * It was only ACTIVE, so we need to delete it..
- */
- oflowp = *oflowh;
- oflowh = &(oflowp->next);
- free_overflowint(oflowp, cpu);
- }
- } else {
- /*
- * It wasn't ACTIVE :( Try the next one.
- */
- oflowh = &((*oflowh)->next);
- }
- }
- }
-
- /*
- * Now populate any spare slots with entries from the list (if any). Also fix up the free slot bitmap
- */
- for (i = 0; i < hv_lr_count[cpu]; i++) {
- value = get_vgic_entry(i);
-
- if (value & 0x30000000) {
- /*
- * This entry already contains a valid interrupt, skip
- */
- continue;
- }
-
- /*
- * Not a valid interrupt
- */
- oflowp = cpuif->overflow;
- if (oflowp) {
- /*
- * If there's a queue, move the top entry out of the queue and into
- * * this slot..
- */
- cpuif->overflow = oflowp->next;
-
- set_vgic_entry(oflowp->value, i);
- free_overflowint(oflowp, cpu);
- } else {
- /*
- * .. otherwise mark it as available.
- */
- cpuif->freelist |= (1 << i);
- }
- }
-
- /*
- * If we now don't have any overflow, clear the status bit
- */
- if (!(cpuif->overflow)) {
- newstatus &= ~0x2;
- }
-
- /*
- * Refresh status if needed
- */
- if (newstatus != status) {
- set_vgic_status(newstatus);
- }
+ struct gic_cpuif *cpuif = &(cpuifs[cpu]);
+ unsigned int i, value, status, newstatus;
+ struct overflowint **oflowh, *oflowp;
+
+ /*
+ * Grab a copy of the status.
+ */
+ status = get_vgic_status();
+
+ /*
+ * "newstatus" is the value to be written back if needed. Whatever
+ * * happens, we will clear the slipped EOI count by the time we are done
+ */
+ newstatus = status & 0x07FFFFFF;
+
+ /*
+ * See if there are any "slipped" EOIs
+ */
+ i = (status >> 27) & 0x1F;
+
+ if (i) {
+ /*
+ * If there are, let's deal with them.
+ * *
+ * * We will walk through the list of queued interrupts, deactivating the
+ * * ACTIVE ones as needed until we either have no more slipped EOI's to
+ * * do or run out of queued interrupts. If we run out of queued
+ * * interrupts first, that's UNPREDICTABLE behaviour (and the fault of
+ * * the VM). In this case we will just ignore the surplus EOIs.
+ * *
+ * * After EOI'ing, we delete the entry if it was just ACTIVE or set it
+ * * to PENDING if it was PENDING+ACTIVE.
+ * *
+ * * Use a handle to point to the list entries to avoid the need for
+ * * special cases in the loop.
+ */
+ oflowh = &(cpuif->overflow);
+
+ while (i && *oflowh) {
+ value = (*oflowh)->value;
+ if (value & VGIC_ENTRY_ACTIVE) {
+ /*
+ * It's ACTIVE (or PENDING+ACTIVE)
+ */
+ i--;
+
+ if (value & VGIC_ENTRY_HW) {
+ /*
+ * HW bit set, so we need to pass on an EOI. This doesn't ever happen
+ * * for IPIs, so just pass on the 10-bit "Hardware ID"
+ */
+ gic_deactivate_int((value >> 10) &
+ 0x3FF);
+ }
+
+ if (value & VGIC_ENTRY_PENDING) {
+ /*
+ * It was PENDING+ACTIVE, clear the ACTIVE bit and move on
+ */
+ (*oflowh)->value &= ~VGIC_ENTRY_ACTIVE;
+ } else {
+ /*
+ * It was only ACTIVE, so we need to delete it..
+ */
+ oflowp = *oflowh;
+ oflowh = &(oflowp->next);
+ free_overflowint(oflowp, cpu);
+ }
+ } else {
+ /*
+ * It wasn't ACTIVE :( Try the next one.
+ */
+ oflowh = &((*oflowh)->next);
+ }
+ }
+ }
+
+ /*
+ * Now populate any spare slots with entries from the list (if any). Also fix up the free slot bitmap
+ */
+ for (i = 0; i < hv_lr_count[cpu]; i++) {
+ value = get_vgic_entry(i);
+
+ if (value & 0x30000000) {
+ /*
+ * This entry already contains a valid interrupt, skip
+ */
+ continue;
+ }
+
+ /*
+ * Not a valid interrupt
+ */
+ oflowp = cpuif->overflow;
+ if (oflowp) {
+ /*
+ * If there's a queue, move the top entry out of the queue and into
+ * * this slot..
+ */
+ cpuif->overflow = oflowp->next;
+
+ set_vgic_entry(oflowp->value, i);
+ free_overflowint(oflowp, cpu);
+ } else {
+ /*
+ * .. otherwise mark it as available.
+ */
+ cpuif->freelist |= (1 << i);
+ }
+ }
+
+ /*
+ * If we now don't have any overflow, clear the status bit
+ */
+ if (!(cpuif->overflow)) {
+ newstatus &= ~0x2;
+ }
+
+ /*
+ * Refresh status if needed
+ */
+ if (newstatus != status) {
+ set_vgic_status(newstatus);
+ }
}
/*
@@ -432,67 +443,67 @@ void vgic_refresh(unsigned int cpu)
*/
void enqueue_interrupt(unsigned int descr, unsigned int cpu)
{
- unsigned int slot;
- struct gic_cpuif *cpuif;
-
- cpuif = &(cpuifs[cpu]);
-
- /*
- * If there are no free slots, trigger a maintenance
- */
- if (!(cpuif->freelist)) {
- vgic_refresh(cpu);
- }
-
- if (cpuif->freelist) {
- /*
- * There is a free slot, use it.
- */
- slot = cpuif->freelist; /* Take the free list.. */
- slot &= (-slot); /* .. extract one set bit .. */
- cpuif->freelist &= (~slot); /* .. clear that bit from free list .. */
- slot = bitindex(slot); /* .. and convert to number. */
-
- set_vgic_entry(descr, slot);
- } else {
- /*
- * There are no free slots, we are either queuing this one or swapping another out
- */
- unsigned int pri = (descr >> 20) & 0xFF;
- unsigned int minpri = 0;
- unsigned int minslot = 0;
- unsigned int i, j;
-
- if (cpuif->overflow && cpuif->overflow->priority <= pri) {
- /*
- * There are already queued interrupts with the same or higher priority, just queue this one
- */
- set_vgic_queue_entry(cpuif, descr);
- return;
- }
-
- /*
- * Otherwise find the lowest priority entry..
- */
- for (i = 0; i < hv_lr_count[cpu]; i++) {
- j = (get_vgic_entry(i) >> 20) & 0xFF; /* Get the priority for the current thing in this slot */
- if (i == 0 || (j > minpri)) {
- minpri = j;
- minslot = i;
- }
- }
-
- if (minpri > pri) {
- /*
- * If it's lower priority than this new one we kick it out
- */
- set_vgic_queue_entry(cpuif, get_vgic_entry(minslot));
- set_vgic_entry(descr, minslot);
- } else {
- /*
- * Otherwise just queue the new one
- */
- set_vgic_queue_entry(cpuif, descr);
- }
- }
+ unsigned int slot;
+ struct gic_cpuif *cpuif;
+
+ cpuif = &(cpuifs[cpu]);
+
+ /*
+ * If there are no free slots, trigger a maintenance
+ */
+ if (!(cpuif->freelist)) {
+ vgic_refresh(cpu);
+ }
+
+ if (cpuif->freelist) {
+ /*
+ * There is a free slot, use it.
+ */
+ slot = cpuif->freelist; /* Take the free list.. */
+ slot &= (-slot); /* .. extract one set bit .. */
+ cpuif->freelist &= (~slot); /* .. clear that bit from free list .. */
+ slot = bitindex(slot); /* .. and convert to number. */
+
+ set_vgic_entry(descr, slot);
+ } else {
+ /*
+ * There are no free slots, we are either queuing this one or swapping another out
+ */
+ unsigned int pri = (descr >> 20) & 0xFF;
+ unsigned int minpri = 0;
+ unsigned int minslot = 0;
+ unsigned int i, j;
+
+ if (cpuif->overflow && cpuif->overflow->priority <= pri) {
+ /*
+ * There are already queued interrupts with the same or higher priority, just queue this one
+ */
+ set_vgic_queue_entry(cpuif, descr);
+ return;
+ }
+
+ /*
+ * Otherwise find the lowest priority entry..
+ */
+ for (i = 0; i < hv_lr_count[cpu]; i++) {
+ j = (get_vgic_entry(i) >> 20) & 0xFF; /* Get the priority for the current thing in this slot */
+ if (i == 0 || (j > minpri)) {
+ minpri = j;
+ minslot = i;
+ }
+ }
+
+ if (minpri > pri) {
+ /*
+ * If it's lower priority than this new one we kick it out
+ */
+ set_vgic_queue_entry(cpuif, get_vgic_entry(minslot));
+ set_vgic_entry(descr, minslot);
+ } else {
+ /*
+ * Otherwise just queue the new one
+ */
+ set_vgic_queue_entry(cpuif, descr);
+ }
+ }
}
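The slot allocation in enqueue_interrupt() relies on a standard bit trick: ANDing the free-list bitmask with its own negation isolates the lowest set bit, which is then cleared and converted to an index. A minimal self-contained sketch of that step, not part of this commit, with the switcher's bitindex() helper replaced by an explicit loop:

static int alloc_free_slot(unsigned int *freelist)
{
	unsigned int bit, index = 0;

	if (*freelist == 0)
		return -1;			/* no free list registers left */

	bit = *freelist & (-*freelist);		/* isolate the lowest set bit */
	*freelist &= ~bit;			/* mark that slot as in use */

	while (!(bit & 1)) {			/* convert bit to index, e.g. 0x8 -> 3 */
		bit >>= 1;
		index++;
	}
	return index;
}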
diff --git a/big-little/include/arm.h b/big-little/include/arm.h
index beac64d..4f33cb5 100644
--- a/big-little/include/arm.h
+++ b/big-little/include/arm.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef ARM_H
#define ARM_H
@@ -53,7 +53,7 @@
* These macros extract the page/section numbers from an address
*/
#define pagenum(x) (((x) >> 12) & 0xFF)
-#define secnum(x) ((x) >> 21) /* i$$NEW$$ */
+#define secnum(x) ((x) >> 21) /* i$$NEW$$ */
//#define secnum(x) ((x) >> 20) /* orig */
#define MODE_USR 0x10
diff --git a/big-little/include/bakery.h b/big-little/include/bakery.h
index a13ec81..261acf3 100644
--- a/big-little/include/bakery.h
+++ b/big-little/include/bakery.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef _BAKERY_H_
#define _BAKERY_H_
@@ -29,8 +29,8 @@
* A pointer to this struct is passed to the lock/unlock functions.
*/
typedef struct {
- volatile char entering[MAX_CPUS];
- volatile unsigned number[MAX_CPUS];
+ volatile char entering[MAX_CPUS];
+ volatile unsigned number[MAX_CPUS];
} bakery_t;
/*
@@ -50,4 +50,4 @@ extern void get_bakery_spinlock(unsigned cpuid, bakery_t * bakery);
*/
extern void release_bakery_spinlock(unsigned cpuid, bakery_t * bakery);
-#endif /* _BAKERY_H_ */
+#endif /* _BAKERY_H_ */
diff --git a/big-little/include/bl.h b/big-little/include/bl.h
index fe0c4b5..94e2fb0 100644
--- a/big-little/include/bl.h
+++ b/big-little/include/bl.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __BL_H__
#define __BL_H__
@@ -26,10 +26,10 @@
#include "misc.h"
typedef struct vm_c {
- unsigned gp_regs[15];
- unsigned elr_hyp;
- unsigned spsr;
- unsigned usr_lr;
+ unsigned gp_regs[15];
+ unsigned elr_hyp;
+ unsigned spsr;
+ unsigned usr_lr;
} vm_context;
/*
@@ -37,11 +37,11 @@ typedef struct vm_c {
* mode upon entry into the HYP mode synchronously/asynchronously.
*/
typedef struct vm_s {
- unsigned stack[STACK_SIZE];
- vm_context context;
+ unsigned stack[STACK_SIZE];
+ vm_context context;
} vm_state;
extern vm_state guestos_state[MAX_CPUIFS];
extern void bl_setup(void);
extern void hyp_warm_reset_handler(void);
-#endif /* __BL_H__ */
+#endif /* __BL_H__ */
diff --git a/big-little/include/context.h b/big-little/include/context.h
index 23672b3..11a737c 100644
--- a/big-little/include/context.h
+++ b/big-little/include/context.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __CONTEXT_H__
#define __CONTEXT_H__
@@ -26,56 +26,56 @@
#include "misc.h"
typedef struct ns_gic_cpu_context {
- unsigned int gic_cpu_if_regs[32]; /* GIC context local to the CPU */
- unsigned int gic_dist_if_pvt_regs[32]; /* GIC SGI/PPI context local to the CPU */
+ unsigned int gic_cpu_if_regs[32]; /* GIC context local to the CPU */
+ unsigned int gic_dist_if_pvt_regs[32]; /* GIC SGI/PPI context local to the CPU */
} gic_cpu_context;
typedef struct fault_regs {
- unsigned dfar;
- unsigned ifar;
- unsigned ifsr;
- unsigned dfsr;
- unsigned adfsr;
- unsigned aifsr;
+ unsigned dfar;
+ unsigned ifar;
+ unsigned ifsr;
+ unsigned dfsr;
+ unsigned adfsr;
+ unsigned aifsr;
} cp15_fault_regs;
typedef struct ns_banked_cp15_context {
- unsigned int cp15_misc_regs[2]; /* cp15 miscellaneous registers */
- unsigned int cp15_ctrl_regs[20]; /* cp15 control registers */
- unsigned int cp15_mmu_regs[16]; /* cp15 mmu registers */
- cp15_fault_regs ns_cp15_fault_regs; /* cp15 fault status registers */
+ unsigned int cp15_misc_regs[2]; /* cp15 miscellaneous registers */
+ unsigned int cp15_ctrl_regs[20]; /* cp15 control registers */
+ unsigned int cp15_mmu_regs[16]; /* cp15 mmu registers */
+ cp15_fault_regs ns_cp15_fault_regs; /* cp15 fault status registers */
} banked_cp15_context;
typedef struct gen_tmr_ctx {
- unsigned cntfrq;
- unsigned long long cntvoff;
- unsigned cnthctl;
- unsigned cntkctl;
- unsigned long long cntp_cval;
- unsigned cntp_tval;
- unsigned cntp_ctl;
- unsigned long long cntv_cval;
- unsigned cntv_tval;
- unsigned cntv_ctl;
- unsigned long long cnthp_cval;
- unsigned cnthp_tval;
- unsigned cnthp_ctl;
+ unsigned cntfrq;
+ unsigned long long cntvoff;
+ unsigned cnthctl;
+ unsigned cntkctl;
+ unsigned long long cntp_cval;
+ unsigned cntp_tval;
+ unsigned cntp_ctl;
+ unsigned long long cntv_cval;
+ unsigned cntv_tval;
+ unsigned cntv_ctl;
+ unsigned long long cnthp_cval;
+ unsigned cnthp_tval;
+ unsigned cnthp_ctl;
} generic_timer_context;
typedef struct ns_cpu_context {
- unsigned int banked_cpu_regs[32]; /* Banked cpu registers */
- banked_cp15_context banked_cp15_regs; /* Per cpu banked cp15 context */
- unsigned int pmon_regs[32]; /* Generic performance monitor registers */
- generic_timer_context cp15_timer_ctx; /* Global counter registers if accessible in NS world */
- gic_cpu_context gic_cpu_ctx; /* Per cpu GIC distributor and interface context */
- unsigned int endianess; /* Per cpu endianess */
- unsigned int vfp_regs[34]; /* Dummy entry for VFP context. */
- unsigned int debug_regs[32]; /* Dummy entry for Debug context. TODO */
+ unsigned int banked_cpu_regs[32]; /* Banked cpu registers */
+ banked_cp15_context banked_cp15_regs; /* Per cpu banked cp15 context */
+ unsigned int pmon_regs[32]; /* Generic performance monitor registers */
+ generic_timer_context cp15_timer_ctx; /* Global counter registers if accessible in NS world */
+ gic_cpu_context gic_cpu_ctx; /* Per cpu GIC distributor and interface context */
+ unsigned int endianess; /* Per cpu endianness */
+ unsigned int vfp_regs[34]; /* Dummy entry for VFP context. */
+ unsigned int debug_regs[32]; /* Dummy entry for Debug context. TODO */
} cpu_context;
typedef struct ns_global_context {
- unsigned int gic_dist_if_regs[512]; /* GIC distributor context to be saved by the last cpu. */
- unsigned int generic_timer_regs[8]; /* Global timers if the NS world has access to them */
+ unsigned int gic_dist_if_regs[512]; /* GIC distributor context to be saved by the last cpu. */
+ unsigned int generic_timer_regs[8]; /* Global timers if the NS world has access to them */
} global_context;
/*
@@ -83,11 +83,11 @@ typedef struct ns_global_context {
* context handler.
*/
typedef struct os_state {
- unsigned sctlr;
- unsigned dacr;
- unsigned ttbr0;
- unsigned nmrr;
- unsigned prrr;
+ unsigned sctlr;
+ unsigned dacr;
+ unsigned ttbr0;
+ unsigned nmrr;
+ unsigned prrr;
} os_state;
/*
@@ -95,10 +95,10 @@ typedef struct os_state {
* a multi-cluster system
*/
typedef struct core_context {
- /*
- * Non-secure context save area
- */
- cpu_context ns_cpu_ctx;
+ /*
+ * Non-secure context save area
+ */
+ cpu_context ns_cpu_ctx;
} core_context;
@@ -107,18 +107,18 @@ typedef struct core_context {
* cluster system
*/
typedef struct cluster_context {
- core_context core[MAX_CORES];
- unsigned num_cores;
- global_context ns_cluster_ctx;
+ core_context core[MAX_CORES];
+ unsigned num_cores;
+ global_context ns_cluster_ctx;
} cluster_context;
/*
* Top level structure to hold the complete context of a multi cluster system
*/
typedef struct system_context {
- cluster_context cluster;
- unsigned num_clusters;
- unsigned warm_reset;
+ cluster_context cluster;
+ unsigned num_clusters;
+ unsigned warm_reset;
} system_context;
extern void context_save(unsigned, unsigned);
@@ -130,4 +130,4 @@ extern void restore_hyp_context(unsigned, unsigned);
extern void save_vfp(unsigned *);
extern void restore_vfp(unsigned *);
extern void enable_trigger(unsigned);
-#endif /* __CONTEXT_H__ */
+#endif /* __CONTEXT_H__ */
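Taken together, these types give one save area per cpu inside a single cluster_context. A short sketch of how a cpu's non-secure context would be addressed through them, assuming the switcher_context instance declared in handler.h below; this is an illustration, not code from the tree:

static cpu_context *ns_save_area(unsigned cpu_id)
{
	core_context *core = &switcher_context.cluster.core[cpu_id];

	return &core->ns_cpu_ctx;
}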
diff --git a/big-little/include/events.h b/big-little/include/events.h
index 8f71eec..629404f 100644
--- a/big-little/include/events.h
+++ b/big-little/include/events.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __EVENTS_H__
#define __EVENTS_H__
@@ -75,4 +75,4 @@ extern void reset_event(unsigned, unsigned);
extern void wait_for_event(unsigned, unsigned);
extern void wait_for_events(unsigned);
-#endif /* __EVENTS_H__ */
+#endif /* __EVENTS_H__ */
diff --git a/big-little/include/gic_registers.h b/big-little/include/gic_registers.h
index 6b04945..8a9ce9c 100644
--- a/big-little/include/gic_registers.h
+++ b/big-little/include/gic_registers.h
@@ -18,8 +18,8 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
-
+ */
+
#ifndef __GIC_REGISTERS_H__
#define __GIC_REGISTERS_H__
@@ -98,5 +98,5 @@
#define VGIC_ENTRY_ACTIVE_PENDING 0x30000000
#define VGIC_ENTRY_PENDING 0x10000000
-#endif /* __GIC_REGISTERS_H__ */
+#endif /* __GIC_REGISTERS_H__ */
diff --git a/big-little/include/handler.h b/big-little/include/handler.h
index 7e06594..7246b9e 100644
--- a/big-little/include/handler.h
+++ b/big-little/include/handler.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __HANDLER_H__
#define __HANDLER_H__
@@ -29,4 +29,4 @@
extern system_context switcher_context;
-#endif /* __HANDLER_H__ */
+#endif /* __HANDLER_H__ */
diff --git a/big-little/include/hvc.h b/big-little/include/hvc.h
index dece5ed..d0df974 100644
--- a/big-little/include/hvc.h
+++ b/big-little/include/hvc.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __HVC_H__
#define __HVC_H__
@@ -41,4 +41,4 @@
vm_context *hvc_handler(unsigned, vm_context *);
-#endif /* __HVC_H__ */
+#endif /* __HVC_H__ */
diff --git a/big-little/include/hyp_types.h b/big-little/include/hyp_types.h
index 4e16d72..441320b 100755
--- a/big-little/include/hyp_types.h
+++ b/big-little/include/hyp_types.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __HYP_TYPES_H__
#define __HYP_TYPES_H__
@@ -35,4 +35,4 @@ typedef unsigned long long uint64_t;
#define PRIVATE static
#define PUBLIC
-#endif /* __HYP_TYPES_H__ */
+#endif /* __HYP_TYPES_H__ */
diff --git a/big-little/include/hyp_vmmap.h b/big-little/include/hyp_vmmap.h
index cc0e33d..4da98bb 100644
--- a/big-little/include/hyp_vmmap.h
+++ b/big-little/include/hyp_vmmap.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __HYP_VMMAP_H__
#define __HYP_VMMAP_H__
@@ -30,13 +30,13 @@
* d e f i n e s
* --------------------------------------------------------------------------*/
-#define GIC_ID_PHY_BASE 0x2C001000 /* Physical Distributor */
-#define GIC_IC_PHY_BASE 0x2C002000 /* Physical CPU interface */
+#define GIC_ID_PHY_BASE 0x2C001000 /* Physical Distributor */
+#define GIC_IC_PHY_BASE 0x2C002000 /* Physical CPU interface */
-#define VGIC_HV_PHY_BASE 0x2C004000 /* Hypervisor's VIew */
-#define VGIC_VM_PHY_BASE 0x2C006000 /* Virtual Machine view */
+#define VGIC_HV_PHY_BASE 0x2C004000 /* Hypervisor's View */
+#define VGIC_VM_PHY_BASE 0x2C006000 /* Virtual Machine view */
#define UART0_PHY_BASE 0x1C090000
#define UART1_PHY_BASE 0x1C0A0000
-#endif /* __HYP_VMMAP_H__ */
+#endif /* __HYP_VMMAP_H__ */
diff --git a/big-little/include/int_master.h b/big-little/include/int_master.h
index b1a8c04..4224fde 100644
--- a/big-little/include/int_master.h
+++ b/big-little/include/int_master.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
/*
* Master interrupt controller driver - talks to real IC and dispatches
@@ -30,8 +30,8 @@
#include "bl.h"
-#define INT_ENABLED 0x1 /* Interrupt is enabled, something to pass it on to */
-#define INT_ACTIVE 0x2 /* Interrupt is currently actually disabled at the real controller because it is active */
+#define INT_ENABLED 0x1 /* Interrupt is enabled, something to pass it on to */
+#define INT_ACTIVE 0x2 /* Interrupt is currently actually disabled at the real controller because it is active */
#define INT_TRIGGER 0
#define INT_ENABLE 1
diff --git a/big-little/include/misc.h b/big-little/include/misc.h
index b62070c..cc7cb3b 100644
--- a/big-little/include/misc.h
+++ b/big-little/include/misc.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef MISC_H
#define MISC_H
@@ -80,10 +80,10 @@
* TEX remap defines for first level translations
*************************************************/
/* PRRR fields for memory attributes */
-#define TR0(x) ((x) << 0) // SO
-#define TR1(x) ((x) << 2) // DV
-#define TR4(x) ((x) << 8) // NC
-#define TR7(x) ((x) << 14) // C
+#define TR0(x) ((x) << 0) // SO
+#define TR1(x) ((x) << 2) // DV
+#define TR4(x) ((x) << 8) // NC
+#define TR7(x) ((x) << 14) // C
/* PRRR fields for shareability attributes */
#define NOS0(x) ((x) << 24)
#define NOS1(x) ((x) << 25)
@@ -100,10 +100,10 @@
#define SHAREABLE 0x1
/* NMRR fields */
-#define IR7(x) ((x) << 14) // Inner Cache attributes for TEX,C,B = 1,1,1
-#define IR4(x) ((x) << 8) // Inner Cache attributes for TEX,C,B = 1,0,0
-#define OR7(x) ((x) << 30) // Outer Cache attributes for TEX,C,B = 1,1,1
-#define OR4(x) ((x) << 24) // Outer Cache attributes for TEX,C,B = 1,0,0
+#define IR7(x) ((x) << 14) // Inner Cache attributes for TEX,C,B = 1,1,1
+#define IR4(x) ((x) << 8) // Inner Cache attributes for TEX,C,B = 1,0,0
+#define OR7(x) ((x) << 30) // Outer Cache attributes for TEX,C,B = 1,1,1
+#define OR4(x) ((x) << 24) // Outer Cache attributes for TEX,C,B = 1,0,0
/* Normal memory attributes */
#define NMRR_NC 0x0
@@ -285,15 +285,15 @@
#define KFC 0x1
/* Control register bits */
-#define CR_M (1<<0) /* MMU enabled */
-#define CR_A (1<<1) /* Align fault enable */
-#define CR_C (1<<2) /* Data cache */
-#define CR_W (1<<3) /* Write buffer */
-#define CR_Z (1<<11) /* Branch prediction */
-#define CR_I (1<<12) /* Instruction cache */
-#define CR_V (1<<13) /* Vectors */
-#define CR_XP (1<<23) /* Extended page tables */
-#define CR_TRE (1<<28) /* TEX Remap */
+#define CR_M (1<<0) /* MMU enabled */
+#define CR_A (1<<1) /* Align fault enable */
+#define CR_C (1<<2) /* Data cache */
+#define CR_W (1<<3) /* Write buffer */
+#define CR_Z (1<<11) /* Branch prediction */
+#define CR_I (1<<12) /* Instruction cache */
+#define CR_V (1<<13) /* Vectors */
+#define CR_XP (1<<23) /* Extended page tables */
+#define CR_TRE (1<<28) /* TEX Remap */
/*
* Processor modes
@@ -304,8 +304,8 @@
#define USR_MODE 0x10
/* Timer Bits */
-#define HYP_TIMER_MULT 0xa /* 12Mhz * 10 i.e. interrupt every 10ms. Linux uses 12MHz * 10 */
-#define LCL_TIMER_FREQ 0x7f /* Every 128th timer acts as a trigger */
+#define HYP_TIMER_MULT 0xa /* 12MHz * 10 i.e. interrupt every 10ms. Linux uses 12MHz * 10 */
+#define LCL_TIMER_FREQ 0x7f /* Every 128th timer acts as a trigger */
#define HYP_TIMER_IRQ 0x1a
#define LCL_TIMER_IRQ 0x1e
#define TIMER_ENABLE 0x1
diff --git a/big-little/include/traps.h b/big-little/include/traps.h
index 8afaf5e..2d36210 100755
--- a/big-little/include/traps.h
+++ b/big-little/include/traps.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __TRAPS_H__
#define __TRAPS_H__
@@ -74,22 +74,22 @@
* Structure to hold the registered traps
*/
typedef struct tlist {
- unsigned int hcr;
- unsigned int hstr;
+ unsigned int hcr;
+ unsigned int hstr;
} trap_list;
/*
* Structure to hold registers to whom accesses will be trapped
*/
typedef struct rlist {
- unsigned int reg[MAX_REGS];
+ unsigned int reg[MAX_REGS];
} reg_list;
/*
* Structure to hold platform defined trap handlers
*/
typedef struct hlist {
- int (*handle[MAX_TRAPS]) (unsigned int hsr, unsigned int *operand);
+ int (*handle[MAX_TRAPS]) (unsigned int hsr, unsigned int *operand);
} handler_list;
extern trap_list cp15_trap_list[NUM_CPUS];
@@ -99,4 +99,4 @@ extern handler_list plat_handler_list[NUM_CPUS];
#if !DEBUG
#define printf(...)
#endif
-#endif /* __TRAPS_H__ */
+#endif /* __TRAPS_H__ */
diff --git a/big-little/include/vgiclib.h b/big-little/include/vgiclib.h
index 4f1499e..0d5f461 100644
--- a/big-little/include/vgiclib.h
+++ b/big-little/include/vgiclib.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef VGICLIB_H
#define VGICLIB_H
@@ -26,20 +26,20 @@
#include "gic_registers.h"
struct overflowint {
- /* This is encoded in the value, but speed optimise by splitting out */
- unsigned int priority;
- unsigned int value;
- struct overflowint *next;
+ /* This is encoded in the value, but speed optimise by splitting out */
+ unsigned int priority;
+ unsigned int value;
+ struct overflowint *next;
};
struct gic_cpuif {
- unsigned int status;
- unsigned int activepris; /* Copies of the state from the VGIC itself */
- unsigned int elrsr[2]; /* Copies of Empty list register status registers */
- unsigned int ints[VGIC_LISTENTRIES];
+ unsigned int status;
+ unsigned int activepris; /* Copies of the state from the VGIC itself */
+ unsigned int elrsr[2]; /* Copies of Empty list register status registers */
+ unsigned int ints[VGIC_LISTENTRIES];
- struct overflowint *overflow; /* List of overflowed interrupts */
- unsigned int freelist; /* Bitmask of which list entries are in use */
+ struct overflowint *overflow; /* List of overflowed interrupts */
+ unsigned int freelist; /* Bitmask of which list entries are in use */
};
void vgic_init(void);
@@ -48,4 +48,4 @@ void vgic_loadstate(unsigned int cpu);
void vgic_refresh(unsigned int cpu);
void enqueue_interrupt(unsigned int descr, unsigned int cpu);
-#endif /* VGICLIB_H */
+#endif /* VGICLIB_H */
diff --git a/big-little/include/virt_helpers.h b/big-little/include/virt_helpers.h
index 3ebea2a..7ec73c9 100644
--- a/big-little/include/virt_helpers.h
+++ b/big-little/include/virt_helpers.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef _VIRT_HELPERS_H_
#define _VIRT_HELPERS_H_
@@ -72,17 +72,17 @@ extern void cln_dcache_mva_pou(unsigned *);
* GIC functions
*/
extern void save_gic_interface(unsigned int *pointer,
- unsigned gic_interface_address);
+ unsigned gic_interface_address);
extern int save_gic_distributor_private(unsigned int *pointer,
- unsigned gic_distributor_address);
+ unsigned gic_distributor_address);
extern int save_gic_distributor_shared(unsigned int *pointer,
- unsigned gic_distributor_address);
+ unsigned gic_distributor_address);
extern void restore_gic_interface(unsigned int *pointer,
- unsigned gic_interface_address);
+ unsigned gic_interface_address);
extern void restore_gic_distributor_private(unsigned int *pointer,
- unsigned gic_distributor_address);
+ unsigned gic_distributor_address);
extern void restore_gic_distributor_shared(unsigned int *pointer,
- unsigned gic_distributor_address);
+ unsigned gic_distributor_address);
extern void hyp_save(unsigned, unsigned);
/*
@@ -90,9 +90,9 @@ extern void hyp_save(unsigned, unsigned);
*/
#if TUBE
extern void write_trace(bakery_t *, unsigned, char *, unsigned long long,
- unsigned long long, unsigned long long);
+ unsigned long long, unsigned long long);
#else
#define write_trace(...)
#endif
-#endif /* _VIRT_HELPERS_H_ */
+#endif /* _VIRT_HELPERS_H_ */
diff --git a/big-little/lib/bakery.c b/big-little/lib/bakery.c
index 5ab789e..068ac0d 100644
--- a/big-little/lib/bakery.c
+++ b/big-little/lib/bakery.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
/*
* bakery.c: Lamport's Bakery algorithm for spinlock handling
@@ -33,39 +33,39 @@
void init_bakery_spinlock(bakery_t * bakery)
{
- memset(bakery, 0, sizeof(bakery_t));
+ memset(bakery, 0, sizeof(bakery_t));
}
void get_bakery_spinlock(unsigned cpuid, bakery_t * bakery)
{
- unsigned i, max = 0, my_full_number, his_full_number;
+ unsigned i, max = 0, my_full_number, his_full_number;
- /* Get a ticket */
- bakery->entering[cpuid] = TRUE;
- for (i = 0; i < MAX_CPUS; ++i) {
- if (bakery->number[i] > max) {
- max = bakery->number[i];
- }
- }
- ++max;
- bakery->number[cpuid] = max;
- bakery->entering[cpuid] = FALSE;
+ /* Get a ticket */
+ bakery->entering[cpuid] = TRUE;
+ for (i = 0; i < MAX_CPUS; ++i) {
+ if (bakery->number[i] > max) {
+ max = bakery->number[i];
+ }
+ }
+ ++max;
+ bakery->number[cpuid] = max;
+ bakery->entering[cpuid] = FALSE;
- /* Wait for our turn */
- my_full_number = (max << 8) + cpuid;
- for (i = 0; i < MAX_CPUS; ++i) {
- while (bakery->entering[i]) ; /* Wait */
- do {
- his_full_number = bakery->number[i];
- if (his_full_number) {
- his_full_number = (his_full_number << 8) + i;
- }
- }
- while (his_full_number && (his_full_number < my_full_number));
- }
+ /* Wait for our turn */
+ my_full_number = (max << 8) + cpuid;
+ for (i = 0; i < MAX_CPUS; ++i) {
+ while (bakery->entering[i]) ; /* Wait */
+ do {
+ his_full_number = bakery->number[i];
+ if (his_full_number) {
+ his_full_number = (his_full_number << 8) + i;
+ }
+ }
+ while (his_full_number && (his_full_number < my_full_number));
+ }
}
void release_bakery_spinlock(unsigned cpuid, bakery_t * bakery)
{
- bakery->number[cpuid] = 0;
+ bakery->number[cpuid] = 0;
}
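A brief usage sketch for the lock API above, not taken from the tree: a zero-initialised bakery_t is equivalent to one passed through init_bakery_spinlock(), and each caller passes its own cpu id.

static bakery_t console_lock;	/* zero-init matches init_bakery_spinlock() */

void locked_print(unsigned cpuid, const char *msg)
{
	get_bakery_spinlock(cpuid, &console_lock);
	output_string(msg);	/* critical section */
	release_bakery_spinlock(cpuid, &console_lock);
}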
diff --git a/big-little/lib/tube.c b/big-little/lib/tube.c
index a9abb2e..8ab693e 100755
--- a/big-little/lib/tube.c
+++ b/big-little/lib/tube.c
@@ -18,41 +18,42 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
-
+ */
+
#include "misc.h"
#include "virt_helpers.h"
#include "bakery.h"
#if TUBE
-void write_trace(bakery_t *lock,
- unsigned tube_offset,
- char *msg,
- unsigned long long data0,
- unsigned long long data1,
- unsigned long long data2)
+void write_trace(bakery_t * lock,
+ unsigned tube_offset,
+ char *msg,
+ unsigned long long data0,
+ unsigned long long data1, unsigned long long data2)
{
- unsigned long long volatile *data = 0x0;
- unsigned cpu_id = read_cpuid();
- unsigned cluster_id = read_clusterid();
+ unsigned long long volatile *data = 0x0;
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+
+ get_bakery_spinlock(cpu_id, lock);
- get_bakery_spinlock(cpu_id, lock);
-
- /* Write the 3 double words that the tube supports */
- data = (unsigned long long volatile *) (KFSCB_BASE + tube_offset + TUBE_DATA0);
- *data++ = data0;
- *data++ = data1;
- *data = data2;
+ /* Write the 3 double words that the tube supports */
+ data =
+ (unsigned long long volatile *)(KFSCB_BASE + tube_offset +
+ TUBE_DATA0);
+ *data++ = data0;
+ *data++ = data1;
+ *data = data2;
- /* Write the string to the tube. */
- while (*msg != '\0') {
- write32(KFSCB_BASE + tube_offset + TUBE_CHAR, (unsigned) *msg);
- msg++;
- }
- write32(KFSCB_BASE + tube_offset + TUBE_CHAR, *msg);
+ /* Write the string to the tube. */
+ while (*msg != '\0') {
+ write32(KFSCB_BASE + tube_offset + TUBE_CHAR, (unsigned)*msg);
+ msg++;
+ }
+ write32(KFSCB_BASE + tube_offset + TUBE_CHAR, *msg);
- release_bakery_spinlock(cpu_id, lock);
+ release_bakery_spinlock(cpu_id, lock);
- return;
+ return;
}
#endif
diff --git a/big-little/lib/uart.c b/big-little/lib/uart.c
index fe88a11..26d00e5 100644
--- a/big-little/lib/uart.c
+++ b/big-little/lib/uart.c
@@ -27,7 +27,7 @@
* Call config_uart first.
* Implements fputc() so you can use printf() in your code.
*/
-
+
#include "misc.h"
#include "hyp_vmmap.h"
#include "virt_helpers.h"
@@ -57,69 +57,63 @@ static unsigned uart_base = NULL;
#define write32(addr, val) (*(volatile unsigned int *)(addr) = (val))
#define read32(addr) (*(volatile unsigned int *)(addr))
-
void config_uart(void)
{
- uart_base = UART1_PHY_BASE;
- write32(uart_base + PL011_CR, 0);
- write32(uart_base + PL011_FBRD, 0x01);
- write32(uart_base + PL011_IBRD, 0x27);
- write32(uart_base + PL011_LCRH, 0x70);
- write32(uart_base + PL011_CR, 0xf01); /* TXE|RXE|En|DTR|CTS */
+ uart_base = UART1_PHY_BASE;
+ write32(uart_base + PL011_CR, 0);
+ write32(uart_base + PL011_FBRD, 0x01);
+ write32(uart_base + PL011_IBRD, 0x27);
+ write32(uart_base + PL011_LCRH, 0x70);
+ write32(uart_base + PL011_CR, 0xf01); /* TXE|RXE|En|DTR|CTS */
}
void drain_uart_fifo(void)
{
- while (!(read32(uart_base + PL011_FR) & PL011_TXFE))
- {
- /* Do nothing */
- }
+ while (!(read32(uart_base + PL011_FR) & PL011_TXFE)) {
+ /* Do nothing */
+ }
}
static __inline void wait_for_space(void)
{
- while ((read32(uart_base + PL011_FR) & PL011_TXFF))
- {
- /* Do nothing */
- }
+ while ((read32(uart_base + PL011_FR) & PL011_TXFF)) {
+ /* Do nothing */
+ }
}
void output_char(int c)
{
- if (c == '\n')
- {
- wait_for_space();
- write32(uart_base + PL011_DR, '\r');
- }
- wait_for_space();
- write32(uart_base + PL011_DR, c);
+ if (c == '\n') {
+ wait_for_space();
+ write32(uart_base + PL011_DR, '\r');
+ }
+ wait_for_space();
+ write32(uart_base + PL011_DR, c);
}
void output_string(const char *string)
{
- int i;
-
- for (i=0; string[i]; ++i)
- {
- output_char(string[i]);
- }
+ int i;
+
+ for (i = 0; string[i]; ++i) {
+ output_char(string[i]);
+ }
}
void hexword(unsigned value)
{
- printf(" 0x%8.8x", value);
- drain_uart_fifo();
+ printf(" 0x%8.8x", value);
+ drain_uart_fifo();
}
-typedef struct __FILE
-{
- int dummy;
+typedef struct __FILE {
+ int dummy;
} FILE;
FILE __stdout;
-int fputc(int c, FILE *f)
+int fputc(int c, FILE * f)
{
- output_char(c);
- return c;
+ output_char(c);
+ return c;
}
diff --git a/big-little/lib/virt_events.c b/big-little/lib/virt_events.c
index 1b7d8d2..33e9d3b 100644
--- a/big-little/lib/virt_events.c
+++ b/big-little/lib/virt_events.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "events.h"
#include "misc.h"
@@ -40,30 +40,30 @@ extern unsigned event[][MAX_EVENTS];
*/
void set_event(unsigned event_id, unsigned cpu_id)
{
- event[cpu_id][event_id] = TRUE;
- dsb();
- sev();
- return;
+ event[cpu_id][event_id] = TRUE;
+ dsb();
+ sev();
+ return;
}
inline unsigned get_event(unsigned event_id, unsigned cpu_id)
{
- return event[cpu_id][event_id];
+ return event[cpu_id][event_id];
}
void reset_event(unsigned event_id, unsigned cpu_id)
{
- event[cpu_id][event_id] = FALSE;
- return;
+ event[cpu_id][event_id] = FALSE;
+ return;
}
void wait_for_event(unsigned event_id, unsigned cpu_id)
{
- while (FALSE == get_event(event_id, cpu_id)) {
- wfe();
- }
-
- return;
+ while (FALSE == get_event(event_id, cpu_id)) {
+ wfe();
+ }
+
+ return;
}
/*
@@ -76,45 +76,45 @@ void wait_for_event(unsigned event_id, unsigned cpu_id)
*/
void wait_for_events(unsigned event_id)
{
- unsigned ctr, event_count = 0, num_cpus = 0;
+ unsigned ctr, event_count = 0, num_cpus = 0;
- if (switcher) {
- num_cpus = num_secondaries() + 1;
- } else {
- num_cpus = CLUSTER_CPU_COUNT(host_cluster)
- + CLUSTER_CPU_COUNT(!host_cluster);
- }
+ if (switcher) {
+ num_cpus = num_secondaries() + 1;
+ } else {
+ num_cpus = CLUSTER_CPU_COUNT(host_cluster)
+ + CLUSTER_CPU_COUNT(!host_cluster);
+ }
- do {
- for (ctr = 0; ctr < num_cpus; ctr++) {
- if (TRUE == get_event(event_id, ctr)) {
- event_count++;
- reset_event(event_id, ctr);
- }
- }
+ do {
+ for (ctr = 0; ctr < num_cpus; ctr++) {
+ if (TRUE == get_event(event_id, ctr)) {
+ event_count++;
+ reset_event(event_id, ctr);
+ }
+ }
- if (event_count != num_cpus)
- wfe();
- else
- break;
- } while(1);
+ if (event_count != num_cpus)
+ wfe();
+ else
+ break;
+ } while (1);
- return;
+ return;
}
void set_events(unsigned event_id)
{
- unsigned ctr, num_cpus = 0;
+ unsigned ctr, num_cpus = 0;
- if (switcher) {
- num_cpus = num_secondaries() + 1;
- } else {
- num_cpus = CLUSTER_CPU_COUNT(host_cluster)
- + CLUSTER_CPU_COUNT(!host_cluster);
- }
+ if (switcher) {
+ num_cpus = num_secondaries() + 1;
+ } else {
+ num_cpus = CLUSTER_CPU_COUNT(host_cluster)
+ + CLUSTER_CPU_COUNT(!host_cluster);
+ }
- for (ctr = 0; ctr < num_cpus; ctr++) {
- set_event(event_id, ctr);
- }
- return;
+ for (ctr = 0; ctr < num_cpus; ctr++) {
+ set_event(event_id, ctr);
+ }
+ return;
}
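The set/wait pair above acts as a simple cross-cpu rendezvous: the producer publishes its flag with dsb() before sev(), and the consumer sits in wfe() until the flag reads TRUE, then re-arms it. A minimal sketch; SYNC_DONE is a placeholder event id for illustration, not one defined by this code:

#define SYNC_DONE 0	/* placeholder id, illustration only */

void producer(unsigned cpu_id)
{
	/* ... make the shared data visible ... */
	set_event(SYNC_DONE, cpu_id);	/* sets TRUE, dsb(), sev() */
}

void consumer(unsigned cpu_id)
{
	wait_for_event(SYNC_DONE, cpu_id);	/* wfe() until TRUE */
	reset_event(SYNC_DONE, cpu_id);		/* re-arm for the next round */
}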
diff --git a/big-little/secure_world/events.c b/big-little/secure_world/events.c
index 6577a05..4533039 100644
--- a/big-little/secure_world/events.c
+++ b/big-little/secure_world/events.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "events.h"
@@ -27,36 +27,36 @@
*/
void _set_event(unsigned event_id, unsigned cpu_id, unsigned event_type)
{
- dsb();
- secure_event[cpu_id][event_id] = TRUE;
- dsb();
- sev();
- return;
+ dsb();
+ secure_event[cpu_id][event_id] = TRUE;
+ dsb();
+ sev();
+ return;
}
inline unsigned _get_event(unsigned event_id, unsigned cpu_id)
{
- return secure_event[cpu_id][event_id];
+ return secure_event[cpu_id][event_id];
}
void _reset_event(unsigned event_id, unsigned cpu_id, unsigned event_type)
{
- dsb();
- secure_event[cpu_id][event_id] = FALSE;
- dsb();
- return;
+ dsb();
+ secure_event[cpu_id][event_id] = FALSE;
+ dsb();
+ return;
}
void _wait_for_event(unsigned event_id, unsigned cpu_id, unsigned event_type)
{
- dsb();
- do {
- wfe();
- isb();
- dsb();
- } while (FALSE == _get_event(event_id, cpu_id));
+ dsb();
+ do {
+ wfe();
+ isb();
+ dsb();
+ } while (FALSE == _get_event(event_id, cpu_id));
- return;
+ return;
}
/*
@@ -69,27 +69,27 @@ void _wait_for_event(unsigned event_id, unsigned cpu_id, unsigned event_type)
*/
void _wait_for_events(unsigned event_id, unsigned event_type)
{
- unsigned ctr, event_count = 0, num_cpus = num_secondaries() + 1;;
+ unsigned ctr, event_count = 0, num_cpus = num_secondaries() + 1;
- dsb();
- do {
- wfe();
- for (ctr = 0; ctr < num_cpus; ctr++) {
- if (TRUE == _get_event(event_id, ctr)) {
- event_count++;
- _reset_event(event_id, ctr, event_type);
- }
- }
- } while (event_count != num_cpus);
+ dsb();
+ do {
+ wfe();
+ for (ctr = 0; ctr < num_cpus; ctr++) {
+ if (TRUE == _get_event(event_id, ctr)) {
+ event_count++;
+ _reset_event(event_id, ctr, event_type);
+ }
+ }
+ } while (event_count != num_cpus);
- return;
+ return;
}
void _set_events(unsigned event_id, unsigned event_type)
{
- unsigned ctr;
- for (ctr = 0; ctr < (num_secondaries() + 1); ctr++) {
- _set_event(event_id, ctr, event_type);
- }
- return;
+ unsigned ctr;
+ for (ctr = 0; ctr < (num_secondaries() + 1); ctr++) {
+ _set_event(event_id, ctr, event_type);
+ }
+ return;
}
diff --git a/big-little/secure_world/secure_context.c b/big-little/secure_world/secure_context.c
index 37a127b..16c50ac 100644
--- a/big-little/secure_world/secure_context.c
+++ b/big-little/secure_world/secure_context.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "secure_world.h"
@@ -31,180 +31,185 @@ unsigned host_cluster = HOST_CLUSTER;
unsigned switcher = SWITCHER;
/* Bakery lock to serialize access to the tube. */
-static bakery_t lock_tube1 __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };
+static bakery_t lock_tube1 __attribute__ ((section("BL_SEC_DV_PAGE"))) = {
+0};
void enable_caches(void)
{
- unsigned cpu_id = read_cpuid();
- unsigned cluster_id = read_clusterid();
- unsigned first_cpu = find_first_cpu();
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned first_cpu = find_first_cpu();
- write_trace(&lock_tube1, SEC_TUBE1, "Secure Coherency Enable Start", read_cntpct(), 0x0, 0x0);
+ write_trace(&lock_tube1, SEC_TUBE1, "Secure Coherency Enable Start",
+ read_cntpct(), 0x0, 0x0);
- /* Turn on coherency */
- enable_coherency();
+ /* Turn on coherency */
+ enable_coherency();
- /* Enable caches */
- write_sctlr(read_sctlr() | CR_I | CR_Z | CR_C);
- dsb();
- isb();
+ /* Enable caches */
+ write_sctlr(read_sctlr() | CR_I | CR_Z | CR_C);
+ dsb();
+ isb();
- /*
- * Only one cpu should enable the CCI while the other
- * cpus wait.
- */
- if (first_cpu == cpu_id) {
- if (cluster_id)
- write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
- else
- write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+ /*
+ * Only one cpu should enable the CCI while the other
+ * cpus wait.
+ */
+ if (first_cpu == cpu_id) {
+ if (cluster_id)
+ write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+ else
+ write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
- dsb();
- }
+ dsb();
+ }
- /* Wait for the dust to settle down */
- while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
+ /* Wait for the dust to settle down */
+ while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
- write_trace(&lock_tube1, SEC_TUBE1, "Secure Coherency Enable End", read_cntpct(), 0x0, 0x0);
+ write_trace(&lock_tube1, SEC_TUBE1, "Secure Coherency Enable End",
+ read_cntpct(), 0x0, 0x0);
- return;
+ return;
}
void secure_context_restore(void)
{
- unsigned cpu_id = read_cpuid();
- sec_context *sec_ctx = &secure_context[cpu_id];
-
- write_trace(&lock_tube1, SEC_TUBE1, "Secure Context Restore Start", read_cntpct(), 0x0, 0x0);
-
- /* Restore state of CCI SAR */
- write32(CCI_BASE + SECURE_ACCESS_REG, sec_ctx->cci_sar);
-
- /* Restore the security state of PPIs. */
- write32(GIC_ID_PHY_BASE + GICD_SEC, sec_ctx->vgic_icdisr0);
-
- /* Restore the Priority mask register */
- write32(GIC_IC_PHY_BASE + GICC_PRIMASK, sec_ctx->vgic_iccpmr);
-
- /* Restore the coprocessor context */
- write_cntfrq(sec_ctx->cntfrq);
- write_mvbar(sec_ctx->mvbar);
- write_vbar(sec_ctx->vbar);
- write_nsacr(sec_ctx->nsacr);
- write_cpacr(sec_ctx->cpacr);
- write_actlr(sec_ctx->actlr);
- write_scr(sec_ctx->scr);
- write_sctlr(read_sctlr() | sec_ctx->sctlr);
- dsb();
- isb();
-
- write_trace(&lock_tube1, SEC_TUBE1, "Secure Context Restore End", read_cntpct(), 0x0, 0x0);
- return;
+ unsigned cpu_id = read_cpuid();
+ sec_context *sec_ctx = &secure_context[cpu_id];
+
+ write_trace(&lock_tube1, SEC_TUBE1, "Secure Context Restore Start",
+ read_cntpct(), 0x0, 0x0);
+
+ /* Restore state of CCI SAR */
+ write32(CCI_BASE + SECURE_ACCESS_REG, sec_ctx->cci_sar);
+
+ /* Restore the security state of PPIs. */
+ write32(GIC_ID_PHY_BASE + GICD_SEC, sec_ctx->vgic_icdisr0);
+
+ /* Restore the Priority mask register */
+ write32(GIC_IC_PHY_BASE + GICC_PRIMASK, sec_ctx->vgic_iccpmr);
+
+ /* Restore the coprocessor context */
+ write_cntfrq(sec_ctx->cntfrq);
+ write_mvbar(sec_ctx->mvbar);
+ write_vbar(sec_ctx->vbar);
+ write_nsacr(sec_ctx->nsacr);
+ write_cpacr(sec_ctx->cpacr);
+ write_actlr(sec_ctx->actlr);
+ write_scr(sec_ctx->scr);
+ write_sctlr(read_sctlr() | sec_ctx->sctlr);
+ dsb();
+ isb();
+
+ write_trace(&lock_tube1, SEC_TUBE1, "Secure Context Restore End",
+ read_cntpct(), 0x0, 0x0);
+ return;
}
void secure_context_save(unsigned ns_entry_point)
{
- unsigned cpu_id = read_cpuid();
- sec_context *sec_ctx = &secure_context[cpu_id];
-
- ns_entry_ptr[cpu_id] = ns_entry_point;
- sec_ctx->cci_sar = read32(CCI_BASE + SECURE_ACCESS_REG);
- sec_ctx->vgic_icdisr0 = read32(GIC_ID_PHY_BASE + GICD_SEC);
- sec_ctx->vgic_iccpmr = read32(GIC_IC_PHY_BASE + GICC_PRIMASK);
- sec_ctx->mvbar = read_mvbar();
- sec_ctx->vbar = read_vbar();
- sec_ctx->nsacr = read_nsacr();
- sec_ctx->cpacr = read_cpacr();
- sec_ctx->actlr = read_actlr();
- sec_ctx->scr = read_scr();
- sec_ctx->sctlr = read_sctlr();
- sec_ctx->cntfrq = read_cntfrq();
-
- /*
- * Now that the context has been saved, its safe to bring
- * our counterpart on the inbound cluster out of reset.
- */
- powerup_ib_core(get_inbound(), cpu_id);
-
- return;
+ unsigned cpu_id = read_cpuid();
+ sec_context *sec_ctx = &secure_context[cpu_id];
+
+ ns_entry_ptr[cpu_id] = ns_entry_point;
+ sec_ctx->cci_sar = read32(CCI_BASE + SECURE_ACCESS_REG);
+ sec_ctx->vgic_icdisr0 = read32(GIC_ID_PHY_BASE + GICD_SEC);
+ sec_ctx->vgic_iccpmr = read32(GIC_IC_PHY_BASE + GICC_PRIMASK);
+ sec_ctx->mvbar = read_mvbar();
+ sec_ctx->vbar = read_vbar();
+ sec_ctx->nsacr = read_nsacr();
+ sec_ctx->cpacr = read_cpacr();
+ sec_ctx->actlr = read_actlr();
+ sec_ctx->scr = read_scr();
+ sec_ctx->sctlr = read_sctlr();
+ sec_ctx->cntfrq = read_cntfrq();
+
+ /*
+ * Now that the context has been saved, it's safe to bring
+ * our counterpart on the inbound cluster out of reset.
+ */
+ powerup_ib_core(get_inbound(), cpu_id);
+
+ return;
}
/* Create the small page level 1 descriptor */
static void create_l1_sp_desc(unsigned virt_addr, unsigned l1_ttb_va,
- unsigned l2_ttb_pa)
+ unsigned l2_ttb_pa)
{
- unsigned ttb1_index = 0;
- unsigned ttb1_desc = 0;
-
- ttb1_index = (virt_addr & MB_MASK) >> MB_SHIFT;
-
- /*
- * Create a mapping if one is not already present.
- * Assuming that page tables are initialized to 0.
- */
- if (!(read32(l1_ttb_va + 4 * ttb1_index) & SMALL_PAGE)) {
- l2_ttb_pa = l2_ttb_pa & SP_L1_BASE_MASK;
- ttb1_desc = l2_ttb_pa | SMALL_PAGE;
- write32(l1_ttb_va + 4 * ttb1_index, ttb1_desc);
- cln_dcache_mva_pou((unsigned *)l1_ttb_va + 4 * ttb1_index);
- }
-
- return;
+ unsigned ttb1_index = 0;
+ unsigned ttb1_desc = 0;
+
+ ttb1_index = (virt_addr & MB_MASK) >> MB_SHIFT;
+
+ /*
+ * Create a mapping if one is not already present.
+ * Assuming that page tables are initialized to 0.
+ */
+ if (!(read32(l1_ttb_va + 4 * ttb1_index) & SMALL_PAGE)) {
+ l2_ttb_pa = l2_ttb_pa & SP_L1_BASE_MASK;
+ ttb1_desc = l2_ttb_pa | SMALL_PAGE;
+ write32(l1_ttb_va + 4 * ttb1_index, ttb1_desc);
+ cln_dcache_mva_pou((unsigned *)l1_ttb_va + 4 * ttb1_index);
+ }
+
+ return;
}
/* Create the small page level 2 descriptor */
static void create_l2_sp_desc(unsigned virt_addr, unsigned phys_addr,
- unsigned l2_ttb_va, unsigned attrs)
+ unsigned l2_ttb_va, unsigned attrs)
{
- unsigned int ttb2_index = 0;
- unsigned int ttb2_desc = 0;
- unsigned int mem_attrs =
- SP_SBO | SP_CACHEABLE | SP_BUFFERABLE | SP_TEX0 | SP_SHARED |
- SP_AP0;
-
- /* Use default attributes if the user has not passed any */
- if (attrs) {
- mem_attrs = attrs;
- }
-
- /* Left shift by 12 followed by a right shift by 24 gives 2nd level index */
- ttb2_index = (virt_addr << PAGE_SHIFT) >> (PAGE_SHIFT * 2);
-
- /*
- * Create a mapping if one is not already present
- * Assuming that page tables are initialized to 0.
- */
- if (!(read32(l2_ttb_va + 4 * ttb2_index))) {
- ttb2_desc = (phys_addr & PAGE_MASK) | mem_attrs;
- write32(l2_ttb_va + 4 * ttb2_index, ttb2_desc);
- cln_dcache_mva_pou((unsigned *)l2_ttb_va + 4 * ttb2_index);
- }
-
- return;
+ unsigned int ttb2_index = 0;
+ unsigned int ttb2_desc = 0;
+ unsigned int mem_attrs =
+ SP_SBO | SP_CACHEABLE | SP_BUFFERABLE | SP_TEX0 | SP_SHARED |
+ SP_AP0;
+
+ /* Use default attributes if the user has not passed any */
+ if (attrs) {
+ mem_attrs = attrs;
+ }
+
+ /* Left shift by 12 followed by a right shift by 24 gives 2nd level index */
+ ttb2_index = (virt_addr << PAGE_SHIFT) >> (PAGE_SHIFT * 2);
+
+ /*
+ * Create a mapping if one is not already present
+ * Assuming that page tables are initialized to 0.
+ */
+ if (!(read32(l2_ttb_va + 4 * ttb2_index))) {
+ ttb2_desc = (phys_addr & PAGE_MASK) | mem_attrs;
+ write32(l2_ttb_va + 4 * ttb2_index, ttb2_desc);
+ cln_dcache_mva_pou((unsigned *)l2_ttb_va + 4 * ttb2_index);
+ }
+
+ return;
}
void add_dv_page(unsigned pt_base)
{
- unsigned start_addr = (unsigned)&BL_SEC_DV_PAGE$$Base;
- unsigned dv_mem_attrs = SP_AP0 | SP_SBO | SP_XN | SP_BUFFERABLE;
- unsigned addr = 0x0;
-
- /*
- * Create the L1 small page descriptor using the base address supplied.
- * The region specified must all fit within a single 1MB section.
- */
- create_l1_sp_desc(start_addr, (unsigned)pt_base,
- (unsigned)small_pagetable);
-
- /*
- * We want all memory to be WBWA/S except for a page
- * which is device (used for the Bakery locks etc).
- */
- for (addr = start_addr & MB_MASK;
- addr < (start_addr & MB_MASK) + 0x100000; addr += 4096) {
- create_l2_sp_desc(addr, addr, (unsigned)small_pagetable,
- (addr == start_addr ? dv_mem_attrs : 0));
- }
-
- return;
+ unsigned start_addr = (unsigned)&BL_SEC_DV_PAGE$$Base;
+ unsigned dv_mem_attrs = SP_AP0 | SP_SBO | SP_XN | SP_BUFFERABLE;
+ unsigned addr = 0x0;
+
+ /*
+ * Create the L1 small page descriptor using the base address supplied.
+ * The region specified must all fit within a single 1MB section.
+ */
+ create_l1_sp_desc(start_addr, (unsigned)pt_base,
+ (unsigned)small_pagetable);
+
+ /*
+ * We want all memory to be WBWA/S except for a page
+ * which is device (used for the Bakery locks etc).
+ */
+ for (addr = start_addr & MB_MASK;
+ addr < (start_addr & MB_MASK) + 0x100000; addr += 4096) {
+ create_l2_sp_desc(addr, addr, (unsigned)small_pagetable,
+ (addr == start_addr ? dv_mem_attrs : 0));
+ }
+
+ return;
}
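The index arithmetic in create_l1_sp_desc() and create_l2_sp_desc() follows the ARM short-descriptor format: each first-level entry covers 1 MB and each second-level small-page entry covers 4 KB. A worked sketch, assuming MB_SHIFT is 20 and PAGE_SHIFT is 12, which is consistent with the shifts used above:

static unsigned l1_index(unsigned va)
{
	return va >> 20;	/* which 1 MB section */
}

static unsigned l2_index(unsigned va)
{
	return (va >> 12) & 0xff;	/* which 4 KB page within it */
}

/*
 * For va = 0x2C001000: l1_index() = 0x2C0 and l2_index() = 0x001.
 * (va << 12) >> 24, as written in create_l2_sp_desc(), gives the same
 * value on a 32-bit unsigned.
 */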
diff --git a/big-little/secure_world/secure_resets.c b/big-little/secure_world/secure_resets.c
index 73acafc..449264e 100644
--- a/big-little/secure_world/secure_resets.c
+++ b/big-little/secure_world/secure_resets.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "secure_world.h"
#include "events.h"
@@ -27,7 +27,8 @@
extern unsigned warm_reset;
/* Bakery lock to serialize access to the tube. */
-bakery_t lock_tube0 __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };
+bakery_t lock_tube0 __attribute__ ((section("BL_SEC_DV_PAGE"))) = {
+0};
/*
* Compile time switch to decided whether the outbound
@@ -49,7 +50,7 @@ unsigned ve_reset_type[NUM_CPUS];
* Allocate secure events in our device page
*/
unsigned event[MAX_CORES][MAX_SEC_EVENTS]
-__attribute__ ((section("BL_SEC_DV_PAGE")));
+ __attribute__ ((section("BL_SEC_DV_PAGE")));
/*
* Normal spinlock to guard inbound cluster registers
@@ -63,13 +64,14 @@ static unsigned lock_ib_kfscb;
* KFSCB. It will always be used when the MMU is off.
* Each cluster will anyways use it sequentially
*/
-static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };
+static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = {
+0};
/*
* Small stacks for after we have turned our caches off.
*/
static unsigned long long powerdown_stacks[NUM_CPUS][32]
-__attribute__ ((section("BL_SEC_DV_PAGE")));
+ __attribute__ ((section("BL_SEC_DV_PAGE")));
/*
* The way a warm reset is detected has changed in the post beta FastModels.
@@ -77,34 +79,36 @@ __attribute__ ((section("BL_SEC_DV_PAGE")));
* new one. Instead of dealing with a function pointer, they manipulate a
* variable.
*/
-static void set_reset_handler(unsigned cluster_id, unsigned cpu_id, void (*handler)(void))
+static void set_reset_handler(unsigned cluster_id, unsigned cpu_id,
+ void (*handler) (void))
{
#if FM_BETA
- ve_reset_type[cpu_id]++;
- cln_dcache_mva_poc(&ve_reset_type[cpu_id]);
+ ve_reset_type[cpu_id]++;
+ cln_dcache_mva_poc(&ve_reset_type[cpu_id]);
#else
- write32(KFSCB_BASE + RST_HANDLER0 + ((cpu_id + (cluster_id << 2)) << 3), (unsigned) handler);
- dsb();
+ write32(KFSCB_BASE + RST_HANDLER0 + ((cpu_id + (cluster_id << 2)) << 3),
+ (unsigned)handler);
+ dsb();
#endif
}
-static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id))(void)
-{
+static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id)) (void) {
#if FM_BETA
- return (void (*)(void)) ve_reset_type[cpu_id];
+ return (void (*)(void))ve_reset_type[cpu_id];
#else
- return (void (*)(void)) read32(KFSCB_BASE + RST_HANDLER0 + ((cpu_id + (cluster_id << 2)) << 3));
+ return (void (*)(void))read32(KFSCB_BASE + RST_HANDLER0 +
+ ((cpu_id + (cluster_id << 2)) << 3));
#endif
}
unsigned long long *get_powerdown_stack(unsigned cpu_id)
{
- return &powerdown_stacks[cpu_id + 1][0];
+ return &powerdown_stacks[cpu_id + 1][0];
}
unsigned get_inbound()
{
- return !read_clusterid();
+ return !read_clusterid();
}
/*
@@ -112,40 +116,41 @@ unsigned get_inbound()
*/
void powerup_ib_core(unsigned cluster_id, unsigned cpu_id)
{
- unsigned rst_stat_reg = 0x0;
- unsigned cpu_mask = 0x0;
- void (*cold_reset_handler)(void) = 0x0;
- void (*warm_reset_handler)(void) = (void (*)(void)) &warm_reset;
-
- if (cold_reset_handler == get_reset_handler(cluster_id, cpu_id)) {
- set_reset_handler(cluster_id, cpu_id, warm_reset_handler);
- } else {
- if (flush_ob_l2) {
+ unsigned rst_stat_reg = 0x0;
+ unsigned cpu_mask = 0x0;
+ void (*cold_reset_handler) (void) = 0x0;
+ void (*warm_reset_handler) (void) = (void (*)(void))&warm_reset;
+
+ if (cold_reset_handler == get_reset_handler(cluster_id, cpu_id)) {
+ set_reset_handler(cluster_id, cpu_id, warm_reset_handler);
+ } else {
+ if (flush_ob_l2) {
#if FLUSH_L2_FIX
- set_event(FLUSH_L2, cpu_id);
+ set_event(FLUSH_L2, cpu_id);
#endif
- }
-
- /*
- * The outbound cluster's last cpu send an event
- * indicating that its finished the last switchover.
- * Wait for it before bringing it's cores out of
- * reset.
- */
- wait_for_event(OB_SHUTDOWN, cpu_id);
- reset_event(OB_SHUTDOWN, cpu_id);
- }
-
- write_trace(&lock_tube0, SEC_TUBE0, "Powerup Inbound", read_cntpct(), 0x0, 0x0);
-
- spin_lock(&lock_ib_kfscb);
- rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));
- cpu_mask = 1 << 8 | (1 << 4) << cpu_id | 1 << cpu_id;
- rst_stat_reg &= ~cpu_mask;
- write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), rst_stat_reg);
- spin_unlock(&lock_ib_kfscb);
-
- return;
+ }
+
+ /*
+ * The outbound cluster's last cpu sends an event
+ * indicating that it has finished the last switchover.
+ * Wait for it before bringing its cores out of
+ * reset.
+ */
+ wait_for_event(OB_SHUTDOWN, cpu_id);
+ reset_event(OB_SHUTDOWN, cpu_id);
+ }
+
+ write_trace(&lock_tube0, SEC_TUBE0, "Powerup Inbound", read_cntpct(),
+ 0x0, 0x0);
+
+ spin_lock(&lock_ib_kfscb);
+ rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));
+ cpu_mask = 1 << 8 | (1 << 4) << cpu_id | 1 << cpu_id;
+ rst_stat_reg &= ~cpu_mask;
+ write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), rst_stat_reg);
+ spin_unlock(&lock_ib_kfscb);
+
+ return;
}
/*
@@ -154,19 +159,19 @@ void powerup_ib_core(unsigned cluster_id, unsigned cpu_id)
*/
void powerdown_ob_core(unsigned cluster_id, unsigned cpu_id)
{
- unsigned val = 0x0;
- unsigned mask = 0x0;
+ unsigned val = 0x0;
+ unsigned mask = 0x0;
- get_bakery_spinlock(cpu_id, &lock_ob_kfscb);
+ get_bakery_spinlock(cpu_id, &lock_ob_kfscb);
- val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
- mask = (1 << cpu_id) << 4;
- val |= mask;
- write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);
+ val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
+ mask = (1 << cpu_id) << 4;
+ val |= mask;
+ write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);
- release_bakery_spinlock(cpu_id, &lock_ob_kfscb);
+ release_bakery_spinlock(cpu_id, &lock_ob_kfscb);
- return;
+ return;
}
/*
@@ -174,19 +179,19 @@ void powerdown_ob_core(unsigned cluster_id, unsigned cpu_id)
*/
void powerdown_ob_cluster(unsigned cluster_id, unsigned cpu_id)
{
- unsigned val = 0x0;
- unsigned mask = 0x0;
+ unsigned val = 0x0;
+ unsigned mask = 0x0;
- get_bakery_spinlock(cpu_id, &lock_ob_kfscb);
+ get_bakery_spinlock(cpu_id, &lock_ob_kfscb);
- val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
- mask = 1 << 8;
- val |= mask;
- write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);
+ val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
+ mask = 1 << 8;
+ val |= mask;
+ write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);
- release_bakery_spinlock(cpu_id, &lock_ob_kfscb);
+ release_bakery_spinlock(cpu_id, &lock_ob_kfscb);
- return;
+ return;
}
/*
@@ -194,99 +199,105 @@ void powerdown_ob_cluster(unsigned cluster_id, unsigned cpu_id)
* as it does not hold a lock.
*/
unsigned reset_status(unsigned cluster_id, unsigned rst_level,
- unsigned cpu_mask)
+ unsigned cpu_mask)
{
- unsigned rst_stat_reg = 0x0;
-
- rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));
-
- switch (rst_level) {
- case CLUSTER_RESET:
- return rst_stat_reg >> 8;
- case CORE_PORESET:
- return ((rst_stat_reg >> 4) & 0xf) & cpu_mask;
- case CORE_RESET:
- return (rst_stat_reg & 0xf) & cpu_mask;
- default:
- return 0;
- }
+ unsigned rst_stat_reg = 0x0;
+
+ rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));
+
+ switch (rst_level) {
+ case CLUSTER_RESET:
+ return rst_stat_reg >> 8;
+ case CORE_PORESET:
+ return ((rst_stat_reg >> 4) & 0xf) & cpu_mask;
+ case CORE_RESET:
+ return (rst_stat_reg & 0xf) & cpu_mask;
+ default:
+ return 0;
+ }
}
void powerdown_cluster(void)
{
- unsigned cpu_id = read_cpuid();
- unsigned cluster_id = read_clusterid();
- unsigned secondary_mask = 0x0;
- unsigned first_cpu = find_first_cpu();
-
- /*
- * Brute force way of cleaning the L1 and L2 caches of the outbound cluster.
- * All cpus flush their L1 caches. The 'first_cpu' waits for the others to
- * finish this operation before flushing the L2
- */
- write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush Begin", read_cntpct(), 0x0, 0x0);
- write_sctlr(read_sctlr() & ~CR_C & ~CR_M);
- dsb();
- isb();
- inv_icache_all();
- cache_maint_op(L1, CLN_INV);
- disable_coherency();
- write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush End", read_cntpct(), 0x0, 0x0);
- set_event(SEC_L1_DONE, cpu_id);
-
- if (cpu_id == first_cpu) {
-
- wait_for_events(SEC_L1_DONE);
-
- if (flush_ob_l2) {
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned secondary_mask = 0x0;
+ unsigned first_cpu = find_first_cpu();
+
+ /*
+ * Brute force way of cleaning the L1 and L2 caches of the outbound cluster.
+ * All cpus flush their L1 caches. The 'first_cpu' waits for the others to
+ * finish this operation before flushing the L2
+ */
+ write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush Begin", read_cntpct(),
+ 0x0, 0x0);
+ write_sctlr(read_sctlr() & ~CR_C & ~CR_M);
+ dsb();
+ isb();
+ inv_icache_all();
+ cache_maint_op(L1, CLN_INV);
+ disable_coherency();
+ write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush End", read_cntpct(), 0x0,
+ 0x0);
+ set_event(SEC_L1_DONE, cpu_id);
+
+ if (cpu_id == first_cpu) {
+
+ wait_for_events(SEC_L1_DONE);
+
+ if (flush_ob_l2) {
#if FLUSH_L2_FIX
- wait_for_event(FLUSH_L2, cpu_id);
- reset_event(FLUSH_L2, cpu_id);
+ wait_for_event(FLUSH_L2, cpu_id);
+ reset_event(FLUSH_L2, cpu_id);
#endif
- write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush Begin", read_cntpct(), 0x0, 0x0);
- cache_maint_op(L2, CLN_INV);
- write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush End", read_cntpct(), 0x0, 0x0);
-
- /* Turn off CCI snoops & DVM messages */
- if (cluster_id)
- write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
- else
- write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
-
- dsb();
-
- /* Wait for the dust to settle down */
- while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
- }
-
- /********************* RESET HANDLING **************************************
+ write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush Begin",
+ read_cntpct(), 0x0, 0x0);
+ cache_maint_op(L2, CLN_INV);
+ write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush End",
+ read_cntpct(), 0x0, 0x0);
+
+ /* Turn off CCI snoops & DVM messages */
+ if (cluster_id)
+ write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
+ else
+ write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG,
+ 0x0);
+
+ dsb();
+
+ /* Wait for the dust to settle down */
+ while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
+ }
+
+ /********************* RESET HANDLING **************************************
* Secondaries place themselves in reset while the 'first_cpu' waits for
* them to do so.
***************************************************************************/
- /*
- * Read the L2 control to get the number of secondary
- * cores present on this cluster. Shift mask by one to
- * get correct mask which includes the primary
- */
- secondary_mask = (1 << num_secondaries()) - 1;
- secondary_mask <<= 1;
+ /*
+ * Read the L2 control to get the number of secondary
+ * cores present on this cluster. Shift mask by one to
+ * get correct mask which includes the primary
+ */
+ secondary_mask = (1 << num_secondaries()) - 1;
+ secondary_mask <<= 1;
- /* Wait for other cpus to enter reset */
- while (secondary_mask !=
- reset_status(cluster_id, CORE_PORESET, secondary_mask)) ;
+ /* Wait for other cpus to enter reset */
+ while (secondary_mask !=
+ reset_status(cluster_id, CORE_PORESET, secondary_mask)) ;
- if (flush_ob_l2)
- powerdown_ob_cluster(cluster_id, cpu_id);
- else
- powerdown_ob_core(cluster_id, cpu_id);
+ if (flush_ob_l2)
+ powerdown_ob_cluster(cluster_id, cpu_id);
+ else
+ powerdown_ob_core(cluster_id, cpu_id);
- set_events(OB_SHUTDOWN);
+ set_events(OB_SHUTDOWN);
- } else {
- powerdown_ob_core(cluster_id, cpu_id);
- }
+ } else {
+ powerdown_ob_core(cluster_id, cpu_id);
+ }
- write_trace(&lock_tube0, SEC_TUBE0, "Reset Initiated", read_cntpct(), 0x0, 0x0);
- return;
+ write_trace(&lock_tube0, SEC_TUBE0, "Reset Initiated", read_cntpct(),
+ 0x0, 0x0);
+ return;
}
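reset_status() and powerup_ib_core() above imply the RST_HOLD/RST_STAT bit layout: bit 8 is the cluster reset, bits 7:4 are the per-core powered-on resets and bits 3:0 the per-core resets. A small sketch of the mask powerup_ib_core() clears, written out for clarity:

static unsigned core_reset_mask(unsigned cpu_id)
{
	return (1u << 8)		/* cluster reset */
	     | (1u << (4 + cpu_id))	/* this core's powered-on reset */
	     | (1u << cpu_id);		/* this core's reset */
}

/* For cpu_id = 2: 0x100 | 0x40 | 0x4 = 0x144. */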
diff --git a/big-little/secure_world/secure_world.h b/big-little/secure_world/secure_world.h
index bf78182..d466452 100644
--- a/big-little/secure_world/secure_world.h
+++ b/big-little/secure_world/secure_world.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __SECURE_WORLD_H__
#define __SECURE_WORLD_H__
@@ -63,25 +63,25 @@
#define SP_GLOBAL (1 << 11)
typedef struct sec_stack {
- unsigned stack[STACK_SIZE];
+ unsigned stack[STACK_SIZE];
} sec_stack;
typedef struct sec_context {
- unsigned sctlr;
- unsigned actlr;
- unsigned cpacr;
- unsigned nsacr;
- unsigned scr;
- unsigned vbar;
- unsigned mvbar;
- unsigned cntfrq;
- unsigned cci_sar;
- unsigned vgic_icdisr0;
- unsigned vgic_iccpmr;
+ unsigned sctlr;
+ unsigned actlr;
+ unsigned cpacr;
+ unsigned nsacr;
+ unsigned scr;
+ unsigned vbar;
+ unsigned mvbar;
+ unsigned cntfrq;
+ unsigned cci_sar;
+ unsigned vgic_icdisr0;
+ unsigned vgic_iccpmr;
} sec_context;
extern void enable_caches(void);
extern void secure_context_restore(void);
extern void secure_context_save(unsigned);
-#endif /* __SECURE_WORLD_H__ */
+#endif /* __SECURE_WORLD_H__ */
diff --git a/big-little/switcher/context/gic.c b/big-little/switcher/context/gic.c
index 6dfc87f..5ae13a7 100644
--- a/big-little/switcher/context/gic.c
+++ b/big-little/switcher/context/gic.c
@@ -18,45 +18,45 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virt_helpers.h"
#include "misc.h"
struct set_and_clear_regs {
- volatile unsigned int set[32], clear[32];
+ volatile unsigned int set[32], clear[32];
};
typedef struct {
- /* 0x000 */ volatile unsigned int control;
- const unsigned int controller_type;
- const unsigned int implementer;
- const char padding1[116];
- /* 0x080 */ volatile unsigned int security[32];
- /* 0x100 */ struct set_and_clear_regs enable;
- /* 0x200 */ struct set_and_clear_regs pending;
- /* 0x300 */ struct set_and_clear_regs active;
- /* 0x400 */ volatile unsigned int priority[256];
- /* 0x800 */ volatile unsigned int target[256];
- /* 0xC00 */ volatile unsigned int configuration[64];
- /* 0xD00 */ const char padding3[512];
- /* 0xF00 */ volatile unsigned int software_interrupt;
- const char padding4[12];
- /* 0xF10 */ volatile unsigned int sgi_clr_pending[4];
- /* 0xF20 */ volatile unsigned int sgi_set_pending[4];
- const char padding5[176];
- /* 0xFE0 */ unsigned const int peripheral_id[4];
- /* 0xFF0 */ unsigned const int primecell_id[4];
+ /* 0x000 */ volatile unsigned int control;
+ const unsigned int controller_type;
+ const unsigned int implementer;
+ const char padding1[116];
+ /* 0x080 */ volatile unsigned int security[32];
+ /* 0x100 */ struct set_and_clear_regs enable;
+ /* 0x200 */ struct set_and_clear_regs pending;
+ /* 0x300 */ struct set_and_clear_regs active;
+ /* 0x400 */ volatile unsigned int priority[256];
+ /* 0x800 */ volatile unsigned int target[256];
+ /* 0xC00 */ volatile unsigned int configuration[64];
+ /* 0xD00 */ const char padding3[512];
+ /* 0xF00 */ volatile unsigned int software_interrupt;
+ const char padding4[12];
+ /* 0xF10 */ volatile unsigned int sgi_clr_pending[4];
+ /* 0xF20 */ volatile unsigned int sgi_set_pending[4];
+ const char padding5[176];
+ /* 0xFE0 */ unsigned const int peripheral_id[4];
+ /* 0xFF0 */ unsigned const int primecell_id[4];
} interrupt_distributor;
typedef struct {
- /* 0x00 */ volatile unsigned int control;
- /* 0x04 */ volatile unsigned int priority_mask;
- /* 0x08 */ volatile unsigned int binary_point;
- /* 0x0c */ volatile unsigned const int interrupt_ack;
- /* 0x10 */ volatile unsigned int end_of_interrupt;
- /* 0x14 */ volatile unsigned const int running_priority;
- /* 0x18 */ volatile unsigned const int highest_pending;
+ /* 0x00 */ volatile unsigned int control;
+ /* 0x04 */ volatile unsigned int priority_mask;
+ /* 0x08 */ volatile unsigned int binary_point;
+ /* 0x0c */ volatile unsigned const int interrupt_ack;
+ /* 0x10 */ volatile unsigned int end_of_interrupt;
+ /* 0x14 */ volatile unsigned const int running_priority;
+ /* 0x18 */ volatile unsigned const int highest_pending;
} cpu_interface;
/*
@@ -65,11 +65,11 @@ typedef struct {
*/
void save_gic_interface(unsigned int *pointer, unsigned gic_interface_address)
{
- cpu_interface *ci = (cpu_interface *) gic_interface_address;
+ cpu_interface *ci = (cpu_interface *) gic_interface_address;
- pointer[0] = ci->control;
- pointer[1] = ci->priority_mask;
- pointer[2] = ci->binary_point;
+ pointer[0] = ci->control;
+ pointer[1] = ci->priority_mask;
+ pointer[2] = ci->binary_point;
}
@@ -79,55 +79,55 @@ void save_gic_interface(unsigned int *pointer, unsigned gic_interface_address)
* Requires 19 words of memory
*/
int save_gic_distributor_private(unsigned int *pointer,
- unsigned gic_distributor_address)
+ unsigned gic_distributor_address)
{
- interrupt_distributor *id =
- (interrupt_distributor *) gic_distributor_address;
- unsigned int *ptr = 0x0;
-
- *pointer = id->enable.set[0];
- ++pointer;
- memcpy((void *) pointer, (const void *) id->priority, 8 << 2);
- pointer += 8;
- memcpy((void *) pointer, (const void *) id->target, 8 << 2);
- pointer += 8;
-
- /* Save just the PPI configurations (SGIs are not configurable) */
- *pointer = id->configuration[1];
- ++pointer;
-
- /*
- * Private peripheral interrupts need to be replayed on
- * the destination cpu interface for consistency. This
- * is the responsibility of the peripheral driver. When
- * it sees a pending interrupt while saving its context
- * it should record enough information to recreate the
- * interrupt while restoring.
- * We don't save the Pending/Active status and clear it
- * so that it does not interfere when we are back.
- */
- id->pending.clear[0] = 0xffffffff;
- id->active.clear[0] = 0xffffffff;
-
- /*
- * IPIs are different and can be replayed just by saving
- * and restoring the set/clear pending registers
- */
- ptr = pointer;
- memcpy((void *) pointer, (const void *) id->sgi_set_pending, 4 << 2);
- pointer += 8;
-
- /*
- * Clear the pending SGIs on this cpuif so that they don't
- * interfere with the wfi later on.
- */
- memcpy((void *) id->sgi_clr_pending, (const void *) ptr, 4 << 2);
-
- if (*pointer) {
- return -1;
- } else {
- return 0;
- }
+ interrupt_distributor *id =
+ (interrupt_distributor *) gic_distributor_address;
+ unsigned int *ptr = 0x0;
+
+ *pointer = id->enable.set[0];
+ ++pointer;
+ memcpy((void *)pointer, (const void *)id->priority, 8 << 2);
+ pointer += 8;
+ memcpy((void *)pointer, (const void *)id->target, 8 << 2);
+ pointer += 8;
+
+ /* Save just the PPI configurations (SGIs are not configurable) */
+ *pointer = id->configuration[1];
+ ++pointer;
+
+ /*
+ * Private peripheral interrupts need to be replayed on
+ * the destination cpu interface for consistency. This
+ * is the responsibility of the peripheral driver. When
+ * it sees a pending interrupt while saving its context
+ * it should record enough information to recreate the
+ * interrupt while restoring.
+ * We don't save the Pending/Active status and clear it
+ * so that it does not interfere when we are back.
+ */
+ id->pending.clear[0] = 0xffffffff;
+ id->active.clear[0] = 0xffffffff;
+
+ /*
+ * IPIs are different and can be replayed just by saving
+ * and restoring the set/clear pending registers
+ */
+ ptr = pointer;
+ memcpy((void *)pointer, (const void *)id->sgi_set_pending, 4 << 2);
+ pointer += 8;
+
+ /*
+ * Clear the pending SGIs on this cpuif so that they don't
+ * interfere with the wfi later on.
+ */
+ memcpy((void *)id->sgi_clr_pending, (const void *)ptr, 4 << 2);
+
+ if (*pointer) {
+ return -1;
+ } else {
+ return 0;
+ }
}
/*
@@ -136,89 +136,90 @@ int save_gic_distributor_private(unsigned int *pointer,
* Returns non-zero if an SPI interrupt is pending (after saving all required context)
*/
int save_gic_distributor_shared(unsigned int *pointer,
- unsigned gic_distributor_address)
+ unsigned gic_distributor_address)
{
- int retval = 0;
- interrupt_distributor *id =
- (interrupt_distributor *) gic_distributor_address;
- unsigned num_spis = 0;
-
- /* Calculate how many SPIs the GIC supports */
- num_spis = 32 * (id->controller_type & 0x1f);
-
- /* Save rest of GIC configuration */
- if (num_spis) {
- memcpy((void *) pointer, (const void *) (id->target + 8), (num_spis / 4) << 2);
- pointer += num_spis / 4;
- }
-
- /* Save control register */
- *pointer = id->control;
- ++pointer;
-
- return retval;
+ int retval = 0;
+ interrupt_distributor *id =
+ (interrupt_distributor *) gic_distributor_address;
+ unsigned num_spis = 0;
+
+ /* Calculate how many SPIs the GIC supports */
+ num_spis = 32 * (id->controller_type & 0x1f);
+
+ /* Save rest of GIC configuration */
+ if (num_spis) {
+ memcpy((void *)pointer, (const void *)(id->target + 8),
+ (num_spis / 4) << 2);
+ pointer += num_spis / 4;
+ }
+
+ /* Save control register */
+ *pointer = id->control;
+ ++pointer;
+
+ return retval;
}
void restore_gic_interface(unsigned int *pointer,
- unsigned gic_interface_address)
+ unsigned gic_interface_address)
{
- cpu_interface *ci = (cpu_interface *) gic_interface_address;
+ cpu_interface *ci = (cpu_interface *) gic_interface_address;
- ci->priority_mask = pointer[1];
- ci->binary_point = pointer[2];
+ ci->priority_mask = pointer[1];
+ ci->binary_point = pointer[2];
- /* Restore control register last */
- ci->control = pointer[0];
+ /* Restore control register last */
+ ci->control = pointer[0];
}
void restore_gic_distributor_private(unsigned int *pointer,
- unsigned gic_distributor_address)
+ unsigned gic_distributor_address)
{
- interrupt_distributor *id =
- (interrupt_distributor *) gic_distributor_address;
- unsigned ctr, prev_val = 0, prev_ctr = 0;
-
- id->enable.set[0] = *pointer;
- ++pointer;
-
- memcpy((void *) id->priority, (const void *) pointer, 8 << 2);
- pointer += 8;
- memcpy((void *) id->target, (const void *) pointer, 8 << 2);
- pointer += 8;
-
- /* Restore just the PPI configurations (SGIs are not configurable) */
- id->configuration[1] = *pointer;
- ++pointer;
-
- /*
- * Clear active and pending PPIs as they will be recreated by the
- * peripiherals
- */
- id->active.clear[0] = 0xffffffff;
- id->pending.clear[0] = 0xffffffff;
-
- /*
- * Restore pending IPIs
- */
- for (ctr = 0; ctr < 4; ctr++) {
- if(!pointer[ctr])
- continue;
-
- if(pointer[ctr] == prev_val) {
- pointer[ctr] = pointer[prev_ctr];
- } else {
- prev_val = pointer[ctr];
- prev_ctr = ctr;
- remap_cpuif(&pointer[ctr]);
- }
- }
-
- memcpy((void *) id->sgi_set_pending, (const void *) pointer, 4 << 2);
- pointer += 4;
-
- id->pending.set[0] = *pointer;
-
- return;
+ interrupt_distributor *id =
+ (interrupt_distributor *) gic_distributor_address;
+ unsigned ctr, prev_val = 0, prev_ctr = 0;
+
+ id->enable.set[0] = *pointer;
+ ++pointer;
+
+ memcpy((void *)id->priority, (const void *)pointer, 8 << 2);
+ pointer += 8;
+ memcpy((void *)id->target, (const void *)pointer, 8 << 2);
+ pointer += 8;
+
+ /* Restore just the PPI configurations (SGIs are not configurable) */
+ id->configuration[1] = *pointer;
+ ++pointer;
+
+ /*
+ * Clear active and pending PPIs as they will be recreated by the
+	 * peripherals
+ */
+ id->active.clear[0] = 0xffffffff;
+ id->pending.clear[0] = 0xffffffff;
+
+ /*
+ * Restore pending IPIs
+ */
+ for (ctr = 0; ctr < 4; ctr++) {
+ if (!pointer[ctr])
+ continue;
+
+ if (pointer[ctr] == prev_val) {
+ pointer[ctr] = pointer[prev_ctr];
+ } else {
+ prev_val = pointer[ctr];
+ prev_ctr = ctr;
+ remap_cpuif(&pointer[ctr]);
+ }
+ }
+
+ memcpy((void *)id->sgi_set_pending, (const void *)pointer, 4 << 2);
+ pointer += 4;
+
+ id->pending.set[0] = *pointer;
+
+ return;
}
/*
@@ -229,36 +230,38 @@ void restore_gic_distributor_private(unsigned int *pointer,
* critical path.
*/
void restore_gic_distributor_shared(unsigned int *pointer,
- unsigned gic_distributor_address)
+ unsigned gic_distributor_address)
{
- interrupt_distributor *id =
- (interrupt_distributor *) gic_distributor_address;
- unsigned num_spis;
- unsigned ctr, prev_val = 0, prev_ctr = 0;
-
- /* Calculate how many SPIs the GIC supports */
- num_spis = 32 * ((id->controller_type) & 0x1f);
-
- /* Restore rest of GIC configuration */
- if (num_spis) {
-
- memcpy((void *) pointer, (const void *) (id->target + 8), (num_spis / 4) << 2);
-
- for (ctr = 0; ctr < num_spis / 4; ctr++) {
- if(!pointer[ctr])
- continue;
-
- if(pointer[ctr] == prev_val) {
- pointer[ctr] = pointer[prev_ctr];
- } else {
- prev_val = pointer[ctr];
- prev_ctr = ctr;
- remap_cpuif(&pointer[ctr]);
- }
- }
-
- memcpy((void *) (id->target + 8), (const void *) pointer, (num_spis / 4) << 2);
- }
-
- return;
+ interrupt_distributor *id =
+ (interrupt_distributor *) gic_distributor_address;
+ unsigned num_spis;
+ unsigned ctr, prev_val = 0, prev_ctr = 0;
+
+ /* Calculate how many SPIs the GIC supports */
+ num_spis = 32 * ((id->controller_type) & 0x1f);
+
+ /* Restore rest of GIC configuration */
+ if (num_spis) {
+
+ memcpy((void *)pointer, (const void *)(id->target + 8),
+ (num_spis / 4) << 2);
+
+ for (ctr = 0; ctr < num_spis / 4; ctr++) {
+ if (!pointer[ctr])
+ continue;
+
+ if (pointer[ctr] == prev_val) {
+ pointer[ctr] = pointer[prev_ctr];
+ } else {
+ prev_val = pointer[ctr];
+ prev_ctr = ctr;
+ remap_cpuif(&pointer[ctr]);
+ }
+ }
+
+ memcpy((void *)(id->target + 8), (const void *)pointer,
+ (num_spis / 4) << 2);
+ }
+
+ return;
}
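
For reference, a sketch (not part of the patch) of the word layout that save_gic_distributor_private() above writes out and restore_gic_distributor_private() reads back. The struct and field names are invented for clarity; the real code simply advances a raw unsigned pointer through the buffer, and the save path additionally steps past this block before checking one further word of pending state.

typedef struct {
	unsigned enable_set0;		/* banked enable bits for SGIs/PPIs          */
	unsigned priority[8];		/* 32 banked interrupts, 8-bit fields, 4/word */
	unsigned target[8];		/* banked target registers                   */
	unsigned ppi_config;		/* configuration[1]; SGI config is fixed      */
	unsigned sgi_pending[4];	/* set-pending SGI snapshot for IPI replay    */
} gic_dist_private_snapshot;
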
diff --git a/big-little/switcher/context/ns_context.c b/big-little/switcher/context/ns_context.c
index 201d930..2541319 100644
--- a/big-little/switcher/context/ns_context.c
+++ b/big-little/switcher/context/ns_context.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virt_helpers.h"
#include "vgiclib.h"
@@ -37,259 +37,268 @@ extern unsigned async_switchover;
extern unsigned hyp_timer_trigger;
/* Bakery locks to serialize access to the tube. */
-static bakery_t lock_tube0 __attribute__ ((section("BL_DV_PAGE"))) = { 0 };
-static bakery_t lock_tube1 __attribute__ ((section("BL_DV_PAGE"))) = { 0 };
+static bakery_t lock_tube0 __attribute__ ((section("BL_DV_PAGE"))) = { 0 };
+static bakery_t lock_tube1 __attribute__ ((section("BL_DV_PAGE"))) = { 0 };
/*
* Top level structure which encapsulates the context of the entire
* Kingfisher system
*/
-system_context switcher_context = {0};
+system_context switcher_context = { 0 };
-void stop_generic_timer(generic_timer_context *ctr_ctx)
+void stop_generic_timer(generic_timer_context *ctr_ctx)
{
- /*
- * Disable the timer and mask the irq to prevent
- * suprious interrupts on this cpu interface. It
- * will bite us when we come back if we don't. It
- * will be replayed on the inbound cluster.
- */
- write_cntp_ctl(TIMER_MASK_IRQ);
-
-
- /*
- * If the local timer interrupt was being used as
- * the asynchronous trigger, then it was disabled
- * in handle_interrupt() to prevent this level-
- * triggerred interrupt from firing. Now that its
- * been acked at the peripheral. We can renable it
- */
- if(!hyp_timer_trigger) {
- if (ctr_ctx->cntp_ctl & TIMER_IRQ_STAT)
- gic_enable_int(LCL_TIMER_IRQ);
- }
-
- return;
+ /*
+ * Disable the timer and mask the irq to prevent
+	 * spurious interrupts on this cpu interface. It
+ * will bite us when we come back if we don't. It
+ * will be replayed on the inbound cluster.
+ */
+ write_cntp_ctl(TIMER_MASK_IRQ);
+
+ /*
+ * If the local timer interrupt was being used as
+ * the asynchronous trigger, then it was disabled
+ * in handle_interrupt() to prevent this level-
+	 * triggered interrupt from firing. Now that it has
+	 * been acked at the peripheral, we can re-enable it.
+ */
+ if (!hyp_timer_trigger) {
+ if (ctr_ctx->cntp_ctl & TIMER_IRQ_STAT)
+ gic_enable_int(LCL_TIMER_IRQ);
+ }
+
+ return;
}
void save_context(unsigned first_cpu)
{
- unsigned cpu_id = read_cpuid();
- unsigned cluster_id = read_clusterid();
- cpu_context *ns_cpu_ctx =
- &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
- unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
- unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
- unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
- banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
- gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
- generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
- cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
-
- write_trace(&lock_tube0, NS_TUBE0, "Context Save Start", read_cntpct(), 0x0, 0x0);
-
- /*
- * Good place to bring the inbound cluster out of reset, but first
- * we need to save the secure world context.
- */
- write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save Start", read_cntpct(), 0x0, 0x0);
- smc(SMC_SEC_SAVE, (unsigned) hyp_warm_reset_handler);
- write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save End", read_cntpct(), 0x0, 0x0);
-
- /*
- * Save the 32-bit Generic timer context & stop them
- */
- save_generic_timer((unsigned *) cp15_timer_ctx, 0x1);
- stop_generic_timer(cp15_timer_ctx);
-
- /*
- * Save v7 generic performance monitors
- * Save cpu general purpose banked registers
- * Save cp15 context
- */
- save_performance_monitors(pmon_context);
- save_banked_registers(gp_context);
- save_cp15(cp15_context->cp15_misc_regs);
- save_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
- save_mmu(cp15_context->cp15_mmu_regs);
- save_fault_status((unsigned *) fault_ctx);
-
- /*
- * Check if non-secure world has access to the vfp/neon registers
- * and save them if so.
- */
- if (read_nsacr() & (0x3 << 10))
- save_vfp(vfp_context);
-
-
- /*
- * Disable the GIC CPU interface tp prevent interrupts from waking
- * the core from wfi() subsequently.
- */
- write32(GIC_IC_PHY_BASE + GICC_CTL, 0x0);
-
- /* Save vGIC virtual cpu interface (cpu view) context */
- save_gic_interface(gic_pvt_context->gic_cpu_if_regs, VGIC_VM_PHY_BASE);
-
- /*
- * Save the HYP view registers. These registers contain a snapshot
- * of all the physical interrupts acknowledged till we
- * entered this HYP mode.
- */
- vgic_savestate(cpu_id);
-
- /*
- * TODO:
- * Is it safe for the secondary cpu to save its context
- * while the GIC distributor is on. Should be as its
- * banked context and the cpu itself is the only one
- * who can change it. Still have to consider cases e.g
- * SGIs/Localtimers becoming pending.
- */
- save_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
- GIC_ID_PHY_BASE);
-
- /* Safe place to save the Virtualisor context */
- SaveVirtualisor(first_cpu);
-
- /*
- * Indicate to the inbound side that the context has been saved and is ready
- * for pickup.
- */
- write_trace(&lock_tube0, NS_TUBE0, "Context Save End", read_cntpct(), 0x0, 0x0);
- set_event(OB_CONTEXT_DONE, cpu_id);
-
- /*
- * Now, we wait for the inbound cluster to signal that its done atleast picking
- * up the saved context.
- */
- if (cpu_id == first_cpu) {
- wait_for_events(IB_CONTEXT_DONE);
- write_trace(&lock_tube0, NS_TUBE0, "Inbound done", read_cntpct(), 0x0, 0x0);
- }
-
- return;
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ cpu_context *ns_cpu_ctx =
+ &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
+ unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
+ unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
+ unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
+ banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
+ gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
+ generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
+ cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
+
+ write_trace(&lock_tube0, NS_TUBE0, "Context Save Start", read_cntpct(),
+ 0x0, 0x0);
+
+ /*
+ * Good place to bring the inbound cluster out of reset, but first
+ * we need to save the secure world context.
+ */
+ write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save Start",
+ read_cntpct(), 0x0, 0x0);
+ smc(SMC_SEC_SAVE, (unsigned)hyp_warm_reset_handler);
+ write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save End",
+ read_cntpct(), 0x0, 0x0);
+
+ /*
+ * Save the 32-bit Generic timer context & stop them
+ */
+ save_generic_timer((unsigned *)cp15_timer_ctx, 0x1);
+ stop_generic_timer(cp15_timer_ctx);
+
+ /*
+ * Save v7 generic performance monitors
+ * Save cpu general purpose banked registers
+ * Save cp15 context
+ */
+ save_performance_monitors(pmon_context);
+ save_banked_registers(gp_context);
+ save_cp15(cp15_context->cp15_misc_regs);
+ save_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
+ save_mmu(cp15_context->cp15_mmu_regs);
+ save_fault_status((unsigned *)fault_ctx);
+
+ /*
+ * Check if non-secure world has access to the vfp/neon registers
+ * and save them if so.
+ */
+ if (read_nsacr() & (0x3 << 10))
+ save_vfp(vfp_context);
+
+ /*
+	 * Disable the GIC CPU interface to prevent interrupts from waking
+ * the core from wfi() subsequently.
+ */
+ write32(GIC_IC_PHY_BASE + GICC_CTL, 0x0);
+
+ /* Save vGIC virtual cpu interface (cpu view) context */
+ save_gic_interface(gic_pvt_context->gic_cpu_if_regs, VGIC_VM_PHY_BASE);
+
+ /*
+ * Save the HYP view registers. These registers contain a snapshot
+ * of all the physical interrupts acknowledged till we
+ * entered this HYP mode.
+ */
+ vgic_savestate(cpu_id);
+
+ /*
+ * TODO:
+ * Is it safe for the secondary cpu to save its context
+	 * while the GIC distributor is on? It should be, as this is
+	 * banked context and the cpu itself is the only one
+	 * who can change it. Still have to consider cases e.g.
+ * SGIs/Localtimers becoming pending.
+ */
+ save_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
+ GIC_ID_PHY_BASE);
+
+ /* Safe place to save the Virtualisor context */
+ SaveVirtualisor(first_cpu);
+
+ /*
+ * Indicate to the inbound side that the context has been saved and is ready
+ * for pickup.
+ */
+ write_trace(&lock_tube0, NS_TUBE0, "Context Save End", read_cntpct(),
+ 0x0, 0x0);
+ set_event(OB_CONTEXT_DONE, cpu_id);
+
+ /*
+	 * Now, we wait for the inbound cluster to signal that it is done at least picking
+ * up the saved context.
+ */
+ if (cpu_id == first_cpu) {
+ wait_for_events(IB_CONTEXT_DONE);
+ write_trace(&lock_tube0, NS_TUBE0, "Inbound done",
+ read_cntpct(), 0x0, 0x0);
+ }
+
+ return;
}
void restore_context(unsigned first_cpu)
{
- unsigned cpu_id = read_cpuid();
- unsigned cluster_id = read_clusterid();
- unsigned warm_reset = 1;
- cpu_context *ns_cpu_ctx =
- &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
- global_context *gbl_context = &switcher_context.cluster.ns_cluster_ctx;
- unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
- unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
- unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
- gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
- generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
- banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
- cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
- vm_context *src = 0x0;
- vm_context *dest = 0x0;
- unsigned dest_cpuif = 0x0;
- unsigned src_cpuif = 0x0;
-
- /*
- * Map cpuids to cpu interface numbers so that cpu interface
- * specific context can be correctly restored on the external
- * vGIC.
- */
- map_cpuif(cluster_id, cpu_id);
- SetupVGIC(warm_reset);
-
- /*
- * Inbound headstart i.e. the vGIC configuration, secure context
- * restore & cache invalidation has been done. Now wait for the
- * outbound to provide the context.
- */
- write_trace(&lock_tube1, NS_TUBE1, "Wait for context", read_cntpct(), 0x0, 0x0);
- wait_for_event(OB_CONTEXT_DONE, cpu_id);
- reset_event(OB_CONTEXT_DONE, cpu_id);
-
- /*
- * First cpu restores the global context while the others take
- * care of their own.
- */
- write_trace(&lock_tube1, NS_TUBE1, "Context Restore Start ", read_cntpct(), 0x0, 0x0);
- if (cpu_id == first_cpu)
- restore_gic_distributor_shared(gbl_context->gic_dist_if_regs,
- GIC_ID_PHY_BASE);
- restore_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
- GIC_ID_PHY_BASE);
- vgic_loadstate(cpu_id);
-
- SetupVirtualisor(first_cpu);
-
- /* Restore NS VGIC context */
- restore_gic_interface(gic_pvt_context->gic_cpu_if_regs,
- VGIC_VM_PHY_BASE);
-
- /*
- * Check if non-secure world has access to the vfp/neon registers
- * and save them if so.
- */
- if (read_nsacr() & (0x3 << 10))
- restore_vfp(vfp_context);
-
- /*
- * Restore cp15 context
- * Restore cpu general purpose banked registers
- * Restore v7 generic performance monitors
- * Restore the 32-bit Generic timer context
- */
- restore_fault_status((unsigned *) fault_ctx);
- restore_mmu(cp15_context->cp15_mmu_regs);
- restore_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
- restore_cp15(cp15_context->cp15_misc_regs);
- restore_banked_registers(gp_context);
- restore_performance_monitors(pmon_context);
- restore_generic_timer((unsigned *) cp15_timer_ctx, 0x1);
-
- /*
- * Paranoid check to ensure that all HYP/Secure context & Virtualisor
- * is restored before any core enters the non-secure mode to use it.
- */
- if (cpu_id == first_cpu) {
- set_events(HYP_CONTEXT_DONE);
- }
- wait_for_event(HYP_CONTEXT_DONE, cpu_id);
- reset_event(HYP_CONTEXT_DONE, cpu_id);
-
- /*
- * Return the saved general purpose registers saved above the HYP mode
- * stack of our counterpart cpu on the other cluster.
- */
- dest_cpuif = get_cpuif(cluster_id, cpu_id);
- src_cpuif = get_cpuif(!cluster_id, cpu_id);
- dest = &guestos_state[dest_cpuif].context;
- src = &guestos_state[src_cpuif].context;
-
- dest->gp_regs[0] = src->gp_regs[0];
- dest->gp_regs[1] = src->gp_regs[1];
- dest->gp_regs[2] = src->gp_regs[2];
- dest->gp_regs[3] = src->gp_regs[3];
- dest->gp_regs[4] = src->gp_regs[4];
- dest->gp_regs[5] = src->gp_regs[5];
- dest->gp_regs[6] = src->gp_regs[6];
- dest->gp_regs[7] = src->gp_regs[7];
- dest->gp_regs[8] = src->gp_regs[8];
- dest->gp_regs[9] = src->gp_regs[9];
- dest->gp_regs[10] = src->gp_regs[10];
- dest->gp_regs[11] = src->gp_regs[11];
- dest->gp_regs[12] = src->gp_regs[12];
- dest->gp_regs[13] = src->gp_regs[13];
- dest->gp_regs[14] = src->gp_regs[14];
- dest->elr_hyp = src->elr_hyp;
- dest->spsr = src->spsr;
- dest->usr_lr = src->usr_lr;
-
- write_trace(&lock_tube1, NS_TUBE1, "Context Restore End", read_cntpct(), 0x0, 0x0);
- set_event(IB_CONTEXT_DONE, cpu_id);
-
- if (async_switchover && cpu_id == first_cpu)
- enable_trigger(read_cntfrq());
-
- return;
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned warm_reset = 1;
+ cpu_context *ns_cpu_ctx =
+ &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
+ global_context *gbl_context = &switcher_context.cluster.ns_cluster_ctx;
+ unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
+ unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
+ unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
+ gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
+ generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
+ banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
+ cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
+ vm_context *src = 0x0;
+ vm_context *dest = 0x0;
+ unsigned dest_cpuif = 0x0;
+ unsigned src_cpuif = 0x0;
+
+ /*
+ * Map cpuids to cpu interface numbers so that cpu interface
+ * specific context can be correctly restored on the external
+ * vGIC.
+ */
+ map_cpuif(cluster_id, cpu_id);
+ SetupVGIC(warm_reset);
+
+ /*
+ * Inbound headstart i.e. the vGIC configuration, secure context
+ * restore & cache invalidation has been done. Now wait for the
+ * outbound to provide the context.
+ */
+ write_trace(&lock_tube1, NS_TUBE1, "Wait for context", read_cntpct(),
+ 0x0, 0x0);
+ wait_for_event(OB_CONTEXT_DONE, cpu_id);
+ reset_event(OB_CONTEXT_DONE, cpu_id);
+
+ /*
+ * First cpu restores the global context while the others take
+ * care of their own.
+ */
+ write_trace(&lock_tube1, NS_TUBE1, "Context Restore Start ",
+ read_cntpct(), 0x0, 0x0);
+ if (cpu_id == first_cpu)
+ restore_gic_distributor_shared(gbl_context->gic_dist_if_regs,
+ GIC_ID_PHY_BASE);
+ restore_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
+ GIC_ID_PHY_BASE);
+ vgic_loadstate(cpu_id);
+
+ SetupVirtualisor(first_cpu);
+
+ /* Restore NS VGIC context */
+ restore_gic_interface(gic_pvt_context->gic_cpu_if_regs,
+ VGIC_VM_PHY_BASE);
+
+ /*
+ * Check if non-secure world has access to the vfp/neon registers
+ * and save them if so.
+ */
+ if (read_nsacr() & (0x3 << 10))
+ restore_vfp(vfp_context);
+
+ /*
+ * Restore cp15 context
+ * Restore cpu general purpose banked registers
+ * Restore v7 generic performance monitors
+ * Restore the 32-bit Generic timer context
+ */
+ restore_fault_status((unsigned *)fault_ctx);
+ restore_mmu(cp15_context->cp15_mmu_regs);
+ restore_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
+ restore_cp15(cp15_context->cp15_misc_regs);
+ restore_banked_registers(gp_context);
+ restore_performance_monitors(pmon_context);
+ restore_generic_timer((unsigned *)cp15_timer_ctx, 0x1);
+
+ /*
+ * Paranoid check to ensure that all HYP/Secure context & Virtualisor
+ * is restored before any core enters the non-secure mode to use it.
+ */
+ if (cpu_id == first_cpu) {
+ set_events(HYP_CONTEXT_DONE);
+ }
+ wait_for_event(HYP_CONTEXT_DONE, cpu_id);
+ reset_event(HYP_CONTEXT_DONE, cpu_id);
+
+ /*
+	 * Return the general purpose registers saved above the HYP mode
+ * stack of our counterpart cpu on the other cluster.
+ */
+ dest_cpuif = get_cpuif(cluster_id, cpu_id);
+ src_cpuif = get_cpuif(!cluster_id, cpu_id);
+ dest = &guestos_state[dest_cpuif].context;
+ src = &guestos_state[src_cpuif].context;
+
+ dest->gp_regs[0] = src->gp_regs[0];
+ dest->gp_regs[1] = src->gp_regs[1];
+ dest->gp_regs[2] = src->gp_regs[2];
+ dest->gp_regs[3] = src->gp_regs[3];
+ dest->gp_regs[4] = src->gp_regs[4];
+ dest->gp_regs[5] = src->gp_regs[5];
+ dest->gp_regs[6] = src->gp_regs[6];
+ dest->gp_regs[7] = src->gp_regs[7];
+ dest->gp_regs[8] = src->gp_regs[8];
+ dest->gp_regs[9] = src->gp_regs[9];
+ dest->gp_regs[10] = src->gp_regs[10];
+ dest->gp_regs[11] = src->gp_regs[11];
+ dest->gp_regs[12] = src->gp_regs[12];
+ dest->gp_regs[13] = src->gp_regs[13];
+ dest->gp_regs[14] = src->gp_regs[14];
+ dest->elr_hyp = src->elr_hyp;
+ dest->spsr = src->spsr;
+ dest->usr_lr = src->usr_lr;
+
+ write_trace(&lock_tube1, NS_TUBE1, "Context Restore End", read_cntpct(),
+ 0x0, 0x0);
+ set_event(IB_CONTEXT_DONE, cpu_id);
+
+ if (async_switchover && cpu_id == first_cpu)
+ enable_trigger(read_cntfrq());
+
+ return;
}
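
For reference, the fifteen explicit gp_regs assignments at the end of restore_context() above amount to the loop below. A minimal sketch, not part of the patch; the struct is only a stand-in for the relevant fields of vm_context.

struct vm_ctx_sketch {
	unsigned gp_regs[15];
	unsigned elr_hyp;
	unsigned spsr;
	unsigned usr_lr;
};

static void copy_guest_regs(struct vm_ctx_sketch *dest,
			    const struct vm_ctx_sketch *src)
{
	unsigned ctr;

	/* r0-r14 of the outbound cpu become r0-r14 of the inbound cpu */
	for (ctr = 0; ctr < 15; ctr++)
		dest->gp_regs[ctr] = src->gp_regs[ctr];

	/* return state used to eret back into the non-secure payload */
	dest->elr_hyp = src->elr_hyp;
	dest->spsr = src->spsr;
	dest->usr_lr = src->usr_lr;
}
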
diff --git a/big-little/switcher/context/sh_vgic.c b/big-little/switcher/context/sh_vgic.c
index 7672fdd..c2ba190 100644
--- a/big-little/switcher/context/sh_vgic.c
+++ b/big-little/switcher/context/sh_vgic.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virt_helpers.h"
#include "gic_registers.h"
@@ -87,13 +87,13 @@ static unsigned int cpuinfo_map[MAX_CPUIFS];
*/
unsigned map_cpuif(unsigned cluster_id, unsigned cpu_id)
{
- unsigned cpuif_id = 0;
+ unsigned cpuif_id = 0;
- cpuif_id = bitindex(read32(GIC_ID_PHY_BASE + GICD_CPUS) & 0xff);
- cpuif_map[cluster_id][cpu_id] = cpuif_id;
- cpuinfo_map[cpuif_id] = (cluster_id << 4) | cpu_id;
+ cpuif_id = bitindex(read32(GIC_ID_PHY_BASE + GICD_CPUS) & 0xff);
+ cpuif_map[cluster_id][cpu_id] = cpuif_id;
+ cpuinfo_map[cpuif_id] = (cluster_id << 4) | cpu_id;
- return 0;
+ return 0;
}
/*
@@ -101,7 +101,7 @@ unsigned map_cpuif(unsigned cluster_id, unsigned cpu_id)
*/
unsigned get_cpuif(unsigned cluster_id, unsigned cpu_id)
{
- return cpuif_map[cluster_id][cpu_id];
+ return cpuif_map[cluster_id][cpu_id];
}
/*
@@ -109,7 +109,7 @@ unsigned get_cpuif(unsigned cluster_id, unsigned cpu_id)
*/
unsigned get_cpuinfo(unsigned cpuif)
{
- return cpuinfo_map[cpuif];
+ return cpuinfo_map[cpuif];
}
/*
@@ -117,24 +117,24 @@ unsigned get_cpuinfo(unsigned cpuif)
*/
unsigned get_cpu_mask(unsigned cpuif_mask)
{
- unsigned num_bytes = sizeof(unsigned int) / sizeof(unsigned char), ctr;
- unsigned cpuif = 0, clusterid = read_clusterid(), cpu_mask = 0;
- unsigned cpuid = 0;
-
- for (ctr = 0; ctr < num_bytes; ctr++) { /* Iterate through the cpu_mask byte wise */
- unsigned byte = 0;
- unsigned char lz = 0;
-
- byte = (cpuif_mask >> (ctr << 3)) & 0xff;
- while ((lz = __clz(byte)) != 0x20) {
- cpuif = 31 - lz;
- byte &= ~(1 << cpuif); /* Clear the bit just discovered */
- cpuid = get_cpuinfo(cpuif) & 0xf;
- cpu_mask |= (1 << cpuid) << (ctr << 3);
- }
- }
-
- return cpu_mask;
+ unsigned num_bytes = sizeof(unsigned int) / sizeof(unsigned char), ctr;
+ unsigned cpuif = 0, clusterid = read_clusterid(), cpu_mask = 0;
+ unsigned cpuid = 0;
+
+ for (ctr = 0; ctr < num_bytes; ctr++) { /* Iterate through the cpu_mask byte wise */
+ unsigned byte = 0;
+ unsigned char lz = 0;
+
+ byte = (cpuif_mask >> (ctr << 3)) & 0xff;
+ while ((lz = __clz(byte)) != 0x20) {
+ cpuif = 31 - lz;
+ byte &= ~(1 << cpuif); /* Clear the bit just discovered */
+ cpuid = get_cpuinfo(cpuif) & 0xf;
+ cpu_mask |= (1 << cpuid) << (ctr << 3);
+ }
+ }
+
+ return cpu_mask;
}
/*
@@ -142,24 +142,24 @@ unsigned get_cpu_mask(unsigned cpuif_mask)
*/
unsigned get_cpuif_mask(unsigned cpu_mask)
{
- unsigned num_bytes = sizeof(unsigned int) / sizeof(unsigned char), ctr;
- unsigned cpuif = 0, clusterid = read_clusterid(), cpuif_mask = 0;
- unsigned cpuid = 0;
-
- for (ctr = 0; ctr < num_bytes; ctr++) { /* Iterate through the cpu_mask byte wise */
- unsigned byte = 0;
- unsigned char lz = 0;
-
- byte = (cpu_mask >> (ctr << 3)) & 0xff;
- while ((lz = __clz(byte)) != 0x20) {
- cpuid = 31 - lz;
- byte &= ~(1 << cpuid); /* Clear the bit just discovered */
- cpuif = get_cpuif(clusterid, cpuid);
- cpuif_mask |= (1 << cpuif) << (ctr << 3);
- }
- }
-
- return cpuif_mask;
+ unsigned num_bytes = sizeof(unsigned int) / sizeof(unsigned char), ctr;
+ unsigned cpuif = 0, clusterid = read_clusterid(), cpuif_mask = 0;
+ unsigned cpuid = 0;
+
+ for (ctr = 0; ctr < num_bytes; ctr++) { /* Iterate through the cpu_mask byte wise */
+ unsigned byte = 0;
+ unsigned char lz = 0;
+
+ byte = (cpu_mask >> (ctr << 3)) & 0xff;
+ while ((lz = __clz(byte)) != 0x20) {
+ cpuid = 31 - lz;
+ byte &= ~(1 << cpuid); /* Clear the bit just discovered */
+ cpuif = get_cpuif(clusterid, cpuid);
+ cpuif_mask |= (1 << cpuif) << (ctr << 3);
+ }
+ }
+
+ return cpuif_mask;
}
/*
@@ -174,52 +174,52 @@ unsigned get_cpuif_mask(unsigned cpu_mask)
*/
unsigned remap_cpuif(unsigned *cpuif_mask)
{
- unsigned cluster_id = read_clusterid(), num_cpus = num_secondaries() + 1;
-
+ unsigned cluster_id = read_clusterid(), num_cpus =
+ num_secondaries() + 1;
- if(cluster_id == EAGLE)
- *cpuif_mask = *cpuif_mask >> num_cpus;
- else
- *cpuif_mask = *cpuif_mask << num_cpus;
+ if (cluster_id == EAGLE)
+ *cpuif_mask = *cpuif_mask >> num_cpus;
+ else
+ *cpuif_mask = *cpuif_mask << num_cpus;
- return 0;
+ return 0;
}
#else
unsigned remap_cpuif(unsigned *cpuif_mask)
{
- unsigned ib_cpuif_mask = 0, ob_cpuif = 0, ib_cpuif = 0, ob_cpuid =
- 0, ob_clusterid = 0, ib_cpuid = 0, ib_clusterid = 0;
- unsigned num_bytes = sizeof(unsigned int) / sizeof(unsigned char), ctr;
-
- for (ctr = 0; ctr < num_bytes; ctr++) {
- unsigned byte = 0;
- unsigned char lz = 0;
-
- byte = (*cpuif_mask >> (ctr << 3)) & 0xff;
-
- while ((lz = __clz(byte)) != 0x20) {
- ob_cpuif = 31 - lz;
- byte &= ~(1 << ob_cpuif); /* Clear the bit just discovered */
- ob_cpuid = get_cpuinfo(ob_cpuif) & 0xf;
- ob_clusterid = (get_cpuinfo(ob_cpuif) >> 4) & 0xf;
-
- /*
- * TODO: Can we assume that the inbound and outbound clusters will
- * always be logical complements of each other
- */
- ib_clusterid = !ob_clusterid;
-
- /*
- * TODO: Assuming that the cpuids have a 1:1 mapping i.e. cpuX on
- * one cluster will always map to cpuX on the other cluster.
- */
- ib_cpuid = ob_cpuid;
- ib_cpuif = get_cpuif(ib_clusterid, ib_cpuid);
- ib_cpuif_mask |= (1 << ib_cpuif) << (ctr << 3);
- }
- }
-
- *cpuif_mask = ib_cpuif_mask;
- return 0;
+ unsigned ib_cpuif_mask = 0, ob_cpuif = 0, ib_cpuif = 0, ob_cpuid =
+ 0, ob_clusterid = 0, ib_cpuid = 0, ib_clusterid = 0;
+ unsigned num_bytes = sizeof(unsigned int) / sizeof(unsigned char), ctr;
+
+ for (ctr = 0; ctr < num_bytes; ctr++) {
+ unsigned byte = 0;
+ unsigned char lz = 0;
+
+ byte = (*cpuif_mask >> (ctr << 3)) & 0xff;
+
+ while ((lz = __clz(byte)) != 0x20) {
+ ob_cpuif = 31 - lz;
+ byte &= ~(1 << ob_cpuif); /* Clear the bit just discovered */
+ ob_cpuid = get_cpuinfo(ob_cpuif) & 0xf;
+ ob_clusterid = (get_cpuinfo(ob_cpuif) >> 4) & 0xf;
+
+ /*
+ * TODO: Can we assume that the inbound and outbound clusters will
+			 * always be logical complements of each other?
+ */
+ ib_clusterid = !ob_clusterid;
+
+ /*
+ * TODO: Assuming that the cpuids have a 1:1 mapping i.e. cpuX on
+ * one cluster will always map to cpuX on the other cluster.
+ */
+ ib_cpuid = ob_cpuid;
+ ib_cpuif = get_cpuif(ib_clusterid, ib_cpuid);
+ ib_cpuif_mask |= (1 << ib_cpuif) << (ctr << 3);
+ }
+ }
+
+ *cpuif_mask = ib_cpuif_mask;
+ return 0;
}
#endif
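
For reference, a sketch (not part of the patch) of the byte-wise bit scan that get_cpu_mask(), get_cpuif_mask() and remap_cpuif() above all share, written with a plain count-leading-zeros helper instead of the ARM __clz() intrinsic and a caller-supplied translate() callback standing in for get_cpuinfo()/get_cpuif().

static unsigned clz32(unsigned x)
{
	unsigned n = 32;

	while (x) {
		x >>= 1;
		n--;
	}
	return n;
}

static unsigned remap_mask(unsigned in_mask, unsigned (*translate)(unsigned))
{
	unsigned out_mask = 0, ctr;

	for (ctr = 0; ctr < sizeof(unsigned); ctr++) {
		unsigned byte = (in_mask >> (ctr << 3)) & 0xff;
		unsigned lz;

		while ((lz = clz32(byte)) != 32) {
			unsigned bit = 31 - lz;

			byte &= ~(1u << bit);	/* clear the bit just found */
			out_mask |= (1u << translate(bit)) << (ctr << 3);
		}
	}
	return out_mask;
}
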
diff --git a/big-little/switcher/trigger/async_switchover.c b/big-little/switcher/trigger/async_switchover.c
index 962735a..9c7287d 100644
--- a/big-little/switcher/trigger/async_switchover.c
+++ b/big-little/switcher/trigger/async_switchover.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virt_helpers.h"
#include "misc.h"
@@ -56,40 +56,40 @@ unsigned hyp_timer_trigger = USE_HYP_TIMERS;
*/
static unsigned get_free_ipi(void)
{
- unsigned ctr, shift, cpu_if_bit, cpu_id = read_cpuid(), cluster_id =
- read_clusterid();
+ unsigned ctr, shift, cpu_if_bit, cpu_id = read_cpuid(), cluster_id =
+ read_clusterid();
- cpu_if_bit = 1 << get_cpuif(cluster_id, cpu_id);
+ cpu_if_bit = 1 << get_cpuif(cluster_id, cpu_id);
- /* Find the register offset */
- for (ctr = 0; ctr < 4; ctr++)
- /* Check whether IPI<shift> has already been generated by us */
- for (shift = 0; shift < 4; shift++) {
- if (read32
- (GIC_ID_PHY_BASE + GICD_SPENDSGIR +
- (ctr << 2)) & (cpu_if_bit << (shift << 3)))
- continue;
+ /* Find the register offset */
+ for (ctr = 0; ctr < 4; ctr++)
+ /* Check whether IPI<shift> has already been generated by us */
+ for (shift = 0; shift < 4; shift++) {
+ if (read32
+ (GIC_ID_PHY_BASE + GICD_SPENDSGIR +
+ (ctr << 2)) & (cpu_if_bit << (shift << 3)))
+ continue;
- return (ctr << 2) + shift;
- }
+ return (ctr << 2) + shift;
+ }
- return 16;
+ return 16;
}
static void ack_trigger(void)
{
- unsigned ctl = 0;
-
- ctl = read_cnthp_ctl();
- if (ctl & TIMER_IRQ_STAT) {
- /* Disable timer and mask interrupt */
- write_cnthp_ctl(TIMER_MASK_IRQ);
- } else {
- printf("Spurious HYP timer irq \n");
- panic();
- }
-
- return;
+ unsigned ctl = 0;
+
+ ctl = read_cnthp_ctl();
+ if (ctl & TIMER_IRQ_STAT) {
+ /* Disable timer and mask interrupt */
+ write_cnthp_ctl(TIMER_MASK_IRQ);
+ } else {
+ printf("Spurious HYP timer irq \n");
+ panic();
+ }
+
+ return;
}
/*
@@ -98,193 +98,193 @@ static void ack_trigger(void)
*/
void signal_switchover(void)
{
- unsigned ipi_no = 0x0;
-
- /* If x is the no. of cpus then corresponding mask would be (1 << x) - 1 */
- unsigned cpu_mask = (1 << (num_secondaries() + 1)) - 1;
- /*
- * Map the target cpuids to their cpu interfaces as the 1:1 mapping
- * no longer exists with the external vGIC.
- */
- unsigned cpuif_mask = get_cpuif_mask(cpu_mask);
-
- /*
- * Send an ipi to all the cpus in the cluster including ourselves
- * to start a switch to the inbound cluster. First choose a non-
- * pending IPI to avoid a clash with the OS.
- */
- ipi_no = get_free_ipi();
-
- /*
- * For this IPI set the mask in our global variable. We do it, payload software
- * does not. But, first check whether any earlier IPIs have already been acked
- */
- while (hyp_ipi_check[ipi_no]) ;
- spin_lock(&lock_ipi_check);
- hyp_ipi_check[ipi_no] = cpuif_mask;
- dsb();
- spin_unlock(&lock_ipi_check);
-
- /* Send the IPI to the cpu_mask */
- gic_send_ipi(cpuif_mask, ipi_no);
-
- return;
+ unsigned ipi_no = 0x0;
+
+ /* If x is the no. of cpus then corresponding mask would be (1 << x) - 1 */
+ unsigned cpu_mask = (1 << (num_secondaries() + 1)) - 1;
+ /*
+ * Map the target cpuids to their cpu interfaces as the 1:1 mapping
+ * no longer exists with the external vGIC.
+ */
+ unsigned cpuif_mask = get_cpuif_mask(cpu_mask);
+
+ /*
+ * Send an ipi to all the cpus in the cluster including ourselves
+ * to start a switch to the inbound cluster. First choose a non-
+ * pending IPI to avoid a clash with the OS.
+ */
+ ipi_no = get_free_ipi();
+
+ /*
+	 * For this IPI, set the mask in our global variable. We do it; the payload
+	 * software does not. But first check whether any earlier IPIs have already been acked.
+ */
+ while (hyp_ipi_check[ipi_no]) ;
+ spin_lock(&lock_ipi_check);
+ hyp_ipi_check[ipi_no] = cpuif_mask;
+ dsb();
+ spin_unlock(&lock_ipi_check);
+
+ /* Send the IPI to the cpu_mask */
+ gic_send_ipi(cpuif_mask, ipi_no);
+
+ return;
}
unsigned check_switchover_ipi(unsigned cpu_if, unsigned ipi_no)
{
- unsigned rc = FALSE;
-
- spin_lock(&lock_ipi_check);
- /*
- * If this IPI was sent by the big-little code then our cpu_if bit must have
- * been set in the ipi_check flag. Reset the bit an indicate that its an
- * internal IPI.
- */
- if (hyp_ipi_check[ipi_no] & (1 << cpu_if)) {
- rc = TRUE;
- hyp_ipi_check[ipi_no] &= ~(1 << cpu_if);
- dsb();
- }
- spin_unlock(&lock_ipi_check);
-
- return rc;
+ unsigned rc = FALSE;
+
+ spin_lock(&lock_ipi_check);
+ /*
+ * If this IPI was sent by the big-little code then our cpu_if bit must have
+	 * been set in the ipi_check flag. Reset the bit and indicate that it is an
+ * internal IPI.
+ */
+ if (hyp_ipi_check[ipi_no] & (1 << cpu_if)) {
+ rc = TRUE;
+ hyp_ipi_check[ipi_no] &= ~(1 << cpu_if);
+ dsb();
+ }
+ spin_unlock(&lock_ipi_check);
+
+ return rc;
}
unsigned check_trigger(unsigned int_id, unsigned int_ack)
{
- unsigned cpuid = read_cpuid();
- unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;
-
- /*
- * If we are not using HYP mode timers for triggering a switchover
- * then check whether this is a suitable local timer interrupt to
- * switch
- */
- if (hyp_timer_trigger == FALSE) {
- /*
- * We need to hijack every 128th timer interrupt on cpu0 and
- * use it as a stimulus to switchover
- */
- if (cpuid == 0 && int_id == LCL_TIMER_IRQ)
- timer_count++;
-
- if (timer_count & LCL_TIMER_FREQ)
- return FALSE;
- }
- /*
- * Trigger a switchover upon getting a HYP timer IRQ. Its
- * targetted only to cpu0.
- */
- else if (int_id != HYP_TIMER_IRQ)
- return FALSE;
-
- /*
- * Do the needful now that it is confirmed that we need to move
- * to the other cluster
- */
-
- /* Indicator on emulation that switches are actually taking place */
- if (platform != 0x1)
- printf("%d", read_clusterid());
-
- /*
- * Send an IPI to all the cores in this cluster to start
- * a switchover.
- */
- signal_switchover();
-
- if (hyp_timer_trigger)
- ack_trigger();
- else
- /*
- * Complete handling of the local timer interrupt at the physical gic
- * level. Its disabled as its level triggerred and will reassert as
- * soon as we leave this function since its not been cleared at the
- * peripheral just yet. The local timer context is saved and this irq
- * cleared in "save_hyp_context". The interrupt is enabled then.
- */
- gic_disable_int(int_id);
-
- /* Finish handling this interrupt */
- gic_eoi_int(int_ack);
- if (read32(GIC_IC_PHY_BASE + GICC_CTL) & 0x200)
- gic_deactivate_int(int_ack);
-
- return TRUE;
+ unsigned cpuid = read_cpuid();
+ unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;
+
+ /*
+ * If we are not using HYP mode timers for triggering a switchover
+ * then check whether this is a suitable local timer interrupt to
+ * switch
+ */
+ if (hyp_timer_trigger == FALSE) {
+ /*
+ * We need to hijack every 128th timer interrupt on cpu0 and
+ * use it as a stimulus to switchover
+ */
+ if (cpuid == 0 && int_id == LCL_TIMER_IRQ)
+ timer_count++;
+
+ if (timer_count & LCL_TIMER_FREQ)
+ return FALSE;
+ }
+ /*
+	 * Trigger a switchover upon getting a HYP timer IRQ. It is
+	 * targeted only at cpu0.
+ */
+ else if (int_id != HYP_TIMER_IRQ)
+ return FALSE;
+
+ /*
+ * Do the needful now that it is confirmed that we need to move
+ * to the other cluster
+ */
+
+ /* Indicator on emulation that switches are actually taking place */
+ if (platform != 0x1)
+ printf("%d", read_clusterid());
+
+ /*
+ * Send an IPI to all the cores in this cluster to start
+ * a switchover.
+ */
+ signal_switchover();
+
+ if (hyp_timer_trigger)
+ ack_trigger();
+ else
+ /*
+ * Complete handling of the local timer interrupt at the physical gic
+ * level. Its disabled as its level triggerred and will reassert as
+		 * level. It is disabled as it is level-triggered and will reassert as
+		 * soon as we leave this function, since it has not been cleared at the
+ * cleared in "save_hyp_context". The interrupt is enabled then.
+ */
+ gic_disable_int(int_id);
+
+ /* Finish handling this interrupt */
+ gic_eoi_int(int_ack);
+ if (read32(GIC_IC_PHY_BASE + GICC_CTL) & 0x200)
+ gic_deactivate_int(int_ack);
+
+ return TRUE;
}
void keep_trigger_alive(void)
{
- /*
- * The OS might have disabled the HYP timer interrupt
- * while setting up its view of the vGIC. So enable
- * it if disabled upon receiving any other interrupt.
- * Better than virtualising vGIC accesses on the TARGET
- * CPU.
- */
- if (hyp_timer_trigger)
- if (!
- (read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
- (1 << HYP_TIMER_IRQ)))
- gic_enable_int(HYP_TIMER_IRQ);
-
- return;
+ /*
+ * The OS might have disabled the HYP timer interrupt
+ * while setting up its view of the vGIC. So enable
+ * it if disabled upon receiving any other interrupt.
+ * Better than virtualising vGIC accesses on the TARGET
+ * CPU.
+ */
+ if (hyp_timer_trigger)
+ if (!
+ (read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
+ (1 << HYP_TIMER_IRQ)))
+ gic_enable_int(HYP_TIMER_IRQ);
+
+ return;
}
void enable_trigger(unsigned tval)
{
- unsigned ctl = TIMER_ENABLE;
- unsigned platform = read32((KFSCB_BASE + KFS_ID) >> 20) & 0xf;
-
- /*
- * No need to lock this as its accessed by only one cpu
- * per cluster and that too one at a time.
- */
- static unsigned int rand_no = 0xdeadbeef;
- static struct _rand_state buffer;
-
- /*
- * Nothing needs to be done if physical local timers
- * are being used for doing a switchover.
- */
- if (hyp_timer_trigger == TRUE) {
- if (rand_async_switches) {
- _srand_r(&buffer, rand_no);
- rand_no = (unsigned) _rand_r(&buffer);
- }
-
- /* Enable timer and unmask interrupt */
- write_cnthp_ctl(ctl);
-
- if (rand_async_switches) {
- unsigned interval;
-
- /*
- * TODO: Assuming that the tval is always 12000000
- * Increment or decrement the timer value randomly
- * but never by more than a factor of 10
- */
- if (rand_no % 2)
- interval = tval * (rand_no % 10);
- else
- interval = tval / (rand_no % 10);
-
- write_cnthp_tval(interval);
-
- } else {
- /*
- * Program the timer to fire every 12000000 instructions
- * on the FastModel while 1500000 cycles on the Emulator
- */
- if (platform == 0x1)
- write_cnthp_tval(tval);
- else
- write_cnthp_tval(tval >> 3);
- }
-
- gic_enable_int(HYP_TIMER_IRQ);
- }
-
- return;
+ unsigned ctl = TIMER_ENABLE;
+	unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;
+
+ /*
+ * No need to lock this as its accessed by only one cpu
+ * per cluster and that too one at a time.
+ */
+ static unsigned int rand_no = 0xdeadbeef;
+ static struct _rand_state buffer;
+
+ /*
+ * Nothing needs to be done if physical local timers
+ * are being used for doing a switchover.
+ */
+ if (hyp_timer_trigger == TRUE) {
+ if (rand_async_switches) {
+ _srand_r(&buffer, rand_no);
+ rand_no = (unsigned)_rand_r(&buffer);
+ }
+
+ /* Enable timer and unmask interrupt */
+ write_cnthp_ctl(ctl);
+
+ if (rand_async_switches) {
+ unsigned interval;
+
+ /*
+ * TODO: Assuming that the tval is always 12000000
+ * Increment or decrement the timer value randomly
+ * but never by more than a factor of 10
+ */
+ if (rand_no % 2)
+ interval = tval * (rand_no % 10);
+ else
+ interval = tval / (rand_no % 10);
+
+ write_cnthp_tval(interval);
+
+ } else {
+ /*
+ * Program the timer to fire every 12000000 instructions
+			 * on the FastModel and every 1500000 cycles on the Emulator
+ */
+ if (platform == 0x1)
+ write_cnthp_tval(tval);
+ else
+ write_cnthp_tval(tval >> 3);
+ }
+
+ gic_enable_int(HYP_TIMER_IRQ);
+ }
+
+ return;
}
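
For reference, a sketch (not part of the patch) of the search that get_free_ipi() above performs over the four set-pending SGI registers: each 32-bit register covers four SGIs, one byte per SGI, with one pending bit per cpu interface inside that byte. read_spendsgir() is a stand-in for the read32() of the physical GICD_SPENDSGIR registers.

static unsigned find_free_sgi(unsigned cpu_if_bit,
			      unsigned (*read_spendsgir)(unsigned reg))
{
	unsigned reg, sgi;

	for (reg = 0; reg < 4; reg++)
		for (sgi = 0; sgi < 4; sgi++) {
			/* Skip SGIs we have already raised towards this cpu interface */
			if (read_spendsgir(reg) & (cpu_if_bit << (sgi << 3)))
				continue;

			return (reg << 2) + sgi;	/* SGI numbers 0..15 */
		}

	return 16;	/* nothing free; 16 is an invalid SGI number */
}
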
diff --git a/big-little/switcher/trigger/sync_switchover.c b/big-little/switcher/trigger/sync_switchover.c
index 1bc64a3..e8d8e99 100644
--- a/big-little/switcher/trigger/sync_switchover.c
+++ b/big-little/switcher/trigger/sync_switchover.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "misc.h"
#include "virt_helpers.h"
@@ -28,37 +28,37 @@ extern void signal_switchover(void);
unsigned is_hvc()
{
- return ((read_hsr() >> 26) == 0x12 ? TRUE : FALSE);
+ return ((read_hsr() >> 26) == 0x12 ? TRUE : FALSE);
}
unsigned HandleHVC(vm_context * context)
{
- unsigned opcode = read_hsr() & 0xffff;
- unsigned rc = FALSE;
+ unsigned opcode = read_hsr() & 0xffff;
+ unsigned rc = FALSE;
- switch(opcode) {
+ switch (opcode) {
- /*
- * HVC call to switch to the other cluster. This is done
- * by sending a switchover IPI to all the cores in the cluster.
- */
- case SYNC_SWITCHOVER:
- signal_switchover();
- rc = TRUE;
- break;
+ /*
+ * HVC call to switch to the other cluster. This is done
+ * by sending a switchover IPI to all the cores in the cluster.
+ */
+ case SYNC_SWITCHOVER:
+ signal_switchover();
+ rc = TRUE;
+ break;
- /*
- * HVC call to return the physical MPIDR
- */
- case READ_MPIDR:
- context->gp_regs[0] = read_mpidr();
- rc = TRUE;
- break;
+ /*
+ * HVC call to return the physical MPIDR
+ */
+ case READ_MPIDR:
+ context->gp_regs[0] = read_mpidr();
+ rc = TRUE;
+ break;
- default:
- break;
+ default:
+ break;
- }
+ }
- return rc;
+ return rc;
}
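
For reference, a sketch (not part of the patch) of the HSR fields that is_hvc() and HandleHVC() above decode: the exception class sits in bits [31:26] (0x12 for a trapped HVC), and for an HVC the 16-bit immediate encoded in the instruction sits in bits [15:0].

static unsigned hsr_exception_class(unsigned hsr)
{
	return hsr >> 26;	/* 0x12 => HVC instruction */
}

static unsigned hsr_hvc_immediate(unsigned hsr)
{
	return hsr & 0xffff;	/* e.g. SYNC_SWITCHOVER or READ_MPIDR */
}
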
diff --git a/big-little/virtualisor/cache_geom.c b/big-little/virtualisor/cache_geom.c
index 1031ba4..17c3ee6 100644
--- a/big-little/virtualisor/cache_geom.c
+++ b/big-little/virtualisor/cache_geom.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "virt_helpers.h"
@@ -40,198 +40,192 @@ static unsigned cm_extline_cnt[NUM_CPUS][MAX_CACHE_LEVELS] = { 0 };
* levels and save the geometry at each level.
*
*/
-void find_cache_geometry(cache_geometry *cg_ptr)
+void find_cache_geometry(cache_geometry *cg_ptr)
{
- unsigned ctr, clidr, ccsidr, csselr, old_csselr;
-
- /* Save Cache size selection register */
- old_csselr = read_csselr();
- clidr = read_clidr();
- cg_ptr->clidr = clidr;
-
- for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
- unsigned cache_type = get_cache_type(clidr, ctr);
-
- /* Only seperate and Unifiied caches */
- if (cache_type >= 0x3) {
- /*
- * Choose the cache level & Data or Unified cache
- * as there are no set/way operations on the ICache
- */
- csselr = ctr << 1;
- write_csselr(csselr);
-
- isb();
-
- /*
- * Read the CCSIDR to record information about this
- * cache level.
- */
- ccsidr = read_ccsidr();
- cg_ptr->ccsidr[ctr] = ccsidr;
-
- } else {
- /*
- * Stop scanning at the first invalid/unsupported
- * cache level
- */
- break;
- }
- }
-
- /* Restore Cache size selection register */
- write_csselr(old_csselr);
- return;
+ unsigned ctr, clidr, ccsidr, csselr, old_csselr;
+
+ /* Save Cache size selection register */
+ old_csselr = read_csselr();
+ clidr = read_clidr();
+ cg_ptr->clidr = clidr;
+
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+ unsigned cache_type = get_cache_type(clidr, ctr);
+
+		/* Only separate and Unified caches */
+ if (cache_type >= 0x3) {
+ /*
+ * Choose the cache level & Data or Unified cache
+ * as there are no set/way operations on the ICache
+ */
+ csselr = ctr << 1;
+ write_csselr(csselr);
+
+ isb();
+
+ /*
+ * Read the CCSIDR to record information about this
+ * cache level.
+ */
+ ccsidr = read_ccsidr();
+ cg_ptr->ccsidr[ctr] = ccsidr;
+
+ } else {
+ /*
+ * Stop scanning at the first invalid/unsupported
+ * cache level
+ */
+ break;
+ }
+ }
+
+ /* Restore Cache size selection register */
+ write_csselr(old_csselr);
+ return;
}
/*
* Given two cache geometries, find out how they differ
*/
-void find_cache_diff(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr, cache_diff *cd_ptr)
+void find_cache_diff(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr,
+		     cache_diff *cd_ptr)
{
- unsigned tc_size = 0, hc_size = 0, tc_linelen = 0, hc_linelen = 0;
- unsigned tc_assoc = 0, hc_assoc = 0, tc_numsets = 0, hc_numsets = 0;
- unsigned ctr;
-
- for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
-
- /* Break at the first unimplemented cache level */
- if (get_cache_type(hcg_ptr->clidr, ctr) == 0)
- break;
-
- /* Cache associativity */
- tc_assoc = get_cache_assoc(tcg_ptr, ctr) + 1;
- hc_assoc = get_cache_assoc(hcg_ptr, ctr) + 1;
-
- /* Number of the sets in the cache */
- tc_numsets = get_cache_numsets(tcg_ptr, ctr) + 1;
- hc_numsets = get_cache_numsets(hcg_ptr, ctr) + 1;
-
- /* Cache line length in words */
- tc_linelen = 1 << (get_cache_linesz(tcg_ptr, ctr) + 2);
- hc_linelen = 1 << (get_cache_linesz(hcg_ptr, ctr) + 2);
-
- /* Cache size in words */
- tc_size = tc_assoc * tc_numsets * tc_linelen;
- hc_size = hc_assoc * hc_numsets * hc_linelen;
-
- /*
- * Find the factor by which the cache line sizes differ.
- * If so, then the target cacheline will have to be
- * multiplied or divided by the factor to get the absolute
- * cache line number. Then, find the number of absolute
- * cache lines in each cache
- */
- if (tc_linelen >= hc_linelen) {
- cd_ptr[ctr].tcline_factor =
- tc_linelen / hc_linelen;
- cd_ptr[ctr].tnumabs_clines =
- tc_assoc * tc_numsets *
- cd_ptr[ctr].tcline_factor;
- cd_ptr[ctr].hnumabs_clines =
- hc_assoc * hc_numsets;
- } else {
- cd_ptr[ctr].hcline_factor =
- hc_linelen / tc_linelen;
- cd_ptr[ctr].hnumabs_clines =
- hc_assoc * hc_numsets *
- cd_ptr[ctr].hcline_factor;
- cd_ptr[ctr].tnumabs_clines =
- tc_assoc * tc_numsets;
- }
-
- /*
- * Find if the cache sizes differ. If so, then set a flag
- * to indicate whether some set/way operations need to be
- * extended on the host cpu or ignored on the target cpu
- */
- if (tc_size > hc_size) {
- cd_ptr[ctr].csize_diff = TCSZ_BIG;
- }
-
- if (tc_size == hc_size) {
- cd_ptr[ctr].csize_diff =
- TCSZ_EQUAL;
- }
-
- if (tc_size < hc_size) {
- cd_ptr[ctr].csize_diff =
- TCSZ_SMALL;
- }
- }
-
- return;
+ unsigned tc_size = 0, hc_size = 0, tc_linelen = 0, hc_linelen = 0;
+ unsigned tc_assoc = 0, hc_assoc = 0, tc_numsets = 0, hc_numsets = 0;
+ unsigned ctr;
+
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+
+ /* Break at the first unimplemented cache level */
+ if (get_cache_type(hcg_ptr->clidr, ctr) == 0)
+ break;
+
+ /* Cache associativity */
+ tc_assoc = get_cache_assoc(tcg_ptr, ctr) + 1;
+ hc_assoc = get_cache_assoc(hcg_ptr, ctr) + 1;
+
+ /* Number of the sets in the cache */
+ tc_numsets = get_cache_numsets(tcg_ptr, ctr) + 1;
+ hc_numsets = get_cache_numsets(hcg_ptr, ctr) + 1;
+
+ /* Cache line length in words */
+ tc_linelen = 1 << (get_cache_linesz(tcg_ptr, ctr) + 2);
+ hc_linelen = 1 << (get_cache_linesz(hcg_ptr, ctr) + 2);
+
+ /* Cache size in words */
+ tc_size = tc_assoc * tc_numsets * tc_linelen;
+ hc_size = hc_assoc * hc_numsets * hc_linelen;
+
+ /*
+ * Find the factor by which the cache line sizes differ.
+ * If so, then the target cacheline will have to be
+ * multiplied or divided by the factor to get the absolute
+ * cache line number. Then, find the number of absolute
+ * cache lines in each cache
+ */
+ if (tc_linelen >= hc_linelen) {
+ cd_ptr[ctr].tcline_factor = tc_linelen / hc_linelen;
+ cd_ptr[ctr].tnumabs_clines =
+ tc_assoc * tc_numsets * cd_ptr[ctr].tcline_factor;
+ cd_ptr[ctr].hnumabs_clines = hc_assoc * hc_numsets;
+ } else {
+ cd_ptr[ctr].hcline_factor = hc_linelen / tc_linelen;
+ cd_ptr[ctr].hnumabs_clines =
+ hc_assoc * hc_numsets * cd_ptr[ctr].hcline_factor;
+ cd_ptr[ctr].tnumabs_clines = tc_assoc * tc_numsets;
+ }
+
+ /*
+ * Find if the cache sizes differ. If so, then set a flag
+ * to indicate whether some set/way operations need to be
+ * extended on the host cpu or ignored on the target cpu
+ */
+ if (tc_size > hc_size) {
+ cd_ptr[ctr].csize_diff = TCSZ_BIG;
+ }
+
+ if (tc_size == hc_size) {
+ cd_ptr[ctr].csize_diff = TCSZ_EQUAL;
+ }
+
+ if (tc_size < hc_size) {
+ cd_ptr[ctr].csize_diff = TCSZ_SMALL;
+ }
+ }
+
+ return;
}
-unsigned map_cache_geometries(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr, cache_diff *cd_ptr)
+unsigned map_cache_geometries(cache_geometry *hcg_ptr,
+			      cache_geometry *tcg_ptr, cache_diff *cd_ptr)
{
- unsigned rc = 0, cpu_id = read_cpuid();
- unsigned hcr = 0, cluster_id = read_clusterid(), sibling_cpuid = 0;
- unsigned abs_cpuid = 0;
-
- if (!switcher) {
- sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
- abs_cpuid = abs_cpuid(cpu_id, cluster_id);
- }
-
- if (cluster_id == host_cluster) {
-
- /* Find host cache topology */
- find_cache_geometry(hcg_ptr);
-
- /*
- * Wait for the target cpu to send an event indicating that
- * its discovered its cache topology.
- */
- if (!switcher) {
- wait_for_event(CACHE_GEOM_DONE, abs_cpuid);
- reset_event(CACHE_GEOM_DONE, abs_cpuid);
- }
-
- /*
- * Assuming that only no. of sets, ways and cache line
- * size will be different across the target and host
- * cpu caches. Hence the CLIDRs should look the same
- * Support for absence of cache levels and memory
- * Also this check ensures that the target cpu is
- * always run before the host else the cache geometry
- * will have to be hardcoded.
- * mapped caches will be added later.
- */
- if (hcg_ptr->clidr != tcg_ptr->clidr) {
- printf("%s: Host CLIDR=0x%x : Target CLIDR=0x%x \n",
- __FUNCTION__, hcg_ptr->clidr, tcg_ptr->clidr);
- rc = 1;
- goto out;
- }
-
- find_cache_diff(hcg_ptr, tcg_ptr, cd_ptr);
-
- /*
- * Enable bit for trapping set/way operations &
- * Cache identification regs
- */
- hcr = read_hcr();
- hcr |= HCR_TSW | HCR_TID2;
- write_hcr(hcr);
- dsb();
- isb();
-
- } else {
-
- /* Find the cache geometry on the target cpu */
- find_cache_geometry(tcg_ptr);
-
- /*
- * Send an event to the host cpu indicating that we have
- * discovered our cache topology
- */
- if(!switcher) {
- set_event(CACHE_GEOM_DONE, sibling_cpuid);
- }
- }
+ unsigned rc = 0, cpu_id = read_cpuid();
+ unsigned hcr = 0, cluster_id = read_clusterid(), sibling_cpuid = 0;
+ unsigned abs_cpuid = 0;
+
+ if (!switcher) {
+ sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
+ abs_cpuid = abs_cpuid(cpu_id, cluster_id);
+ }
+
+ if (cluster_id == host_cluster) {
+
+ /* Find host cache topology */
+ find_cache_geometry(hcg_ptr);
+
+ /*
+ * Wait for the target cpu to send an event indicating that
+ * its discovered its cache topology.
+ */
+ if (!switcher) {
+ wait_for_event(CACHE_GEOM_DONE, abs_cpuid);
+ reset_event(CACHE_GEOM_DONE, abs_cpuid);
+ }
+
+ /*
+		 * Assuming that only the no. of sets, ways and cache line
+		 * size will be different across the target and host
+		 * cpu caches; hence the CLIDRs should look the same.
+		 * Also, this check ensures that the target cpu is
+		 * always run before the host, else the cache geometry
+		 * will have to be hardcoded.
+		 * Support for absence of cache levels and memory
+		 * mapped caches will be added later.
+ */
+ if (hcg_ptr->clidr != tcg_ptr->clidr) {
+ printf("%s: Host CLIDR=0x%x : Target CLIDR=0x%x \n",
+ __FUNCTION__, hcg_ptr->clidr, tcg_ptr->clidr);
+ rc = 1;
+ goto out;
+ }
+
+ find_cache_diff(hcg_ptr, tcg_ptr, cd_ptr);
+
+ /*
+ * Enable bit for trapping set/way operations &
+ * Cache identification regs
+ */
+ hcr = read_hcr();
+ hcr |= HCR_TSW | HCR_TID2;
+ write_hcr(hcr);
+ dsb();
+ isb();
+
+ } else {
+
+ /* Find the cache geometry on the target cpu */
+ find_cache_geometry(tcg_ptr);
+
+ /*
+ * Send an event to the host cpu indicating that we have
+ * discovered our cache topology
+ */
+ if (!switcher) {
+ set_event(CACHE_GEOM_DONE, sibling_cpuid);
+ }
+ }
out:
- return rc;
+ return rc;
}
/*
@@ -239,205 +233,204 @@ unsigned map_cache_geometries(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr,
* handle a cache maintenance operation by set/way
*/
void handle_cm_op(unsigned reg,
- void (*op_handler) (unsigned),
- cache_geometry *hcg_ptr,
- cache_geometry *tcg_ptr,
- cache_diff *cd_ptr)
+ void (*op_handler) (unsigned),
+ cache_geometry * hcg_ptr,
+ cache_geometry * tcg_ptr, cache_diff * cd_ptr)
{
- unsigned clvl = 0, cpu_id = read_cpuid();
- unsigned tc_assoc = 0, tc_numsets = 0, tc_linesz = 0;
- unsigned hc_assoc = 0, hc_numsets = 0, hc_linesz = 0;
- unsigned lineno = 0, setno = 0, wayno = 0, abs_lineno = 0;
-
- /*
- * If target cache line size is greater than the host then
- * each maintenance op has to be performed on two lines on
- * host. Does not matter is the line size if equal
- */
- unsigned ctr = cd_ptr[clvl].tcline_factor;
-
- /*
- * Find out the cache level for which the set/way operation has invoked.
- * Use this to find the cache geometry in target cache to ascertain the
- * set & way number from the argument. Use this info to calculate the
- * target cache line number.
- */
- clvl = get_cache_level(reg);
- tc_linesz = get_cache_linesz(tcg_ptr, clvl);
- tc_assoc = get_cache_assoc(tcg_ptr, clvl);
- tc_numsets = get_cache_numsets(tcg_ptr, clvl);
-
- wayno = (reg >> __clz(tc_assoc)) & tc_assoc;
- setno = (reg >> (tc_linesz + 4)) & tc_numsets;
- lineno = (setno * (tc_assoc + 1)) + wayno;
-
- if(cmop_debug) {
- /*
- * tc_prev_line is initialised to -1 (unsigned). We can never have so many
- * cache lines. Helps determining when to record the start of a cm op.
- * If count != lineno then either we are not counting or have been counting
- * and now are out of sync. In either case, a new cm op is started
- */
- if (tc_prev_line[cpu_id][clvl] != lineno) {
- tc_prev_line[cpu_id][clvl] = lineno;
- /* All ops start out as partial ops */
- cm_op_stats[cpu_id][clvl].part_cmop_cnt++;
-
- /* Reset all our counters */
- cm_ignline_cnt[cpu_id][clvl] = 0;
- cm_extline_cnt[cpu_id][clvl] = 0;
- hc_line_cnt[cpu_id][clvl] = 0;
- cm_line_cnt[cpu_id][clvl] = 0;
- }
-
- tc_prev_line[cpu_id][clvl]--;
- cm_line_cnt[cpu_id][clvl]++;
- }
-
- /* Convert target cache line no. to absolute cache line no. */
- if (cd_ptr[clvl].tcline_factor)
- abs_lineno = lineno * cd_ptr[clvl].tcline_factor;
-
- /* Convert absolute cache line no. to host cache line no. */
- if (cd_ptr[clvl].hcline_factor)
- lineno = abs_lineno / cd_ptr[clvl].hcline_factor;
-
- /*
- * Find out the set & way no. on the host cache corresponding to the
- * cache line no. calculated on the target cache.
- */
- hc_linesz = get_cache_linesz(hcg_ptr, clvl);
- hc_assoc = get_cache_assoc(hcg_ptr, clvl);
- hc_numsets = get_cache_numsets(hcg_ptr, clvl);
-
- switch (cd_ptr[clvl].csize_diff) {
- case TCSZ_BIG:
- {
- if (abs_lineno <
- cd_ptr[clvl].hnumabs_clines) {
- while (ctr) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
- reg =
- get_setway_reg(wayno, hc_assoc,
- setno, hc_linesz,
- clvl);;
- op_handler(reg);
- lineno++;
- ctr--;
-
- if(cmop_debug)
- hc_line_cnt[cpu_id][clvl]++;
-
- }
- } else {
- /* Ignore */
- if(cmop_debug)
- cm_ignline_cnt[cpu_id][clvl]++;
-
- }
- }
- break;
- case TCSZ_EQUAL:
- {
- while (ctr) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
- reg =
- get_setway_reg(wayno, hc_assoc, setno,
- hc_linesz, clvl);;
- op_handler(reg);
- lineno++;
- ctr--;
-
- if(cmop_debug)
- hc_line_cnt[cpu_id][clvl]++;
-
- }
- }
- break;
-
- case TCSZ_SMALL:
- {
- while (ctr) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
- reg =
- get_setway_reg(wayno, hc_assoc, setno,
- hc_linesz, clvl);;
- op_handler(reg);
- lineno++;
- ctr--;
-
- if(cmop_debug)
- hc_line_cnt[cpu_id][clvl]++;
-
- }
-
- /*
- * If the target cache is smaller than the host cache then we
- * need to extend the maintenance operation to rest of the host
- * cache.
- */
- if ((abs_lineno +
- (1 * cd_ptr[clvl].tcline_factor)) ==
- cd_ptr[clvl].tnumabs_clines) {
-
- /*
- * TODO: Temp hack. Due to the cache line factor we end up incrementing
- * the lineno and miss one line.
- */
- lineno--;
- for (lineno++;
- lineno < (hc_numsets + 1) * (hc_assoc + 1);
- lineno++) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
-
- /* Create new register value for operation on host cache */
- reg =
- get_setway_reg(wayno, hc_assoc,
- setno, hc_linesz,
- clvl);;
- /* Perform the operation */
- op_handler(reg);
-
- if(cmop_debug)
- cm_extline_cnt[cpu_id][clvl]++;
-
- }
- } else {
- /* Ignore */
- }
- break;
- }
- }
-
-
- if(cmop_debug) {
- /*
- * If the op cnt has reached the maximum cache line number then
- * print the statistics collected so far
- *
- * NOTE: We don't reset the counter. It will done at the start
- * of the next cm op automatically. Its value now is one more
- * than the maximum valid target cache line number.
- */
- if (cm_line_cnt[cpu_id][clvl] == (tc_assoc + 1) * (tc_numsets + 1)) {
-
- printf("%s", __FUNCTION__);
- printf(" : TC Lines=0x%x ", cm_line_cnt[cpu_id][clvl]);
- printf(" : HC Lines=0x%x ", hc_line_cnt[cpu_id][clvl]);
- printf(" : Ign Lines=0x%x ", cm_ignline_cnt[cpu_id][clvl]);
- printf(" : Extra Lines=0x%x ", cm_extline_cnt[cpu_id][clvl]);
- printf("\n");
-
- /* Register this as a complete set/way operation */
- cm_op_stats[cpu_id][clvl].part_cmop_cnt--;
- cm_op_stats[cpu_id][clvl].cmpl_cmop_cnt++;
- }
- }
-
- return;
+ unsigned clvl = 0, cpu_id = read_cpuid();
+ unsigned tc_assoc = 0, tc_numsets = 0, tc_linesz = 0;
+ unsigned hc_assoc = 0, hc_numsets = 0, hc_linesz = 0;
+ unsigned lineno = 0, setno = 0, wayno = 0, abs_lineno = 0;
+
+ /*
+	 * If the target cache line size is greater than the host's,
+	 * then each maintenance op has to be performed on two lines
+	 * on the host. It does not matter if the line sizes are equal.
+ */
+ unsigned ctr = cd_ptr[clvl].tcline_factor;
+
+ /*
+	 * Find out the cache level for which the set/way operation has been invoked.
+ * Use this to find the cache geometry in target cache to ascertain the
+ * set & way number from the argument. Use this info to calculate the
+ * target cache line number.
+ */
+ clvl = get_cache_level(reg);
+ tc_linesz = get_cache_linesz(tcg_ptr, clvl);
+ tc_assoc = get_cache_assoc(tcg_ptr, clvl);
+ tc_numsets = get_cache_numsets(tcg_ptr, clvl);
+
+ wayno = (reg >> __clz(tc_assoc)) & tc_assoc;
+ setno = (reg >> (tc_linesz + 4)) & tc_numsets;
+ lineno = (setno * (tc_assoc + 1)) + wayno;
+
+ if (cmop_debug) {
+ /*
+ * tc_prev_line is initialised to -1 (unsigned). We can never have so many
+		 * cache lines. This helps determine when to record the start of a cm op.
+ * If count != lineno then either we are not counting or have been counting
+ * and now are out of sync. In either case, a new cm op is started
+ */
+ if (tc_prev_line[cpu_id][clvl] != lineno) {
+ tc_prev_line[cpu_id][clvl] = lineno;
+ /* All ops start out as partial ops */
+ cm_op_stats[cpu_id][clvl].part_cmop_cnt++;
+
+ /* Reset all our counters */
+ cm_ignline_cnt[cpu_id][clvl] = 0;
+ cm_extline_cnt[cpu_id][clvl] = 0;
+ hc_line_cnt[cpu_id][clvl] = 0;
+ cm_line_cnt[cpu_id][clvl] = 0;
+ }
+
+ tc_prev_line[cpu_id][clvl]--;
+ cm_line_cnt[cpu_id][clvl]++;
+ }
+
+ /* Convert target cache line no. to absolute cache line no. */
+ if (cd_ptr[clvl].tcline_factor)
+ abs_lineno = lineno * cd_ptr[clvl].tcline_factor;
+
+ /* Convert absolute cache line no. to host cache line no. */
+ if (cd_ptr[clvl].hcline_factor)
+ lineno = abs_lineno / cd_ptr[clvl].hcline_factor;
+
+ /*
+ * Find out the set & way no. on the host cache corresponding to the
+ * cache line no. calculated on the target cache.
+ */
+ hc_linesz = get_cache_linesz(hcg_ptr, clvl);
+ hc_assoc = get_cache_assoc(hcg_ptr, clvl);
+ hc_numsets = get_cache_numsets(hcg_ptr, clvl);
+
+ switch (cd_ptr[clvl].csize_diff) {
+ case TCSZ_BIG:
+ {
+ if (abs_lineno < cd_ptr[clvl].hnumabs_clines) {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg =
+ get_setway_reg(wayno, hc_assoc,
+ setno, hc_linesz,
+							   clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if (cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+ } else {
+ /* Ignore */
+ if (cmop_debug)
+ cm_ignline_cnt[cpu_id][clvl]++;
+
+ }
+ }
+ break;
+ case TCSZ_EQUAL:
+ {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg =
+ get_setway_reg(wayno, hc_assoc, setno,
+					   hc_linesz, clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if (cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+ }
+ break;
+
+ case TCSZ_SMALL:
+ {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg =
+ get_setway_reg(wayno, hc_assoc, setno,
+					   hc_linesz, clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if (cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+
+ /*
+ * If the target cache is smaller than the host cache then we
+ * need to extend the maintenance operation to rest of the host
+ * cache.
+ */
+ if ((abs_lineno +
+ (1 * cd_ptr[clvl].tcline_factor)) ==
+ cd_ptr[clvl].tnumabs_clines) {
+
+ /*
+ * TODO: Temp hack. Due to the cache line factor we end up incrementing
+ * the lineno and miss one line.
+ */
+ lineno--;
+ for (lineno++;
+ lineno < (hc_numsets + 1) * (hc_assoc + 1);
+ lineno++) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+
+ /* Create new register value for operation on host cache */
+ reg =
+ get_setway_reg(wayno, hc_assoc,
+ setno, hc_linesz,
+							   clvl);
+ /* Perform the operation */
+ op_handler(reg);
+
+ if (cmop_debug)
+ cm_extline_cnt[cpu_id][clvl]++;
+
+ }
+ } else {
+ /* Ignore */
+ }
+ break;
+ }
+ }
+
+ if (cmop_debug) {
+ /*
+ * If the op cnt has reached the maximum cache line number then
+ * print the statistics collected so far
+ *
+		 * NOTE: We don't reset the counter. It will be done at the start
+ * of the next cm op automatically. Its value now is one more
+ * than the maximum valid target cache line number.
+ */
+ if (cm_line_cnt[cpu_id][clvl] ==
+ (tc_assoc + 1) * (tc_numsets + 1)) {
+
+ printf("%s", __FUNCTION__);
+ printf(" : TC Lines=0x%x ", cm_line_cnt[cpu_id][clvl]);
+ printf(" : HC Lines=0x%x ", hc_line_cnt[cpu_id][clvl]);
+ printf(" : Ign Lines=0x%x ",
+ cm_ignline_cnt[cpu_id][clvl]);
+ printf(" : Extra Lines=0x%x ",
+ cm_extline_cnt[cpu_id][clvl]);
+ printf("\n");
+
+ /* Register this as a complete set/way operation */
+ cm_op_stats[cpu_id][clvl].part_cmop_cnt--;
+ cm_op_stats[cpu_id][clvl].cmpl_cmop_cnt++;
+ }
+ }
+
+ return;
}
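The decode and re-encode that handle_cm_op() performs on the set/way operand can be collected into one self-contained helper. The bit layout assumed below is the architectural DCSW/DCCSW format the code relies on (way in the top bits, set above the line-size bits, level in bits [3:1]); the helper names are illustrative and not the Virtualisor's own API.

/* Sketch: translate a set/way operand from the target geometry to the
 * host geometry, mirroring the arithmetic in handle_cm_op(). assoc and
 * numsets are "value - 1" as read from CCSIDR, linesz is the CCSIDR
 * LineSize field (log2 of the line length in bytes, minus 4), and
 * assoc must be non-zero, as in the original. Pass 1 for whichever
 * line factor does not apply. */
static unsigned encode_setway(unsigned way, unsigned assoc,
			      unsigned set, unsigned linesz, unsigned level)
{
	return (way << __builtin_clz(assoc)) |
	    (set << (linesz + 4)) | (level << 1);
}

static unsigned translate_setway(unsigned reg,
				 unsigned tc_assoc, unsigned tc_numsets,
				 unsigned tc_linesz, unsigned hc_assoc,
				 unsigned hc_linesz, unsigned tcline_factor,
				 unsigned hcline_factor)
{
	unsigned level = (reg >> 1) & 0x7;
	unsigned way = (reg >> __builtin_clz(tc_assoc)) & tc_assoc;
	unsigned set = (reg >> (tc_linesz + 4)) & tc_numsets;
	unsigned lineno = set * (tc_assoc + 1) + way;
	unsigned abs_lineno = lineno * tcline_factor;
	unsigned hlineno = abs_lineno / hcline_factor;

	return encode_setway(hlineno % (hc_assoc + 1), hc_assoc,
			     hlineno / (hc_assoc + 1), hc_linesz, level);
}

When the line sizes match, both factors are 1 and the translation reduces to renumbering the same absolute line between the two set/way layouts.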
-
diff --git a/big-little/virtualisor/cpus/a15/a15.c b/big-little/virtualisor/cpus/a15/a15.c
index 942fd8f..17c91a4 100644
--- a/big-little/virtualisor/cpus/a15/a15.c
+++ b/big-little/virtualisor/cpus/a15/a15.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "bl.h"
#include "virtualisor.h"
@@ -33,41 +33,37 @@ static virt_descriptor a15_virt_desc;
*/
unsigned a15_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a15_trap_save(unsigned first_cpu, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a15_trap_restore(unsigned first_cpu, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a15_trap_setup(unsigned first_cpu, unsigned sibling_cpu)
{
- if (switcher) {
-
- } else {
- /* Always on */
- }
-
- /*
- * Indicate that cpu specific virtualisor setup
- * has been done. Restore context instead on next
- * invocation
- */
- a15_virt_desc.init[read_cpuid()] = 1;
- return 0;
+ if (switcher) {
+
+ } else {
+ /* Always on */
+ }
+
+ /*
+ * Indicate that cpu specific virtualisor setup
+ * has been done. Restore context instead on next
+ * invocation
+ */
+ a15_virt_desc.init[read_cpuid()] = 1;
+ return 0;
}
-static virt_descriptor a15_virt_desc __attribute__ ((section("virt_desc_section"))) = {
- A15,
- {0},
- a15_trap_setup,
- a15_trap_handle,
- a15_trap_save,
- a15_trap_restore,
-};
+static virt_descriptor a15_virt_desc
+ __attribute__ ((section("virt_desc_section"))) = {
+	A15,
+	{0},
+	a15_trap_setup,
+	a15_trap_handle,
+	a15_trap_save,
+	a15_trap_restore,
+};
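The initialiser above registers the A15 handlers by dropping the descriptor into virt_desc_section; the generic code then walks that region through the linker-generated virt_desc_section$$Base/$$Length symbols (see virt_context.c and virt_handle.c below). For comparison, the same registration pattern with a GNU toolchain would look roughly like the sketch below; the __start_/__stop_ symbols are the GNU ld convention and every name here is illustrative, not part of this tree.

/* Rough GNU-ld equivalent of the section-based registration used here.
 * GNU ld emits __start_<section> and __stop_<section> symbols for any
 * section whose name is a valid C identifier; they play the role of
 * the armlink $$Base/$$Length symbols. Illustrative names only. */
struct cpu_desc {
	unsigned cpu_no;
	unsigned (*trap_setup) (unsigned, unsigned);
};

#define REGISTER_CPU_DESC(name) \
	static struct cpu_desc name \
	__attribute__((section("cpu_desc_section"), used))

REGISTER_CPU_DESC(a15_example_desc) = { 0xC0F, 0 };	/* 0xC0F: Cortex-A15 part no. */

extern struct cpu_desc __start_cpu_desc_section[];
extern struct cpu_desc __stop_cpu_desc_section[];

static void setup_matching_cpu(unsigned part_no, unsigned first_cpu,
			       unsigned sibling)
{
	struct cpu_desc *d;

	for (d = __start_cpu_desc_section; d < __stop_cpu_desc_section; d++)
		if (d->cpu_no == part_no && d->trap_setup)
			d->trap_setup(first_cpu, sibling);
}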
diff --git a/big-little/virtualisor/cpus/a15/include/a15.h b/big-little/virtualisor/cpus/a15/include/a15.h
index c05bd75..3a9515d 100644
--- a/big-little/virtualisor/cpus/a15/include/a15.h
+++ b/big-little/virtualisor/cpus/a15/include/a15.h
@@ -18,9 +18,9 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __A15_H__
#define __A15_H__
-#endif /* __A15_H__ */
+#endif /* __A15_H__ */
diff --git a/big-little/virtualisor/cpus/a7/a7.c b/big-little/virtualisor/cpus/a7/a7.c
index 6ff8f6c..e9e16af 100644
--- a/big-little/virtualisor/cpus/a7/a7.c
+++ b/big-little/virtualisor/cpus/a7/a7.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "bl.h"
#include "virtualisor.h"
@@ -33,41 +33,37 @@ static virt_descriptor a7_virt_desc;
*/
unsigned a7_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a7_trap_save(unsigned first_cpu, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a7_trap_restore(unsigned first_cpu, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a7_trap_setup(unsigned first_cpu, unsigned sibling_cpu)
{
- if (switcher) {
+ if (switcher) {
- } else {
- /* Always on */
- }
+ } else {
+ /* Always on */
+ }
- /*
- * Indicate that cpu specific virtualisor setup
- * has been done. Restore context instead on next
- * invocation
- */
- a7_virt_desc.init[read_cpuid()] = 1;
- return 0;
+ /*
+ * Indicate that cpu specific virtualisor setup
+ * has been done. Restore context instead on next
+ * invocation
+ */
+ a7_virt_desc.init[read_cpuid()] = 1;
+ return 0;
}
-static virt_descriptor a7_virt_desc __attribute__ ((section("virt_desc_section"))) = {
- A7,
- {0},
- a7_trap_setup,
- a7_trap_handle,
- a7_trap_save,
- a7_trap_restore,
-};
+static virt_descriptor a7_virt_desc
+ __attribute__ ((section("virt_desc_section"))) = {
+	A7,
+	{0},
+	a7_trap_setup,
+	a7_trap_handle,
+	a7_trap_save,
+	a7_trap_restore,
+};
diff --git a/big-little/virtualisor/cpus/a7/include/a7.h b/big-little/virtualisor/cpus/a7/include/a7.h
index da3db59..ff3000e 100644
--- a/big-little/virtualisor/cpus/a7/include/a7.h
+++ b/big-little/virtualisor/cpus/a7/include/a7.h
@@ -18,9 +18,9 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __A7_H__
#define __A7_H__
-#endif /* __A7_H__ */
+#endif /* __A7_H__ */
diff --git a/big-little/virtualisor/include/cache_geom.h b/big-little/virtualisor/include/cache_geom.h
index 359c480..654a0f0 100644
--- a/big-little/virtualisor/include/cache_geom.h
+++ b/big-little/virtualisor/include/cache_geom.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __CACHE_GEOM_H__
#define __CACHE_GEOM_H__
@@ -46,38 +46,38 @@
* maximum granularity.
*/
typedef struct cache_diff {
- /* Stores whether target cache is =,<,> host cache */
- unsigned csize_diff;
- /*
- * Stores factor by which target cache line
- * has to be multiplied to get absolute line
- * no.
- */
- unsigned tcline_factor;
- /*
- * Stores factor by which absolute cache line
- * no. has to be divided to get host cache line
- * no.
- */
- unsigned hcline_factor;
- /* Max absolute target cpu cache line number */
- unsigned tnumabs_clines;
- /* Max absolute host cpu cache line number */
- unsigned hnumabs_clines;
+ /* Stores whether target cache is =,<,> host cache */
+ unsigned csize_diff;
+ /*
+ * Stores factor by which target cache line
+ * has to be multiplied to get absolute line
+ * no.
+ */
+ unsigned tcline_factor;
+ /*
+ * Stores factor by which absolute cache line
+ * no. has to be divided to get host cache line
+ * no.
+ */
+ unsigned hcline_factor;
+ /* Max absolute target cpu cache line number */
+ unsigned tnumabs_clines;
+ /* Max absolute host cpu cache line number */
+ unsigned hnumabs_clines;
} cache_diff;
/*
* Data structure that defines the cache topology of a cpu
*/
typedef struct cache_geom {
- unsigned clidr;
- /*
- * One for each cpu to store the cache level
- * the OS thinks its operating on.
- */
- unsigned csselr;
- /* One for each cache level */
- unsigned ccsidr[MAX_CACHE_LEVELS];
+ unsigned clidr;
+ /*
+ * One for each cpu to store the cache level
+ * the OS thinks its operating on.
+	 * the OS thinks it is operating on.
+ unsigned csselr;
+ /* One for each cache level */
+ unsigned ccsidr[MAX_CACHE_LEVELS];
} cache_geometry;
/*
@@ -85,23 +85,18 @@ typedef struct cache_geom {
* Reset for each switchover.
*/
typedef struct cache_stats {
- /* Number of cm ops which did not cover the whole cache */
- unsigned part_cmop_cnt;
- /* Number of cm ops which spanned the entire cache */
- unsigned cmpl_cmop_cnt;
+ /* Number of cm ops which did not cover the whole cache */
+ unsigned part_cmop_cnt;
+ /* Number of cm ops which spanned the entire cache */
+ unsigned cmpl_cmop_cnt;
} cache_stats;
-extern unsigned map_cache_geometries(cache_geometry *,
- cache_geometry *,
- cache_diff *);
+extern unsigned map_cache_geometries(cache_geometry *,
+ cache_geometry *, cache_diff *);
extern void find_cache_geometry(cache_geometry *);
-extern void find_cache_diff(cache_geometry *,
- cache_geometry *,
- cache_diff *);
+extern void find_cache_diff(cache_geometry *, cache_geometry *, cache_diff *);
extern void handle_cm_op(unsigned,
- void (*) (unsigned),
- cache_geometry *,
- cache_geometry *,
- cache_diff *);
-
-#endif /* __CACHE_GEOM_H__ */
+ void (*)(unsigned),
+ cache_geometry *, cache_geometry *, cache_diff *);
+
+#endif /* __CACHE_GEOM_H__ */
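The per-level ccsidr words cached here carry the geometry the rest of the Virtualisor needs. The sketch below shows the architectural CCSIDR field layout those values follow; the project's get_cache_* helpers presumably extract the same fields, and the function names here are only illustrative.

/* ARMv7 CCSIDR field layout backing the ccsidr[] entries above:
 *   bits [2:0]   LineSize      = log2(words per line) - 2
 *   bits [12:3]  Associativity = number of ways - 1
 *   bits [27:13] NumSets       = number of sets - 1
 * Illustrative helper names. */
static inline unsigned ccsidr_log2_linelen(unsigned ccsidr)
{
	return (ccsidr & 0x7) + 4;	/* log2 of the line length in bytes */
}

static inline unsigned ccsidr_assoc(unsigned ccsidr)
{
	return (ccsidr >> 3) & 0x3ff;	/* associativity - 1 */
}

static inline unsigned ccsidr_numsets(unsigned ccsidr)
{
	return (ccsidr >> 13) & 0x7fff;	/* number of sets - 1 */
}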
diff --git a/big-little/virtualisor/include/mem_trap.h b/big-little/virtualisor/include/mem_trap.h
index ac23844..ab68259 100644
--- a/big-little/virtualisor/include/mem_trap.h
+++ b/big-little/virtualisor/include/mem_trap.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __MEM_TRAP_H__
#define __MEM_TRAP_H__
@@ -31,23 +31,23 @@
* routines.
*/
typedef struct trap_data {
- /* Does this structure contain valid data */
- unsigned valid;
- /* Which cluster to save/restore this trap on */
- unsigned cluster_id;
- /* Translation table address */
- unsigned long long table;
- /* Index corresponding to mapping */
- unsigned index;
- /* TODO: Revisit why we need two variables here */
- /* Original Descriptor */
- unsigned long long prev_desc;
- /* Current Descriptor */
- unsigned long long cur_desc;
+ /* Does this structure contain valid data */
+ unsigned valid;
+ /* Which cluster to save/restore this trap on */
+ unsigned cluster_id;
+ /* Translation table address */
+ unsigned long long table;
+ /* Index corresponding to mapping */
+ unsigned index;
+ /* TODO: Revisit why we need two variables here */
+ /* Original Descriptor */
+ unsigned long long prev_desc;
+ /* Current Descriptor */
+ unsigned long long cur_desc;
} mem_trap_data;
extern unsigned mem_trap_setup(unsigned, mem_trap_data *);
extern mem_trap_data s2_trap_section$$Base;
extern unsigned s2_trap_section$$Length;
-#endif /* __MEM_TRAP_H__ */
+#endif /* __MEM_TRAP_H__ */
diff --git a/big-little/virtualisor/include/virtualisor.h b/big-little/virtualisor/include/virtualisor.h
index abf74bb..f097d39 100644
--- a/big-little/virtualisor/include/virtualisor.h
+++ b/big-little/virtualisor/include/virtualisor.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __VIRTUALISOR_H__
#define __VIRTUALISOR_H__
@@ -30,9 +30,9 @@
* Data structure that holds a copy of the virtualized regs
*/
typedef struct virt_regs {
- unsigned cluster_id;
- unsigned mpidr;
- unsigned midr;
+ unsigned cluster_id;
+ unsigned mpidr;
+ unsigned midr;
} virt_reg_data;
/*
@@ -40,14 +40,14 @@ typedef struct virt_regs {
* by the Virtualisation Extensions.
*/
typedef struct trap_regs {
- unsigned hcr;
- unsigned hdcr;
- unsigned hcptr;
- unsigned hstr;
+ unsigned hcr;
+ unsigned hdcr;
+ unsigned hcptr;
+ unsigned hstr;
} reg_trap_data;
typedef struct gp_regs {
- unsigned r[15];
+ unsigned r[15];
} gp_regs;
/*
@@ -57,17 +57,17 @@ typedef struct gp_regs {
* -igured trap.
*/
typedef struct virt_desc {
- /* cpu midr contents */
- unsigned cpu_no;
- /*
- * Bitmask to inidicate that Virtualisor setup has been
- * done on both host & target cpus.
- */
- unsigned char init[NUM_CPUS];
- unsigned (*trap_setup) (unsigned, unsigned);
- unsigned (*trap_handle) (gp_regs * regs, unsigned, unsigned);
- unsigned (*trap_save) (unsigned, unsigned);
- unsigned (*trap_restore) (unsigned, unsigned);
+ /* cpu midr contents */
+ unsigned cpu_no;
+ /*
+	 * Bitmask to indicate that Virtualisor setup has been
+ * done on both host & target cpus.
+ */
+ unsigned char init[NUM_CPUS];
+ unsigned (*trap_setup) (unsigned, unsigned);
+ unsigned (*trap_handle) (gp_regs * regs, unsigned, unsigned);
+ unsigned (*trap_save) (unsigned, unsigned);
+ unsigned (*trap_restore) (unsigned, unsigned);
} virt_descriptor;
extern void SetupVirtualisor(unsigned);
@@ -81,4 +81,4 @@ extern unsigned virt_desc_section$$Length;
extern unsigned host_cluster;
extern unsigned switcher;
-#endif /* __VIRTUALISOR_H__ */
+#endif /* __VIRTUALISOR_H__ */
diff --git a/big-little/virtualisor/mem_trap.c b/big-little/virtualisor/mem_trap.c
index 04c0bb8..c40433a 100644
--- a/big-little/virtualisor/mem_trap.c
+++ b/big-little/virtualisor/mem_trap.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "misc.h"
@@ -30,103 +30,112 @@
* HYP mode by invalidating its descriptor in the 2nd stage
* translation tables
*/
-unsigned mem_trap_setup(unsigned periph_addr, mem_trap_data *periph_trap_data)
+unsigned mem_trap_setup(unsigned periph_addr, mem_trap_data * periph_trap_data)
{
- unsigned rc = 0x0, four_kb_index = 0;
- unsigned one_gb_index = 0, two_mb_index = 0;
- unsigned long long vtcr = 0x0, hcr = 0x0, level = 0;
- unsigned long long pagetable_base = 0x0, l2_desc = 0;
- unsigned long long l3_desc = 0, l3_table = 0;
-
- /* Check if 2nd stage translations are enabled */
- hcr = read_hcr();
- if (!(hcr & HCR_VM)) {
- printf("%s: 2nd Stage translations not enabled \n", __FUNCTION__);
- rc = 0x1;
- goto out;
- }
-
- /* Check what level of tables we need to start at */
- vtcr = read_vtcr();
- level = (vtcr >> 6) & 0x3;
-
- /* Read the page table base address. */
- pagetable_base = read_vttbr();
-
- /* Calculate the table indices */
- one_gb_index = periph_addr >> 30;
-
- /* Each GB contains (1 << 9) or 512 2MBs */
- two_mb_index = (periph_addr >> 21) - ((1 << 9) * one_gb_index);
-
- /* Each GB contains (1 << 18) or 262144 4KBs */
- four_kb_index = (periph_addr >> 12) - ((1 << 9) * (periph_addr >> 21));
-
- /* For either starting level find out the level 2 desc */
- switch (level) {
-
- case 0x1:
- {
- /* Start from first level */
- unsigned long long l1_desc = 0;
- unsigned long long l2_table = 0;
-
- l1_desc = ((unsigned long long *)((unsigned)(&pagetable_base)[0]))[one_gb_index];
- if ((l1_desc & 0x3) != TABLE_MAPPING) {
- printf("%s: Invalid 1st level desc : 0x%llu \n", __FUNCTION__, l1_desc);
- rc = 0x1;
- goto out;
- }
-
- l2_table = l1_desc & 0xfffffff000UL;
- l2_desc = ((unsigned long long *)((unsigned)(&l2_table)[0]))[two_mb_index];
- break;
- }
-
- case 0x0:
- {
- /* Start from second level */
- l2_desc = ((unsigned long long *)((unsigned)(&pagetable_base)[0]))[two_mb_index];
- break;
- }
-
- default:
- printf("%s: Invalid Pagetable level \n", __FUNCTION__);
- rc = 0x1;
- }
-
- /* Validate the 2nd level descriptor */
- if ((l2_desc & 0x3) != TABLE_MAPPING) {
- printf("%s: Invalid 2nd level desc : 0x%llu \n",
- __FUNCTION__, l2_desc);
- rc = 0x1;
- goto out;
- }
-
- l3_table = l2_desc & 0xfffffff000UL;
- l3_desc = ((unsigned long long *)((unsigned)(&l3_table)[0]))[four_kb_index];
-
- /*
- * Validate the 3rd level descriptor. This means that the mapping is
- * already invalid and we have not touched it
- */
- if ((l3_desc & 0x3) != VALID_MAPPING) {
- printf("%s: Invalid 3rd level desc : 0x%llu \n",
- __FUNCTION__, l3_desc);
- rc = 0x1;
- goto out;
- }
-
- /* Save the info gathered so far */
- periph_trap_data->table = l3_table;
- periph_trap_data->index = four_kb_index;
- periph_trap_data->prev_desc = l3_desc;
- periph_trap_data->cluster_id = read_clusterid();
- periph_trap_data->valid = 1;
-
- /* Invalidate the peripheral page table entry */
- ((unsigned long long *)((unsigned)(&l3_table)[0]))[four_kb_index] = 0x0;
+ unsigned rc = 0x0, four_kb_index = 0;
+ unsigned one_gb_index = 0, two_mb_index = 0;
+ unsigned long long vtcr = 0x0, hcr = 0x0, level = 0;
+ unsigned long long pagetable_base = 0x0, l2_desc = 0;
+ unsigned long long l3_desc = 0, l3_table = 0;
+
+ /* Check if 2nd stage translations are enabled */
+ hcr = read_hcr();
+ if (!(hcr & HCR_VM)) {
+ printf("%s: 2nd Stage translations not enabled \n",
+ __FUNCTION__);
+ rc = 0x1;
+ goto out;
+ }
+
+ /* Check what level of tables we need to start at */
+ vtcr = read_vtcr();
+ level = (vtcr >> 6) & 0x3;
+
+ /* Read the page table base address. */
+ pagetable_base = read_vttbr();
+
+ /* Calculate the table indices */
+ one_gb_index = periph_addr >> 30;
+
+ /* Each GB contains (1 << 9) or 512 2MBs */
+ two_mb_index = (periph_addr >> 21) - ((1 << 9) * one_gb_index);
+
+ /* Each GB contains (1 << 18) or 262144 4KBs */
+ four_kb_index = (periph_addr >> 12) - ((1 << 9) * (periph_addr >> 21));
+
+ /* For either starting level find out the level 2 desc */
+ switch (level) {
+
+ case 0x1:
+ {
+ /* Start from first level */
+ unsigned long long l1_desc = 0;
+ unsigned long long l2_table = 0;
+
+ l1_desc =
+ ((unsigned long long
+ *)((unsigned)(&pagetable_base)[0]))[one_gb_index];
+ if ((l1_desc & 0x3) != TABLE_MAPPING) {
+				printf("%s: Invalid 1st level desc : 0x%llx \n",
+ __FUNCTION__, l1_desc);
+ rc = 0x1;
+ goto out;
+ }
+
+ l2_table = l1_desc & 0xfffffff000UL;
+ l2_desc =
+ ((unsigned long long
+ *)((unsigned)(&l2_table)[0]))[two_mb_index];
+ break;
+ }
+
+ case 0x0:
+ {
+ /* Start from second level */
+ l2_desc =
+ ((unsigned long long
+ *)((unsigned)(&pagetable_base)[0]))[two_mb_index];
+ break;
+ }
+
+ default:
+ printf("%s: Invalid Pagetable level \n", __FUNCTION__);
+ rc = 0x1;
+ }
+
+ /* Validate the 2nd level descriptor */
+ if ((l2_desc & 0x3) != TABLE_MAPPING) {
+		printf("%s: Invalid 2nd level desc : 0x%llx \n",
+ __FUNCTION__, l2_desc);
+ rc = 0x1;
+ goto out;
+ }
+
+ l3_table = l2_desc & 0xfffffff000UL;
+ l3_desc =
+ ((unsigned long long *)((unsigned)(&l3_table)[0]))[four_kb_index];
+
+ /*
+ * Validate the 3rd level descriptor. This means that the mapping is
+ * already invalid and we have not touched it
+ */
+ if ((l3_desc & 0x3) != VALID_MAPPING) {
+		printf("%s: Invalid 3rd level desc : 0x%llx \n",
+ __FUNCTION__, l3_desc);
+ rc = 0x1;
+ goto out;
+ }
+
+ /* Save the info gathered so far */
+ periph_trap_data->table = l3_table;
+ periph_trap_data->index = four_kb_index;
+ periph_trap_data->prev_desc = l3_desc;
+ periph_trap_data->cluster_id = read_clusterid();
+ periph_trap_data->valid = 1;
+
+ /* Invalidate the peripheral page table entry */
+ ((unsigned long long *)((unsigned)(&l3_table)[0]))[four_kb_index] = 0x0;
out:
- return rc;
+ return rc;
}
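The index calculation above just slices the peripheral address into the three LPAE table indices; with a 4KB granule each table holds 512 entries, so subtracting the coarser index scaled by 512 is the same as masking. A worked example with a made-up address:

#include <stdio.h>

/* Worked example of the LPAE index arithmetic in mem_trap_setup().
 * The peripheral address is a made-up illustration. */
int main(void)
{
	unsigned periph_addr = 0x2c001000;
	unsigned one_gb_index = periph_addr >> 30;
	unsigned two_mb_index = (periph_addr >> 21) - ((1 << 9) * one_gb_index);
	unsigned four_kb_index = (periph_addr >> 12) -
	    ((1 << 9) * (periph_addr >> 21));

	/* Each table has 512 entries, so the masked forms are equivalent */
	printf("L1=%u  L2=%u (mask form %u)  L3=%u (mask form %u)\n",
	       one_gb_index,
	       two_mb_index, (periph_addr >> 21) & 0x1ff,
	       four_kb_index, (periph_addr >> 12) & 0x1ff);
	return 0;
}

For 0x2c001000 this prints L1=0, L2=352 and L3=1.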
diff --git a/big-little/virtualisor/vgic_trap_handler.c b/big-little/virtualisor/vgic_trap_handler.c
index 44469fb..4e626d0 100644
--- a/big-little/virtualisor/vgic_trap_handler.c
+++ b/big-little/virtualisor/vgic_trap_handler.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "gic_registers.h"
@@ -31,52 +31,51 @@
*/
void handle_vgic_distif_abort(unsigned pa, unsigned *data, unsigned write)
{
- unsigned value = 0, reg_offset = pa & 0xfff;
+ unsigned value = 0, reg_offset = pa & 0xfff;
- switch (reg_offset >> 7) {
-
- /* Access to Processor Target registers */
- case (GICD_CPUS >> 7):
- if (write) {
- /*
- * OS is trying to reprogram the processor targets register.
- * Find out the cpu interface mask for this cluster and use
- * that instead to program the register.
- */
- value = get_cpuif_mask(*data);
- write32(pa, value);
- } else {
- value = read32(pa);
- *data = get_cpu_mask(value);
- }
+ switch (reg_offset >> 7) {
- break;
-
- /* Access to Software generated interrupt register */
- case (GICD_SW >> 7):
- if (write) {
- /* Get the updated cpu interface mask */
- value = get_cpuif_mask((*data >> 16) & 0xff) << 16;
- value |= *data & ~(0xff << 16);
- /*
- * Clear the old cpu interface mask & update
- * value with new cpu interface mask
- */
- write32(pa, value);
- } else {
- /* Cannot possibly have a read from SGI generation register */
- }
-
- break;
-
- default:
- if (write) {
- write32(pa, *data);
- } else {
- *data = read32(pa);
- }
- }
+ /* Access to Processor Target registers */
+ case (GICD_CPUS >> 7):
+ if (write) {
+ /*
+ * OS is trying to reprogram the processor targets register.
+ * Find out the cpu interface mask for this cluster and use
+ * that instead to program the register.
+ */
+ value = get_cpuif_mask(*data);
+ write32(pa, value);
+ } else {
+ value = read32(pa);
+ *data = get_cpu_mask(value);
+ }
- return;
-}
+ break;
+
+ /* Access to Software generated interrupt register */
+ case (GICD_SW >> 7):
+ if (write) {
+ /* Get the updated cpu interface mask */
+ value = get_cpuif_mask((*data >> 16) & 0xff) << 16;
+ value |= *data & ~(0xff << 16);
+ /*
+ * Clear the old cpu interface mask & update
+ * value with new cpu interface mask
+ */
+ write32(pa, value);
+ } else {
+ /* Cannot possibly have a read from SGI generation register */
+ }
+ break;
+
+ default:
+ if (write) {
+ write32(pa, *data);
+ } else {
+ *data = read32(pa);
+ }
+ }
+
+ return;
+}
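In the SGI case above, only bits [23:16] of GICD_SGIR (the CPU target list) are rewritten while the rest of the written value is preserved. A minimal sketch of that byte surgery is shown below; remap_target_list() merely stands in for get_cpuif_mask() and the remapping it applies is purely illustrative.

/* Minimal sketch of the GICD_SGIR target-list rewrite performed above. */
static unsigned remap_target_list(unsigned cpu_mask)
{
	/* e.g. the other cluster's cpu interfaces might sit 4 bits up
	 * (hypothetical mapping, stands in for get_cpuif_mask()) */
	return (cpu_mask << 4) & 0xff;
}

static unsigned rewrite_sgir(unsigned sgir)
{
	unsigned new_mask = remap_target_list((sgir >> 16) & 0xff);

	/* Clear the old target list and splice in the remapped one */
	return (sgir & ~(0xff << 16)) | (new_mask << 16);
}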
diff --git a/big-little/virtualisor/virt_context.c b/big-little/virtualisor/virt_context.c
index 5472587..def3551 100644
--- a/big-little/virtualisor/virt_context.c
+++ b/big-little/virtualisor/virt_context.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "misc.h"
@@ -38,100 +38,100 @@ extern cache_stats cm_op_stats[NUM_CPUS][MAX_CACHE_LEVELS];
*/
void SaveVirtualisor(unsigned first_cpu)
{
- unsigned len = 0, ctr = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
- unsigned cluster_id = read_clusterid(), index = 0, vd_len = 0, rc = 0;
- mem_trap_data *s2_td = &s2_trap_section$$Base;
- unsigned long long *cd_ptr = 0x0;
- unsigned *periph_addr = 0x0;
- virt_descriptor *vd_array = &virt_desc_section$$Base;
- unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
-
- /* Find our brother from another mother */
- sibling = find_sibling_cpu();
-
- if (cluster_id == host_cluster) {
- /*
- * Since there is only one second stage translation table, its
- * safe to assume that only one cpu (first_cpu) should save &
- * restore the context.
- */
- len = (unsigned)&s2_trap_section$$Length;
- if (cpu_id == first_cpu) {
- /* Iterate through the array of 2nd stage translation traps */
- for (ctr = 0; ctr < (len / sizeof(mem_trap_data)); ctr++) {
- if (s2_td[ctr].valid
- && s2_td[ctr].cluster_id == cluster_id) {
-
- /*
- * Save the current descriptor and restore the
- * previous. Need not worry about synchronisation
- * issues, as the existing entry was causing
- * translation faults. The TLB never caches fault
- * generating translations.
- */
- cd_ptr =
- &((unsigned long long
- *)((unsigned)(&s2_td[ctr].
- table)[0]))[s2_td[ctr].
- index];
- s2_td[ctr].cur_desc = *cd_ptr;
- *cd_ptr = s2_td[ctr].prev_desc;
- periph_addr = (unsigned *) cd_ptr;
- dsb();
- inv_tlb_mva((unsigned *) periph_addr[0]);
- inv_bpred_all();
- }
- }
- }
-
- /* Save the HYP trap registers for this cpu */
- host_trap_regs[cpu_id].hcr = read_hcr();
- host_trap_regs[cpu_id].hdcr = read_hdcr();
- host_trap_regs[cpu_id].hcptr = read_hcptr();
- host_trap_regs[cpu_id].hstr = read_hstr();
-
- if(cmop_debug) {
- /* Print Cache maintenance statistics */
- for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
- printf("Cache Level %d", ctr);
- printf(" : Partial ops=0x%x",
- cm_op_stats[cpu_id][ctr].part_cmop_cnt);
- printf(" : Complete ops=0x%x",
- cm_op_stats[cpu_id][ctr].cmpl_cmop_cnt);
- printf("\n");
- }
- }
-
- }
-
- /*
- * Call any cpu specific save routines (if any)
- */
- vd_len = (unsigned)&virt_desc_section$$Length;
- for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
-
- if (cpu_no == vd_array[index].cpu_no) {
- handler = vd_array[index].trap_save;
- if(handler) {
- rc = handler(first_cpu, sibling);
- if (rc) {
- printf("%s: failed on cpu%d \n",
- __FUNCTION__,
- cpu_no);
- goto out;
- }
- }
- }
- }
+ unsigned len = 0, ctr = 0, cpu_id = read_cpuid(), cpu_no =
+ PART_NO(read_midr());
+ unsigned cluster_id = read_clusterid(), index = 0, vd_len = 0, rc = 0;
+ mem_trap_data *s2_td = &s2_trap_section$$Base;
+ unsigned long long *cd_ptr = 0x0;
+ unsigned *periph_addr = 0x0;
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ if (cluster_id == host_cluster) {
+ /*
+		 * Since there is only one second stage translation table, it is
+ * safe to assume that only one cpu (first_cpu) should save &
+ * restore the context.
+ */
+ len = (unsigned)&s2_trap_section$$Length;
+ if (cpu_id == first_cpu) {
+ /* Iterate through the array of 2nd stage translation traps */
+ for (ctr = 0; ctr < (len / sizeof(mem_trap_data));
+ ctr++) {
+ if (s2_td[ctr].valid
+ && s2_td[ctr].cluster_id == cluster_id) {
+
+ /*
+ * Save the current descriptor and restore the
+ * previous. Need not worry about synchronisation
+ * issues, as the existing entry was causing
+ * translation faults. The TLB never caches fault
+ * generating translations.
+ */
+ cd_ptr =
+ &((unsigned long long
+ *)((unsigned)(&s2_td[ctr].table)
+ [0]))[s2_td[ctr].index];
+ s2_td[ctr].cur_desc = *cd_ptr;
+ *cd_ptr = s2_td[ctr].prev_desc;
+ periph_addr = (unsigned *)cd_ptr;
+ dsb();
+ inv_tlb_mva((unsigned *)periph_addr[0]);
+ inv_bpred_all();
+ }
+ }
+ }
+
+ /* Save the HYP trap registers for this cpu */
+ host_trap_regs[cpu_id].hcr = read_hcr();
+ host_trap_regs[cpu_id].hdcr = read_hdcr();
+ host_trap_regs[cpu_id].hcptr = read_hcptr();
+ host_trap_regs[cpu_id].hstr = read_hstr();
+
+ if (cmop_debug) {
+ /* Print Cache maintenance statistics */
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+ printf("Cache Level %d", ctr);
+ printf(" : Partial ops=0x%x",
+ cm_op_stats[cpu_id][ctr].part_cmop_cnt);
+ printf(" : Complete ops=0x%x",
+ cm_op_stats[cpu_id][ctr].cmpl_cmop_cnt);
+ printf("\n");
+ }
+ }
+
+ }
+
+ /*
+ * Call any cpu specific save routines (if any)
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
+
+ if (cpu_no == vd_array[index].cpu_no) {
+ handler = vd_array[index].trap_save;
+ if (handler) {
+ rc = handler(first_cpu, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__, cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
out:
- if (rc) {
- printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
- __FUNCTION__, cpu_id, cpu_no, sibling);
- panic();
- }
+ if (rc) {
+ printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
- return;
+ return;
}
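The double cast used when indexing the second stage table above takes the low word of the 64-bit table address and uses it as a 32-bit pointer to an array of 64-bit descriptors. A plainer spelling of that access, and of the descriptor swap it supports, is sketched below with illustrative names (TLB and branch predictor maintenance omitted).

/* Equivalent, more direct spelling of the stage-2 descriptor access
 * above: the low 32 bits of the 64-bit table address are used as a
 * pointer to an array of 64-bit descriptors. Illustrative names only. */
static unsigned long long *desc_slot(unsigned long long table, unsigned index)
{
	return &((unsigned long long *)(unsigned)table)[index];
}

/* Swap the live descriptor with the saved one, which is what the
 * Save/Restore routines do in opposite directions. */
static void swap_desc(unsigned long long table, unsigned index,
		      unsigned long long *saved)
{
	unsigned long long *slot = desc_slot(table, index);
	unsigned long long live = *slot;

	*slot = *saved;
	*saved = live;
}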
/*
@@ -141,92 +141,93 @@ void SaveVirtualisor(unsigned first_cpu)
*/
void RestoreVirtualisor(unsigned first_cpu)
{
- unsigned len = 0, ctr = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
- unsigned cluster_id = read_clusterid(), index = 0, vd_len = 0, rc = 0;
- mem_trap_data *s2_td = &s2_trap_section$$Base;
- unsigned long long *cd_ptr = 0x0;
- unsigned *periph_addr = 0x0;
- virt_descriptor *vd_array = &virt_desc_section$$Base;
- unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
-
- /* Find our brother from another mother */
- sibling = find_sibling_cpu();
-
- if (cluster_id == host_cluster) {
- /*
- * Since there is only one second stage translation table, its
- * safe to assume that only one cpu (first_cpu) should save &
- * restore the context.
- */
- len = (unsigned)&s2_trap_section$$Length;
- if (cpu_id == first_cpu) {
- /* Iterate through the array of 2nd stage translation traps */
- for (ctr = 0; ctr < (len / sizeof(mem_trap_data)); ctr++) {
- if (s2_td[ctr].valid
- && s2_td[ctr].cluster_id == cluster_id) {
- /*
- * Restore the current descriptor and save the previous
- */
- cd_ptr =
- &((unsigned long long
- *)((unsigned)((&s2_td[ctr].
- table)[0])))[s2_td[ctr].
- index];
- s2_td[ctr].prev_desc = *cd_ptr;
- *cd_ptr = s2_td[ctr].cur_desc;
- periph_addr = (unsigned *) cd_ptr;
- dsb();
- inv_tlb_mva((unsigned *) periph_addr[0]);
- inv_bpred_all();
- }
- }
- }
-
- /* Now restore the virtualised ID registers for this cpu */
- write_vmidr(host_virt_regs[cpu_id].midr);
- write_vmpidr(host_virt_regs[cpu_id].mpidr);
-
- /* Restore the HYP trap registers for this cpu */
- write_hcr(host_trap_regs[cpu_id].hcr);
- write_hdcr(host_trap_regs[cpu_id].hdcr);
- write_hcptr(host_trap_regs[cpu_id].hcptr);
- write_hstr(host_trap_regs[cpu_id].hstr);
-
- if(cmop_debug) {
- /* Resetting Cache maintenance statistics */
- for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
- cm_op_stats[cpu_id][ctr].part_cmop_cnt = 0;
- cm_op_stats[cpu_id][ctr].cmpl_cmop_cnt = 0;
- }
- }
- }
-
- /*
- * Call any cpu specific restore routines (if any)
- */
- vd_len = (unsigned)&virt_desc_section$$Length;
- for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
-
- if (cpu_no == vd_array[index].cpu_no) {
- handler = vd_array[index].trap_restore;
- if(handler) {
- rc = handler(first_cpu, sibling);
- if (rc) {
- printf("%s: failed on cpu%d \n",
- __FUNCTION__,
- cpu_no);
- goto out;
- }
- }
- }
- }
+ unsigned len = 0, ctr = 0, cpu_id = read_cpuid(), cpu_no =
+ PART_NO(read_midr());
+ unsigned cluster_id = read_clusterid(), index = 0, vd_len = 0, rc = 0;
+ mem_trap_data *s2_td = &s2_trap_section$$Base;
+ unsigned long long *cd_ptr = 0x0;
+ unsigned *periph_addr = 0x0;
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ if (cluster_id == host_cluster) {
+ /*
+		 * Since there is only one second stage translation table, it is
+ * safe to assume that only one cpu (first_cpu) should save &
+ * restore the context.
+ */
+ len = (unsigned)&s2_trap_section$$Length;
+ if (cpu_id == first_cpu) {
+ /* Iterate through the array of 2nd stage translation traps */
+ for (ctr = 0; ctr < (len / sizeof(mem_trap_data));
+ ctr++) {
+ if (s2_td[ctr].valid
+ && s2_td[ctr].cluster_id == cluster_id) {
+ /*
+ * Restore the current descriptor and save the previous
+ */
+ cd_ptr =
+ &((unsigned long long
+ *)((unsigned)((&s2_td[ctr].table)
+ [0])))[s2_td
+ [ctr].index];
+ s2_td[ctr].prev_desc = *cd_ptr;
+ *cd_ptr = s2_td[ctr].cur_desc;
+ periph_addr = (unsigned *)cd_ptr;
+ dsb();
+ inv_tlb_mva((unsigned *)periph_addr[0]);
+ inv_bpred_all();
+ }
+ }
+ }
+
+ /* Now restore the virtualised ID registers for this cpu */
+ write_vmidr(host_virt_regs[cpu_id].midr);
+ write_vmpidr(host_virt_regs[cpu_id].mpidr);
+
+ /* Restore the HYP trap registers for this cpu */
+ write_hcr(host_trap_regs[cpu_id].hcr);
+ write_hdcr(host_trap_regs[cpu_id].hdcr);
+ write_hcptr(host_trap_regs[cpu_id].hcptr);
+ write_hstr(host_trap_regs[cpu_id].hstr);
+
+ if (cmop_debug) {
+ /* Resetting Cache maintenance statistics */
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+ cm_op_stats[cpu_id][ctr].part_cmop_cnt = 0;
+ cm_op_stats[cpu_id][ctr].cmpl_cmop_cnt = 0;
+ }
+ }
+ }
+
+ /*
+ * Call any cpu specific restore routines (if any)
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
+
+ if (cpu_no == vd_array[index].cpu_no) {
+ handler = vd_array[index].trap_restore;
+ if (handler) {
+ rc = handler(first_cpu, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__, cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
out:
- if (rc) {
- printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
- __FUNCTION__, cpu_id, cpu_no, sibling);
- panic();
- }
+ if (rc) {
+ printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
- return;
+ return;
}
diff --git a/big-little/virtualisor/virt_handle.c b/big-little/virtualisor/virt_handle.c
index 3e3f3d7..a247534 100644
--- a/big-little/virtualisor/virt_handle.c
+++ b/big-little/virtualisor/virt_handle.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "virt_helpers.h"
@@ -34,556 +34,573 @@ extern cache_diff cache_delta[NUM_CPUS][MAX_CACHE_LEVELS];
void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs)
{
- unsigned Op1, Op2, CRn, CRm, Rt, write, cpu_id = read_cpuid();
+ unsigned Op1, Op2, CRn, CRm, Rt, write, cpu_id = read_cpuid();
- Op2 = (hsr >> 17) & 0x7;
- Op1 = (hsr >> 14) & 0x7;
- CRn = (hsr >> 10) & 0xf;
- Rt = (hsr >> 5) & 0xf;
- CRm = (hsr >> 1) & 0xf;
- write = !(hsr & 0x1);
+ Op2 = (hsr >> 17) & 0x7;
+ Op1 = (hsr >> 14) & 0x7;
+ CRn = (hsr >> 10) & 0xf;
+ Rt = (hsr >> 5) & 0xf;
+ CRm = (hsr >> 1) & 0xf;
+ write = !(hsr & 0x1);
- switch (CRn) {
- case CRN_C0:
- switch (Op1) {
- case 0:
- switch (CRm) {
- case 0:
- switch (Op2) {
- case MIDR:
- if (write)
- goto error;
- regs->r[Rt] = read_vmidr();
- break;
- case CTR:
- if (write)
- goto error;
- regs->r[Rt] = read_ctr();
- break;
- case TCMTR:
- if (write)
- goto error;
- regs->r[Rt] = read_tcmtr();
- break;
- case TLBTR:
- if (write)
- goto error;
- regs->r[Rt] = read_tlbtr();
- break;
- case MPIDR:
- if (write)
- goto error;
- regs->r[Rt] = read_vmpidr();
- break;
- default:
- goto error;
- }
- break;
- case 1:
- switch (Op2) {
- case ID_PFR0:
- if (write)
- goto error;
- regs->r[Rt] = read_id_pfr0();
- break;
- case ID_PFR1:
- if (write)
- goto error;
- regs->r[Rt] = read_id_pfr1();
- break;
- case ID_DFR0:
- if (write)
- goto error;
- regs->r[Rt] = read_id_dfr0();
- break;
- case ID_AFR0:
- if (write)
- goto error;
- regs->r[Rt] = read_id_afr0();
- break;
- case ID_MMFR0:
- if (write)
- goto error;
- regs->r[Rt] = read_id_mmfr0();
- break;
- case ID_MMFR1:
- if (write)
- goto error;
- regs->r[Rt] = read_id_mmfr1();
- break;
- case ID_MMFR2:
- if (write)
- goto error;
- regs->r[Rt] = read_id_mmfr2();
- break;
- case ID_MMFR3:
- if (write)
- goto error;
- regs->r[Rt] = read_id_mmfr3();
- break;
- default:
- goto error;
- }
- break;
- case 2:
- switch (Op2) {
- case ID_ISAR0:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar0();
- break;
- case ID_ISAR1:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar1();
- break;
- case ID_ISAR2:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar2();
- break;
- case ID_ISAR3:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar3();
- break;
- case ID_ISAR4:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar4();
- break;
- case ID_ISAR5:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar5();
- break;
- default:
- /* RAZ */
- regs->r[Rt] = 0x0;
- }
- break;
- case 3:
- case 4:
- case 5:
- case 6:
- case 7:
- if (write)
- goto error;
- /* RAZ */
- regs->r[Rt] = 0x0;
- break;
- default:
- goto error;
- }
- break;
- case 1:
- switch (CRm) {
- case 0:
- switch (Op2) {
- case CCSIDR:
- if (write)
- goto error;
- regs->r[Rt] =
- target_cache_geometry[cpu_id].
- ccsidr[get_cache_level
- (target_cache_geometry[cpu_id].
- csselr)];
- break;
- case CLIDR:
- if (write)
- goto error;
- regs->r[Rt] =
- target_cache_geometry[cpu_id].clidr;
- break;
- case AIDR:
- if (write)
- goto error;
- regs->r[Rt] = read_aidr();
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
- case 2:
- switch (CRm) {
- case 0:
- switch (Op2) {
- case CSSELR:
- if (write)
- target_cache_geometry[cpu_id].
- csselr = regs->r[Rt];
- else
- regs->r[Rt] =
- target_cache_geometry[cpu_id].
- csselr;
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
- case CRN_C7:
- switch (Op1) {
- case 0:
- switch (CRm) {
- case 6:
- switch (Op2) {
- case DCISW:
- {
- if (!write)
- goto error;
- handle_cm_op(regs->r[Rt],
- dcisw,
- &host_cache_geometry[cpu_id],
- &target_cache_geometry[cpu_id],
- &cache_delta[cpu_id][0]);
- break;
- }
- default:
- goto error;
- }
- break;
- case 10:
- switch (Op2) {
- case DCCSW:
- {
- if (!write)
- goto error;
- handle_cm_op(regs->r[Rt],
- dccsw,
- &host_cache_geometry[cpu_id],
- &target_cache_geometry[cpu_id],
- &cache_delta[cpu_id][0]);
- break;
- }
- default:
- goto error;
- }
- break;
- case 14:
- switch (Op2) {
- case DCCISW:
- {
- if (!write)
- goto error;
- handle_cm_op(regs->r[Rt],
- dccisw,
- &host_cache_geometry[cpu_id],
- &target_cache_geometry[cpu_id],
- &cache_delta[cpu_id][0]);
- break;
- }
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
- case CRN_C9:
- switch (Op1) {
- case 1:
- switch (CRm) {
- case 0:
- switch (Op2) {
- case 2:
- /*
- * A write to the L2CTLR register means trouble
- * as the A7 version does not have all the fields
- * that the A15 has. Handling needs more thought
- */
- if (write) {
- printf("%s: Unexpected L2CTLR write \n",
- __FUNCTION__);
- goto error;
- }
-
- /*
- * A read of the L2CTLR should return the total number
- * of cpus across both the clusters in the "always on"
- * configuration. Since there are only 2 bits for the
- * number of cpus in the L2CTLR we need to flag any
- * system with > 4 cpus.
- */
- if (!switcher) {
- unsigned num_cpus = CLUSTER_CPU_COUNT(host_cluster)
- + CLUSTER_CPU_COUNT(!host_cluster);
-
- if (num_cpus > 4) {
- printf("%s: Unexpected L2CTLR read \n",
- __FUNCTION__);
- goto error;
- }
+ switch (CRn) {
+ case CRN_C0:
+ switch (Op1) {
+ case 0:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case MIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_vmidr();
+ break;
+ case CTR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_ctr();
+ break;
+ case TCMTR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_tcmtr();
+ break;
+ case TLBTR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_tlbtr();
+ break;
+ case MPIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_vmpidr();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 1:
+ switch (Op2) {
+ case ID_PFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_pfr0();
+ break;
+ case ID_PFR1:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_pfr1();
+ break;
+ case ID_DFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_dfr0();
+ break;
+ case ID_AFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_afr0();
+ break;
+ case ID_MMFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr0();
+ break;
+ case ID_MMFR1:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr1();
+ break;
+ case ID_MMFR2:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr2();
+ break;
+ case ID_MMFR3:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr3();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 2:
+ switch (Op2) {
+ case ID_ISAR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar0();
+ break;
+ case ID_ISAR1:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar1();
+ break;
+ case ID_ISAR2:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar2();
+ break;
+ case ID_ISAR3:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar3();
+ break;
+ case ID_ISAR4:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar4();
+ break;
+ case ID_ISAR5:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar5();
+ break;
+ default:
+ /* RAZ */
+ regs->r[Rt] = 0x0;
+ }
+ break;
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ if (write)
+ goto error;
+ /* RAZ */
+ regs->r[Rt] = 0x0;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 1:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case CCSIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] =
+ target_cache_geometry[cpu_id].ccsidr
+ [get_cache_level
+ (target_cache_geometry
+ [cpu_id].csselr)];
+ break;
+ case CLIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] =
+ target_cache_geometry[cpu_id].clidr;
+ break;
+ case AIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_aidr();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 2:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case CSSELR:
+ if (write)
+ target_cache_geometry
+ [cpu_id].csselr =
+ regs->r[Rt];
+ else
+ regs->r[Rt] =
+ target_cache_geometry
+ [cpu_id].csselr;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case CRN_C7:
+ switch (Op1) {
+ case 0:
+ switch (CRm) {
+ case 6:
+ switch (Op2) {
+ case DCISW:
+ {
+ if (!write)
+ goto error;
+ handle_cm_op(regs->r[Rt],
+ dcisw,
+ &host_cache_geometry
+ [cpu_id],
+ &target_cache_geometry
+ [cpu_id],
+ &cache_delta
+ [cpu_id][0]);
+ break;
+ }
+ default:
+ goto error;
+ }
+ break;
+ case 10:
+ switch (Op2) {
+ case DCCSW:
+ {
+ if (!write)
+ goto error;
+ handle_cm_op(regs->r[Rt],
+ dccsw,
+ &host_cache_geometry
+ [cpu_id],
+ &target_cache_geometry
+ [cpu_id],
+ &cache_delta
+ [cpu_id][0]);
+ break;
+ }
+ default:
+ goto error;
+ }
+ break;
+ case 14:
+ switch (Op2) {
+ case DCCISW:
+ {
+ if (!write)
+ goto error;
+ handle_cm_op(regs->r[Rt],
+ dccisw,
+ &host_cache_geometry
+ [cpu_id],
+ &target_cache_geometry
+ [cpu_id],
+ &cache_delta
+ [cpu_id][0]);
+ break;
+ }
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case CRN_C9:
+ switch (Op1) {
+ case 1:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case 2:
+ /*
+ * A write to the L2CTLR register means trouble
+ * as the A7 version does not have all the fields
+ * that the A15 has. Handling needs more thought
+ */
+ if (write) {
+ printf
+ ("%s: Unexpected L2CTLR write \n",
+ __FUNCTION__);
+ goto error;
+ }
- regs->r[Rt] &= ~(0x3 << 24);
- regs->r[Rt] |= (num_cpus - 1) << 24;
- } else {
- regs->r[Rt] = read_l2ctlr();
- }
- break;
- case 3:
- /*
- * A write to the L2ECTLR register means trouble
- * as it does not exist on A7. Handling needs more
- * thought
- */
- if (write) {
- printf("%s: Unexpected L2ECTLR write \n",
- __FUNCTION__);
- goto error;
- } else {
- regs->r[Rt] = read_l2ectlr();
- }
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
+ /*
+ * A read of the L2CTLR should return the total number
+ * of cpus across both the clusters in the "always on"
+ * configuration. Since there are only 2 bits for the
+ * number of cpus in the L2CTLR we need to flag any
+ * system with > 4 cpus.
+ */
+ if (!switcher) {
+ unsigned num_cpus =
+ CLUSTER_CPU_COUNT
+ (host_cluster)
+ +
+ CLUSTER_CPU_COUNT
+ (!host_cluster);
- /*
- * Support for accesses to the PMON space. Its not been
- * verified whether all the registers are readable &
- * writable. But then, execution will never reach here
- * if a reg is inaccessible. It will be a undef abort
- * instead.
- */
- case 0:
- switch (CRm) {
- case 14:
- switch (Op2) {
- case 0:
- if(write)
- write_pmuserenr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmuserenr();
- break;
- case 1:
- if(write)
- write_pmintenset(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmintenset();
- break;
- case 2:
- if(write)
- write_pmintenclr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmintenclr();
- break;
- case 3:
- if(write)
- write_pmovsset(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmovsset();
- break;
- default:
- goto error;
- }
- break;
-
- case 13:
- switch (Op2) {
- case 0:
- if(write)
- write_pmccntr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmccntr();
- break;
- case 1:
- if(write)
- write_pmxevtyper(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmxevtyper();
- break;
- case 2:
- if(write)
- write_pmxevcntr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmxevcntr();
- break;
- default:
- goto error;
- }
- break;
-
- case 12:
- switch (Op2) {
- case 0:
- if(write)
- write_pmcr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmcr();
- break;
- case 1:
- if(write)
- write_pmcntenset(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmcntenset();
- break;
- case 2:
- if(write)
- write_pmcntenclr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmcntenclr();
- break;
- case 3:
- if(write)
- write_pmovsr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmovsr();
- break;
- case 4:
- if(write)
- write_pmswinc(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmswinc();
- break;
- case 5:
- if(write)
- write_pmselr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmselr();
- break;
- case 6:
- if(write)
- write_pmceid0(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmceid0();
- break;
- case 7:
- if(write)
- write_pmceid1(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmceid1();
- break;
- default:
- goto error;
- }
- break;
- }
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
+ if (num_cpus > 4) {
+ printf
+ ("%s: Unexpected L2CTLR read \n",
+ __FUNCTION__);
+ goto error;
+ }
- return;
+ regs->r[Rt] &= ~(0x3 << 24);
+ regs->r[Rt] |=
+ (num_cpus - 1) << 24;
+ } else {
+ regs->r[Rt] = read_l2ctlr();
+ }
+ break;
+ case 3:
+ /*
+ * A write to the L2ECTLR register means trouble
+ * as it does not exist on A7. Handling needs more
+ * thought
+ */
+ if (write) {
+ printf
+ ("%s: Unexpected L2ECTLR write \n",
+ __FUNCTION__);
+ goto error;
+ } else {
+ regs->r[Rt] = read_l2ectlr();
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+ /*
+		 * Support for accesses to the PMON space. It has not been
+		 * verified whether all the registers are readable &
+		 * writable. But then, execution will never reach here
+		 * if a reg is inaccessible. It will be an undef abort
+		 * instead.
+ */
+ case 0:
+ switch (CRm) {
+ case 14:
+ switch (Op2) {
+ case 0:
+ if (write)
+ write_pmuserenr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmuserenr();
+ break;
+ case 1:
+ if (write)
+ write_pmintenset(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmintenset();
+ break;
+ case 2:
+ if (write)
+ write_pmintenclr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmintenclr();
+ break;
+ case 3:
+ if (write)
+ write_pmovsset(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmovsset();
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+ case 13:
+ switch (Op2) {
+ case 0:
+ if (write)
+ write_pmccntr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmccntr();
+ break;
+ case 1:
+ if (write)
+ write_pmxevtyper(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmxevtyper();
+ break;
+ case 2:
+ if (write)
+ write_pmxevcntr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmxevcntr();
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+ case 12:
+ switch (Op2) {
+ case 0:
+ if (write)
+ write_pmcr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmcr();
+ break;
+ case 1:
+ if (write)
+ write_pmcntenset(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmcntenset();
+ break;
+ case 2:
+ if (write)
+ write_pmcntenclr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmcntenclr();
+ break;
+ case 3:
+ if (write)
+ write_pmovsr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmovsr();
+ break;
+ case 4:
+ if (write)
+ write_pmswinc(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmswinc();
+ break;
+ case 5:
+ if (write)
+ write_pmselr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmselr();
+ break;
+ case 6:
+ if (write)
+ write_pmceid0(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmceid0();
+ break;
+ case 7:
+ if (write)
+ write_pmceid1(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmceid1();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+
+ return;
error:
- printf("%s: Unexpected cp15 instruction", __FUNCTION__);
- printf(" : %s", write ? "MCR p15" : "MRC p15");
- printf(", %d, %d, %d, %d, %d \n", Op1, Rt, CRn, CRm, Op2);
- panic();
+ printf("%s: Unexpected cp15 instruction", __FUNCTION__);
+ printf(" : %s", write ? "MCR p15" : "MRC p15");
+ printf(", %d, %d, %d, %d, %d \n", Op1, Rt, CRn, CRm, Op2);
+ panic();
}
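As a reading aid for the trap handler above: the Op1, Op2, CRn, CRm, Rt and write values it switches on come out of the HSR ISS field for trapped MCR/MRC accesses. The sketch below shows one way to unpack them, following the ARMv7 Virtualization Extensions encoding in the ARM ARM; the helper name and decoding are illustrative and not copied from this file.

/*
 * Illustrative decode of a trapped 32-bit CP15 access (HSR.EC == 0x3).
 * ISS layout for MCR/MRC traps: Opc2 [19:17], Opc1 [16:14], CRn [13:10],
 * Rt [9:5], CRm [4:1], Direction [0] (0 = MCR/guest write, 1 = MRC/read).
 */
static void decode_cp15_trap(unsigned hsr, unsigned *Op1, unsigned *Op2,
			     unsigned *CRn, unsigned *CRm, unsigned *Rt,
			     unsigned *write)
{
	*Op2 = (hsr >> 17) & 0x7;
	*Op1 = (hsr >> 14) & 0x7;
	*CRn = (hsr >> 10) & 0xf;
	*Rt = (hsr >> 5) & 0x1f;
	*CRm = (hsr >> 1) & 0xf;
	*write = !(hsr & 0x1);	/* Direction clear => MCR, i.e. a guest write */
}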
void trap_dabort_handle(unsigned hsr, gp_regs * regs)
{
- unsigned hdfar = 0x0, hpfar = 0x0, pa = 0x0, *data = 0x0;
- unsigned write = 0x0;
+ unsigned hdfar = 0x0, hpfar = 0x0, pa = 0x0, *data = 0x0;
+ unsigned write = 0x0;
- hdfar = read_hdfar();
- hpfar = read_hpfar();
+ hdfar = read_hdfar();
+ hpfar = read_hpfar();
- pa = ((hpfar >> 4) << 12) + (hdfar & 0xfff);
- data = &regs->r[(hsr >> 16) & 0xf];
- write = (hsr >> 6) & 0x1;
+ pa = ((hpfar >> 4) << 12) + (hdfar & 0xfff);
+ data = &regs->r[(hsr >> 16) & 0xf];
+ write = (hsr >> 6) & 0x1;
- /* Only distributor accesses are virtualised at the moment */
- if ((pa & ~0xfff) == GIC_ID_PHY_BASE) {
- handle_vgic_distif_abort(pa, data, write);
- }
+ /* Only distributor accesses are virtualised at the moment */
+ if ((pa & ~0xfff) == GIC_ID_PHY_BASE) {
+ handle_vgic_distif_abort(pa, data, write);
+ }
- return;
+ return;
}
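trap_dabort_handle above rebuilds the faulting physical address from HPFAR, which reports bits [39:12] of the intermediate physical address in its bits [31:4], plus the page offset taken from HDFAR. A worked example with made-up values, assuming purely for illustration that GIC_ID_PHY_BASE is 0x2c001000:

/*
 * Illustrative only: a guest store to the GIC distributor at 0x2c001100
 * faults at stage 2.
 *
 *   hdfar = 0x2c001100      faulting VA, page offset 0x100
 *   hpfar = 0x002c0010      IPA[39:12] = 0x2c001, reported in bits [31:4]
 *
 *   pa = ((hpfar >> 4) << 12) + (hdfar & 0xfff)
 *      = (0x2c001 << 12) + 0x100 = 0x2c001100
 *
 * (pa & ~0xfff) == GIC_ID_PHY_BASE, so the access is forwarded to
 * handle_vgic_distif_abort(); any other faulting address is ignored.
 */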
void HandleVirtualisor(gp_regs * regs)
{
- unsigned cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr()), rc = 0;
- unsigned hsr = read_hsr(), elr = 0, vd_len = 0, index = 0;
- virt_descriptor *vd_array = &virt_desc_section$$Base;
- unsigned (*handler) (gp_regs *, unsigned, unsigned) = 0x0, sibling;
-
- /* Find our brother from another mother */
- sibling = find_sibling_cpu();
-
- /*
- * Perform the generic trap handling
- */
- switch (hsr >> 26) {
- case TRAP_DABORT:
- trap_dabort_handle(hsr, regs);
- break;
- case TRAP_CP15_32:
- trap_cp15_mrc_mcr_handle(hsr, regs);
- break;
- default:
- printf("%s: Unexpected trap", __FUNCTION__);
- printf(": HSR=0x%x Regs=0x%x \n", hsr, (unsigned) regs);
- panic();
- }
+ unsigned cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr()), rc = 0;
+ unsigned hsr = read_hsr(), elr = 0, vd_len = 0, index = 0;
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (gp_regs *, unsigned, unsigned) = 0x0, sibling;
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ /*
+ * Perform the generic trap handling
+ */
+ switch (hsr >> 26) {
+ case TRAP_DABORT:
+ trap_dabort_handle(hsr, regs);
+ break;
+ case TRAP_CP15_32:
+ trap_cp15_mrc_mcr_handle(hsr, regs);
+ break;
+ default:
+ printf("%s: Unexpected trap", __FUNCTION__);
+ printf(": HSR=0x%x Regs=0x%x \n", hsr, (unsigned)regs);
+ panic();
+ }
+
+ /*
+ * Do any cpu specific trap handling.
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
- /*
- * Do any cpu specific trap handling.
- */
- vd_len = (unsigned)&virt_desc_section$$Length;
- for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
-
- if (cpu_no == vd_array[index].cpu_no) {
- handler = vd_array[index].trap_handle;
- if(handler) {
- rc = handler(regs, hsr, sibling);
- if (rc) {
- printf("%s: failed on cpu%d \n",
- __FUNCTION__,
- cpu_no);
- goto out;
- }
- }
- }
- }
+ if (cpu_no == vd_array[index].cpu_no) {
+ handler = vd_array[index].trap_handle;
+ if (handler) {
+ rc = handler(regs, hsr, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__, cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
- /*
- * This is a trap of the kind where we simply move
- * onto the next instruction in the actual program.
- * Move by 2 bytes if we came from Thumb mode else
- * by 4 bytes.
- */
- elr = ((vm_context *) regs)->elr_hyp;
- if (hsr & (1 << 25))
- elr += 4;
- else
- elr += 2;
- ((vm_context *) regs)->elr_hyp = elr;
+ /*
+	 * This is a trap of the kind where we simply move
+	 * on to the next instruction in the guest. Advance
+	 * by 4 bytes if the trapped instruction was 32-bit
+	 * (HSR.IL set), else by 2 bytes (16-bit Thumb).
+ */
+ elr = ((vm_context *) regs)->elr_hyp;
+ if (hsr & (1 << 25))
+ elr += 4;
+ else
+ elr += 2;
+ ((vm_context *) regs)->elr_hyp = elr;
out:
- if (rc) {
- printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
- __FUNCTION__, cpu_id, cpu_no, sibling);
- panic();
- }
+ if (rc) {
+		printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
- return;
+ return;
}
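Both HandleVirtualisor above and SetupVirtualisor in virt_setup.c below walk the linker-collected array bounded by virt_desc_section$$Base/$$Length for cpu-specific hooks. A hypothetical descriptor is sketched here to show how an entry would plug in; only the field names actually referenced by the generic code (cpu_no, trap_setup, trap_handle, trap_restore, init) are used, and the section name and designated-initialiser style are assumptions rather than copies of the A15/A7 sources.

/* Hypothetical cpu-specific hooks; signatures follow the generic callers. */
static unsigned a15_trap_setup(unsigned first_cpu, unsigned sibling);
static unsigned a15_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling);
static unsigned a15_trap_restore(unsigned first_cpu, unsigned sibling);

static virt_descriptor a15_virt_desc
    __attribute__ ((section("virt_desc_section"))) = {
	.cpu_no = A15,
	.trap_setup = a15_trap_setup,
	.trap_handle = a15_trap_handle,
	.trap_restore = a15_trap_restore,
};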
diff --git a/big-little/virtualisor/virt_setup.c b/big-little/virtualisor/virt_setup.c
index 8496765..d35adc6 100644
--- a/big-little/virtualisor/virt_setup.c
+++ b/big-little/virtualisor/virt_setup.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virt_helpers.h"
#include "virtualisor.h"
@@ -35,11 +35,9 @@ cache_geometry target_cache_geometry[NUM_CPUS];
/* Cache geometry differences for each cpu at each level */
cache_diff cache_delta[NUM_CPUS][MAX_CACHE_LEVELS];
-static mem_trap_data svgic_distif_trap
-__attribute__ ((section("s2_trap_section"))) = {
- 0, 0x0, 0x0, 0x0, 0x0, 0x0,
-};
-
+static mem_trap_data svgic_distif_trap
+    __attribute__ ((section("s2_trap_section"))) = {
+	0, 0x0, 0x0, 0x0, 0x0, 0x0,
+};
/*
* Flags which indicate whether the cpu independent
@@ -55,191 +53,192 @@ static unsigned virt_init[NUM_CPUS];
*/
unsigned find_sibling_cpu()
{
- unsigned cpu_no = PART_NO(read_midr());
-
- switch (DC_SYSTYPE) {
- case A15_A15:
- if(cpu_no == A15)
- return cpu_no;
- break;
- case A7_A15:
- case A15_A7:
- if(cpu_no == A15)
- return A7;
- else if(cpu_no == A7)
- return A15;
- else
- break;
- }
-
- printf("Unsupported Dual cluster system : 0x%x\n", DC_SYSTYPE);
- panic();
-
- return 0;
+ unsigned cpu_no = PART_NO(read_midr());
+
+ switch (DC_SYSTYPE) {
+ case A15_A15:
+ if (cpu_no == A15)
+ return cpu_no;
+ break;
+ case A7_A15:
+ case A15_A7:
+ if (cpu_no == A15)
+ return A7;
+ else if (cpu_no == A7)
+ return A15;
+ else
+ break;
+ }
+
+ printf("Unsupported Dual cluster system : 0x%x\n", DC_SYSTYPE);
+ panic();
+
+ return 0;
}
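For orientation, the A15/A7 constants compared against PART_NO(read_midr()) above correspond to the architectural primary part numbers, 0xC0F for Cortex-A15 and 0xC07 for Cortex-A7; the concrete MIDR value below is only an example.

/*
 * Example (illustrative value): on a Cortex-A7 r0p5 the MIDR reads as
 * 0x410fc075, so PART_NO(read_midr()) == 0xc07. With DC_SYSTYPE set to
 * A7_A15 or A15_A7 the function then returns 0xc0f, i.e. the cpu type
 * hosted on the other cluster.
 */
unsigned sibling = find_sibling_cpu();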
void SetupVirtualisor(unsigned first_cpu)
{
- unsigned rc = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
- unsigned vd_len = 0, index = 0, cluster_id = read_clusterid();
- virt_descriptor *vd_array = &virt_desc_section$$Base;
- unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
- unsigned sibling_cpuid = 0, abs_cpuid = 0;
-
- if (!switcher) {
- sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
- abs_cpuid = abs_cpuid(cpu_id, cluster_id);
- }
-
- /* Find our brother from another mother */
- sibling = find_sibling_cpu();
-
- /*
- * Do the generic trap setup
- */
- if (virt_init[cpu_id] == FALSE) {
-
- /*
- * In the "always-on" configuration, both clusters have
- * ensure that the L2CTLR register includes the cpu count
- * of both the clusters while reporting the number of
- * secondary cpus. So setup the necessary trap.
- */
- if (!switcher) {
- /*
- * Enable traps to CRn = 9 cp15 space
- */
- write_hstr(read_hstr() | (1 << 9));
- }
-
- /*
- * Cache geometry of each cpu on the host cluster needs
- * to be virtualised if the cpu type is different from
- * that on the target cluster. This can be done generic-
- * ally.
- */
- if (cpu_no != sibling) {
- rc = map_cache_geometries(&host_cache_geometry[cpu_id],
- &target_cache_geometry[cpu_id],
- &cache_delta[cpu_id][0]);
- if (rc) {
- printf("%s: Failed to map cache geometries \n", __FUNCTION__);
- rc = 1;
- goto out;
- }
-
- }
-
-
- /*
- * Irrespective of what cpu types are present in the
- * dual cluster system, the host cluster has to trap
- * accesses to the vgic distributor when switching.
- */
- if (switcher && cluster_id == host_cluster) {
- if (cpu_id == first_cpu) {
- rc = mem_trap_setup(GIC_ID_PHY_BASE, &svgic_distif_trap);
- if (rc) {
- printf("%s: svgic distif trap setup failed \n",
- __FUNCTION__);
- goto out;
- }
- }
- }
-
-
- /*
- * If the two clusters have different cpu types, then the
- * target saves its midr and the host uses the value to
- * virtualise its midr.
- * mpidr is virtualised on the host cluster whether we are
- * running "always on" or "switching". The latter cares
- * about the cluster id while the former cares about the
- * cpu ids as well.
- */
- if (cluster_id != host_cluster) {
- host_virt_regs[cpu_id].mpidr = read_mpidr();
- if (cpu_no != sibling)
- host_virt_regs[cpu_id].midr = read_midr();
- if (!switcher) {
- /*
- * Send a signal to the host to indicate
- * that the regs is ready to be read. The
- * cpu id is the absolute cpu number across
- * clusters.
- */
- set_event(VID_REGS_DONE, sibling_cpuid);
- }
- } else {
- if (!switcher) {
- /*
- * Wait for the target to read its regs
- * before using them.
- */
- wait_for_event(VID_REGS_DONE, abs_cpuid);
- reset_event(VID_REGS_DONE, abs_cpuid);
-
- /*
- * Add number of cpus in the target cluster to
- * the cpuid of this cpu.
- */
- host_virt_regs[cpu_id].mpidr += CLUSTER_CPU_COUNT(!host_cluster);
- }
- write_vmpidr(host_virt_regs[cpu_id].mpidr);
- if (cpu_no != sibling)
- write_vmidr(host_virt_regs[cpu_id].midr);
- }
-
- if (cluster_id == host_cluster) {
- /*
- * Assuming that with the switcher, the host always
- * runs after the target. So, if we are here then
- * the target must have completed its initialisation
- *
- * In the other case, if we are here after exchanging
- * the events above, then the target has finished
- * initialising.
- */
- virt_init[cpu_id] = 1;
- }
-
- } else {
- if (switcher)
- RestoreVirtualisor(first_cpu);
- }
-
-
- /*
- * Do the cpu specific initialisation (if any)
- */
- vd_len = (unsigned)&virt_desc_section$$Length;
- for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
-
- if (cpu_no == vd_array[index].cpu_no) {
- /* If not initialised then setup else restore*/
- if (vd_array[index].init[cpu_id] == 0)
- handler = vd_array[index].trap_setup;
- else
- handler = vd_array[index].trap_restore;
-
- if(handler) {
- rc = handler(first_cpu, sibling);
- if (rc) {
- printf("%s: failed on cpu%d \n",
- __FUNCTION__,
- cpu_no);
- goto out;
- }
- }
- }
- }
+ unsigned rc = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
+ unsigned vd_len = 0, index = 0, cluster_id = read_clusterid();
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
+ unsigned sibling_cpuid = 0, abs_cpuid = 0;
+
+ if (!switcher) {
+ sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
+ abs_cpuid = abs_cpuid(cpu_id, cluster_id);
+ }
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ /*
+ * Do the generic trap setup
+ */
+ if (virt_init[cpu_id] == FALSE) {
+
+ /*
+		 * In the "always-on" configuration, both clusters have to
+		 * ensure that the L2CTLR register includes the cpu count
+		 * of both clusters when reporting the number of
+		 * secondary cpus. So set up the necessary trap.
+ */
+ if (!switcher) {
+ /*
+ * Enable traps to CRn = 9 cp15 space
+ */
+ write_hstr(read_hstr() | (1 << 9));
+ }
+
+ /*
+ * Cache geometry of each cpu on the host cluster needs
+ * to be virtualised if the cpu type is different from
+		 * that on the target cluster. This can be done
+		 * generically.
+ */
+ if (cpu_no != sibling) {
+ rc = map_cache_geometries(&host_cache_geometry[cpu_id],
+ &target_cache_geometry
+ [cpu_id],
+ &cache_delta[cpu_id][0]);
+ if (rc) {
+ printf("%s: Failed to map cache geometries \n",
+ __FUNCTION__);
+ rc = 1;
+ goto out;
+ }
+
+ }
+
+ /*
+ * Irrespective of what cpu types are present in the
+ * dual cluster system, the host cluster has to trap
+ * accesses to the vgic distributor when switching.
+ */
+ if (switcher && cluster_id == host_cluster) {
+ if (cpu_id == first_cpu) {
+ rc = mem_trap_setup(GIC_ID_PHY_BASE,
+ &svgic_distif_trap);
+ if (rc) {
+ printf
+ ("%s: svgic distif trap setup failed \n",
+ __FUNCTION__);
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * If the two clusters have different cpu types, then the
+ * target saves its midr and the host uses the value to
+ * virtualise its midr.
+ * mpidr is virtualised on the host cluster whether we are
+ * running "always on" or "switching". The latter cares
+ * about the cluster id while the former cares about the
+ * cpu ids as well.
+ */
+ if (cluster_id != host_cluster) {
+ host_virt_regs[cpu_id].mpidr = read_mpidr();
+ if (cpu_no != sibling)
+ host_virt_regs[cpu_id].midr = read_midr();
+ if (!switcher) {
+ /*
+ * Send a signal to the host to indicate
+				 * that the regs are ready to be read. The
+ * cpu id is the absolute cpu number across
+ * clusters.
+ */
+ set_event(VID_REGS_DONE, sibling_cpuid);
+ }
+ } else {
+ if (!switcher) {
+ /*
+ * Wait for the target to read its regs
+ * before using them.
+ */
+ wait_for_event(VID_REGS_DONE, abs_cpuid);
+ reset_event(VID_REGS_DONE, abs_cpuid);
+
+ /*
+				 * Add the number of cpus in the target cluster to
+ * the cpuid of this cpu.
+ */
+ host_virt_regs[cpu_id].mpidr +=
+ CLUSTER_CPU_COUNT(!host_cluster);
+ }
+ write_vmpidr(host_virt_regs[cpu_id].mpidr);
+ if (cpu_no != sibling)
+ write_vmidr(host_virt_regs[cpu_id].midr);
+ }
+
+ if (cluster_id == host_cluster) {
+ /*
+			 * With the switcher, the host always runs after
+			 * the target. So, if we are here then the target
+			 * must have completed its initialisation.
+ *
+ * In the other case, if we are here after exchanging
+ * the events above, then the target has finished
+ * initialising.
+ */
+ virt_init[cpu_id] = 1;
+ }
+
+ } else {
+ if (switcher)
+ RestoreVirtualisor(first_cpu);
+ }
+
+ /*
+ * Do the cpu specific initialisation (if any)
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
+
+ if (cpu_no == vd_array[index].cpu_no) {
+ /* If not initialised then setup else restore */
+ if (vd_array[index].init[cpu_id] == 0)
+ handler = vd_array[index].trap_setup;
+ else
+ handler = vd_array[index].trap_restore;
+
+ if (handler) {
+ rc = handler(first_cpu, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__, cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
out:
- if (rc) {
- printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
- __FUNCTION__, cpu_id, cpu_no, sibling);
- panic();
- }
+ if (rc) {
+		printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
- return;
+ return;
}
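To make the always-on MPIDR handshake in SetupVirtualisor easier to follow, here is the sequence as seen from one cpu index present on both clusters, with purely illustrative values: two cpus per cluster, the A7 cluster assumed to be cluster 1, and the A15 cluster acting as host_cluster.

/*
 * Illustrative timeline for cpu index 1 (values are examples only):
 *
 *   target (A7) cpu1:  host_virt_regs[1].mpidr = read_mpidr();   e.g. 0x80000101
 *                      set_event(VID_REGS_DONE, sibling_cpuid);
 *
 *   host (A15) cpu1:   wait_for_event(VID_REGS_DONE, abs_cpuid);
 *                      reset_event(VID_REGS_DONE, abs_cpuid);
 *                      host_virt_regs[1].mpidr +=
 *                          CLUSTER_CPU_COUNT(!host_cluster);      -> 0x80000103
 *                      write_vmpidr(0x80000103);
 *
 * Under these assumed values the A15 cpus report mpidr 0x80000102/0x80000103,
 * so together with the unvirtualised A7 cpus the guest observes a single
 * four-cpu cluster.
 */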