Diffstat (limited to 'big-little')
-rwxr-xr-x  big-little/Makefile                             | 227
-rw-r--r--  big-little/bl-sec.scf.template                  |  53
-rw-r--r--  big-little/bl.scf.template                      |  37
-rw-r--r--  big-little/common/cci.c                         |  57
-rw-r--r--  big-little/common/hyp_setup.c                   | 107
-rw-r--r--  big-little/common/hyp_vectors.s                 | 399
-rw-r--r--  big-little/common/pagetable_setup.c             | 442
-rw-r--r--  big-little/common/vgic_handle.c                 | 210
-rw-r--r--  big-little/common/vgic_setup.c                  |  82
-rw-r--r--  big-little/common/vgiclib.c                     | 498
-rw-r--r--  big-little/include/arm.h                        |  70
-rw-r--r--  big-little/include/bakery.h                     |  53
-rw-r--r--  big-little/include/bl.h                         |  47
-rw-r--r--  big-little/include/context.h                    | 133
-rw-r--r--  big-little/include/events.h                     |  78
-rw-r--r--  big-little/include/gic_registers.h              | 102
-rw-r--r--  big-little/include/handler.h                    |  32
-rw-r--r--  big-little/include/hvc.h                        |  44
-rw-r--r--  big-little/include/hyp_types.h                  |  38
-rw-r--r--  big-little/include/hyp_vmmap.h                  |  42
-rw-r--r--  big-little/include/int_master.h                 |  50
-rw-r--r--  big-little/include/misc.h                       | 404
-rw-r--r--  big-little/include/traps.h                      | 102
-rw-r--r--  big-little/include/vgiclib.h                    |  51
-rw-r--r--  big-little/include/virt_helpers.h               |  98
-rw-r--r--  big-little/lib/bakery.c                         |  71
-rw-r--r--  big-little/lib/tube.c                           |  58
-rw-r--r--  big-little/lib/uart.c                           | 125
-rw-r--r--  big-little/lib/virt_events.c                    | 120
-rw-r--r--  big-little/lib/virt_helpers.s                   | 442
-rw-r--r--  big-little/secure_world/events.c                |  95
-rw-r--r--  big-little/secure_world/flat_pagetable.s        | 119
-rw-r--r--  big-little/secure_world/monmode_vectors.s       | 391
-rw-r--r--  big-little/secure_world/secure_context.c        | 210
-rw-r--r--  big-little/secure_world/secure_resets.c         | 292
-rw-r--r--  big-little/secure_world/secure_world.h          |  87
-rw-r--r--  big-little/secure_world/ve_reset_handler.s      |  58
-rw-r--r--  big-little/switcher/context/gic.c               | 264
-rw-r--r--  big-little/switcher/context/ns_context.c        | 295
-rw-r--r--  big-little/switcher/context/sh_vgic.c           | 225
-rw-r--r--  big-little/switcher/trigger/async_switchover.c  | 290
-rw-r--r--  big-little/switcher/trigger/handle_switchover.s |  61
-rw-r--r--  big-little/switcher/trigger/sync_switchover.c   |  64
-rw-r--r--  big-little/virtualisor/cache_geom.c             | 443
-rw-r--r--  big-little/virtualisor/cpus/a15/a15.c           |  73
-rw-r--r--  big-little/virtualisor/cpus/a15/include/a15.h   |  26
-rw-r--r--  big-little/virtualisor/cpus/a7/a7.c             |  73
-rw-r--r--  big-little/virtualisor/cpus/a7/include/a7.h     |  26
-rw-r--r--  big-little/virtualisor/include/cache_geom.h     | 107
-rw-r--r--  big-little/virtualisor/include/mem_trap.h       |  53
-rw-r--r--  big-little/virtualisor/include/virtualisor.h    |  84
-rw-r--r--  big-little/virtualisor/mem_trap.c               | 132
-rw-r--r--  big-little/virtualisor/vgic_trap_handler.c      |  82
-rw-r--r--  big-little/virtualisor/virt_context.c           | 232
-rw-r--r--  big-little/virtualisor/virt_handle.c            | 589
-rw-r--r--  big-little/virtualisor/virt_setup.c             | 245
56 files changed, 8888 insertions, 0 deletions
diff --git a/big-little/Makefile b/big-little/Makefile
new file mode 100755
index 0000000..7800148
--- /dev/null
+++ b/big-little/Makefile
@@ -0,0 +1,227 @@
+#
+# Copyright (c) 2011, ARM Limited. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with
+# or without modification, are permitted provided that the
+# following conditions are met:
+#
+# Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the
+# following disclaimer.
+#
+# Redistributions in binary form must reproduce the
+# above copyright notice, this list of conditions and
+# the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its
+# contributors may be used to endorse or promote products
+# derived from this software without specific prior written
+# permission.
+#
+
+# Decrease the verbosity of the make output.
+# It can be made verbose by passing V=1 on the make command line.
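+# For example, running "make V=1" prints the full armcc/armasm/armlink command
+# lines in addition to the short " CC"/" AS"/" LD" progress messages.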
+ifdef V
+ KBUILD_VERBOSE = $(V)
+else
+ KBUILD_VERBOSE = 0
+endif
+
+ifeq "$(KBUILD_VERBOSE)" "0"
+ Q=@
+else
+ Q=
+endif
+
+HIBASE?=0x8ff
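+# HIBASE is substituted textually into the scatter-file templates by the
+# "sed -e s/HIBASE/.../" steps in the link rules below; e.g. with the default
+# HIBASE=0x8ff, the template address HIBASE0D000 becomes 0x8ff0D000.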
+DEBUG=TRUE
+
+###############################################################################################################
+# Shared/External Virtual GIC defines
+SVGIC_OBJS += sh_vgic.o
+###############################################################################################################
+# Switcher defines
+SWITCHER ?= TRUE
+ASYNC ?= TRUE
+HYP_TIMERS ?= TRUE
+RAND_ASYNC ?= FALSE
+HOST_CLUSTER ?= 1
+FLUSH_OB_L2 ?= TRUE
+FLUSH_L2_FIX ?= FALSE
+TUBE ?= FALSE
+FM_BETA ?= TRUE
+
+ifeq ($(SWITCHER), TRUE)
+ifeq ($(ASYNC), FALSE)
+HYP_TIMERS = FALSE
+endif
+
+vpath %.c switcher switcher/trigger switcher/context common/ lib/ secure_world/ ../acsr
+vpath %.s switcher switcher/trigger switcher/context common/ lib/ secure_world/ ../acsr
+
+SWITCHER_OBJS = ns_context.o hyp_setup.o pagetable_setup.o virt_helpers.o sync_switchover.o \
+ vgiclib.o vgic_handle.o uart.o v7.o gic.o handle_switchover.o tube.o \
+ virt_events.o bakery.o vgic_setup.o async_switchover.o hyp_vectors.o helpers.o
+
+SECURE_ENTRY_POINT = monmode_vector_table
+
+SECURE_OBJS += secure_context.o monmode_vectors.o flat_pagetable.o virt_helpers.o virt_events.o \
+ secure_resets.o bakery.o tube.o helpers.o
+
+SECURE_ASFLAGS = --apcs /inter --cpu=Eagle --keep --fpu=none --pd "FM_BETA SETL {$(FM_BETA)}"
+
+SECURE_CFLAGS = -Iinclude -I. -Isecure_world -I../acsr --cpu=Eagle --fpu=none -O2 \
+ -DHOST_CLUSTER=$(HOST_CLUSTER) -DSWITCHER=$(SWITCHER) \
+ -DFLUSH_OB_L2=$(FLUSH_OB_L2) -DTUBE=$(TUBE) -DFLUSH_L2_FIX=$(FLUSH_L2_FIX) -DFM_BETA=$(FM_BETA)
+
+SECURE_LDFLAGS = --verbose --map --fpu=none --symbols --noremove --datacompressor=off \
+ --entry $(SECURE_ENTRY_POINT) --scatter $(SECURE_MAPFILE) --predefine="-DFM_BETA=$(FM_BETA)"
+
+SWITCHER_ASFLAGS += --pd "ASYNC_SWITCH SETL {$(ASYNC)}" \
+ --pd "RAND_ASYNC SETL {$(RAND_ASYNC)}" \
+ --pd "HOST_CLUSTER SETA $(HOST_CLUSTER)" \
+ --pd "USE_HYP_TIMERS SETL {$(HYP_TIMERS)}" \
+ --pd "SWITCHER SETL {$(SWITCHER)}"
+
+SWITCHER_CFLAGS += -DASYNC_SWITCH=$(ASYNC) \
+ -DRAND_ASYNC=$(RAND_ASYNC) \
+ -DHOST_CLUSTER=$(HOST_CLUSTER) \
+ -DUSE_HYP_TIMERS=$(HYP_TIMERS) \
+ -DFLUSH_OB_L2=$(FLUSH_OB_L2) \
+ -DTUBE=$(TUBE) \
+ -DFLUSH_L2_FIX=$(FLUSH_L2_FIX) \
+ -DSWITCHER=$(SWITCHER)
+endif
+
+###############################################################################################################
+# Virtualisor defines
+CMOP_DEBUG ?= FALSE
+VIRTUALISOR_CFLAGS += -DCMOP_DEBUG=$(CMOP_DEBUG)
+VIRTUALISOR_ASFLAGS += --pd "CMOP_DEBUG SETL {$(CMOP_DEBUG)}"
+
+vpath %.c virtualisor virtualisor/cpus/a15 virtualisor/cpus/a7
+
+VIRTUALISOR_OBJS += virt_handle.o virt_setup.o virt_context.o cache_geom.o mem_trap.o vgic_trap_handler.o \
+ a7.o a15.o
+
+###############################################################################################################
+
+OBJS += cci.o
+
+ASFLAGS = --apcs /inter --cpu=Eagle --keep --fpu=none \
+ $(SWITCHER_ASFLAGS) \
+ $(VIRTUALISOR_ASFLAGS)
+
+CFLAGS = -Iinclude -I. -Ivirtualisor/include -Ivirtualisor/cpus/a7/include \
+ -Ivirtualisor/cpus/a15/include -I../acsr \
+ --cpu=Eagle --fpu=none -O2 $(SWITCHER_CFLAGS) $(VIRTUALISOR_CFLAGS)
+
+ifdef DEBUG
+CFLAGS += -g -O0
+ASFLAGS += -g
+SECURE_CFLAGS += -g -O0
+SECURE_ASFLAGS += -g
+endif
+
+LDFLAGS = --verbose --map --fpu=none --symbols --noremove --datacompressor=off --entry $(ENTRY_POINT) --scatter $(MAPFILE)
+OBJS += $(SWITCHER_OBJS) $(VIRTUALISOR_OBJS) $(SVGIC_OBJS)
+ENTRY_POINT = bl_setup
+LISTFILE = bl_syms.txt
+SECURE_LISTFILE = bl_sec_syms.txt
+MAPFILE = bl.scf
+SECURE_MAPFILE = bl-sec.scf
+
+CC = armcc
+AS = armasm
+AR = armar
+LD = armlink
+
+
+ifeq ($(FM_BETA), FALSE)
+all: bl.axf bl_sec.axf wboot.bin
+else
+all: bl.axf bl_sec.axf
+endif
+
+clean:
+ @echo " CLEAN"
+ $(Q)rm -rf *.zi
+ $(Q)rm -rf *.dump
+ $(Q)rm -rf *.bin
+ $(Q)rm -f *.axf
+ $(Q)rm -f *.o
+ $(Q)rm -f *.ar
+ $(Q)rm -f *.map
+ $(Q)rm -f *.scf
+ $(Q)rm -f $(LISTFILE)
+ $(Q)rm -f $(SECURE_LISTFILE)
+
+dump:
+ @echo " OBJDUMP"
+ fromelf --text -c bl.axf > bl.dump
+ fromelf --text -c bl_sec.axf > bl_sec.dump
+
+%.o: %.s
+ @echo " AS $<"
+ $(Q)$(AS) $(ASFLAGS) $< -o $@
+
+%.o: %.c
+ @echo " CC $<"
+ $(Q)$(CC) $(CFLAGS) -c $< -o $@
+
+bl.axf: $(OBJS)
+ $(Q)cat $(MAPFILE).template > $(MAPFILE)
+ $(Q)sed -i -e "s/HIBASE/${HIBASE}/g" $(MAPFILE)
+ @echo " LD $@"
+ $(Q)$(LD) $(LDFLAGS) --symdefs=bl_symdef.o $(OBJS) -o $@ > $(LISTFILE)
+
+ifeq ($(FM_BETA), FALSE)
+wboot.axf: ve_reset_handler.o
+ @echo " LD $@"
+ $(Q)$(LD) --ro-base=0x0 $< -o $@ > $(LISTFILE)
+
+wboot.bin: wboot.axf
+ @echo " BIN $@"
+ $(Q)fromelf --bin $< --output $@
+else
+SECURE_OBJS += ve_reset_handler.o
+wboot.bin:
+endif
+
+bl.ar: $(OBJS)
+ @echo " AR $@"
+ $(Q)$(AR) -r $@ $(OBJS)
+
+%.bin: %.axf
+ fromelf --bin $< --output $@
+
+ve_reset_handler.o: ve_reset_handler.s
+ @echo " AS $<"
+ $(Q)$(AS) $(SECURE_ASFLAGS) $< -o $@
+
+secure_resets.o: secure_resets.c
+ @echo " CC $<"
+ $(Q)$(CC) $(SECURE_CFLAGS) -c $< -o $@
+
+monmode_vectors.o: monmode_vectors.s
+ @echo " AS $<"
+ $(Q)$(AS) $(SECURE_ASFLAGS) $< -o $@
+
+flat_pagetable.o: flat_pagetable.s
+ @echo " AS $<"
+ $(Q)$(AS) $(SECURE_ASFLAGS) $< -o $@
+
+secure_context.o: secure_context.c
+ @echo " CC $<"
+ $(Q)$(CC) $(SECURE_CFLAGS) -c $< -o $@
+
+bl_sec.axf: $(SECURE_OBJS)
+ $(Q)cat $(SECURE_MAPFILE).template > $(SECURE_MAPFILE)
+ $(Q)sed -i -e "s/HIBASE/${HIBASE}/g" $(SECURE_MAPFILE)
+ $(Q)$(LD) $(SECURE_LDFLAGS) --symdefs=bl_sec_symdef.o $(SECURE_OBJS) -o $@ > $(SECURE_LISTFILE)
+
+bl_sec.ar: $(SECURE_OBJS)
+ $(Q)$(AR) -r $@ $(SECURE_OBJS)
+
+
diff --git a/big-little/bl-sec.scf.template b/big-little/bl-sec.scf.template
new file mode 100644
index 0000000..47aaec4
--- /dev/null
+++ b/big-little/bl-sec.scf.template
@@ -0,0 +1,53 @@
+#! armcc -E
+ ;
+ ; Copyright (c) 2011, ARM Limited. All rights reserved.
+ ;
+ ; Redistribution and use in source and binary forms, with
+ ; or without modification, are permitted provided that the
+ ; following conditions are met:
+ ;
+ ; Redistributions of source code must retain the above
+ ; copyright notice, this list of conditions and the
+ ; following disclaimer.
+ ;
+ ; Redistributions in binary form must reproduce the
+ ; above copyright notice, this list of conditions and
+ ; the following disclaimer in the documentation
+ ; and/or other materials provided with the distribution.
+ ;
+ ; Neither the name of ARM nor the names of its
+ ; contributors may be used to endorse or promote products
+ ; derived from this software without specific prior written
+ ; permission.
+ ;
+
+#define TRUE 1
+#define FALSE 0
+
+#if FM_BETA
+JUMP_LOAD 0x0 0x100
+{
+ JUMP_EXEC 0x0 0x100
+ {
+ ve_reset_handler.o (+RO, +RW, +ZI)
+ }
+}
+#endif
+
+LOAD_REGION1 HIBASE00000 NOCOMPRESS ALIGN 4096 65536
+{
+ BL_SEC_DV_PAGE +0x0 ALIGN 4096 4096
+ {
+ *(BL_SEC_DV_PAGE)
+ }
+
+ SEC_CODE HIBASE01000 FIXED
+ {
+ *(+RO)
+ }
+
+ SEC_DATA +0x0
+ {
+ *(+ZI,+RW)
+ }
+}
diff --git a/big-little/bl.scf.template b/big-little/bl.scf.template
new file mode 100644
index 0000000..c764526
--- /dev/null
+++ b/big-little/bl.scf.template
@@ -0,0 +1,37 @@
+ ;
+ ; Copyright (c) 2011, ARM Limited. All rights reserved.
+ ;
+ ; Redistribution and use in source and binary forms, with
+ ; or without modification, are permitted provided that the
+ ; following conditions are met:
+ ;
+ ; Redistributions of source code must retain the above
+ ; copyright notice, this list of conditions and the
+ ; following disclaimer.
+ ;
+ ; Redistributions in binary form must reproduce the
+ ; above copyright notice, this list of conditions and
+ ; the following disclaimer in the documentation
+ ; and/or other materials provided with the distribution.
+ ;
+ ; Neither the name of ARM nor the names of its
+ ; contributors may be used to endorse or promote products
+ ; derived from this software without specific prior written
+ ; permission.
+ ;
+
+LOAD_REGION1 HIBASE0D000 NOCOMPRESS ALIGN 4096 65536
+{
+ BL_DV_PAGE +0x0 ALIGN 4096 4096
+ {
+ *(BL_DV_PAGE)
+ }
+ BL_CODE HIBASE0E000 FIXED
+ {
+ *(+RO)
+ }
+ BL_DATA +0x0
+ {
+ *(+ZI,+RW)
+ }
+}
diff --git a/big-little/common/cci.c b/big-little/common/cci.c
new file mode 100644
index 0000000..8bf4b5f
--- /dev/null
+++ b/big-little/common/cci.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "misc.h"
+#include "virt_helpers.h"
+#include "hyp_types.h"
+
+void enable_cci_snoops(unsigned cluster_id)
+{
+	/* Turn on CCI snoops & DVM messages */
+ if (cluster_id)
+ write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+ else
+ write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+
+ dsb();
+
+ /* Wait for the dust to settle down */
+ while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
+
+ return;
+}
+
+void disable_cci_snoops(unsigned cluster_id)
+{
+ /* Turn off CCI snoops & DVM messages */
+ if (cluster_id)
+ write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
+ else
+ write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
+
+ dsb();
+
+ /* Wait for the dust to settle down */
+ while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
+
+ return;
+}
diff --git a/big-little/common/hyp_setup.c b/big-little/common/hyp_setup.c
new file mode 100644
index 0000000..d65aafc
--- /dev/null
+++ b/big-little/common/hyp_setup.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "context.h"
+#include "misc.h"
+#include "events.h"
+#include "virt_helpers.h"
+#include "virtualisor.h"
+#include "bl.h"
+
+extern unsigned vectors;
+extern system_context switcher_context;
+extern void SetupVirtExtPageTables(unsigned, unsigned);
+extern void Enable2ndStagePageTables(void);
+extern void monmode_setup(void);
+extern void config_uart(void);
+extern void SetupVGIC(unsigned);
+extern void enable_trigger(unsigned);
+extern void restore_context(unsigned);
+extern unsigned async_switchover;
+
+unsigned host_cluster = HOST_CLUSTER;
+unsigned switcher = SWITCHER;
+vm_state guestos_state[MAX_CPUIFS];
+unsigned guestos_state_size = sizeof(vm_state);
+
+/*
+ * To make events work across a non-coherent interconnect, events
+ * are allocated in an SO or DV page.
+ */
+unsigned event[NUM_CPUS][MAX_EVENTS] __attribute__ ((section("BL_DV_PAGE")));
+
+/*
+ * C function to perform the remaining initialisation
+ * once the MMU has been enabled after a cold reset
+ */
+void bl_rest_init(void)
+{
+ unsigned first_cpu = find_first_cpu();
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned warm_reset = 0;
+
+ /* HYP mode initialisation performed after every reset */
+ write_hvbar((unsigned)&vectors);
+ Enable2ndStagePageTables();
+
+ /* Initialise a per cpu UART */
+ config_uart();
+
+ if (switcher) {
+ /*
+ * Ask the secure world to initialise its context.
+ * Not required when "always on"
+ */
+ smc(SMC_SEC_INIT, 0);
+
+ /*
+ * Since we are using the shared vgic, we need to map
+ * the cpuids to the cpu interfaces as there is no
+ * longer a 1:1 mapping
+ */
+ map_cpuif(cluster_id, cpu_id);
+
+ if (async_switchover && first_cpu == cpu_id)
+ enable_trigger(read_cntfrq());
+ } else {
+
+ /*
+ * Only one cpu should enable the CCI while the other
+ * cpus wait.
+ */
+ if (first_cpu == cpu_id && cluster_id == host_cluster) {
+ write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+ write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+ dsb();
+ }
+
+ /* Wait for the dust to settle down */
+ while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
+ }
+
+ /* Initialise the Virtual GIC and the Virtualizer */
+ SetupVGIC(warm_reset);
+ SetupVirtualisor(first_cpu);
+
+ return;
+}
diff --git a/big-little/common/hyp_vectors.s b/big-little/common/hyp_vectors.s
new file mode 100644
index 0000000..59d1b66
--- /dev/null
+++ b/big-little/common/hyp_vectors.s
@@ -0,0 +1,399 @@
+ ;
+ ; Copyright (c) 2011, ARM Limited. All rights reserved.
+ ;
+ ; Redistribution and use in source and binary forms, with
+ ; or without modification, are permitted provided that the
+ ; following conditions are met:
+ ;
+ ; Redistributions of source code must retain the above
+ ; copyright notice, this list of conditions and the
+ ; following disclaimer.
+ ;
+ ; Redistributions in binary form must reproduce the
+ ; above copyright notice, this list of conditions and
+ ; the following disclaimer in the documentation
+ ; and/or other materials provided with the distribution.
+ ;
+ ; Neither the name of ARM nor the names of its
+ ; contributors may be used to endorse or promote products
+ ; derived from this software without specific prior written
+ ; permission.
+ ;
+
+ AREA HypVectors, CODE, READONLY, ALIGN=5
+
+ PRESERVE8
+
+ IMPORT handle_interrupt
+ IMPORT HandleVirtualisor
+ IMPORT guestos_state
+ IMPORT guestos_state_size
+ IMPORT get_sp
+ IMPORT output_string
+ IMPORT virt_dead
+ IMPORT SetupVirtExtPageTables
+ IMPORT Enable2ndStagePageTables
+ IMPORT restore_context
+ IMPORT read_hsctlr
+ IMPORT write_hsctlr
+ IMPORT write_hmair0
+ IMPORT write_httbr
+ IMPORT write_htcr
+ IMPORT bl_rest_init
+ IMPORT hyp_l1_pagetable
+
+ IF ASYNC_SWITCH = {FALSE}
+ IMPORT is_hvc
+ IMPORT HandleHVC
+ ENDIF
+
+ EXPORT vectors
+ EXPORT iabt_entry
+ EXPORT dabt_entry
+ EXPORT undef_entry
+ EXPORT svc_hyp_entry
+ EXPORT fiq_entry
+ EXPORT bl_setup
+ EXPORT hyp_warm_reset_handler
+
+
+ MACRO
+ hyp_entry $reg
+ SUB $reg, $reg, #72
+
+ ; ---------------------------------------------------
+ ; Save all GP registers
+ ; Save User mode LR which the HYP mode will use now.
+ ; Save HYP mode ELR & SPSR in case we are re-entrant
+ ; Pass saved context as argument to next bit of code
+ ; ---------------------------------------------------
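+	; (Resulting 72-byte frame, for reference: r0-r12 at offsets 0-48,
+	;  ELR_hyp/SPSR/LR_usr at offsets 60-68; offsets 52-56 are unused.)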
+ STMIA $reg, {r0-r12}
+ MRS r0, ELR_hyp
+ MRS r1, SPSR
+ MRS r2, LR_usr
+ ADD r3, $reg, #60
+ STMIA r3, {r0-r2}
+ MEND
+
+ MACRO
+ hyp_exit $reg
+ ADD r3, $reg, #60
+ LDMIA r3, {r0-r2}
+ MSR ELR_hyp, r0
+ MSR SPSR_cxsf, r1
+ MSR LR_usr, r2
+
+ ; ----------------------------------------------------
+ ; We do need to clear the BTAC though since it is
+ ; virtually-addressed with no regard for the NS bit
+ ; ----------------------------------------------------
+ MCR p15, 0, r0, c7, c5, 6 ; invalidate BTAC
+
+ LDMIA $reg, {r0-r12}
+ ADD $reg, $reg, #72
+ ERET
+ MEND
+
+ IF {FALSE}
+dabort_string
+ DCB " Virtualisor-DAB!\n", 0
+undef_string
+ DCB " Virtualisor-UND!\n", 0
+pabort_string
+ DCB " Virtualisor-PAB!\n", 0
+swi_string
+ DCB " Virtualisor-SWI!\n", 0
+irq_string
+ DCB " Virtualisor-IRQ!\n", 0
+fiq_string
+ DCB " Virtualisor-FIQ!\n", 0
+unused_string
+ DCB " Virtualisor-UNU!\n", 0
+
+ ALIGN
+ ENDIF
+
+ ; ----------------------------------------------------
+ ; Defines for enabling HYP mode MMU
+ ; ----------------------------------------------------
+
+ENABLE EQU 0x1
+DISABLE EQU 0x0
+
+ ; ----------------------------------------------------
+ ; HMAIR attributes relevant to us
+ ; ----------------------------------------------------
+HMAIR_INNER_WB_RWA_MEM EQU 0x0f
+HMAIR_OUTER_WB_RWA_MEM EQU 0xf0
+HMAIR_DEVICE_MEM EQU 0x04
+HMAIR_SO_MEM EQU 0x00
+
+IDX0 EQU (HMAIR_DEVICE_MEM << 0)
+IDX1 EQU ((HMAIR_INNER_WB_RWA_MEM :OR: HMAIR_OUTER_WB_RWA_MEM) << 8)
+IDX2 EQU (HMAIR_SO_MEM << 16)
+
+ ; ----------------------------------------------------
+ ; HSCTLR defines
+ ; ----------------------------------------------------
+ICACHE EQU (ENABLE << 12)
+ALIGN EQU (ENABLE << 1)
+DCACHE EQU (ENABLE << 2)
+MMU EQU (ENABLE << 0)
+
+ ; ----------------------------------------------------
+ ; HTCR defines
+ ; ----------------------------------------------------
+CR_C_WBWA EQU 0x1
+CR_OUTER_SH EQU 0x2
+CR_INNER_SH EQU 0x3
+CR_ADDR_SPC_4GB EQU 0x0
+
+EAE EQU (ENABLE << 31)
+T0SZ EQU (CR_ADDR_SPC_4GB << 0)
+IRGN0 EQU (CR_C_WBWA << 8)
+ORGN0 EQU (CR_C_WBWA << 10)
+SH0 EQU (CR_INNER_SH << 12)
+
+vectors
+ B bl_setup ; reset
+ B undef_entry ; undef
+ B svc_hyp_entry ; swi
+ B iabt_entry ; pabt
+ B dabt_entry ; dabt
+ B hvc_entry ; HVC
+ B irq_entry ; irq
+ B fiq_entry ; fiq
+
+bl_setup FUNCTION
+ ; ----------------------------------------------------
+ ; This function is called after a reset. 'r0-r3' can
+ ; be corrupted after a cold reset.
+ ; Its also assumed that we are taking part in coherency
+	; It's also assumed that we are taking part in coherency
+ ; ----------------------------------------------------
+
+ ; ----------------------------------------------------
+ ; Enable Caches
+ ; ----------------------------------------------------
+ mrc p15, 4, r0, c1, c0, 0
+ orr r0, #ICACHE
+ orr r0, #ALIGN
+ orr r0, #DCACHE
+ mcr p15, 4, r0, c1, c0, 0
+ isb
+
+ msr elr_hyp, lr
+
+ ; ----------------------------------------------------
+ ; Give yourself a stack without enabling the MMU so
+ ; that the pagetables can be created in C code.
+ ; ----------------------------------------------------
+
+ ; ----------------------------------------------------
+ ; Allocate the HYP stack first up to do greater things
+ ; ----------------------------------------------------
+ ldr r0, =guestos_state
+ ldr r1, =guestos_state_size
+ ldr r1, [r1]
+ bl get_sp
+ mov sp, r0
+
+ ; ----------------------------------------------------
+ ; Create the 2nd stage and HYP mode page tables
+ ; ----------------------------------------------------
+ bl SetupVirtExtPageTables
+
+ ; ----------------------------------------------------
+ ; Enable the HYP mode MMU before doing anything further
+ ; ----------------------------------------------------
+ ldr r0, =hyp_l1_pagetable
+ MOV r1, #0
+ mcrr p15, 4, r0, r1, c2
+ ldr r0, =(IDX2 :OR: IDX1 :OR: IDX0)
+ mcr p15, 4, r0, c10, c2, 0
+ ldr r0, =(EAE :OR: SH0 :OR: ORGN0 :OR: IRGN0 :OR: T0SZ)
+ mcr p15, 4, r0, c2, c0, 2
+ mrc p15, 4, r0, c1, c0, 0
+ orr r0, #MMU
+ mcr p15, 4, r0, c1, c0, 0
+ dsb
+ isb
+
+ ; ----------------------------------------------------
+ ; Initialise the remaining bits now that the MMU is on
+ ; ----------------------------------------------------
+ hyp_entry sp
+ bl bl_rest_init
+ hyp_exit sp
+
+ ENDFUNC
+
+ IF {FALSE}
+common_abt
+ PUSH {lr}
+
+ BL hexword ; print r0
+
+ MRC p15, 0, r0, c5, c0, 0 ; DFSR
+ BL hexword
+
+ MRC p15, 0, r0, c6, c0, 0 ; DFAR
+ BL hexword
+
+ MRC p15, 4, r0, c5, c2, 0 ; HSR
+ BL hexword
+
+ MRC p15, 4, r0, c6, c0, 0 ; HDFAR
+ BL hexword
+
+ MRC p15, 4, r0, c6, c0, 2 ; HIFAR
+ BL hexword
+
+ MRC p15, 4, r0, c6, c0, 4 ; HPFAR
+ BL hexword
+
+ POP {lr}
+ BX lr
+
+dabt_entry
+ MOV r0, lr ; save lr, just in case it's interesting
+ IF {FALSE}
+ BL common_abt
+ ENDIF
+ LDR r0, =dabort_string
+ BL output_string
+ B dead
+
+iabt_entry
+ MOV r0, lr ; save lr, just in case it's interesting
+ IF {FALSE}
+ BL common_abt
+ ENDIF
+ LDR r0, =pabort_string
+ BL output_string
+ B dead
+
+undef_entry
+ MOV r0, lr ; save lr, just in case it's interesting
+ IF {FALSE}
+ BL common_abt
+ ENDIF
+ LDR r0, =undef_string
+ BL output_string
+ B dead
+
+dead
+ B dead
+ ENDIF
+
+dabt_entry
+ B dabt_entry
+
+iabt_entry
+ B iabt_entry
+
+undef_entry
+ B undef_entry
+
+irq_entry
+ hyp_entry sp
+ ; ----------------------------------------------------
+ ; Pass SP as arg if we intend to initiate a switchover
+ ; ----------------------------------------------------
+ MOV r0, sp
+ BL handle_interrupt
+ hyp_exit sp
+
+svc_hyp_entry
+ B svc_hyp_entry
+
+fiq_entry
+ B fiq_entry
+
+hvc_entry
+ hyp_entry sp
+
+ ; ----------------------------------------------------
+ ; Check if we have an HVC call. The Switcher handles
+	; it first. If it's unable to, it's passed to the
+	; Virtualisor. It should be possible to cascade an HVC
+	; across the two, but that is not done for the time being.
+ ; ----------------------------------------------------
+ IF ASYNC_SWITCH = {FALSE}
+ BL is_hvc
+ CMP r0, #0
+ BEQ next
+ MOV r0, sp
+ BL HandleHVC
+ TST r0, #1
+ BNE out
+ ENDIF
+next
+ MOV r0, sp
+ BL HandleVirtualisor
+out
+ hyp_exit sp
+
+hyp_warm_reset_handler FUNCTION
+ ; ----------------------------------------------------
+ ; Enable Caches
+ ; ----------------------------------------------------
+ mrc p15, 4, r0, c1, c0, 0
+ orr r0, #ICACHE
+ orr r0, #ALIGN
+ orr r0, #DCACHE
+ mcr p15, 4, r0, c1, c0, 0
+ isb
+
+ ; ----------------------------------------------------
+ ; Enable the HYP mode MMU before doing anything further
+ ; ----------------------------------------------------
+ ldr r0, =hyp_l1_pagetable
+ MOV r1, #0
+ mcrr p15, 4, r0, r1, c2
+ ldr r0, =(IDX2 :OR: IDX1 :OR: IDX0)
+ mcr p15, 4, r0, c10, c2, 0
+ ldr r0, =(EAE :OR: SH0 :OR: ORGN0 :OR: IRGN0 :OR: T0SZ)
+ mcr p15, 4, r0, c2, c0, 2
+ mrc p15, 4, r0, c1, c0, 0
+ orr r0, #MMU
+ mcr p15, 4, r0, c1, c0, 0
+ dsb
+ isb
+
+ ; ----------------------------------------------------
+ ; Initialise the remaining bits now that the MMU is on
+ ; ----------------------------------------------------
+
+ ; ----------------------------------------------------
+ ; Allocate the HYP stack first up to do greater things
+ ; ----------------------------------------------------
+ ldr r0, =guestos_state
+ ldr r1, =guestos_state_size
+ ldr r1, [r1]
+ bl get_sp
+ mov sp, r0
+
+ ; ----------------------------------------------------
+ ; Initialise the HVBAR
+ ; ----------------------------------------------------
+ adr r0, vectors
+ mcr p15, 4, r0, c12, c0, 0
+
+ ; ----------------------------------------------------
+ ; Initialise the 2nd stage translations for NS PL0/1
+ ; ----------------------------------------------------
+ bl Enable2ndStagePageTables
+
+ ; ----------------------------------------------------
+ ; Restore the context now. CPU0 is the first cpu
+ ; ----------------------------------------------------
+ hyp_entry sp
+ mov r0, #0
+ bl restore_context
+ hyp_exit sp
+
+ ENDFUNC
+
+ END
+
diff --git a/big-little/common/pagetable_setup.c b/big-little/common/pagetable_setup.c
new file mode 100644
index 0000000..132ad73
--- /dev/null
+++ b/big-little/common/pagetable_setup.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+/* ----------------------------------------------------------------------------
+ * i n c l u d e s
+ * --------------------------------------------------------------------------*/
+#include "hyp_types.h"
+#include "hyp_vmmap.h"
+#include "misc.h"
+#include "events.h"
+#include "virt_helpers.h"
+
+typedef struct {
+ unsigned va;
+ unsigned pa;
+ unsigned long long attrs;
+ unsigned long long *pt_addr;
+} four_kb_pt_desc;
+
+/* ----------------------------------------------------------------------------
+ * d e f i n e s
+ * --------------------------------------------------------------------------*/
+#define LEVEL1 0x1
+#define LEVEL2 0x2
+
+#define HYP_PA_START 0x00000000 /* Flat mapping */
+#define HYP_PA_END 0xFFFFFFFF
+#define HYP_VA_START HYP_PA_START
+#define HYP_VA_END HYP_PA_END
+
+/*
+ * First level pagetables to cover 512GB.
+ * Only first 4GB used
+ */
+unsigned long long hyp_l1_pagetable[512] __attribute__ ((aligned(4096)));
+
+/*
+ * Second level pagetables to cover each GB.
+ * Arranged contiguously for ease
+ */
+unsigned long long hyp_l2_pagetable[4][512] __attribute__ ((aligned(16384)));
+
+/*
+ * Allocate one Level 3 page table which will
+ * create a 4K SO ordered page.
+ */
+unsigned long long hyp_l3_so_pt[512] __attribute__ ((aligned(4096)));
+
+/*
+ * Allocate space for 4 contiguous level 2 page
+ * tables which will cover the 32 bit address
+ * space. Align it to the 16K boundary.
+ */
+unsigned long long stage2_l2_pagetable[4][512] __attribute__ ((aligned(16384)));
+
+/*
+ * Allocate one Level 3 page table which will
+ * route guestOS physical cpu interface accesses
+ * to the virtual cpu interface. Align it to the
+ * 4K boundary.
+ */
+unsigned long long stage2_l3_cpuif_pt[512] __attribute__ ((aligned(4096)));
+
+/*
+ * Allocate one Level 3 page table which will
+ * create a 4K SO ordered page.
+ */
+unsigned long long stage2_l3_so_pt[512] __attribute__ ((aligned(4096)));
+
+#define ENABLE 0x1
+#define DISABLE 0x0
+
+/* HMAIR attributes relevant to us */
+#define HMAIR_INNER_WB_RWA_MEM 0x0f
+#define HMAIR_OUTER_WB_RWA_MEM 0xf0
+#define HMAIR_DEVICE_MEM 0x04
+#define HMAIR_SO_MEM 0x00
+
+#define IDX0(x) ((x) << 0)
+#define IDX1(x) ((x) << 8)
+#define IDX2(x) ((x) << 16)
+
+/* Memory attributes index for HMAIR0 */
+#define HMAIR0_DEVICE_MEM_ATTR_IDX (0x0 << 2)
+#define HMAIR0_NORMAL_MEM_ATTR_IDX (0x1 << 2)
+#define HMAIR0_SO_MEM_ATTR_IDX (0x2 << 2)
+
+#define NS_BIT (1 << 5)
+
+/* Access permissions */
+#define AP(x) ((x) << 6)
+/* HAP permissions */
+#define HAP_RO 0x1
+#define HAP_RW 0x3
+/* Simplified Access permissions */
+#define KERN_RO 0x2
+#define KERN_RW 0x0
+
+/* HTCR/VTCR fields */
+#define EAE(x) ((unsigned) x << 31)
+#define T0SZ(x) (x << 0)
+#define IRGN0(x) (x << 8)
+#define ORGN0(x) (x << 10)
+#define SH0(x) (x << 12)
+
+#define CR_C_WBWA 0x1
+#define CR_OUTER_SH 0x2
+#define CR_INNER_SH 0x3
+#define CR_ADDR_SPC_4GB 0x0
+
+/* HSCTLR fields */
+#define MMU(x) (x << 0)
+#define ALIGNMENT(x) (x << 1)
+#define DCACHE(x) (x << 2)
+#define ICACHE(x) (x << 12)
+
+/*
+ * BUG:
+ * Dcache clean by MVA ops added to ensure that main memory is updated prior to
+ * the first page table walk upon entry into NS world. This is potentially an AEM
+ * bug as the descriptors should be picked from the cache itself since the VTCR
+ * marks PTWs as cacheable.
+ * It would be better to collect the writes and then perform the clean rather than
+ * picking them up individually.
+ */
+/*
+ * Map the physical cpu interface to the virtual
+ * cpu interface for OS use.
+ */
+static void Add4KMapping(four_kb_pt_desc * l3_mapping, unsigned level,
+ unsigned long long *base_pt_addr)
+{
+ unsigned one_gb_index = l3_mapping->pa >> 30;
+ unsigned two_mb_index = l3_mapping->pa >> 21;
+ unsigned four_kb_index = 0;
+ unsigned pa_4k_index = 0;
+ unsigned long long l1_desc = 0;
+ unsigned long long *l2_desc = 0;
+ unsigned long long old_attrs = 0;
+ unsigned long long *l1_pt_addr = 0;
+ unsigned long long *l2_pt_addr = 0;
+ unsigned long long *l3_pt_addr = l3_mapping->pt_addr;
+
+ /*
+ * Indices calculated above are relative to the GB or MB they
+ * belong to rather than an offset of 0x0. e.g. for the 2mb index
+ * index = (address >> 21) - (<number of 2MBs in 1GB> x <this GB index>)
+ */
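+	/*
+	 * Worked example: pa = 0x80200000 gives one_gb_index = 2 and
+	 * two_mb_index = 1025, so the relative 2MB index used below is
+	 * 1025 - (512 * 2) = 1.
+	 */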
+
+ /* Calculate the level 2 page table descriptor */
+ if (level == 1) {
+ l1_pt_addr = base_pt_addr;
+ l1_desc = l1_pt_addr[one_gb_index];
+ l2_pt_addr =
+ (unsigned long long
+ *)((unsigned)((&l1_desc)[0] & 0xfffff000UL));
+ l2_desc = &l2_pt_addr[two_mb_index - (512 * one_gb_index)];
+ } else {
+ l2_pt_addr = &base_pt_addr[one_gb_index];
+ l2_desc = &l2_pt_addr[two_mb_index - (512 * one_gb_index)];
+ }
+
+ /* Preserve the old attributes */
+ old_attrs = *l2_desc & 0xfff0000000000fffULL;
+ /* Replace block mapping with table mapping */
+ *l2_desc = (unsigned long long)l3_pt_addr | TABLE_MAPPING;
+
+ /* Create a flat mapping for all 4k descriptors to begin with */
+ for (four_kb_index = 0; four_kb_index < 512; four_kb_index++) {
+ l3_pt_addr[four_kb_index] =
+ (((two_mb_index << 9) +
+ four_kb_index) << 12) | old_attrs | VALID_MAPPING;
+ }
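+	/*
+	 * (pa << 11) >> 11 clears the top 11 address bits, i.e. keeps the
+	 * offset within the 2MB block; shifting that right by 12 gives the
+	 * index (0-511) of the 4KB page to be remapped.
+	 */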
+ pa_4k_index = ((l3_mapping->pa << 11) >> 11) >> 12;
+
+ /*
+ * Replace the existing descriptor with new mapping and attributes
+ */
+ l3_pt_addr[pa_4k_index] =
+ l3_mapping->va | l3_mapping->attrs | VALID_MAPPING;
+
+ return;
+}
+
+void CreateHypModePageTables(void)
+{
+ unsigned num_l1_descs = 0, num_l2_descs = 0;
+ unsigned l1_index, l2_index;
+ unsigned long long l2_attrs = 0;
+ four_kb_pt_desc l3_desc;
+
+ /* Create the pagetables */
+ num_l1_descs = ((HYP_PA_END - HYP_PA_START) >> 30) + 1;
+ num_l2_descs = ((HYP_PA_END - HYP_PA_START) >> 21) + 1;
+
+ /* Only the first 4GB are valid translations */
+ for (l1_index = 0; l1_index < num_l1_descs; l1_index++) {
+ hyp_l1_pagetable[l1_index] =
+ (unsigned long long)&hyp_l2_pagetable[l1_index][0] |
+ TABLE_MAPPING;
+ for (l2_index = 0; l2_index < num_l2_descs / num_l1_descs;
+ l2_index++) {
+
+ if ((l2_index + (l1_index << 9)) < 32) {
+ /* 0-64M(Secure ROM/NOR Flash):Block mapping with RO access, Inner shareable, Inner/Outer WBWA */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RO);
+ ((unsigned *) &l2_attrs)[1] |= XN;
+ }
+ else if ((l2_index + (l1_index << 9)) < 64)
+ /* 64-128M(Secure RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RW);
+ else if ((l2_index + (l1_index << 9)) < 1024) {
+ /* 128-2048M (Peripherals) : Block mapping of Device memory */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_DEVICE_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RW);
+ ((unsigned *) &l2_attrs)[1] |= XN;
+ }
+ else
+ /* 2-4GB (RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RW);
+
+ hyp_l2_pagetable[l1_index][l2_index] =
+ ((l2_index + (l1_index << 9)) << 21) | l2_attrs;
+ }
+ }
+
+ /*
+ * Create a mapping for a device page to be used
+	 * for Locks, Events & anything that is shared when both
+ * the clusters are executing at the same time.
+ */
+ l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.attrs =
+ ACCESS_FLAG | HMAIR0_DEVICE_MEM_ATTR_IDX | SHAREABILITY(0x3) |
+ AP(KERN_RW);
+ l3_desc.pt_addr = hyp_l3_so_pt;
+ Add4KMapping(&l3_desc, LEVEL1, (unsigned long long *)hyp_l1_pagetable);
+
+ return;
+}
+
+void EnableHypModePageTables(void)
+{
+ /* Update the HTTBR */
+ write_httbr((unsigned long long)hyp_l1_pagetable);
+
+ /*
+ * Setup the HMAIR0 register.
+ * [7:0] = Device memory
+ * [15:8] = Normal memory, Inner and outer cacheable, WBWA
+ */
+ write_hmair0(IDX2(HMAIR_SO_MEM) |
+ IDX1(HMAIR_INNER_WB_RWA_MEM | HMAIR_OUTER_WB_RWA_MEM) |
+ IDX0(HMAIR_DEVICE_MEM));
+
+ /*
+ * Set the HTCR.
+ * Pagetables are Normal memory, Inner/Outer shareable, Inner/Outer WBWA
+ */
+ write_htcr(EAE(ENABLE) | SH0(CR_INNER_SH) | ORGN0(CR_C_WBWA) |
+ IRGN0(CR_C_WBWA) | T0SZ(CR_ADDR_SPC_4GB));
+
+ /* Enable the Hyp MMU */
+ write_hsctlr(ICACHE(ENABLE) | DCACHE(ENABLE) | ALIGNMENT(ENABLE) |
+ MMU(ENABLE));
+
+ return;
+}
+
+void Create2ndStagePageTables(void)
+{
+ unsigned two_mb_index = 0;
+ unsigned one_gb_index = 0;
+ unsigned long long level2_desc = 0;
+ four_kb_pt_desc l3_desc = { 0 };
+
+ /*
+ * Create the flat mapped 2nd stage page tables.
+ * This should be done only once. The remaining
+ * cpus can share the mappings and wait while
+ * this is being done.
+ */
+ for (one_gb_index = 0; one_gb_index < 4; one_gb_index++)
+ for (two_mb_index = 0; two_mb_index < 512; two_mb_index++) {
+
+ if ((two_mb_index + (one_gb_index << 9)) < 32)
+ /* 0-64M (Secure ROM/NOR Flash) : Block mapping with RO access, Inner shareable, Inner/Outer WBWA */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RO)
+ | MEM_ATTR(0xf) | BLOCK_MAPPING;
+ else if ((two_mb_index + (one_gb_index << 9)) < 64)
+ /* 64-128M (Secure RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
+ | MEM_ATTR(0xf) | BLOCK_MAPPING;
+ else if ((two_mb_index + (one_gb_index << 9)) < 1024)
+ /* 128-2048M (Peripherals) : Block mapping of Device memory */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
+ | MEM_ATTR(0x1) | BLOCK_MAPPING;
+ else
+ /* 2-4GB (RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
+ | MEM_ATTR(0xf) | BLOCK_MAPPING;
+
+ stage2_l2_pagetable[one_gb_index][two_mb_index] =
+ (two_mb_index +
+ (512 * one_gb_index) << 21) | level2_desc;
+
+ }
+
+ /* Map PCPUIF to the VCPUIF for the payload software */
+ l3_desc.va = VGIC_VM_PHY_BASE;
+ l3_desc.pa = GIC_IC_PHY_BASE;
+ l3_desc.attrs =
+ ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
+ l3_desc.pt_addr = stage2_l3_cpuif_pt;
+ Add4KMapping(&l3_desc, LEVEL2,
+ (unsigned long long *)stage2_l2_pagetable);
+
+ /*
+ * Create a mapping for a device page to be used
+	 * for Locks, Events & anything that is shared when both
+ * the clusters are executing at the same time.
+ */
+ l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.attrs =
+ ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
+ l3_desc.pt_addr = stage2_l3_so_pt;
+ Add4KMapping(&l3_desc, LEVEL2,
+ (unsigned long long *)stage2_l2_pagetable);
+
+ return;
+}
+
+void Enable2ndStagePageTables(void)
+{
+ /*
+ * Set the VTCR to:
+ * Normal memory outer shareable, Device memory shareable
+ * Outer and Inner WBWA
+ * Start at level 2
+ * Size of addressed region is 4GB (16k worth of page tables)
+ */
+ write_vtcr(SH0(CR_INNER_SH) | ORGN0(CR_C_WBWA) | IRGN0(CR_C_WBWA));
+
+	/* Address is already aligned to 16K, i.e. 2^14 */
+ write_vttbr((unsigned long long)stage2_l2_pagetable);
+
+ write_hcr(read_hcr() | HCR_VM);
+
+ /*
+ * TODO: We do not need a synchronization barrier here as we
+ * are not yet executing out of NS PL0 & PL1 and there will be
+ * a barrier at some point before that.
+ */
+ return;
+}
+
+void SetupVirtExtPageTables(void)
+{
+ unsigned cpu_id = read_cpuid();
+ unsigned first_cpu = find_first_cpu();
+ unsigned cluster_id = read_clusterid();
+ unsigned abs_cpuid = 0;
+
+ if (!switcher)
+ abs_cpuid = abs_cpuid(cpu_id, cluster_id);
+
+ /*
+ * First cpu creates the pagetables after
+ * a cold reset. Reused by all cpus across
+ * warm resets.
+ */
+	if (switcher) {
+
+ /*
+		 * While switching it's possible that the host cluster
+ * is brought out of reset first. Hence, the first
+ * cpu of whichever cluster reaches here does the
+ * pagetable setup
+ */
+ if (cpu_id == first_cpu) {
+ CreateHypModePageTables();
+ Create2ndStagePageTables();
+ set_events(VIRT_PGT_DONE);
+ }
+
+ wait_for_event(VIRT_PGT_DONE, cpu_id);
+ reset_event(VIRT_PGT_DONE, cpu_id);
+
+ } else {
+
+ /*
+ * Any cluster can do the initialisation as long as
+ * only one of them does it.
+ */
+ if (cpu_id == first_cpu && cluster_id == host_cluster) {
+ CreateHypModePageTables();
+ Create2ndStagePageTables();
+ set_events(VIRT_PGT_DONE);
+ }
+
+ wait_for_event(VIRT_PGT_DONE, abs_cpuid);
+ reset_event(VIRT_PGT_DONE, abs_cpuid);
+ }
+
+ return;
+}
diff --git a/big-little/common/vgic_handle.c b/big-little/common/vgic_handle.c
new file mode 100644
index 0000000..4a7ded1
--- /dev/null
+++ b/big-little/common/vgic_handle.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "int_master.h"
+#include "gic_registers.h"
+#include "virt_helpers.h"
+#include "misc.h"
+#include "events.h"
+#include "vgiclib.h"
+
+extern vm_context *trigger_entry(vm_context *, unsigned);
+extern unsigned get_cpuinfo(unsigned);
+extern unsigned check_switchover_ipi(unsigned, unsigned);
+extern void keep_trigger_alive(void);
+extern unsigned check_trigger(unsigned, unsigned);
+
+/*
+ * Flag to make the interrupt handling code aware that
+ * each interrupt needs to be checked to see whether it is
+ * a signal to switch to the other cluster
+ */
+unsigned async_switchover = ASYNC_SWITCH;
+
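+/*
+ * Raise software-generated interrupt 'ipi_num' on the cpus named in 'cpu_mask':
+ * the target list goes in bits [23:16] and the SGI number in bits [3:0] of the
+ * distributor's SGI register.
+ */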
+void gic_send_ipi(unsigned cpu_mask, unsigned ipi_num)
+{
+ write32(GIC_ID_PHY_BASE + GICD_SW,
+ ((cpu_mask & 0xff) << 16) | (ipi_num & 0xf));
+}
+
+void gic_enable_int(unsigned num)
+{
+ unsigned int regbase;
+
+ regbase = GIC_ID_PHY_BASE + GICD_ENABLESET + ((num >> 5) << 2);
+ write32(regbase, 1 << (num & 0x1F));
+}
+
+void gic_disable_int(unsigned num)
+{
+ unsigned int regbase;
+
+ regbase = GIC_ID_PHY_BASE + GICD_ENABLECLEAR + ((num >> 5) << 2);
+ write32(regbase, 1 << (num & 0x1F));
+}
+
+void gic_deactivate_int(unsigned num)
+{
+ write32(GIC_IC_PHY_BASE + GICC_DEACTIVATE, num);
+}
+
+void gic_eoi_int(unsigned num)
+{
+ write32(GIC_IC_PHY_BASE + GICC_EOI, num);
+}
+
+unsigned gic_ack_int(void)
+{
+ return read32(GIC_IC_PHY_BASE + GICC_INTACK);
+}
+
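+/*
+ * Total number of interrupt IDs supported by the distributor, derived from the
+ * low 5 bits of GICD_CTR as (N + 1) * 32; e.g. a field value of 5 means 192 IDs.
+ */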
+unsigned gic_int_num(void)
+{
+ unsigned intcount = 0;
+
+ intcount = read32(GIC_ID_PHY_BASE + GICD_CTR);
+ intcount = ((intcount & 0x1F) + 1) * 32;
+
+ return intcount;
+}
+
+/*
+ * handle_interrupt() will be called when an interrupt arrives
+ */
+vm_context *handle_interrupt(vm_context * context)
+{
+ unsigned int status, i, src_cpu = 0;
+ unsigned cpuid = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned list_desc = 0;
+ unsigned int_pri = 0;
+ unsigned cpu_if = get_cpuif(cluster_id, cpuid);
+ vm_context *ret_ctx = context;
+ unsigned do_switch = 0, first_cpu = find_first_cpu();
+
+ /*
+ * Get the interrupt #
+ */
+ status = gic_ack_int();
+ i = status & 0x3FF;
+
+ /*
+ * Stop if there are no more interrupts
+ */
+ if (i == 1023) {
+ printf("Spurious interrupt %d \n", i);
+ return ret_ctx;
+ }
+
+ if (async_switchover && cpuid == first_cpu)
+ keep_trigger_alive();
+
+ /*
+ * Special case IPIs, since we need the source CPU ID
+ */
+ if (i < 16) {
+ src_cpu = (status >> 10) & INTACK_CPUID_MASK;
+
+ /* Check whether we have been requested to switchover */
+ do_switch = check_switchover_ipi(cpu_if, i);
+
+ /*
+ * SGI Ack actually returns the source cpu interface
+ * which needs to be mapped to the apt cpuid.
+ */
+ src_cpu = get_cpuinfo(src_cpu) & 0xf;
+
+ /*
+ * IPI handling:
+ * If Split EOI is not enabled, then writing to the EOI
+ * register drops the priority and deactivates the IPI
+		 * together. Otherwise, we need to do it separately.
+ * Note that in either case, the OS cannot deactivate the
+ * IPI as writing to the virtual EOI register will not
+ * bring about a state change in the physical distributor
+ * state machine.
+ */
+ gic_eoi_int(status);
+ if (read32(GIC_IC_PHY_BASE + GICC_CTL) & 0x200)
+ gic_deactivate_int(status);
+
+ if (do_switch) {
+ /*
+ * switch_cluster() takes the first_cpu as its arg. Since
+ * all the cores are expected to power down, its reasonable
+ * to assume cpu0 is the first cpu and will take care of
+ * saving all the global context.
+ */
+ switch_cluster(first_cpu);
+ return ret_ctx;
+ }
+ }
+
+ /*
+	 * Check if this interrupt is meant to trigger a switch to the
+ * other cluster. If so, then we do not forward the interrupt
+ * to the payload software.
+ */
+ if (async_switchover && check_trigger(i, status))
+ return ret_ctx;
+
+ /*
+ * TODO: Further optimizations can be done later when there are
+ * more interrupts apart from timer & uart.
+ */
+ /*
+	 * From vGIC 11.0 onwards, split EOI functionality has to be used for
+ * all interrupts. EOIing the interrupt from the VCPUIF will only
+ * deactivate the interrupt (clear the active bit) and not clear
+ * the active priority at the PCPUIF.
+ * Do this only for non SGIs as their priority has already been
+ * dropped.
+ */
+ if (i >= 16)
+ write32(GIC_IC_PHY_BASE + GICC_PRIODROP, i);
+
+ /*
+	 * Priority reg offset = (interrupt no. / 4) x 4 bytes.
+	 * Priority shift = (interrupt no. % 4) x 8 bits (8 bits per priority value)
+	 * Priority value = Priority reg >> Priority shift
+ */
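+	/*
+	 * For example, interrupt 34 reads the priority register at offset
+	 * GICD_PRI + 0x20 and shifts the read value right by 16 bits.
+	 */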
+ int_pri =
+ read32(GIC_ID_PHY_BASE + GICD_PRI +
+ ((i >> 2) << 2)) >> ((i & 0x3) << 3);
+
+ /*
+ * Signal interrupts as secure to the VCPUIF since the OS will write to the EnableS
+ * bit of the VCPUIF through the 2nd stage translations.
+	 * TODO: Priority is being read as an 8-bit value from the distributor registers
+ * and passed as a 5 bit value. Need to check if this will cause problems.
+ */
+ if (i < 16)
+ list_desc =
+ STATE(PENDING) | (int_pri >> 3) << 23 | src_cpu << 10 | i;
+ else
+ list_desc =
+ HW_IRQ | STATE(PENDING) | (int_pri >> 3) << 23 | i << 10 | i;
+
+ enqueue_interrupt(list_desc, cpuid);
+
+ return ret_ctx;
+}
diff --git a/big-little/common/vgic_setup.c b/big-little/common/vgic_setup.c
new file mode 100644
index 0000000..b72359e
--- /dev/null
+++ b/big-little/common/vgic_setup.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "int_master.h"
+#include "gic_registers.h"
+#include "virt_helpers.h"
+#include "misc.h"
+#include "events.h"
+#include "vgiclib.h"
+
+/*
+ * The Big-little software only needs to bother itself with
+ * bare-minimal vGIC configuration.
+ *
+ * 1. Distributor. Security bits should be taken care of
+ * by the boot firmware after a cold reset. Big-little
+ * code should initialise private interrupts as secure
+ * after a warm reset.
+ *
+ * 2. Physical Cpu interface. Initialised by us after
+ * both warm and cold reset.
+ *
+ * 3. Virtual CPU interface (HYP view). Initialised by us
+ * after cold reset & restored after warm reset.
+ *
+ * 4. Virtual CPU interface (CPU view). Initialised by
+ * the payload software after cold reset and restored by
+ * us after a warm reset.
+ */
+static void gic_cpuinit()
+{
+ /* Disable the PCPUIF before configuring it. */
+ write32(GIC_IC_PHY_BASE + GICC_CTL, 0x0);
+ write32(GIC_IC_PHY_BASE + GICC_BP, 0x0);
+ write32(GIC_IC_PHY_BASE + GICC_PRIMASK, 0xFF);
+ /* Enable split EOI & Non-secure PCPUIF */
+ write32(GIC_IC_PHY_BASE + GICC_CTL, 0x201);
+}
+
+void SetupVGIC(unsigned warm_reset)
+{
+ /*
+ * Initialise the HYP view Virtual CPU interface after
+ * a cold reset
+ */
+ if (!warm_reset)
+ vgic_init();
+
+ /* Initialise the Physical cpu interface */
+ gic_cpuinit();
+
+ /*
+ * Enable Virtual exceptions
+ */
+ write_hcr(read_hcr() | HCR_AMO | HCR_IMO | HCR_FMO);
+
+ /*
+ * TODO: Barriers not needed here as there will surely
+ * be others further down the line before virtual
+ * exceptions are used.
+ */
+ return;
+}
diff --git a/big-little/common/vgiclib.c b/big-little/common/vgiclib.c
new file mode 100644
index 0000000..838fc06
--- /dev/null
+++ b/big-little/common/vgiclib.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "vgiclib.h"
+#include "misc.h"
+#include "virt_helpers.h"
+#include "int_master.h"
+
+/*
+ * Manage overflowints somehow.. static pool with recycling allocators.
+ */
+
+#define MAXOVERFLOWINTS 200
+
+static struct overflowint *freeoverflows[NUM_CPUS];
+static struct overflowint theoverflowints[NUM_CPUS][MAXOVERFLOWINTS];
+static struct gic_cpuif cpuifs[NUM_CPUS];
+static unsigned hv_lr_count[NUM_CPUS] = {0};
+
+void dump_vgic_state()
+{
+ unsigned int i;
+
+ printf("VGIC state:\n");
+ printf(" Control : 0x%x \n", read32(VGIC_HV_PHY_BASE + GICH_CTL));
+ printf(" ActivePri: 0x%x \n", read32(VGIC_HV_PHY_BASE + GICH_APR0));
+ for (i = 0; i < 4; i++) {
+ printf(" List : 0x%x \n", read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (i * 4)));
+ }
+}
+
+static struct overflowint *get_overflowint(unsigned cpuid)
+{
+ struct overflowint *p = freeoverflows[cpuid];
+
+ if (!p) {
+ printf("Panic: Out of overflow interrupt slots.\n");
+ printf("Recompile with larger MAXOVERFLOWINTS.\n");
+ panic();
+ }
+
+ freeoverflows[cpuid] = p->next;
+
+ return p;
+}
+
+static void free_overflowint(struct overflowint *p, unsigned cpuid)
+{
+ p->next = freeoverflows[cpuid];
+ freeoverflows[cpuid] = p;
+}
+
+void vgic_init(void)
+{
+ unsigned int i;
+ unsigned cpuid = read_cpuid();
+
+ freeoverflows[cpuid] = 0x0;
+
+ for (i = 0; i < MAXOVERFLOWINTS; i++) {
+ free_overflowint(&(theoverflowints[cpuid][i]), cpuid);
+ }
+
+ /*
+ * Find the number of List registers
+	 * TODO: Will not work if individual cpus can have a different number
+	 * of list registers across clusters. Needs to be detected on each
+ * access then.
+ */
+ hv_lr_count[cpuid] = (read32(VGIC_HV_PHY_BASE + GICH_VTR) & 0x3f) + 1;
+
+ /* Enable virtual interrupts & if required, maintenance interrupts */
+ write32(VGIC_HV_PHY_BASE + GICH_CTL, VGICH_HCR_EN);
+
+ return;
+}
+
+/*
+ * Abstracted entry accessor functions. Work for live or saved state
+ */
+static void set_vgic_entry(unsigned int descr, unsigned int slot)
+{
+ write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (slot * 4), descr);
+}
+
+static unsigned int get_vgic_entry(unsigned int slot)
+{
+ return read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (slot * 4));
+}
+
+/*
+ * Abstracted status accessor functions, as above
+ */
+static void set_vgic_status(unsigned int status)
+{
+ write32(VGIC_HV_PHY_BASE + GICH_CTL, status);
+}
+
+static unsigned int get_vgic_status(void)
+{
+ return read32(VGIC_HV_PHY_BASE + GICH_CTL);
+}
+
+/*
+ * Add an entry to the queue, the queue is kept in descending priority
+ * (that is to say, ascending numerical priority) order.
+ *
+ * Static function to assist with this, only called if the int is going in the queue.
+ */
+static void set_vgic_queue_entry(struct gic_cpuif *cpuif, unsigned int descr)
+{
+ unsigned int pri = (descr >> 20) & 0xFF;
+ struct overflowint **oflowh, *oflowp;
+ unsigned cpuid = read_cpuid();
+
+ /*
+ * If we are queuing something and there is currently no queue, set the interrupt bit
+ */
+ if (!(cpuif->overflow))
+ set_vgic_status(get_vgic_status() | 0x2);
+
+ /*
+ * Determine insertion point, might be the end of the list
+ */
+ for (oflowh = &(cpuif->overflow); *oflowh; oflowh = &((*oflowh)->next))
+ if ((*oflowh)->priority > pri)
+ break;
+
+ oflowp = get_overflowint(cpuid);
+ oflowp->priority = pri;
+ oflowp->value = descr;
+ oflowp->next = *oflowh;
+ *oflowh = oflowp;
+}
+
+/*
+ * The vGIC spec implements 64 list registers across two 32-bit status
+ * registers. Since all of the list registers may not be implemented,
+ * this function returns the maximum index we need to bother about.
+ */
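+/*
+ * E.g. with 4 implemented list registers the maximum index is (4 - 1) >> 5 = 0,
+ * so only GICH_ELRSR0 needs to be examined; a full complement of 64 gives 1.
+ */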
+static inline unsigned elrsr_max_index(unsigned cpuid)
+{
+ return (hv_lr_count[cpuid] - 1) >> 5;
+}
+
+/*
+ * In a HYP view list register status register both active and unimplemented
+ * interrupts are represented by a 0 bit. This function returns a 32-bit value
+ * where each set bit represents an active list register. It's basically the
+ * inverse of what the elrsr returns while taking into account unimplemented
+ * interrupts.
+ */
+static unsigned get_elrsr_active_bits(unsigned index, unsigned cpuid, unsigned max_index)
+{
+ unsigned elrsr = ~(read32(VGIC_HV_PHY_BASE + GICH_ELRSR0 + (index << 2)));
+
+ if (index == max_index) {
+ /*
+		 * Get the remainder, shift 1 left by the remainder and
+		 * subtract 1 from it to form the mask.
+ */
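+		/*
+		 * E.g. 34 implemented list registers and max_index = 1 gives
+		 * the mask (1 << (34 - 32)) - 1 = 0x3 for this last status word.
+		 */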
+ elrsr &= (1 << (hv_lr_count[cpuid] - (32 * max_index))) - 1;
+ } else if (index > max_index) {
+ /*
+ * There can never be active virqs when the list registers
+ * do not exist.
+ */
+ elrsr = 0;
+ }
+
+ return elrsr;
+}
+
+void vgic_savestate(unsigned int cpu)
+{
+ struct gic_cpuif *cpuif = &(cpuifs[cpu]);
+ unsigned int i, ctr = 0, cur_elrsr = 0;
+ unsigned max_index = elrsr_max_index(cpu);
+
+ for(ctr = 0; ctr <= max_index; ctr++) {
+		/* Invert the read value so that a set bit corresponds to a !inactive register */
+ cur_elrsr = get_elrsr_active_bits(ctr, cpu, max_index);
+ cpuif->elrsr[ctr] = cur_elrsr;
+
+ for(i = bitindex(cur_elrsr); ((int) i) >= 0; i = bitindex(cur_elrsr)) {
+ unsigned list_reg =
+ read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2));
+ unsigned int_id = (list_reg >> 10) & 0x3ff;
+
+ /* Clear the saved bit index */
+ cur_elrsr &= ~(1 << i);
+
+ /*
+			 * Invalidate the pending/active virtual interrupt. Since it's a shared vGIC
+ * this irq will persist till the next switch and hence create a duplicate.
+ */
+ write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2), list_reg & ~(0x3 << 28));
+
+ /*
+ * While saving queued IPI context, ensure that the requesting cpu
+			 * interface is mapped to its counterpart on the inbound cluster
+ */
+ if (int_id < 16) {
+ unsigned ob_cpuid = int_id & 0x7;
+ unsigned ob_clusterid = read_clusterid();
+ unsigned ib_cpuif = 0;
+
+ ib_cpuif = get_cpuif(!ob_clusterid, ob_cpuid);
+ /* Clear the cpu interface bits and place inbound cpu interface instead */
+ list_reg = (list_reg & ~(0x7 << 10)) | (ib_cpuif << 10);
+ } else if (int_id < 32) {
+ /*
+ * Pending Private peripheral interrupts will be recreated from scratch
+ * so no need to save them.
+ */
+ cpuif->elrsr[ctr] &= ~(1 << i);
+ continue;
+ }
+
+ cpuif->ints[i] = list_reg;
+
+ }
+ }
+
+ cpuif->status = read32(VGIC_HV_PHY_BASE + GICH_CTL);
+ cpuif->activepris = read32(VGIC_HV_PHY_BASE + GICH_APR0);
+
+ write32(VGIC_HV_PHY_BASE + GICH_CTL, 0); /* SMP */
+
+ return;
+}
+
+void vgic_loadstate(unsigned int cpu)
+{
+ struct gic_cpuif *cpuif = &(cpuifs[cpu]);
+ unsigned int i, ctr = 0, cur_elrsr = 0;
+ unsigned max_index = elrsr_max_index(cpu);
+
+ for(ctr = 0; ctr <= max_index; ctr++) {
+ cur_elrsr = cpuif->elrsr[ctr];
+
+ for(i = bitindex(cur_elrsr); ((int) i) >= 0; i = bitindex(cur_elrsr)) {
+ write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2), cpuif->ints[i]);
+
+ /* Clear the restored bit index */
+ cur_elrsr &= ~(1 << i);
+ }
+ }
+
+ write32(VGIC_HV_PHY_BASE + GICH_CTL, cpuif->status);
+ write32(VGIC_HV_PHY_BASE + GICH_APR0, cpuif->activepris);
+
+ return;
+}
+
+/*
+ * vgic_refresh: Generic "maintenance" routine for the VGIC
+ *
+ * This is called:
+ * - On maintenance interrupt. We get maintenance interrupts for
+ *   two reasons:
+ *   o Non-zero EOI skid. This routine deals with the skid and sets
+ *     the field to 0, quenching the interrupt source.
+ *   o "Nearly empty" interrupt bit set, and nearly empty condition
+ *     exists. This interrupt source is quenched by filling the
+ *     slots (and clearing the interrupt bit if the queue is now empty)
+ * - When a new interrupt arrives and the cached "free slot" value
+ *   indicates that there are no free slots. We expect to scavenge some
+ *   slots from interrupts which have been completed by the VM.
+ *
+ * This routine is O(n) in the number of skidded EOI's + O(m) in the number
+ * of interrupt slots provided - since this is constant for an
+ * implementation it's really O(1).
+ *
+ * If this VGIC instance is currently live on a CPU it is only legal to
+ * execute this routine on that CPU.
+ */
+void vgic_refresh(unsigned int cpu)
+{
+ struct gic_cpuif *cpuif = &(cpuifs[cpu]);
+ unsigned int i, value, status, newstatus;
+ struct overflowint **oflowh, *oflowp;
+
+ /*
+ * Grab a copy of the status.
+ */
+ status = get_vgic_status();
+
+ /*
+ * "newstatus" is the value to be written back if needed. Whatever
+ * happens, we will clear the slipped EOI count by the time we are done.
+ */
+ newstatus = status & 0x07FFFFFF;
+
+ /*
+ * See if there are any "slipped" EOIs
+ */
+ i = (status >> 27) & 0x1F;
+
+ if (i) {
+ /*
+ * If there are, let's deal with them.
+ *
+ * We will walk through the list of queued interrupts, deactivating the
+ * ACTIVE ones as needed until we either have no more slipped EOIs to
+ * handle or run out of queued interrupts. If we run out of queued
+ * interrupts first, that's UNPREDICTABLE behaviour (and the fault of
+ * the VM). In this case we will just ignore the surplus EOIs.
+ *
+ * After EOI'ing, we delete the entry if it was just ACTIVE or set it
+ * to PENDING if it was PENDING+ACTIVE.
+ *
+ * Use a handle to point to the list entries to avoid the need for
+ * special cases in the loop.
+ */
+ oflowh = &(cpuif->overflow);
+
+ while (i && *oflowh) {
+ value = (*oflowh)->value;
+ if (value & VGIC_ENTRY_ACTIVE) {
+ /*
+ * It's ACTIVE (or PENDING+ACTIVE)
+ */
+ i--;
+
+ if (value & VGIC_ENTRY_HW) {
+ /*
+ * HW bit set, so we need to pass on an EOI. This doesn't ever happen
+ * for IPIs, so just pass on the 10-bit "Hardware ID".
+ */
+ gic_deactivate_int((value >> 10) &
+ 0x3FF);
+ }
+
+ if (value & VGIC_ENTRY_PENDING) {
+ /*
+ * It was PENDING+ACTIVE, clear the ACTIVE bit and move on
+ */
+ (*oflowh)->value &= ~VGIC_ENTRY_ACTIVE;
+ } else {
+ /*
+ * It was only ACTIVE, so we need to delete it..
+ */
+ oflowp = *oflowh;
+ /* Unlink the entry from the list before freeing it */
+ *oflowh = oflowp->next;
+ free_overflowint(oflowp, cpu);
+ }
+ } else {
+ /*
+ * It wasn't ACTIVE :( Try the next one.
+ */
+ oflowh = &((*oflowh)->next);
+ }
+ }
+ }
+
+ /*
+ * Now populate any spare slots with entries from the list (if any). Also fix up the free slot bitmap
+ */
+ for (i = 0; i < hv_lr_count[cpu]; i++) {
+ value = get_vgic_entry(i);
+
+ if (value & 0x30000000) {
+ /*
+ * This entry already contains a valid interrupt, skip
+ */
+ continue;
+ }
+
+ /*
+ * Not a valid interrupt
+ */
+ oflowp = cpuif->overflow;
+ if (oflowp) {
+ /*
+ * If there's a queue, move the top entry out of the queue and into
+ * this slot ..
+ */
+ cpuif->overflow = oflowp->next;
+
+ set_vgic_entry(oflowp->value, i);
+ free_overflowint(oflowp, cpu);
+ } else {
+ /*
+ * .. otherwise mark it as available.
+ */
+ cpuif->freelist |= (1 << i);
+ }
+ }
+
+ /*
+ * If we now don't have any overflow, clear the status bit
+ */
+ if (!(cpuif->overflow)) {
+ newstatus &= ~0x2;
+ }
+
+ /*
+ * Refresh status if needed
+ */
+ if (newstatus != status) {
+ set_vgic_status(newstatus);
+ }
+}
+
+/*
+ * Adds the interrupt specified to the active list of the CPU specified.
+ * Expected to cope with the state being live on that CPU, or not.
+ *
+ * It's only valid to call this on the CPU which the corresponding VCPUIF is live on.
+ *
+ * This is O(n) in the number of queued interrupts on the CPUIF in question.
+ */
+void enqueue_interrupt(unsigned int descr, unsigned int cpu)
+{
+ unsigned int slot;
+ struct gic_cpuif *cpuif;
+
+ cpuif = &(cpuifs[cpu]);
+
+ /*
+ * If there are no free slots, trigger a maintenance
+ */
+ if (!(cpuif->freelist)) {
+ vgic_refresh(cpu);
+ }
+
+ if (cpuif->freelist) {
+ /*
+ * There is a free slot, use it.
+ */
+ slot = cpuif->freelist; /* Take the free list.. */
+ slot &= (-slot); /* .. extract one set bit .. */
+ cpuif->freelist &= (~slot); /* .. clear that bit from free list .. */
+ slot = bitindex(slot); /* .. and convert to number. */
+
+ set_vgic_entry(descr, slot);
+ } else {
+ /*
+ * There are no free slots, we are either queuing this one or swapping another out
+ */
+ unsigned int pri = (descr >> 20) & 0xFF;
+ unsigned int minpri = 0;
+ unsigned int minslot = 0;
+ unsigned int i, j;
+
+ if (cpuif->overflow && cpuif->overflow->priority <= pri) {
+ /*
+ * There are already queued interrupts with the same or higher priority, just queue this one
+ */
+ set_vgic_queue_entry(cpuif, descr);
+ return;
+ }
+
+ /*
+ * Otherwise find the lowest priority entry..
+ */
+ for (i = 0; i < hv_lr_count[cpu]; i++) {
+ j = (get_vgic_entry(i) >> 20) & 0xFF; /* Get the priority for the current thing in this slot */
+ if (i == 0 || (j > minpri)) {
+ minpri = j;
+ minslot = i;
+ }
+ }
+
+ if (minpri > pri) {
+ /*
+ * If it's lower priority than this new one we kick it out
+ */
+ set_vgic_queue_entry(cpuif, get_vgic_entry(minslot));
+ set_vgic_entry(descr, minslot);
+ } else {
+ /*
+ * Otherwise just queue the new one
+ */
+ set_vgic_queue_entry(cpuif, descr);
+ }
+ }
+}
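+
+/*
+ * Worked example of the free slot extraction above (illustrative only):
+ * if cpuif->freelist is 0xc (slots 2 and 3 free), then slot & (-slot)
+ * isolates the lowest set bit (0x4), the freelist is left as 0x8 and
+ * bitindex(0x4) converts the isolated bit into slot number 2.
+ */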
diff --git a/big-little/include/arm.h b/big-little/include/arm.h
new file mode 100644
index 0000000..7db58ce
--- /dev/null
+++ b/big-little/include/arm.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef ARM_H
+#define ARM_H
+
+/*
+ * File for ARM Architecture specific defines and constants
+ */
+#define CP15CTL_M 0x1
+#define CP15CTL_A 0x2
+#define CP15CTL_C 0x4
+#define CP15CTL_W 0x8
+/*
+ * 4:6 SBO
+ */
+#define CP15CTL_B 0x80
+#define CP15CTL_S 0x100
+#define CP15CTL_R 0x200
+#define CP15CTL_F 0x400
+#define CP15CTL_Z 0x800
+#define CP15CTL_I 0x1000
+#define CP15CTL_V 0x2000
+#define CP15CTL_RR 0x4000
+#define CP15CTL_L4 0x8000
+
+#define FSR_XTABT_L1 0x0C
+#define FSR_XTABT_L2 0x0E
+
+#define FSR_SECTRANS 0x05
+#define FSR_PAGETRANS 0x07
+
+/*
+ * These macros extract the page/section numbers from an address
+ */
+#define pagenum(x) (((x) >> 12) & 0xFF)
+#define secnum(x) ((x) >> 21) /* i$$NEW$$ */
+//#define secnum(x) ((x) >> 20) /* orig */
+
+#define MODE_USR 0x10
+#define MODE_FIQ 0x11
+#define MODE_IRQ 0x12
+#define MODE_SVC 0x13
+#define MODE_ABT 0x17
+#define MODE_UND 0x1D
+#define MODE_SYS 0x1F
+#define MODE_MON 0x16
+
+#define getmode(x) ((x) & 0x1F)
+
+#endif
diff --git a/big-little/include/bakery.h b/big-little/include/bakery.h
new file mode 100644
index 0000000..e8b9ecc
--- /dev/null
+++ b/big-little/include/bakery.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+#ifndef _BAKERY_H_
+#define _BAKERY_H_
+
+#define MAX_CPUS 4
+
+/*
+ * Bakery structure - declare/allocate one of these for each lock.
+ * A pointer to this struct is passed to the lock/unlock functions.
+ */
+typedef struct {
+ volatile char entering[MAX_CPUS];
+ volatile unsigned number[MAX_CPUS];
+} bakery_t;
+
+/*
+ * Initialize a bakery - only required if the bakery_t is
+ * on the stack or heap, as static data is zeroed anyway.
+ */
+extern void init_bakery_spinlock(bakery_t * bakery);
+
+/*
+ * Claim a bakery lock. Function does not return until
+ * lock has been obtained.
+ */
+extern void get_bakery_spinlock(unsigned cpuid, bakery_t * bakery);
+
+/*
+ * Release a bakery lock.
+ */
+extern void release_bakery_spinlock(unsigned cpuid, bakery_t * bakery);
+
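+/*
+ * Illustrative usage sketch (the lock name and function below are
+ * hypothetical; only the bakery API above is real):
+ *
+ *     static bakery_t ctx_lock;   // static, so already zero-initialised
+ *
+ *     void update_shared_context(unsigned cpuid)
+ *     {
+ *         get_bakery_spinlock(cpuid, &ctx_lock);
+ *         // ... critical section touching shared state ...
+ *         release_bakery_spinlock(cpuid, &ctx_lock);
+ *     }
+ */
+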
+#endif /* _BAKERY_H_ */
diff --git a/big-little/include/bl.h b/big-little/include/bl.h
new file mode 100644
index 0000000..700afa2
--- /dev/null
+++ b/big-little/include/bl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __BL_H__
+#define __BL_H__
+
+#include "misc.h"
+
+typedef struct vm_c {
+ unsigned gp_regs[15];
+ unsigned elr_hyp;
+ unsigned spsr;
+ unsigned usr_lr;
+} vm_context;
+
+/*
+ * VM context structure: To hold execution context of the preceding
+ * mode upon entry into the HYP mode synchronously/asynchronously.
+ */
+typedef struct vm_s {
+ unsigned stack[STACK_SIZE];
+ vm_context context;
+} vm_state;
+
+extern vm_state guestos_state[MAX_CPUIFS];
+extern void bl_setup(void);
+extern void hyp_warm_reset_handler(void);
+#endif /* __BL_H__ */
diff --git a/big-little/include/context.h b/big-little/include/context.h
new file mode 100644
index 0000000..17656a8
--- /dev/null
+++ b/big-little/include/context.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+#include "misc.h"
+
+typedef struct ns_gic_cpu_context {
+ unsigned int gic_cpu_if_regs[32]; /* GIC context local to the CPU */
+ unsigned int gic_dist_if_pvt_regs[32]; /* GIC SGI/PPI context local to the CPU */
+} gic_cpu_context;
+
+typedef struct fault_regs {
+ unsigned dfar;
+ unsigned ifar;
+ unsigned ifsr;
+ unsigned dfsr;
+ unsigned adfsr;
+ unsigned aifsr;
+} cp15_fault_regs;
+
+typedef struct ns_banked_cp15_context {
+ unsigned int cp15_misc_regs[2]; /* cp15 miscellaneous registers */
+ unsigned int cp15_ctrl_regs[20]; /* cp15 control registers */
+ unsigned int cp15_mmu_regs[16]; /* cp15 mmu registers */
+ cp15_fault_regs ns_cp15_fault_regs; /* cp15 fault status registers */
+} banked_cp15_context;
+
+typedef struct gen_tmr_ctx {
+ unsigned cntfrq;
+ unsigned long long cntvoff;
+ unsigned cnthctl;
+ unsigned cntkctl;
+ unsigned long long cntp_cval;
+ unsigned cntp_tval;
+ unsigned cntp_ctl;
+ unsigned long long cntv_cval;
+ unsigned cntv_tval;
+ unsigned cntv_ctl;
+ unsigned long long cnthp_cval;
+ unsigned cnthp_tval;
+ unsigned cnthp_ctl;
+} generic_timer_context;
+
+typedef struct ns_cpu_context {
+ unsigned int banked_cpu_regs[32]; /* Banked cpu registers */
+ banked_cp15_context banked_cp15_regs; /* Per cpu banked cp15 context */
+ unsigned int pmon_regs[32]; /* Generic performance monitor registers */
+ generic_timer_context cp15_timer_ctx; /* Global counter registers if accessible in NS world */
+ gic_cpu_context gic_cpu_ctx; /* Per cpu GIC distributor and interface context */
+ unsigned int endianess; /* Per cpu endianess */
+ unsigned int vfp_regs[34]; /* Dummy entry for VFP context. */
+ unsigned int debug_regs[32]; /* Dummy entry for Debug context. TODO */
+} cpu_context;
+
+typedef struct ns_global_context {
+ unsigned int gic_dist_if_regs[512]; /* GIC distributor context to be saved by the last cpu. */
+ unsigned int generic_timer_regs[8]; /* Global timers if the NS world has access to them */
+} global_context;
+
+/*
+ * Structure to preserve the OS MMU and stack state for the switch from the OS
+ * to the Switcher context handler.
+ */
+typedef struct os_state {
+ unsigned sctlr;
+ unsigned dacr;
+ unsigned ttbr0;
+ unsigned nmrr;
+ unsigned prrr;
+} os_state;
+
+/*
+ * Top level structure to hold the complete context of a core in a cluster in
+ * a multi-cluster system
+ */
+typedef struct core_context {
+ /*
+ * Non-secure context save area
+ */
+ cpu_context ns_cpu_ctx;
+
+} core_context;
+
+/*
+ * Top level structure to hold the complete context of a cluster in a multi-
+ * cluster system
+ */
+typedef struct cluster_context {
+ core_context core[MAX_CORES];
+ unsigned num_cores;
+ global_context ns_cluster_ctx;
+} cluster_context;
+
+/*
+ * Top level structure to hold the complete context of a multi cluster system
+ */
+typedef struct system_context {
+ cluster_context cluster;
+ unsigned num_clusters;
+ unsigned warm_reset;
+} system_context;
+
+extern void context_save(unsigned, unsigned);
+extern void context_restore(unsigned, unsigned);
+extern void save_generic_timers(generic_timer_context *);
+extern void restore_eagle_timers(generic_timer_context *);
+extern void save_hyp_context(unsigned, unsigned);
+extern void restore_hyp_context(unsigned, unsigned);
+extern void save_vfp(unsigned *);
+extern void restore_vfp(unsigned *);
+extern void enable_trigger(unsigned);
+#endif /* __CONTEXT_H__ */
diff --git a/big-little/include/events.h b/big-little/include/events.h
new file mode 100644
index 0000000..d6523e3
--- /dev/null
+++ b/big-little/include/events.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __EVENTS_H__
+#define __EVENTS_H__
+
+#include "context.h"
+#include "virt_helpers.h"
+#include "misc.h"
+
+/*
+ * Events for inter/intra-cluster sync
+ */
+#define MAX_EVENTS 12
+
+/* Inter cluster events */
+#define IB_CONTEXT_DONE 0
+#define OB_CONTEXT_DONE 1
+
+/* Intra cluster events */
+#define L2_READY 2
+#define L1_DONE 3
+#define CCI_READY 4
+#define GIC_READY 5
+/* Cores have finished performing inbound headstart specific initialisation */
+#define HS_DONE 6
+/*
+ * Holding pen to ensure that all other context is restored only after all
+ * cpus have finished initialising local and global HYP mode context.
+ */
+#define HYP_CONTEXT_DONE 7
+/*
+ * Holding pen to ensure that all cores have setup the local and global
+ * virtualisor context before any one of them uses it
+ */
+#define VIRT_SETUP_DONE 8
+/*
+ * Event to synchronise creation of HYP mode pagetables
+ */
+#define VIRT_PGT_DONE 9
+
+#define CACHE_GEOM_DONE 10
+#define VID_REGS_DONE 11
+
+/* Defines for Secure events */
+#define MAX_SEC_EVENTS 4
+#define SEC_L1_DONE 0
+#define OB_SHUTDOWN 1
+#define FLUSH_L2 2
+#define SETUP_RST 3
+
+extern void set_event(unsigned, unsigned);
+extern void set_events(unsigned);
+extern unsigned get_event(unsigned, unsigned);
+extern void reset_event(unsigned, unsigned);
+extern void wait_for_event(unsigned, unsigned);
+extern void wait_for_events(unsigned);
+
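+/*
+ * Illustrative usage sketch (hypothetical sequencing, using only the
+ * prototypes above): one side flags completion, the other spins on it.
+ *
+ *     // cpu that has finished restoring its context
+ *     set_event(IB_CONTEXT_DONE, cpu_id);
+ *
+ *     // cpu waiting for that to happen
+ *     wait_for_event(IB_CONTEXT_DONE, cpu_id);
+ *     reset_event(IB_CONTEXT_DONE, cpu_id);
+ */
+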
+#endif /* __EVENTS_H__ */
diff --git a/big-little/include/gic_registers.h b/big-little/include/gic_registers.h
new file mode 100644
index 0000000..92ff5c0
--- /dev/null
+++ b/big-little/include/gic_registers.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __GIC_REGISTERS_H__
+#define __GIC_REGISTERS_H__
+
+#include "hyp_vmmap.h"
+
+#define MAX_INTS 256
+
+/* Distributor interface registers */
+#define GICD_CTL 0x0
+#define GICD_CTR 0x4
+#define GICD_SEC 0x80
+#define GICD_ENABLESET 0x100
+#define GICD_ENABLECLEAR 0x180
+#define GICD_PENDINGSET 0x200
+#define GICD_PENDINGCLEAR 0x280
+#define GICD_ACTIVESET 0x300
+#define GICD_ACTIVECLEAR 0x380
+#define GICD_PRI 0x400
+#define GICD_CPUS 0x800
+#define GICD_CONFIG 0xC00
+#define GICD_SW 0xF00
+#define GICD_CPENDSGIR 0xF10
+#define GICD_SPENDSGIR 0xF20
+
+/* Physical CPU Interface registers */
+#define GICC_CTL 0x0
+#define GICC_PRIMASK 0x4
+#define GICC_BP 0x8
+#define GICC_INTACK 0xC
+#define GICC_EOI 0x10
+#define GICC_RUNNINGPRI 0x14
+#define GICC_HIGHESTPEND 0x18
+#define GICC_DEACTIVATE 0x1000
+#define GICC_PRIODROP GICC_EOI
+
+/* HYP view virtual CPU Interface registers */
+#define GICH_CTL 0x0
+#define GICH_VTR 0x4
+#define GICH_ELRSR0 0x30
+#define GICH_ELRSR1 0x34
+#define GICH_APR0 0xF0
+#define GICH_LR_BASE 0x100
+
+/* GuestOS view virtual CPU Interface registers */
+#define GICV_CTL 0x0
+#define GICV_PRIMASK 0x4
+#define GICV_BP 0x8
+#define GICV_INTACK 0xC
+#define GICV_EOI 0x10
+#define GICV_RUNNINGPRI 0x14
+#define GICV_HIGHESTPEND 0x18
+#define GICV_DEACTIVATE 0x1000
+
+#define VGICH_HCR_EN 0x1
+#define VGICV_NS_EN 0x2
+
+#define GS_ENABLED 0x01
+#define GS_EDGE 0x02
+#define GIC_INTS 128
+#define GIC_PRIMASK 0xF8 /* 32 levels only */
+#define GIC_DISTENABLE 0x1
+#define GIC_CPUIFENABLE 0x2
+
+#define VGIC_PRI 0x200
+#define VGIC_LIST 0x100
+#define VGIC_CONTROL 0x0
+/*
+ * TODO:
+ * Current mechanism to find free slots uses unsigned ints
+ * and is thus restricted to storing just 32 free slots.
+ */
+#define VGIC_LISTENTRIES 64
+
+#define VGIC_ENTRY_HW 0x80000000
+#define VGIC_ENTRY_ACTIVE 0x20000000
+#define VGIC_ENTRY_ACTIVE_PENDING 0x30000000
+#define VGIC_ENTRY_PENDING 0x10000000
+
+#endif /* __GIC_REGISTERS_H__ */
+
diff --git a/big-little/include/handler.h b/big-little/include/handler.h
new file mode 100644
index 0000000..747b31c
--- /dev/null
+++ b/big-little/include/handler.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __HANDLER_H__
+#define __HANDLER_H__
+
+#include "virt_helpers.h"
+#include "context.h"
+#include "misc.h"
+
+extern system_context switcher_context;
+
+#endif /* __HANDLER_H__ */
diff --git a/big-little/include/hvc.h b/big-little/include/hvc.h
new file mode 100644
index 0000000..1f71271
--- /dev/null
+++ b/big-little/include/hvc.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __HVC_H__
+#define __HVC_H__
+
+#include "traps.h"
+#include "handler.h"
+#include "context.h"
+#include "int_master.h"
+
+/* Opcode to trigger a switch from the OS */
+#define SWITCHER_ENTRY 0
+/* Opcode to return to the trigger handler after a switch (NS SVC -> HYP) */
+#define SWITCHER_EXIT 1
+/* Opcode to save HYP mode context */
+#define HYP_SAVE 2
+/* Opcode to restore HYP mode context */
+#define HYP_RESTORE 3
+/* Opcode to test vGIC active bit reg */
+#define VGIC_TEST 4
+
+vm_context *hvc_handler(unsigned, vm_context *);
+
+#endif /* __HVC_H__ */
diff --git a/big-little/include/hyp_types.h b/big-little/include/hyp_types.h
new file mode 100644
index 0000000..123242c
--- /dev/null
+++ b/big-little/include/hyp_types.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __HYP_TYPES_H__
+#define __HYP_TYPES_H__
+
+typedef signed int int32_t;
+typedef signed short int16_t;
+typedef unsigned int uint32_t;
+typedef unsigned short uint16_t;
+typedef unsigned char uint8_t;
+typedef signed char int8_t;
+typedef long long int64_t;
+typedef unsigned long long uint64_t;
+
+#define PRIVATE static
+#define PUBLIC
+
+#endif /* __HYP_TYPES_H__ */
diff --git a/big-little/include/hyp_vmmap.h b/big-little/include/hyp_vmmap.h
new file mode 100644
index 0000000..ef3eeb6
--- /dev/null
+++ b/big-little/include/hyp_vmmap.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __HYP_VMMAP_H__
+#define __HYP_VMMAP_H__
+
+#include "hyp_types.h"
+#include "misc.h"
+
+/* ----------------------------------------------------------------------------
+ * d e f i n e s
+ * --------------------------------------------------------------------------*/
+
+#define GIC_ID_PHY_BASE 0x2C001000 /* Physical Distributor */
+#define GIC_IC_PHY_BASE 0x2C002000 /* Physical CPU interface */
+
+#define VGIC_HV_PHY_BASE 0x2C004000 /* Hypervisor's View */
+#define VGIC_VM_PHY_BASE 0x2C006000 /* Virtual Machine view */
+
+#define UART0_PHY_BASE 0x1C090000
+#define UART1_PHY_BASE 0x1C0A0000
+
+#endif /* __HYP_VMMAP_H__ */
diff --git a/big-little/include/int_master.h b/big-little/include/int_master.h
new file mode 100644
index 0000000..ec3b1b7
--- /dev/null
+++ b/big-little/include/int_master.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+/*
+ * Master interrupt controller driver - talks to real IC and dispatches
+ * interrupts to slave ICs or monitor drivers as appropriate
+ */
+
+#ifndef _INT_MASTER_H_
+#define _INT_MASTER_H_
+
+#include "bl.h"
+
+#define INT_ENABLED 0x1 /* Interrupt is enabled, something to pass it on to */
+#define INT_ACTIVE 0x2 /* Interrupt is currently actually disabled at the real controller because it is active */
+
+#define INT_TRIGGER 0
+#define INT_ENABLE 1
+#define INT_DISABLE 2
+#define INT_GETRAW 3
+#define INT_UNTRIGGER 4
+
+vm_context *handle_interrupt(vm_context * context);
+int gic_masterhandler(void *ptr, unsigned int num, unsigned int op);
+void gic_masterinit(void);
+void gic_deactivate_int(unsigned int num);
+void gic_setup_secure(unsigned, unsigned);
+void enable_2ndstage(void);
+void setup_hcr(void);
+void test_vgic(void);
+#endif
diff --git a/big-little/include/misc.h b/big-little/include/misc.h
new file mode 100644
index 0000000..c154ced
--- /dev/null
+++ b/big-little/include/misc.h
@@ -0,0 +1,404 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef MISC_H
+#define MISC_H
+
+#include <stdio.h>
+#include <string.h>
+
+#define NUM_CPUS 8
+
+#define inline __inline
+
+#define A7 0xC07
+#define A15 0xC0F
+#define PART_NO(x) ((x >> 4) & 0xfff)
+#define REVISION(x) (x & 0xf)
+#define VARIANT(x) ((x >> 20) & 0xf)
+
+#define MAX_CLUSTERS 2
+#define MAX_CORES 8
+#define MAX_CPUIFS 8
+#define STACK_SIZE 96
+
+#define TRUE 1
+#define FALSE 0
+#define CONTEXT_SAVE 0
+#define CONTEXT_RESTORE 1
+#define SYNC_SWITCHOVER 1
+#define READ_MPIDR 2
+
+/*************************************************
+ * Virtual GIC defines
+ *************************************************/
+
+/* Bit definitions in the secure GICC_CTLR */
+#define EOI_MODE_NS (1 << 10)
+#define EOI_MODE_S (1 << 9)
+
+/* Bit definitions in the Active list registers */
+#define HW_IRQ ((unsigned) 1 << 31)
+#define NS_IRQ (1 << 30)
+#define STATE(x) ((x & 0x3) << 28)
+#define PENDING 0x1
+
+/* Misc */
+#define INTACK_CPUID_MASK 0x7
+
+/*************************************************
+ * Bit definitions in the HYP configuration
+ * register.
+ *************************************************/
+#define HCR_AMO (1 << 5)
+#define HCR_IMO (1 << 4)
+#define HCR_FMO (1 << 3)
+#define HCR_VM (1 << 0)
+#define HCR_TID2 (1 << 17)
+#define HCR_TSW (1 << 22)
+
+/*************************************************
+ * TEX remap defines for first level translations
+ *************************************************/
+/* PRRR fields for memory attributes */
+#define TR0(x) ((x) << 0) // SO
+#define TR1(x) ((x) << 2) // DV
+#define TR4(x) ((x) << 8) // NC
+#define TR7(x) ((x) << 14) // C
+/* PRRR fields for shareability attributes */
+#define NOS0(x) ((x) << 24)
+#define NOS1(x) ((x) << 25)
+#define NOS4(x) ((x) << 28)
+#define NOS7(x) ((x) << 31)
+#define NS1(x) ((x) << 19)
+#define DS1(x) ((x) << 17)
+
+/* Memory attributes */
+#define NORMAL_MEM 0x2
+#define DEVICE_MEM 0x1
+#define SO_MEM 0x0
+#define INNER_SH 0x1
+#define SHAREABLE 0x1
+
+/* NMRR fields */
+#define IR7(x) ((x) << 14) // Inner Cache attributes for TEX,C,B = 1,1,1
+#define IR4(x) ((x) << 8) // Inner Cache attributes for TEX,C,B = 1,0,0
+#define OR7(x) ((x) << 30) // Outer Cache attributes for TEX,C,B = 1,1,1
+#define OR4(x) ((x) << 24) // Outer Cache attributes for TEX,C,B = 1,0,0
+
+/* Normal memory attributes */
+#define NMRR_NC 0x0
+#define NMRR_WBWA 0x1
+
+/************************************************
+ * Page table walk attributes in TTBR0/1
+ ************************************************/
+#define NOS(x) ((x) << 5)
+#define RGN(x) ((x) << 3)
+#define SH(x) ((x) << 1)
+#define IRGN(x) ((((x) & 0x2) << 5) | ((x) & 0x1))
+
+#define TTBR_SH 0x1
+#define TTBR_WBWA 0x1
+
+/*
+ * Bit definitions of Level 2 translation
+ * table entries.
+ */
+
+/* Mapping type[1:0] */
+#define INVALID_MAPPING 0x0
+#define BLOCK_MAPPING 0x1
+#define TABLE_MAPPING 0x3
+
+/*
+ * Bit definitions of Level 3 translation
+ * table entries.
+ */
+
+/* Mapping type[1:0] */
+#define VALID_MAPPING 0x3
+
+/* Lower block attributes[11:2] */
+#define NON_GLOBAL (1 << 11)
+#define ACCESS_FLAG (1 << 10)
+#define SHAREABILITY(x) ((x & 0x3) << 8)
+#define ACCESS_PERM(x) ((x & 0x3) << 6)
+#define MEM_ATTR(x) ((x & 0xf) << 2)
+
+/* Upper block attributes[63:52]. Defined as the upper word */
+#define XN (1 << 22)
+#define PXN (1 << 21)
+
+/*
+ * Cache levels.
+ */
+#define L1 0x0
+#define L2 0x1
+
+/*
+ * Cache maintenance op types.
+ */
+#define INV 0x0
+#define CLN 0x1
+#define CLN_INV 0x2
+
+/*
+ * Cache line length in bytes
+ */
+#define CACHE_LINE_SZ 64
+
+/*
+ * CCI defines
+ */
+#define CCI_BASE 0x2c090000
+#define CCI_PERF_CNT(x) CCI_BASE + ((0xa + x ) << 12)
+#define CCI_CYCLE_CNT CCI_BASE + 0x9000
+#define A15_SL_IFACE_BASE CCI_BASE + 0x4000
+#define A7_SL_IFACE_BASE CCI_BASE + 0x5000
+
+/* PMU Counter Registers */
+#define EVNT_SEL_REG 0x0
+#define CNT_REG 0x4
+#define CNT_CTLR_REG 0x8
+#define OVRFLW_STAT_REG 0xc
+
+/* Control interface register offsets */
+#define CTLR_OVERRIDE_REG 0x0
+#define SPEC_CTLR_REG 0x4
+#define SECURE_ACCESS_REG 0x8
+#define STATUS_REG 0xc
+#define IMPRECISE_ERR_REG 0x10
+#define PERF_MON_CTRL_REG 0x100
+
+/* Slave interface register */
+#define SNOOP_CTLR_REG 0x0
+
+/* PMCR bits */
+#define PMCR_CEN (1 << 0)
+#define PMCR_RST (1 << 1)
+#define PMCR_CCR (1 << 2)
+#define PMCR_CCD (1 << 3)
+#define reset_cci_pmu() write32(CCI_BASE + PERF_MON_CTRL_REG, PMCR_RST | PMCR_CCR)
+#define enable_cci_pmu() write32(CCI_BASE + PERF_MON_CTRL_REG, PMCR_CEN)
+#define enable_cci_cntr(x) write32(CCI_PERF_CNT(x) + CNT_CTLR_REG, 0x1)
+#define disable_cci_cntr(x) write32(CCI_PERF_CNT(x) + CNT_CTLR_REG, 0x0)
+#define select_cci_event(x, y) write32(CCI_PERF_CNT(x) + EVNT_SEL_REG, y)
+#define read_cci_cntr(x) read32(CCI_PERF_CNT(x) + CNT_REG)
+/*
+ * TODO:
+ * Move platform specific definitions to the right places
+ */
+#define KFSCB_BASE 0x60000000
+
+#define RST_HOLD0 0x0
+#define RST_HOLD1 0x4
+#define SYS_SWRESET 0x8
+#define RST_STAT0 0xc
+#define RST_STAT1 0x10
+#define EAG_CFG_R 0x20
+#define EAG_CFG_W 0x24
+#define KFC_CFG_R 0x28
+#define KFC_CFG_W 0x2c
+#define KFS_CFG_R 0x30
+#define RST_HANDLER0 0x40
+#define RST_HANDLER1 0x48
+#define RST_HANDLER2 0x50
+#define RST_HANDLER3 0x58
+#define RST_HANDLER4 0x60
+#define RST_HANDLER5 0x68
+#define RST_HANDLER6 0x70
+#define RST_HANDLER7 0x78
+#define KFS_ID 0xffc
+
+/*
+ * KFSCB Tube offsets. Models only
+ */
+#define KFS_TUBE0 0x400
+#define KFS_TUBE1 0x420
+#define KFS_TUBE2 0x440
+#define KFS_TUBE3 0x460
+
+/*
+ * Map the 4 tubes to the Secure
+ * & non-secure worlds
+ */
+#define SEC_TUBE0 KFS_TUBE0
+#define SEC_TUBE1 KFS_TUBE1
+#define NS_TUBE0 KFS_TUBE2
+#define NS_TUBE1 KFS_TUBE3
+
+/* KFSCB Tube register offsets. */
+#define TUBE_CHAR 0x00
+#define TUBE_DATA0 0x08
+#define TUBE_DATA1 0x10
+#define TUBE_DATA2 0x18
+
+#define CLUSTER_CPU_COUNT(x) (((read32(KFSCB_BASE + KFS_CFG_R) >> 16) >> (x << 2)) & 0xf)
+#define DC_SYSTYPE ((read32(KFSCB_BASE + KFS_ID) >> 16) & 0xf)
+#define asym_clusters() (((read32(KFSCB_BASE + KFS_CFG_R) >> 16) & 0xf) == \
+ ((read32(KFSCB_BASE + KFS_CFG_R) >> 20) & 0xf))
+
+/*
+ * "Always on" uses cpuids that span across clusters e.g.
+ * 0-7 for an MPx4+MPx4 system.
+ */
+#define abs_cpuid(cpu_id, cluster_id) (cluster_id ? cpu_id + CLUSTER_CPU_COUNT(!cluster_id) : cpu_id)
+#define CLUSTER_LVL_RST (1 << 0)
+#define RST_BIT(x) (1 << 4) << x
+#define RST_LVL(x, y) ((x & 0x3) << 8) << (y << 1)
+#define CORE_RESET 0x0
+#define CORE_PORESET 0x1
+#define CLUSTER_RESET 0x2
+#define EAGLE_CORES(x) ((x & 0xf) << 16)
+#define KFC_CORES(x) ((x & 0xf) << 20)
+#define SW_RESET (1 << 2)
+
+#define ENTER_RESET 0x1
+#define EXIT_RESET 0x2
+#define CASCADE_RESET 0x4
+
+#define A15_A15 0x0
+#define A7_A15 0x1
+#define A15_A7 0x2
+
+#define EAGLE 0x0
+#define KFC 0x1
+
+/* Control register bits */
+#define CR_M (1<<0) /* MMU enabled */
+#define CR_A (1<<1) /* Align fault enable */
+#define CR_C (1<<2) /* Data cache */
+#define CR_W (1<<3) /* Write buffer */
+#define CR_Z (1<<11) /* Branch prediction */
+#define CR_I (1<<12) /* Instruction cache */
+#define CR_V (1<<13) /* Vectors */
+#define CR_XP (1<<23) /* Extended page tables */
+#define CR_TRE (1<<28) /* TEX Remap */
+
+/*
+ * Processor modes
+ */
+#define MON_MODE 0x16
+#define SVC_MODE 0x13
+#define HYP_MODE 0x1A
+#define USR_MODE 0x10
+
+/* Timer Bits */
+#define HYP_TIMER_MULT 0xa /* 12MHz * 10 i.e. interrupt every 10ms. Linux uses 12MHz * 10 */
+#define LCL_TIMER_FREQ 0x7f /* Every 128th timer acts as a trigger */
+#define HYP_TIMER_IRQ 0x1a
+#define LCL_TIMER_IRQ 0x1e
+#define TIMER_ENABLE 0x1
+#define TIMER_DISABLE 0x0
+#define TIMER_MASK_IRQ 0x2
+#define TIMER_IRQ_STAT 0x4
+
+/* Trap ids provided in the HSR */
+#define NUM_TRAPS 0x27
+#define TRAP_UNKNOWN 0x0
+#define TRAP_WFE_WFI 0x1
+#define TRAP_CP15_32 0x3
+#define TRAP_CP15_64 0x4
+#define TRAP_CP14_32 0x5
+#define TRAP_CP14_LDC_STC 0x6
+#define TRAP_HCPTR_1 0x7
+#define TRAP_HCPTR_2 0x8
+#define TRAP_JAZELLE 0x9
+#define TRAP_BXJ 0xA
+#define TRAP_CP14_64 0xC
+#define TRAP_HYP_SVC 0x11
+#define TRAP_HVC 0x12
+#define TRAP_HYP_SMC 0x13
+#define TRAP_IABORT 0x20
+#define TRAP_HYP_IABORT 0x21
+#define TRAP_DABORT 0x24
+#define TRAP_HYP_DABORT 0x25
+
+/*
+ * Defines for making SMC calls
+ */
+#define SMC_SEC_INIT 0x0
+#define SMC_SEC_SAVE 0x1
+#define SMC_SEC_SHUTDOWN 0x2
+
+#define MAX_CACHE_LEVELS 0x8
+#define CRN_C0 0x0
+#define CRN_C7 0x7
+#define CRN_C9 0x9
+#define CRN_C15 0xf
+
+/*
+ * Opcode2 definitions in the corresponding cp15 instruction
+ */
+#define MIDR 0x0
+#define CTR 0x1
+#define TCMTR 0x2
+#define TLBTR 0x3
+#define MPIDR 0x5
+#define CCSIDR 0x0
+#define CLIDR 0x1
+#define AIDR 0x4
+#define CSSELR 0x0
+#define DCISW 0x2
+#define DCCSW 0x2
+#define DCCISW 0x2
+
+#define ID_PFR0 0x0
+#define ID_PFR1 0x1
+#define ID_DFR0 0x2
+#define ID_AFR0 0x3
+#define ID_MMFR0 0x4
+#define ID_MMFR1 0x5
+#define ID_MMFR2 0x6
+#define ID_MMFR3 0x7
+#define ID_ISAR0 0x0
+#define ID_ISAR1 0x1
+#define ID_ISAR2 0x2
+#define ID_ISAR3 0x3
+#define ID_ISAR4 0x4
+#define ID_ISAR5 0x5
+
+extern void enable_cci_snoops(unsigned);
+extern void disable_cci_snoops(unsigned);
+extern void switch_cluster(unsigned);
+extern unsigned long long *get_powerdown_stack(unsigned);
+extern void spin_lock(unsigned int *);
+extern void spin_unlock(unsigned int *);
+extern void panic(void);
+extern unsigned get_inbound(void);
+extern unsigned reset_status(unsigned, unsigned, unsigned);
+extern unsigned map_cpuif(unsigned, unsigned);
+extern unsigned get_cpuif(unsigned, unsigned);
+extern unsigned remap_cpuif(unsigned *);
+extern unsigned get_cpuif_mask(unsigned);
+extern unsigned get_cpu_mask(unsigned);
+extern unsigned BL_DV_PAGE$$Base;
+extern unsigned BL_SEC_DV_PAGE$$Base;
+extern unsigned host_cluster;
+extern unsigned switcher;
+
+#define bitindex(x) (31-__builtin_clz(x))
+#define find_first_cpu() 0
+#define write32(addr, val) (*(volatile unsigned int *)(addr) = (val))
+#define read32(addr) (*(volatile unsigned int *)(addr))
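+
+/*
+ * Illustrative examples (hypothetical values): bitindex() returns the index
+ * of the most significant set bit, e.g. bitindex(0x10) == 4, while write32()
+ * and read32() are plain volatile MMIO accessors, e.g.
+ * write32(KFSCB_BASE + SEC_TUBE0 + TUBE_CHAR, 'A').
+ */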
+#endif
diff --git a/big-little/include/traps.h b/big-little/include/traps.h
new file mode 100644
index 0000000..323ba42
--- /dev/null
+++ b/big-little/include/traps.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __TRAPS_H__
+#define __TRAPS_H__
+
+#include "misc.h"
+
+/*
+ * Ignoring the condition field [24:20] for now.
+ */
+#define HSR_ISS_OP2 (0x7 << 17)
+#define HSR_ISS_OP1 (0x7 << 14)
+#define HSR_ISS_CRN (0xf << 10)
+#define HSR_ISS_CRM (0xf << 1)
+#define HSR_ISS_RW (0x1 << 0)
+
+/*
+ * Macro to convert the cp15 instruction info in the HSR
+ * into a unique integer. The integer is used to identify
+ * the handler for that instruction. Format of the integer
+ * is [Op2:Op1:CRn:CRm:RW]
+ */
+#define GET_CP15_OP(x) ((x & HSR_ISS_OP2) >> 5) | ((x & HSR_ISS_OP1) >> 5) | ((x & HSR_ISS_CRN) >> 5) |\
+ (x & HSR_ISS_CRM) | (x & HSR_ISS_RW)
+
+#define MAKE_CP15_OP(op2, op1, crn, crm, rw) ((op2 << 12) | (op1 << 9) | (crn << 5) | (crm << 1) | rw)
+
+#define READ_MIDR MAKE_CP15_OP(0x0, 0x0, 0x0, 0x0, 0x1)
+#define READ_MPIDR MAKE_CP15_OP(0x5, 0x0, 0x0, 0x0, 0x1)
+#define READ_AUXCTRL MAKE_CP15_OP(0x1, 0x0, 0x1, 0x0, 0x1)
+
+#define WRITE_MIDR MAKE_CP15_OP(0x0, 0x0, 0x0, 0x0, 0x0)
+#define WRITE_MPIDR MAKE_CP15_OP(0x5, 0x0, 0x0, 0x0, 0x0)
+#define WRITE_AUXCTRL MAKE_CP15_OP(0x1, 0x0, 0x1, 0x0, 0x0)
+
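+/*
+ * Illustrative example (derived from the encodings above): a trapped
+ * "MRC p15, 0, <Rt>, c0, c0, 0" (a MIDR read) reaches the hypervisor with
+ * Op2 = 0, Op1 = 0, CRn = 0, CRm = 0 and RW = 1 in the HSR, so
+ * GET_CP15_OP(hsr) produces the same value as
+ * MAKE_CP15_OP(0x0, 0x0, 0x0, 0x0, 0x1), i.e. READ_MIDR, and the two can
+ * be compared directly when dispatching the trap.
+ */
+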
+/*
+ * Indices into arrays of registers to which accesses will be
+ * trapped.
+ */
+#define AUXCTRL 0x0
+#define MIDR 0x1
+#define MPIDR 0x2
+#define MAX_REGS 0x10
+
+/*
+ * Indices into array of handlers of the registered traps.
+ * Numbers correspond to the Exception Class field of HSR.
+ */
+#define UNKNOWN 0x0
+#define MRC_MCR_CP15 0x3
+#define MAX_TRAPS 0x25
+
+/*
+ * Structure to hold the registered traps
+ */
+typedef struct tlist {
+ unsigned int hcr;
+ unsigned int hstr;
+} trap_list;
+
+/*
+ * Structure to hold registers to which accesses will be trapped
+ */
+typedef struct rlist {
+ unsigned int reg[MAX_REGS];
+} reg_list;
+
+/*
+ * Structure to hold platform defined trap handlers
+ */
+typedef struct hlist {
+ int (*handle[MAX_TRAPS]) (unsigned int hsr, unsigned int *operand);
+} handler_list;
+
+extern trap_list cp15_trap_list[NUM_CPUS];
+extern reg_list cp15_reg_list[NUM_CPUS];
+extern handler_list plat_handler_list[NUM_CPUS];
+
+#if !DEBUG
+#define printf(...)
+#endif
+#endif /* __TRAPS_H__ */
diff --git a/big-little/include/vgiclib.h b/big-little/include/vgiclib.h
new file mode 100644
index 0000000..869a8df
--- /dev/null
+++ b/big-little/include/vgiclib.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef VGICLIB_H
+#define VGICLIB_H
+
+#include "gic_registers.h"
+
+struct overflowint {
+ /* This is encoded in the value, but speed optimise by splitting out */
+ unsigned int priority;
+ unsigned int value;
+ struct overflowint *next;
+};
+
+struct gic_cpuif {
+ unsigned int status;
+ unsigned int activepris; /* Copies of the state from the VGIC itself */
+ unsigned int elrsr[2]; /* Copies of Empty list register status registers */
+ unsigned int ints[VGIC_LISTENTRIES];
+
+ struct overflowint *overflow; /* List of overflowed interrupts */
+ unsigned int freelist; /* Bitmask of which list entries are in use */
+};
+
+void vgic_init(void);
+void vgic_savestate(unsigned int cpu);
+void vgic_loadstate(unsigned int cpu);
+void vgic_refresh(unsigned int cpu);
+void enqueue_interrupt(unsigned int descr, unsigned int cpu);
+
+#endif /* VGICLIB_H */
diff --git a/big-little/include/virt_helpers.h b/big-little/include/virt_helpers.h
new file mode 100644
index 0000000..34e4d1c
--- /dev/null
+++ b/big-little/include/virt_helpers.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef _VIRT_HELPERS_H_
+#define _VIRT_HELPERS_H_
+
+#include "bakery.h"
+#include "helpers.h"
+#include "misc.h"
+
+/*******************************************************
+ * Export prototypes of the functions which will be used
+ * to save/restore the Non-secure context.
+ *******************************************************/
+
+/*
+ * Misc functions
+ */
+extern unsigned read_sp(unsigned);
+extern unsigned read_lr(unsigned);
+extern unsigned num_secondaries(void);
+extern unsigned *get_sp(unsigned, unsigned);
+
+extern void virt_dead(void);
+extern void smc(unsigned, unsigned);
+extern void dcisw(unsigned);
+extern void dccsw(unsigned);
+extern void dccisw(unsigned);
+extern void write_sp(unsigned, unsigned);
+extern void write_lr(unsigned, unsigned);
+
+/*
+ * V7 functions
+ */
+extern void disable_clean_inv_l1_dcache_v7(void);
+extern void cache_maint_op(unsigned, unsigned);
+extern unsigned get_loc(void);
+extern void disable_coherency(void);
+extern void disable_dcache(void);
+extern void enable_coherency(void);
+extern void enable_dcache(void);
+extern void flush_to_loc(void);
+extern void inv_tlb_all(void);
+extern void inv_bpred_all(void);
+extern void inv_tlb_mva(unsigned *);
+extern void inv_icache_all(void);
+extern void inv_icache_mva_pou(unsigned *);
+extern void inv_dcache_mva_poc(unsigned *);
+extern void cln_dcache_mva_poc(unsigned *);
+extern void cln_dcache_mva_pou(unsigned *);
+
+/*
+ * GIC functions
+ */
+extern void save_gic_interface(unsigned int *pointer,
+ unsigned gic_interface_address);
+extern int save_gic_distributor_private(unsigned int *pointer,
+ unsigned gic_distributor_address);
+extern int save_gic_distributor_shared(unsigned int *pointer,
+ unsigned gic_distributor_address);
+extern void restore_gic_interface(unsigned int *pointer,
+ unsigned gic_interface_address);
+extern void restore_gic_distributor_private(unsigned int *pointer,
+ unsigned gic_distributor_address);
+extern void restore_gic_distributor_shared(unsigned int *pointer,
+ unsigned gic_distributor_address);
+extern void hyp_save(unsigned, unsigned);
+
+/*
+ * Tube functions
+ */
+#if TUBE
+extern void write_trace(bakery_t *, unsigned, char *, unsigned long long,
+ unsigned long long, unsigned long long);
+#else
+#define write_trace(...)
+#endif
+
+#endif /* _VIRT_HELPERS_H_ */
diff --git a/big-little/lib/bakery.c b/big-little/lib/bakery.c
new file mode 100644
index 0000000..bd2547f
--- /dev/null
+++ b/big-little/lib/bakery.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+/*
+ * bakery.c: Lamport's Bakery algorithm for spinlock handling
+ *
+ * Note that the algorithm requires the stack and the bakery struct
+ * to be in Strongly-Ordered memory.
+ */
+
+#include "misc.h"
+#include <string.h>
+#include "bakery.h"
+
+void init_bakery_spinlock(bakery_t * bakery)
+{
+ memset(bakery, 0, sizeof(bakery_t));
+}
+
+void get_bakery_spinlock(unsigned cpuid, bakery_t * bakery)
+{
+ unsigned i, max = 0, my_full_number, his_full_number;
+
+ /* Get a ticket */
+ bakery->entering[cpuid] = TRUE;
+ for (i = 0; i < MAX_CPUS; ++i) {
+ if (bakery->number[i] > max) {
+ max = bakery->number[i];
+ }
+ }
+ ++max;
+ bakery->number[cpuid] = max;
+ bakery->entering[cpuid] = FALSE;
+
+ /* Wait for our turn */
+ my_full_number = (max << 8) + cpuid;
+ for (i = 0; i < MAX_CPUS; ++i) {
+ while (bakery->entering[i]) ; /* Wait */
+ do {
+ his_full_number = bakery->number[i];
+ if (his_full_number) {
+ his_full_number = (his_full_number << 8) + i;
+ }
+ }
+ while (his_full_number && (his_full_number < my_full_number));
+ }
+}
+
+void release_bakery_spinlock(unsigned cpuid, bakery_t * bakery)
+{
+ bakery->number[cpuid] = 0;
+}
diff --git a/big-little/lib/tube.c b/big-little/lib/tube.c
new file mode 100644
index 0000000..2fd6486
--- /dev/null
+++ b/big-little/lib/tube.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "misc.h"
+#include "virt_helpers.h"
+#include "bakery.h"
+
+#if TUBE
+void write_trace(bakery_t *lock,
+ unsigned tube_offset,
+ char *msg,
+ unsigned long long data0,
+ unsigned long long data1,
+ unsigned long long data2)
+{
+ unsigned long long volatile *data = 0x0;
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+
+ get_bakery_spinlock(cpu_id, lock);
+
+ /* Write the 3 double words that the tube supports */
+ data = (unsigned long long volatile *) (KFSCB_BASE + tube_offset + TUBE_DATA0);
+ *data++ = data0;
+ *data++ = data1;
+ *data = data2;
+
+ /* Write the string to the tube. */
+ while (*msg != '\0') {
+ write32(KFSCB_BASE + tube_offset + TUBE_CHAR, (unsigned) *msg);
+ msg++;
+ }
+ write32(KFSCB_BASE + tube_offset + TUBE_CHAR, *msg);
+
+ release_bakery_spinlock(cpu_id, lock);
+
+ return;
+}
+#endif
diff --git a/big-little/lib/uart.c b/big-little/lib/uart.c
new file mode 100644
index 0000000..2d4486a
--- /dev/null
+++ b/big-little/lib/uart.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+/*
+ * uart.c - boot code to output characters on a PL011 uart
+ * Not SMP-safe, so make sure you only call these functions
+ * from one CPU at a time.
+ * Call config_uart first.
+ * Implements fputc() so you can use printf() in your code.
+ */
+
+#include "misc.h"
+#include "hyp_vmmap.h"
+#include "virt_helpers.h"
+
+/* PL011 register offsets from the UART base address */
+#define PL011_DR 0x0
+#define PL011_RSR 0x4
+#define PL011_ECR 0x4
+#define PL011_FR 0x18
+#define PL011_ILPR 0x20
+#define PL011_IBRD 0x24
+#define PL011_FBRD 0x28
+#define PL011_LCRH 0x2C
+#define PL011_CR 0x30
+#define PL011_IFLS 0x34
+#define PL011_IMSC 0x38
+#define PL011_RIS 0x3C
+#define PL011_MIS 0x40
+#define PL011_ICR 0x44
+#define PL011_DMACR 0x48
+
+#define PL011_TXFE 0x80
+#define PL011_TXFF 0x20
+
+static unsigned uart_base = 0;
+
+#define write32(addr, val) (*(volatile unsigned int *)(addr) = (val))
+#define read32(addr) (*(volatile unsigned int *)(addr))
+
+
+void config_uart(void)
+{
+ uart_base = UART1_PHY_BASE;
+ write32(uart_base + PL011_CR, 0);
+ write32(uart_base + PL011_FBRD, 0x01);
+ write32(uart_base + PL011_IBRD, 0x27);
+ write32(uart_base + PL011_LCRH, 0x70);
+ write32(uart_base + PL011_CR, 0xf01); /* TXE|RXE|En|DTR|CTS */
+}
+
+void drain_uart_fifo(void)
+{
+ while (!(read32(uart_base + PL011_FR) & PL011_TXFE))
+ {
+ /* Do nothing */
+ }
+}
+
+static __inline void wait_for_space(void)
+{
+ while ((read32(uart_base + PL011_FR) & PL011_TXFF))
+ {
+ /* Do nothing */
+ }
+}
+
+void output_char(int c)
+{
+ if (c == '\n')
+ {
+ wait_for_space();
+ write32(uart_base + PL011_DR, '\r');
+ }
+ wait_for_space();
+ write32(uart_base + PL011_DR, c);
+}
+
+void output_string(const char *string)
+{
+ int i;
+
+ for (i=0; string[i]; ++i)
+ {
+ output_char(string[i]);
+ }
+}
+
+void hexword(unsigned value)
+{
+ printf(" 0x%8.8x", value);
+ drain_uart_fifo();
+}
+
+typedef struct __FILE
+{
+ int dummy;
+} FILE;
+
+FILE __stdout;
+
+int fputc(int c, FILE *f)
+{
+ output_char(c);
+ return c;
+}
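+
+/*
+ * Illustrative usage sketch (hypothetical boot snippet): config_uart() must
+ * be called once before any output; after that the helpers (and printf, via
+ * fputc above) can be used:
+ *
+ *     config_uart();
+ *     output_string("switcher: UART up\n");
+ *     hexword(read32(KFSCB_BASE + KFS_ID));
+ */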
diff --git a/big-little/lib/virt_events.c b/big-little/lib/virt_events.c
new file mode 100644
index 0000000..b842324
--- /dev/null
+++ b/big-little/lib/virt_events.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "events.h"
+#include "misc.h"
+#include "virt_helpers.h"
+
+/*
+ * cpu ids are used as is when "switcher" is true. In the
+ * "always on" case absolute cpu ids are used i.e 0-7 for
+ * an MPx4+MPx4 configuration.
+ */
+/*
+ * Pick up the event definition from the world that wants
+ * to use them.
+ */
+extern unsigned event[][MAX_EVENTS];
+
+/*
+ * Set the specified event for that cpu.
+ */
+void set_event(unsigned event_id, unsigned cpu_id)
+{
+ event[cpu_id][event_id] = TRUE;
+ dsb();
+ sev();
+ return;
+}
+
+inline unsigned get_event(unsigned event_id, unsigned cpu_id)
+{
+ return event[cpu_id][event_id];
+}
+
+void reset_event(unsigned event_id, unsigned cpu_id)
+{
+ event[cpu_id][event_id] = FALSE;
+ return;
+}
+
+void wait_for_event(unsigned event_id, unsigned cpu_id)
+{
+ while (FALSE == get_event(event_id, cpu_id)) {
+ wfe();
+ }
+
+ return;
+}
+
+/*
+ * Wait for events from each core. It's a little trickier than
+ * waiting for a single event. The event register defined by the
+ * architecture is just a single bit flagging that an event occurred,
+ * not a count of events. If multiple events are sent by the time we
+ * enter wfe(), then each flag variable has to be checked.
+ */
+void wait_for_events(unsigned event_id)
+{
+ unsigned ctr, event_count = 0, num_cpus = 0;
+
+ if (switcher) {
+ num_cpus = num_secondaries() + 1;
+ } else {
+ num_cpus = CLUSTER_CPU_COUNT(host_cluster)
+ + CLUSTER_CPU_COUNT(!host_cluster);
+ }
+
+ do {
+ for (ctr = 0; ctr < num_cpus; ctr++) {
+ if (TRUE == get_event(event_id, ctr)) {
+ event_count++;
+ reset_event(event_id, ctr);
+ }
+ }
+
+ if (event_count != num_cpus)
+ wfe();
+ else
+ break;
+ } while(1);
+
+ return;
+}
+
+void set_events(unsigned event_id)
+{
+ unsigned ctr, num_cpus = 0;
+
+ if (switcher) {
+ num_cpus = num_secondaries() + 1;
+ } else {
+ num_cpus = CLUSTER_CPU_COUNT(host_cluster)
+ + CLUSTER_CPU_COUNT(!host_cluster);
+ }
+
+ for (ctr = 0; ctr < num_cpus; ctr++) {
+ set_event(event_id, ctr);
+ }
+ return;
+}
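The set_event()/wait_for_event() pair above is effectively a SEV/WFE rendezvous between the outbound and inbound clusters. A minimal sketch of how the two sides pair up, using the OB_CONTEXT_DONE event id from events.h (the same flow appears later in ns_context.c):

    /* Outbound cpu: publish that its context has been saved. */
    void outbound_signal(unsigned cpu_id)
    {
            set_event(OB_CONTEXT_DONE, cpu_id);      /* flag := TRUE, dsb(), sev() */
    }

    /* Inbound cpu: block until the counterpart has signalled, then re-arm. */
    void inbound_wait(unsigned cpu_id)
    {
            wait_for_event(OB_CONTEXT_DONE, cpu_id); /* wfe() until the flag is TRUE */
            reset_event(OB_CONTEXT_DONE, cpu_id);    /* clear it for the next switch */
    }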
diff --git a/big-little/lib/virt_helpers.s b/big-little/lib/virt_helpers.s
new file mode 100644
index 0000000..0306e06
--- /dev/null
+++ b/big-little/lib/virt_helpers.s
@@ -0,0 +1,442 @@
+ ;
+ ; Copyright (c) 2011, ARM Limited. All rights reserved.
+ ;
+ ; Redistribution and use in source and binary forms, with
+ ; or without modification, are permitted provided that the
+ ; following conditions are met:
+ ;
+ ; Redistributions of source code must retain the above
+ ; copyright notice, this list of conditions and the
+ ; following disclaimer.
+ ;
+ ; Redistributions in binary form must reproduce the
+ ; above copyright notice, this list of conditions and
+ ; the following disclaimer in the documentation
+ ; and/or other materials provided with the distribution.
+ ;
+ ; Neither the name of ARM nor the names of its
+ ; contributors may be used to endorse or promote products
+ ; derived from this software without specific prior written
+ ; permission.
+ ;
+
+
+ IMPORT read_actlr
+ IMPORT write_actlr
+
+ EXPORT smc
+ EXPORT dcisw
+ EXPORT dccsw
+ EXPORT dccisw
+ EXPORT read_lr
+ EXPORT read_sp
+ EXPORT write_sp
+ EXPORT write_lr
+ EXPORT panic
+ EXPORT spin_lock
+ EXPORT spin_trylock
+ EXPORT spin_unlock
+ EXPORT virt_memset
+ EXPORT hyp_save
+ EXPORT num_secondaries
+ EXPORT virt_dead
+ EXPORT get_sp
+ EXPORT disable_coherency
+ EXPORT enable_coherency
+ EXPORT inv_tlb_all
+ EXPORT inv_tlb_mva
+ EXPORT inv_icache_all
+ EXPORT inv_bpred_is
+ EXPORT inv_bpred_all
+ EXPORT inv_icache_mva_pou
+ EXPORT inv_dcache_mva_poc
+ EXPORT cln_dcache_mva_pou
+ EXPORT cln_dcache_mva_poc
+ EXPORT cache_maint_op
+
+; Cache maintenance op types
+INV EQU 0x0
+CLN EQU 0x1
+CLN_INV EQU 0x2
+
+ AREA |.text|, CODE
+
+read_lr FUNCTION
+ ; Save r1
+ push {r1}
+ and r0, r0, #0x1f
+ ; Read the current cpsr
+ mrs r1, cpsr
+ and r1, r1, #0x1f
+ ; Check if the desired lr is of the current mode
+ cmp r0, r1
+ moveq r0, LR
+ beq read_lr_out
+ ; Check if desired lr is of user mode
+ cmp r0, #0x10
+ mrseq r0, LR_usr
+ beq read_lr_out
+ ; Check if desired lr is of supervisor mode
+ cmp r0, #0x13
+ mrseq r0, LR_svc
+read_lr_out
+ pop {r1}
+ bx lr
+ ENDFUNC
+
+write_lr FUNCTION
+ ; Save r2
+ push {r2}
+ and r0, r0, #0x1f
+ ; Read the current cpsr
+ mrs r2, cpsr
+ and r2, r2, #0x1f
+ ; Check if the lr is of the current mode
+ cmp r0, r2
+ moveq LR, r1
+ beq write_lr_out
+ ; Check if the lr is of user mode
+ cmp r0, #0x10
+ msreq LR_usr, r1
+ beq write_lr_out
+ ; Check if the lr is of supervisor mode
+ cmp r0, #0x13
+ msreq LR_svc, r1
+write_lr_out
+ pop {r2}
+ bx lr
+ ENDFUNC
+
+read_sp FUNCTION
+ ; Save r1
+ push {r1}
+ and r0, r0, #0x1f
+ ; Read the current cpsr
+ mrs r1, cpsr
+ and r1, r1, #0x1f
+ ; Check if the desired sp is of the current mode
+ cmp r0, r1
+ moveq r0, SP
+ beq read_sp_out
+ ; Check if desired sp is of user mode
+ cmp r0, #0x10
+ mrseq r0, SP_usr
+ beq read_sp_out
+ ; Check if desired sp is of supervisor mode
+ cmp r0, #0x13
+ mrseq r0, SP_svc
+ beq read_sp_out
+ ; Check if desired sp is of irq mode
+ cmp r0, #0x12
+ mrseq r0, SP_irq
+ beq read_sp_out
+ ; Check if desired sp is of hyp mode
+ cmp r0, #0x1a
+ mrseq r0, SP_hyp
+ beq read_sp_out
+ ; Check if desired sp is of monitor mode
+ cmp r0, #0x16
+ mrseq r0, SP_mon
+read_sp_out
+ pop {r1}
+ bx lr
+ ENDFUNC
+
+write_sp FUNCTION
+ ; Save r2
+ push {r2}
+ and r0, r0, #0x1f
+ ; Read the current cpsr
+ mrs r2, cpsr
+ and r2, r2, #0x1f
+ ; Check if the sp is of the current mode
+ cmp r0, r2
+ moveq SP, r1
+ beq write_sp_out
+ ; Check if the sp is of user mode
+ cmp r0, #0x10
+ msreq SP_usr, r1
+ beq write_sp_out
+ ; Check if the sp is of supervisor mode
+ cmp r0, #0x13
+ msreq SP_svc, r1
+ beq write_sp_out
+ ; Check if the sp is of irq mode
+ cmp r0, #0x12
+ msreq SP_irq, r1
+ beq write_sp_out
+ ; Check if the sp is of hyp mode
+ cmp r0, #0x1a
+ msreq SP_hyp, r1
+ beq write_sp_out
+ ; Check if the sp is of monitor mode
+ cmp r0, #0x16
+ msreq SP_mon, r1
+write_sp_out
+ pop {r2}
+ bx lr
+ ENDFUNC
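read_lr/write_lr and read_sp/write_sp above take a CPSR mode number and access the corresponding banked register via the MRS/MSR banked-register forms. A C-side usage sketch, with the mode encodings the assembly tests (0x10 USR, 0x12 IRQ, 0x13 SVC, 0x16 MON, 0x1a HYP); the prototypes are assumed:

    extern unsigned read_lr(unsigned mode);
    extern void write_lr(unsigned mode, unsigned value);
    extern unsigned read_sp(unsigned mode);
    extern void write_sp(unsigned mode, unsigned value);

    /* Snapshot the SVC-mode banked registers into a caller-provided buffer. */
    void copy_svc_banked_regs(unsigned *ctx)
    {
            ctx[0] = read_lr(0x13);   /* LR_svc */
            ctx[1] = read_sp(0x13);   /* SP_svc */
    }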
+
+ ALIGN 4
+
+;--------------------------------------------------------
+; spin_lock
+;--------------------------------------------------------
+spin_lock FUNCTION
+ MOV r2, #1
+sl_tryloop
+ LDREX r1, [r0]
+ CMP r1, #0
+ STREXEQ r1, r2, [r0]
+ CMPEQ r1, #0
+ BNE sl_tryloop
+ MCR p15, 0, r0, c7, c10, 4
+ bx lr
+ ENDFUNC
+
+;--------------------------------------------------------
+; spin_trylock
+;--------------------------------------------------------
+spin_trylock FUNCTION
+ MOV r2, #1
+ LDREX r1, [r0]
+ CMP r1, #0
+ STREXEQ r1, r2, [r0]
+ MOV r0, r1
+ MCR p15, 0, r0, c7, c10, 4
+ bx lr
+ ENDFUNC
+
+ ALIGN 4
+
+;--------------------------------------------------------
+; spin_unlock
+;--------------------------------------------------------
+spin_unlock FUNCTION
+ MOV r1, #0
+ STR r1, [r0]
+ MCR p15, 0, r0, c7, c10, 4
+ bx lr
+ ENDFUNC
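spin_lock/spin_trylock/spin_unlock above implement a LDREX/STREX test-and-set lock; the MCR p15, 0, Rn, c7, c10, 4 after each operation is the legacy CP15 encoding of DSB, making the lock update visible before the caller proceeds. A C-side usage sketch (the exact prototypes live in virt_helpers.h; the ones below are assumed):

    extern void spin_lock(unsigned *lock);
    extern unsigned spin_trylock(unsigned *lock);   /* returns 0 when the lock was taken */
    extern void spin_unlock(unsigned *lock);

    static unsigned kfscb_lock;   /* e.g. lock_ib_kfscb in secure_resets.c */

    void guarded_rmw(void)
    {
            spin_lock(&kfscb_lock);
            /* ... read-modify-write a shared KFSCB register ... */
            spin_unlock(&kfscb_lock);
    }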
+
+ ALIGN 4
+
+;--------------------------------------------------------
+; panic
+;--------------------------------------------------------
+panic FUNCTION
+ isb
+ dsb
+ CPSID aif
+ B panic
+ ENDFUNC
+
+;--------------------------------------------------------------
+; Utility function that takes a pointer (r0), stack size (r1).
+; It returns the pointer to the stack offset for the asked cpu
+;--------------------------------------------------------------
+get_sp FUNCTION
+ ldr r2, =0x2c001800
+ ldr r2, [r2]
+ and r2, r2, #0xff
+ clz r2, r2
+ mov r3, #32
+ sub r2, r3, r2
+ mul r2, r2, r1
+ add r0, r0, r2
+ bx lr
+ ENDFUNC
+
+disable_coherency FUNCTION
+ push {lr}
+ bl read_actlr
+ bic r0, r0, #0x40
+ bl write_actlr
+ dsb
+ isb
+ pop {lr}
+ bx lr
+ ENDFUNC
+
+enable_coherency FUNCTION
+ push {lr}
+ bl read_actlr
+ orr r0, r0, #0x40
+ bl write_actlr
+ dsb
+ isb
+ pop {lr}
+ bx lr
+ ENDFUNC
+
+inv_bpred_is FUNCTION
+ mcr p15, 0, r0, c7, c1, 6
+ bx lr
+ ENDFUNC
+
+inv_bpred_all FUNCTION
+ mcr p15, 0, r0, c7, c5, 6
+ bx lr
+ ENDFUNC
+
+inv_tlb_all FUNCTION
+ mcr p15, 0, r0, c8, c7, 0
+ dsb
+ isb
+ bx lr
+ ENDFUNC
+
+inv_tlb_mva FUNCTION
+ mcr p15, 0, r0, c8, c7, 1
+ dsb
+ isb
+ bx lr
+ ENDFUNC
+
+inv_icache_all FUNCTION
+ mcr p15, 0, r10, c7, c5, 0 ; invalidate I cache
+ dsb
+ isb
+ bx lr
+ ENDFUNC
+
+inv_icache_mva_pou FUNCTION
+ mcr p15, 0, r0, c7, c5, 1
+ dsb
+ isb
+ bx lr
+ ENDFUNC
+
+cln_dcache_mva_pou FUNCTION
+ mcr p15, 0, r0, c7, c11, 1
+ dsb
+ isb
+ bx lr
+ ENDFUNC
+
+cln_dcache_mva_poc FUNCTION
+ mcr p15, 0, r0, c7, c10, 1
+ dsb
+ isb
+ bx lr
+ ENDFUNC
+
+inv_dcache_mva_poc FUNCTION
+ mcr p15, 0, r0, c7, c6, 1
+ dsb
+ isb
+ bx lr
+ ENDFUNC
+
+ ; Clean/Invalidate/Clean and invalidate a specified cache level.
+ ; Ignore if the level does not exist.
+cache_maint_op FUNCTION
+ push {r4-r11}
+ dsb
+ lsl r10, r0, #1 ; start clean at specified cache level
+ mrc p15, 1, r0, c0, c0, 1 ; read clidr
+10
+ add r2, r10, r10, lsr #1 ; work out 3x current cache level
+ mov r3, r0, lsr r2 ; extract cache type bits from clidr
+ and r3, r3, #7 ; mask off the bits for the current cache only
+ cmp r3, #2 ; see what cache we have at this level
+ blt %f50 ; skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 ; select current cache level in cssr
+ isb ; isb to sync the new cssr & csidr
+ mrc p15, 1, r3, c0, c0, 0 ; read the new csidr
+ and r2, r3, #7 ; extract the length of the cache lines
+ add r2, r2, #4 ; add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r3, lsr #3 ; extract the maximum way number
+ clz r5, r4 ; find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r3, lsr #13 ; extract max number of the index size
+20
+ mov r9, r4 ; create working copy of max way size
+30
+ orr r11, r10, r9, lsl r5 ; factor way and cache number into r11
+ lsl r6, r9, r5
+ orr r11, r10, r6 ; factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 ; factor index number into r11
+ lsl r6, r7, r2
+ orr r11, r11, r6 ; factor index number into r11
+ cmp r1, #INV
+ mcreq p15, 0, r11, c7, c6, 2 ; invalidate by set/way
+ beq %f40
+ cmp r1, #CLN
+ mcreq p15, 0, r11, c7, c10, 2 ; clean by set/way
+ beq %f40
+ mcr p15, 0, r11, c7, c14, 2 ; clean & invalidate by set/way
+; nop ; nop
+40
+ subs r9, r9, #1 ; decrement the way
+ bge %b30
+ subs r7, r7, #1 ; decrement the index
+ bge %b20
+50
+ mov r10, #0 ; switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 ; select current cache level in cssr
+ dsb
+ isb
+ pop {r4-r11}
+ bx lr
+ ENDFUNC
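cache_maint_op above is the standard CLIDR/CCSIDR set/way walk: for the requested level it issues DCISW, DCCSW or DCCISW for every set and way, with the level in bits [3:1] of the operand, the way placed at the position found by CLZ of the maximum way number, and the set at the line-length offset. A sketch of the C-side calls (these exact calls appear later in powerdown_cluster(), with L1/L2 and CLN_INV coming from the C headers):

    extern void cache_maint_op(unsigned level, unsigned op);

    void flush_local_caches(void)
    {
            cache_maint_op(L1, CLN_INV);   /* level 0: clean & invalidate by set/way */
            cache_maint_op(L2, CLN_INV);   /* level 1: clean & invalidate by set/way */
    }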
+
+smc FUNCTION
+ push {r4-r12, lr}
+ smc #0
+ pop {r4-r12, pc}
+ ENDFUNC
+
+hyp_save FUNCTION
+ hvc #2
+ bx lr
+ ENDFUNC
+
+virt_memcpy FUNCTION
+ cmp r2, #0
+ bxeq lr
+0 ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ subs r2, #1
+ bne %b0
+ bx lr
+ ENDFUNC
+
+virt_memset FUNCTION
+ cmp r2, #0
+ bxeq lr
+0 strb r1, [r0], #1
+ subs r2, #1
+ bne %b0
+ bx lr
+ ENDFUNC
+
+virt_dead FUNCTION
+ b virt_dead
+ ENDFUNC
+
+num_secondaries FUNCTION
+ mrc p15, 1, r0, c9, c0, 2
+ lsr r0, r0, #24
+ and r0, r0, #3
+ bx lr
+ ENDFUNC
+
+dcisw FUNCTION
+ mcr p15, 0, r0, c7, c6, 2
+ bx lr
+ ENDFUNC
+
+dccsw FUNCTION
+ mcr p15, 0, r0, c7, c10, 2
+ bx lr
+ ENDFUNC
+
+dccisw FUNCTION
+ mcr p15, 0, r0, c7, c14, 2
+ bx lr
+ ENDFUNC
+
+
+ END
diff --git a/big-little/secure_world/events.c b/big-little/secure_world/events.c
new file mode 100644
index 0000000..d5c5e54
--- /dev/null
+++ b/big-little/secure_world/events.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "events.h"
+
+/*
+ * Set the specified event for that cpu.
+ */
+void _set_event(unsigned event_id, unsigned cpu_id, unsigned event_type)
+{
+ dsb();
+ secure_event[cpu_id][event_id] = TRUE;
+ dsb();
+ sev();
+ return;
+}
+
+inline unsigned _get_event(unsigned event_id, unsigned cpu_id)
+{
+ return secure_event[cpu_id][event_id];
+}
+
+void _reset_event(unsigned event_id, unsigned cpu_id, unsigned event_type)
+{
+ dsb();
+ secure_event[cpu_id][event_id] = FALSE;
+ dsb();
+ return;
+}
+
+void _wait_for_event(unsigned event_id, unsigned cpu_id, unsigned event_type)
+{
+ dsb();
+ do {
+ wfe();
+ isb();
+ dsb();
+ } while (FALSE == _get_event(event_id, cpu_id));
+
+ return;
+}
+
+/*
+ * Wait for events from each core. It's a little trickier than
+ * waiting for a single event. The architectural event register
+ * is just a single bit that flags an event rather than counting
+ * the number of events. If multiple events have been sent by
+ * the time we enter wfe() then each flag variable must be
+ * checked.
+ */
+void _wait_for_events(unsigned event_id, unsigned event_type)
+{
+ unsigned ctr, event_count = 0, num_cpus = num_secondaries() + 1;
+
+ dsb();
+ do {
+ wfe();
+ for (ctr = 0; ctr < num_cpus; ctr++) {
+ if (TRUE == _get_event(event_id, ctr)) {
+ event_count++;
+ _reset_event(event_id, ctr, event_type);
+ }
+ }
+ } while (event_count != num_cpus);
+
+ return;
+}
+
+void _set_events(unsigned event_id, unsigned event_type)
+{
+ unsigned ctr;
+ for (ctr = 0; ctr < (num_secondaries() + 1); ctr++) {
+ _set_event(event_id, ctr, event_type);
+ }
+ return;
+}
diff --git a/big-little/secure_world/flat_pagetable.s b/big-little/secure_world/flat_pagetable.s
new file mode 100644
index 0000000..38762e9
--- /dev/null
+++ b/big-little/secure_world/flat_pagetable.s
@@ -0,0 +1,119 @@
+ ;
+ ; Copyright (c) 2011, ARM Limited. All rights reserved.
+ ;
+ ; Redistribution and use in source and binary forms, with
+ ; or without modification, are permitted provided that the
+ ; following conditions are met:
+ ;
+ ; Redistributions of source code must retain the above
+ ; copyright notice, this list of conditions and the
+ ; following disclaimer.
+ ;
+ ; Redistributions in binary form must reproduce the
+ ; above copyright notice, this list of conditions and
+ ; the following disclaimer in the documentation
+ ; and/or other materials provided with the distribution.
+ ;
+ ; Neither the name of ARM nor the names of its
+ ; contributors may be used to endorse or promote products
+ ; derived from this software without specific prior written
+ ; permission.
+ ;
+
+ PRESERVE8
+ AREA PageTable, DATA, READONLY, ALIGN=14
+ EXPORT flat_pagetables
+ GBLL CONFIG_SMP_CPU3_AMP
+CONFIG_SMP_CPU3_AMP SETL {FALSE}
+
+; Definitions for section descriptors
+NGLOBAL EQU (1<<17)
+SHARED EQU (1<<16)
+APX EQU (1<<15)
+TEX1 EQU (1<<12)
+TEX2 EQU (1<<13)
+TEX3 EQU (1<<14)
+AP0 EQU (1<<10)
+AP1 EQU (1<<11)
+PARITY EQU (1<<9)
+XN EQU (1<<4)
+CACHE EQU (1<<3)
+BUFFER EQU (1<<2)
+SECTION EQU 2
+SECURITY EQU 0
+
+; Select WBWA for both Inner and Outer cache
+MEMORY EQU (TEX1 :OR: CACHE :OR: BUFFER :OR: SECTION :OR: AP0 :OR: AP1 :OR: SECURITY)
+S_RO_MEMORY EQU (TEX1 :OR: CACHE :OR: BUFFER :OR: SECTION :OR: AP0 :OR: AP1 :OR: APX)
+S_RW_MEMORY EQU (TEX1 :OR: CACHE :OR: BUFFER :OR: SECTION :OR: AP0 :OR: AP1)
+; Select WBWA Inner cache, WBnWA Outer cache
+;MEMORY EQU (TEX3 | TEX2 | TEX1 | BUFFER | SECTION | AP0 | AP1 | SECURITY)
+
+NC_MEMORY EQU (TEX1 :OR: SECTION :OR: AP0 :OR: AP1 :OR: SECURITY)
+SO_MEMORY EQU (SHARED :OR: SECTION :OR: AP0 :OR: AP1 :OR: SECURITY)
+
+; *Don't* mark device accesses as nonsecure, or things like secure-side GIC config won't work...
+DEVICE EQU (BUFFER :OR: SHARED :OR: SECTION :OR: AP0 :OR: AP1 :OR: XN )
+
+NO_MEMORY EQU (SECTION)
+SHARED_MEMORY EQU (MEMORY :OR: SHARED)
+SHARED_S_RO_MEMORY EQU (S_RO_MEMORY :OR: SHARED)
+SHARED_S_RW_MEMORY EQU (S_RW_MEMORY :OR: SHARED)
+SHARED_NC_MEMORY EQU (NC_MEMORY :OR: SHARED)
+SHARED_SO_MEMORY EQU (SO_MEMORY :OR: SHARED)
+SHARED_DEVICE EQU (DEVICE :OR: SHARED)
+
+; first-level descriptors - all of them are 1MB sections
+
+flat_pagetables
+ GBLA count16
+ GBLA ramstart
+
+count16 SETA 0
+ramstart SETA 0
+
+; NOT FOR RELEASE
+ WHILE count16 < ramstart+0x40
+ ; 0-64MB Secure ROM/NOR Flash
+ DCD (count16<<20) :OR: SHARED_DEVICE
+count16 SETA count16 + 1
+ WEND
+
+ WHILE count16 < ramstart+0x80
+ ; 64-128MB Secure RAM
+ DCD (count16<<20) :OR: SHARED_S_RW_MEMORY
+count16 SETA count16 + 1
+ WEND
+
+ WHILE count16 < ramstart+0x800
+ ; 128-2048MB Peripheral space
+ DCD (count16<<20) :OR: SHARED_DEVICE
+count16 SETA count16 + 1
+ WEND
+
+ WHILE count16 < ramstart+0x810
+ ; 0-16MB Shared Memory
+ DCD (count16<<20) :OR: SHARED_MEMORY
+count16 SETA count16 + 1
+ WEND
+
+ WHILE count16 < ramstart+0x81f
+ ; 16-31MB Strongly Ordered
+ DCD (count16<<20) :OR: SHARED_SO_MEMORY
+count16 SETA count16 + 1
+ WEND
+
+ WHILE count16 < ramstart+0x820
+ ; 31-32MB Shared Noncached Normal Memory
+ DCD (count16<<20) :OR: SHARED_NC_MEMORY
+count16 SETA count16 + 1
+ WEND
+
+ WHILE count16 < ramstart+0x1000
+ ; rest of memory is RAM
+ DCD (count16<<20) :OR: SHARED_MEMORY
+count16 SETA count16 + 1
+ WEND
+
+ END
+
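Each DCD in the table above emits a first-level 1MB section descriptor: the section base address in bits [31:20] ORed with the attribute word built from the EQUs at the top of the file. Because the mapping is flat, the section index doubles as the physical megabyte. A hedged C rendering of the same composition (names invented for illustration):

    /* Mirrors the "DCD (count16 << 20) :OR: attrs" lines above. */
    static void fill_flat_sections(unsigned *ttb, unsigned attrs,
                                   unsigned first_mb, unsigned last_mb)
    {
            unsigned mb;

            for (mb = first_mb; mb < last_mb; mb++)
                    ttb[mb] = (mb << 20) | attrs;   /* e.g. SHARED_DEVICE or SHARED_MEMORY */
    }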
diff --git a/big-little/secure_world/monmode_vectors.s b/big-little/secure_world/monmode_vectors.s
new file mode 100644
index 0000000..14a416f
--- /dev/null
+++ b/big-little/secure_world/monmode_vectors.s
@@ -0,0 +1,391 @@
+ ;
+ ; Copyright (c) 2011, ARM Limited. All rights reserved.
+ ;
+ ; Redistribution and use in source and binary forms, with
+ ; or without modification, are permitted provided that the
+ ; following conditions are met:
+ ;
+ ; Redistributions of source code must retain the above
+ ; copyright notice, this list of conditions and the
+ ; following disclaimer.
+ ;
+ ; Redistributions in binary form must reproduce the
+ ; above copyright notice, this list of conditions and
+ ; the following disclaimer in the documentation
+ ; and/or other materials provided with the distribution.
+ ;
+ ; Neither the name of ARM nor the names of its
+ ; contributors may be used to endorse or promote products
+ ; derived from this software without specific prior written
+ ; permission.
+ ;
+
+
+ AREA |monmode_vectors|, CODE, ALIGN=5
+ PRESERVE8
+
+SMC_SEC_INIT EQU 0x0
+SMC_SEC_SAVE EQU 0x1
+SMC_SEC_SHUTDOWN EQU 0x2
+L1 EQU 0x0
+L2 EQU 0x1
+INV EQU 0x0
+CLN EQU 0x1
+CLN_INV EQU 0x2
+CR_M EQU (1<<0)
+CR_C EQU (1<<2)
+CR_I EQU (1<<12)
+CR_Z EQU (1<<11)
+CR_U EQU (1<<22)
+CR_TRE EQU (1<<28)
+SCR_NS EQU 0x01
+PT_IRGN EQU (1<<0)
+PT_RGN EQU (1<<3)
+PT_SH EQU (1<<1)
+PT_NOS EQU (1<<5)
+TTBR0_PROP EQU (PT_NOS :OR: PT_SH :OR: PT_RGN :OR: PT_IRGN)
+SO_MEM EQU 0x0
+DV_MEM EQU 0x1
+NM_MEM EQU 0x2
+I_SH EQU 0x1
+SH EQU 0x1
+PRRR_TR0 EQU (SO_MEM<<0)
+PRRR_TR1 EQU (DV_MEM<<2)
+PRRR_TR4 EQU (NM_MEM<<8)
+PRRR_TR7 EQU (NM_MEM<<14)
+PRRR_DS1 EQU (SH<<17)
+PRRR_NS1 EQU (SH<<19)
+PRRR_NOS1 EQU (I_SH<<25)
+PRRR_NOS4 EQU (I_SH<<28)
+PRRR_NOS7 EQU (I_SH<<31)
+NC EQU 0x0
+WBWA EQU 0x1
+NMRR_OR4 EQU (NC<<24)
+NMRR_OR7 EQU (WBWA<<30)
+NMRR_IR4 EQU (NC<<8)
+NMRR_IR7 EQU (WBWA<<14)
+
+; ==============================================================================
+; These should be the same the defines in misc.h
+; ==============================================================================
+MAX_CLUSTERS EQU 2
+MAX_CPUS EQU 8
+STACK_SIZE EQU (96 << 2)
+
+; ==============================================================================
+; Simple vector table
+; ==============================================================================
+ IMPORT ns_entry_ptr
+ IMPORT secure_context_save
+ IMPORT enable_caches
+ IMPORT inv_icache_all
+ IMPORT flat_pagetables
+ IMPORT read_sctlr
+ IMPORT write_sctlr
+ IMPORT read_ttbr0
+ IMPORT write_ttbr0
+ IMPORT inv_tlb_all
+ IMPORT inv_bpred_all
+ IMPORT write_dacr
+ IMPORT write_prrr
+ IMPORT write_nmrr
+ IMPORT get_sp
+ IMPORT secure_context_restore
+ IMPORT powerdown_cluster
+ IMPORT get_powerdown_stack
+ IMPORT wfi
+ IMPORT read_cpuid
+ IMPORT add_dv_page
+ EXPORT monmode_vector_table
+ EXPORT warm_reset
+
+ ; ----------------------------------------------------
+ ; Macro to initialise MMU. Corrupts 'r0'
+ ; ----------------------------------------------------
+ MACRO
+ setup_mmu $r1, $r2
+ MOV $r1, #0x5555
+ MOVT $r1, #0x5555
+ ; Enable our page tables if not
+ LDR r0, =flat_pagetables
+ ORR r0, #TTBR0_PROP
+ ; Write TTBR0
+ MCR p15, 0, r0, c2, c0, 0
+ ; Write DACR
+ MCR p15, 0, $r1, c3, c0, 0
+
+ ; Enable the remap registers to treat OSH memory as ISH memory
+ MOV $r1, #PRRR_TR0
+ ORR $r1, #PRRR_TR1
+ ORR $r1, #PRRR_TR4
+ ORR $r1, #PRRR_TR7
+ ORR $r1, #PRRR_NOS1
+ ORR $r1, #PRRR_NOS4
+ ORR $r1, #PRRR_NOS7
+ ORR $r1, #PRRR_NS1
+ ORR $r1, #PRRR_DS1
+
+ MOV $r2, #NMRR_IR4
+ ORR $r2, #NMRR_IR7
+ ORR $r2, #NMRR_OR4
+ ORR $r2, #NMRR_OR7
+
+ MCR p15, 0, $r1, c10, c2, 0
+ MCR p15, 0, $r2, c10, c2, 1
+
+ ; Enable Dcache, TEX Remap & MMU
+ MRC p15, 0, r0, c1, c0, 0
+ ORR r0, #CR_M
+ ORR r0, #CR_C
+ ORR r0, #CR_TRE
+ MCR p15, 0, r0, c1, c0, 0
+ DSB
+ ISB
+ MEND
+
+ ; ----------------------------------------------------
+ ; Macro to setup secure stacks, Corrupts 'r0-r3'
+ ; ----------------------------------------------------
+ MACRO
+ setup_stack
+ LDR r0, =secure_stacks
+ MOV r1, #STACK_SIZE
+ BL get_sp
+ MOV sp, r0
+ MEND
+
+ ALIGN 32
+monmode_vector_table
+monmode_reset_vec
+ B monmode_reset_vec
+monmode_undef_vec
+ B monmode_undef_vec
+monmode_smc_vec
+ B do_smc
+monmode_pabort_vec
+ B monmode_pabort_vec
+monmode_dabort_vec
+ B monmode_dabort_vec
+monmode_unused_vec
+ B monmode_unused_vec
+monmode_irq_vec
+ B monmode_irq_vec
+monmode_fiq_vec
+ B monmode_fiq_vec
+
+
+ ; SMC handler. Currently accepts three types of calls:
+ ; 1. Init: Sets up stack, mmu, caches & coherency
+ ; 2. Context Save: Saves the secure world context
+ ; 3. Powerdown: Cleans the caches and powers down the cluster
+ ; Also assumes the availability of r4-r7
+do_smc FUNCTION
+ ; Switch to secure banked registers
+ MRC p15, 0, r2, c1, c1, 0
+ BIC r2, #SCR_NS
+ MCR p15, 0, r2, c1, c1, 0
+ ISB
+
+ ; Check if we are being called to setup the world
+ CMP r0, #SMC_SEC_INIT
+ BEQ setup_secure
+
+ CMP r0, #SMC_SEC_SAVE
+ BEQ save_secure
+
+ CMP r0, #SMC_SEC_SHUTDOWN
+ BEQ shutdown_cluster
+
+smc_done
+ ; Return to non-secure banked registers
+ MRC p15, 0, r0, c1, c1, 0
+ ORR r0, #SCR_NS
+ MCR p15, 0, r0, c1, c1, 0
+ ISB
+ ERET
+ ENDFUNC
+
+shutdown_cluster
+ BL read_cpuid
+ BL get_powerdown_stack
+ MOV sp, r0
+ BL powerdown_cluster
+enter_wfi
+ BL wfi
+ B enter_wfi
+
+save_secure
+ PUSH {lr}
+ MOV r0, r1
+ BL secure_context_save
+ POP {lr}
+ B smc_done
+
+setup_secure
+ ; Save the LR
+ MOV r4, lr
+
+ ; Turn on the I cache, branch predictor and alignment checks
+ BL read_sctlr
+ ORR r0, #CR_I
+ ORR r0, #CR_U
+ ORR r0, #CR_Z
+ BL write_sctlr
+ dsb
+ isb
+
+ setup_stack
+
+ ; ----------------------------------------------------
+ ; Safely turn on caches
+ ; TODO: Expensive usage of stacks as we are executing
+ ; out of SO memory. Done only once so can live with it
+ ; ----------------------------------------------------
+ BL enable_caches
+
+ ; ----------------------------------------------------
+ ; Add a page backed by device memory for locks & stacks
+ ; ----------------------------------------------------
+ LDR r0, =flat_pagetables
+ BL add_dv_page
+ setup_mmu r1, r2
+
+ ; Restore LR
+ MOV lr, r4
+ B smc_done
+
+warm_reset FUNCTION
+ ; ----------------------------------------------------
+ ; Start the SO load of the pagetables asap
+ ; ----------------------------------------------------
+ LDR r4, =flat_pagetables
+
+ ; ----------------------------------------------------
+ ; Enable I, C, Z, U bits in the SCTLR and SMP bit in
+ ; the ACTLR right after reset
+ ; ----------------------------------------------------
+ MRC p15, 0, r0, c1, c0, 0
+ ORR r0, r0, #CR_I
+ ORR r0, r0, #CR_U
+ ORR r0, r0, #CR_Z
+ ORR r0, r0, #CR_C
+ MCR p15, 0, r0, c1, c0, 0
+ MRC p15, 0, r1, c1, c0, 1
+ ORR r1, r1, #0x40
+ MCR p15, 0, r1, c1, c0, 1
+ ISB
+
+ ; ----------------------------------------------------
+ ; Enable the MMU even though CCI snoops have not been
+ ; enabled. Should not be a problem as we will not
+ ; access any inter-cluster data till we do so
+ ; ----------------------------------------------------
+ MOV r2, #0x5555
+ MOVT r2, #0x5555
+ ; Enable our page tables if not
+ ORR r4, #TTBR0_PROP
+ ; Write TTBR0
+ MCR p15, 0, r4, c2, c0, 0
+ ; Write DACR
+ MCR p15, 0, r2, c3, c0, 0
+
+ ; Enable the remap registers to treat OSH memory as ISH memory
+ MOV r2, #PRRR_TR0
+ ORR r2, #PRRR_TR1
+ ORR r2, #PRRR_TR4
+ ORR r2, #PRRR_TR7
+ ORR r2, #PRRR_NOS1
+ ORR r2, #PRRR_NOS4
+ ORR r2, #PRRR_NOS7
+ ORR r2, #PRRR_NS1
+ ORR r2, #PRRR_DS1
+ MOV r3, #NMRR_IR4
+ ORR r3, #NMRR_IR7
+ ORR r3, #NMRR_OR4
+ ORR r3, #NMRR_OR7
+ MCR p15, 0, r2, c10, c2, 0
+ MCR p15, 0, r3, c10, c2, 1
+
+ ; Enable Dcache, TEX Remap & MMU
+ MRC p15, 0, r0, c1, c0, 0
+ ORR r0, #CR_M
+ ORR r0, #CR_C
+ ORR r0, #CR_TRE
+ MCR p15, 0, r0, c1, c0, 0
+ ISB
+
+ ; ----------------------------------------------------
+ ; Try Preloading the literal pools before they are
+ ; accessed.
+ ; ----------------------------------------------------
+ ADR r4, warm_reset_ltrls
+ PLD [r4]
+ PLD warm_reset_ltrls
+ LDR r6, =secure_stacks
+
+ ; ----------------------------------------------------
+ ; Safely turn on CCI snoops
+ ; ----------------------------------------------------
+ MOV r4, #0x0
+ MOVT r4, #0x2c09
+ MRC p15, 0, r0, c0, c0, 5
+ UBFX r1, r0, #0, #8
+ UBFX r2, r0, #8, #8
+ CMP r1, #0
+ BNE cci_snoop_status
+ MOV r3, #3
+ CMP r2, #0
+ BEQ a15_snoops
+ MOV r5, #0x5000
+ STR r3, [r4, r5]
+ B cci_snoop_status
+a15_snoops
+ MOV r5, #0x4000
+ STR r3, [r4, r5]
+cci_snoop_status
+ LDR r0, [r4, #0xc]
+ TST r0, #1
+ BNE cci_snoop_status
+
+ ; ----------------------------------------------------
+ ; Switch to Monitor mode straight away as we do not want to worry
+ ; about setting up Secure SVC stacks. All Secure world save/restore
+ ; takes place in the monitor mode.
+ ; ----------------------------------------------------
+ MRS r5, cpsr ; Get current mode (SVC) in r5
+ BIC r1, r5, #0x1f ; Clear all mode bits
+ ORR r1, r1, #0x16 ; Set bits for Monitor mode
+ MSR cpsr_cxsf, r1 ; We are now in Monitor Mode
+ BIC r1, r5, #0x1f ; Clear all mode bits
+ ORR r1, r1, #0x1a ; Set bits for a return to the HYP mode
+ MSR spsr_cxsf, r1
+
+ MOV r0, r6
+ MOV r1, #STACK_SIZE
+ BL get_sp
+ MOV sp, r0
+
+ ; Restore secure world context & enable MMU
+ BL secure_context_restore
+
+ ; Switch to non-secure registers for HYP &
+ ; later non-secure world restore.
+ MRC p15, 0, r1, c1, c1, 0
+ ORR r1, #SCR_NS
+ MCR p15, 0, r1, c1, c1, 0
+ ISB
+
+ ; Setup the NS link register
+ MRC p15, 0, r0, c0, c0, 5
+ ANDS r0, r0, #0xf
+ LDR r1, =ns_entry_ptr
+ ADD r1, r1, r0, lsl #2
+ LDR lr, [r1]
+ ; Switch to Non-secure world
+ ERET
+warm_reset_ltrls
+ ENDFUNC
+
+ AREA stacks, DATA, ALIGN=6
+secure_stacks SPACE MAX_CLUSTERS*MAX_CPUS*STACK_SIZE
+ END
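do_smc above dispatches on the function id passed in r0, so the non-secure/HYP side reaches the monitor through the smc() trampoline in virt_helpers.s. A sketch of the calling side (the C prototype is assumed; the SMC_SEC_SAVE call matches the one made later from save_context() in ns_context.c):

    #define SMC_SEC_INIT      0x0
    #define SMC_SEC_SAVE      0x1
    #define SMC_SEC_SHUTDOWN  0x2

    extern void smc(unsigned id, unsigned arg);   /* assumed C prototype of the asm trampoline */

    void monitor_calls(unsigned ns_entry_point)
    {
            smc(SMC_SEC_INIT, 0);               /* set up secure stack, caches & MMU */
            smc(SMC_SEC_SAVE, ns_entry_point);  /* save secure context, record the NS entry */
            smc(SMC_SEC_SHUTDOWN, 0);           /* flush caches and put the cluster in reset */
    }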
diff --git a/big-little/secure_world/secure_context.c b/big-little/secure_world/secure_context.c
new file mode 100644
index 0000000..ff864f1
--- /dev/null
+++ b/big-little/secure_world/secure_context.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "secure_world.h"
+
+extern void powerup_ib_core(unsigned, unsigned);
+
+sec_context secure_context[MAX_CORES] __attribute__ ((aligned(CACHE_LINE_SZ)));
+unsigned ns_entry_ptr[MAX_CORES];
+unsigned small_pagetable[1024] __attribute__ ((aligned(4096)));
+unsigned host_cluster = HOST_CLUSTER;
+unsigned switcher = SWITCHER;
+
+/* Bakery lock to serialize access to the tube. */
+static bakery_t lock_tube1 __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };
+
+void enable_caches(void)
+{
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned first_cpu = find_first_cpu();
+
+ write_trace(&lock_tube1, SEC_TUBE1, "Secure Coherency Enable Start", read_cntpct(), 0x0, 0x0);
+
+ /* Turn on coherency */
+ enable_coherency();
+
+ /* Enable caches */
+ write_sctlr(read_sctlr() | CR_I | CR_Z | CR_C);
+ dsb();
+ isb();
+
+ /*
+ * Only one cpu should enable the CCI while the other
+ * cpus wait.
+ */
+ if (first_cpu == cpu_id) {
+ if (cluster_id)
+ write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+ else
+ write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
+
+ dsb();
+ }
+
+ /* Wait for the dust to settle down */
+ while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
+
+ write_trace(&lock_tube1, SEC_TUBE1, "Secure Coherency Enable End", read_cntpct(), 0x0, 0x0);
+
+ return;
+}
+
+void secure_context_restore(void)
+{
+ unsigned cpu_id = read_cpuid();
+ sec_context *sec_ctx = &secure_context[cpu_id];
+
+ write_trace(&lock_tube1, SEC_TUBE1, "Secure Context Restore Start", read_cntpct(), 0x0, 0x0);
+
+ /* Restore state of CCI SAR */
+ write32(CCI_BASE + SECURE_ACCESS_REG, sec_ctx->cci_sar);
+
+ /* Restore the security state of PPIs. */
+ write32(GIC_ID_PHY_BASE + GICD_SEC, sec_ctx->vgic_icdisr0);
+
+ /* Restore the Priority mask register */
+ write32(GIC_IC_PHY_BASE + GICC_PRIMASK, sec_ctx->vgic_iccpmr);
+
+ /* Restore the coprocessor context */
+ write_cntfrq(sec_ctx->cntfrq);
+ write_mvbar(sec_ctx->mvbar);
+ write_vbar(sec_ctx->vbar);
+ write_nsacr(sec_ctx->nsacr);
+ write_cpacr(sec_ctx->cpacr);
+ write_actlr(sec_ctx->actlr);
+ write_scr(sec_ctx->scr);
+ write_sctlr(read_sctlr() | sec_ctx->sctlr);
+ dsb();
+ isb();
+
+ write_trace(&lock_tube1, SEC_TUBE1, "Secure Context Restore End", read_cntpct(), 0x0, 0x0);
+ return;
+}
+
+void secure_context_save(unsigned ns_entry_point)
+{
+ unsigned cpu_id = read_cpuid();
+ sec_context *sec_ctx = &secure_context[cpu_id];
+
+ ns_entry_ptr[cpu_id] = ns_entry_point;
+ sec_ctx->cci_sar = read32(CCI_BASE + SECURE_ACCESS_REG);
+ sec_ctx->vgic_icdisr0 = read32(GIC_ID_PHY_BASE + GICD_SEC);
+ sec_ctx->vgic_iccpmr = read32(GIC_IC_PHY_BASE + GICC_PRIMASK);
+ sec_ctx->mvbar = read_mvbar();
+ sec_ctx->vbar = read_vbar();
+ sec_ctx->nsacr = read_nsacr();
+ sec_ctx->cpacr = read_cpacr();
+ sec_ctx->actlr = read_actlr();
+ sec_ctx->scr = read_scr();
+ sec_ctx->sctlr = read_sctlr();
+ sec_ctx->cntfrq = read_cntfrq();
+
+ /*
+ * Now that the context has been saved, it's safe to bring
+ * our counterpart on the inbound cluster out of reset.
+ */
+ powerup_ib_core(get_inbound(), cpu_id);
+
+ return;
+}
+
+/* Create the small page level 1 descriptor */
+static void create_l1_sp_desc(unsigned virt_addr, unsigned l1_ttb_va,
+ unsigned l2_ttb_pa)
+{
+ unsigned ttb1_index = 0;
+ unsigned ttb1_desc = 0;
+
+ ttb1_index = (virt_addr & MB_MASK) >> MB_SHIFT;
+
+ /*
+ * Create a mapping if one is not already present.
+ * Assuming that page tables are initialized to 0.
+ */
+ if (!(read32(l1_ttb_va + 4 * ttb1_index) & SMALL_PAGE)) {
+ l2_ttb_pa = l2_ttb_pa & SP_L1_BASE_MASK;
+ ttb1_desc = l2_ttb_pa | SMALL_PAGE;
+ write32(l1_ttb_va + 4 * ttb1_index, ttb1_desc);
+ cln_dcache_mva_pou((unsigned *)l1_ttb_va + 4 * ttb1_index);
+ }
+
+ return;
+}
+
+/* Create the small page level 2 descriptor */
+static void create_l2_sp_desc(unsigned virt_addr, unsigned phys_addr,
+ unsigned l2_ttb_va, unsigned attrs)
+{
+ unsigned int ttb2_index = 0;
+ unsigned int ttb2_desc = 0;
+ unsigned int mem_attrs =
+ SP_SBO | SP_CACHEABLE | SP_BUFFERABLE | SP_TEX0 | SP_SHARED |
+ SP_AP0;
+
+ /* Use default attributes if the user has not passed any */
+ if (attrs) {
+ mem_attrs = attrs;
+ }
+
+ /* Left shift by 12 followed by a right shift by 24 gives 2nd level index */
+ ttb2_index = (virt_addr << PAGE_SHIFT) >> (PAGE_SHIFT * 2);
+
+ /*
+ * Create a mapping if one is not already present
+ * Assuming that page tables are initialized to 0.
+ */
+ if (!(read32(l2_ttb_va + 4 * ttb2_index))) {
+ ttb2_desc = (phys_addr & PAGE_MASK) | mem_attrs;
+ write32(l2_ttb_va + 4 * ttb2_index, ttb2_desc);
+ cln_dcache_mva_pou((unsigned *)l2_ttb_va + 4 * ttb2_index);
+ }
+
+ return;
+}
+
+void add_dv_page(unsigned pt_base)
+{
+ unsigned start_addr = (unsigned)&BL_SEC_DV_PAGE$$Base;
+ unsigned dv_mem_attrs = SP_AP0 | SP_SBO | SP_XN | SP_BUFFERABLE;
+ unsigned addr = 0x0;
+
+ /*
+ * Create the L1 small page descriptor using the base address supplied.
+ * The region specified must all fit within a single 1MB section.
+ */
+ create_l1_sp_desc(start_addr, (unsigned)pt_base,
+ (unsigned)small_pagetable);
+
+ /*
+ * We want all memory to be WBWA/S except for a page
+ * which is device (used for the Bakery locks etc).
+ */
+ for (addr = start_addr & MB_MASK;
+ addr < (start_addr & MB_MASK) + 0x100000; addr += 4096) {
+ create_l2_sp_desc(addr, addr, (unsigned)small_pagetable,
+ (addr == start_addr ? dv_mem_attrs : 0));
+ }
+
+ return;
+}
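add_dv_page() above re-describes the 1MB section containing BL_SEC_DV_PAGE with a second-level table, so that a single 4K page (bakery locks, stacks) can be device memory while the rest of the megabyte stays WBWA. A sketch of where it is called from, matching the SMC_SEC_INIT path (setup_secure in monmode_vectors.s):

    extern unsigned flat_pagetables[];   /* secure L1 table from flat_pagetable.s */

    void secure_init_mappings(void)
    {
            /* Carve the device page out of the flat table before the secure
             * MMU is switched on; setup_mmu then loads TTBR0/DACR/PRRR/NMRR. */
            add_dv_page((unsigned) flat_pagetables);
    }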
diff --git a/big-little/secure_world/secure_resets.c b/big-little/secure_world/secure_resets.c
new file mode 100644
index 0000000..cb732ba
--- /dev/null
+++ b/big-little/secure_world/secure_resets.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "secure_world.h"
+#include "events.h"
+#include "bakery.h"
+
+extern unsigned warm_reset;
+
+/* Bakery lock to serialize access to the tube. */
+bakery_t lock_tube0 __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };
+
+/*
+ * Compile-time switch to decide whether the outbound
+ * L2 will always be kept on for inbound cache warming
+ * or it will be flushed and reset after the BL context
+ * has been picked up.
+ */
+static unsigned flush_ob_l2 = FLUSH_OB_L2;
+
+#if FM_BETA
+/*
+ * Variable in secure world to indicate the
+ * reset type i.e. cold (0) or warm reset (!0).
+ */
+unsigned ve_reset_type[NUM_CPUS];
+#endif
+
+/*
+ * Allocate secure events in our device page
+ */
+unsigned event[MAX_CORES][MAX_SEC_EVENTS]
+__attribute__ ((section("BL_SEC_DV_PAGE")));
+
+/*
+ * Normal spinlock to guard inbound cluster registers
+ * in the KFSCB. It will always be used when the MMU
+ * is on. Each cluster will anyway use it sequentially.
+ */
+static unsigned lock_ib_kfscb;
+
+/*
+ * Bakery lock to guard outbound cluster registers in
+ * KFSCB. It will always be used when the MMU is off.
+ * Each cluster will anyway use it sequentially.
+ */
+static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };
+
+/*
+ * Small stacks for after we have turned our caches off.
+ */
+static unsigned long long powerdown_stacks[NUM_CPUS][32]
+__attribute__ ((section("BL_SEC_DV_PAGE")));
+
+/*
+ * The way a warm reset is detected has changed in the post beta FastModels.
+ * The following workarounds make the earlier approach coexist with the
+ * new one. Instead of dealing with a function pointer, they manipulate a
+ * variable.
+ */
+static void set_reset_handler(unsigned cluster_id, unsigned cpu_id, void (*handler)(void))
+{
+#if FM_BETA
+ ve_reset_type[cpu_id]++;
+ cln_dcache_mva_poc(&ve_reset_type[cpu_id]);
+#else
+ write32(KFSCB_BASE + RST_HANDLER0 + ((cpu_id + (cluster_id << 2)) << 3), (unsigned) handler);
+ dsb();
+#endif
+}
+
+static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id))(void)
+{
+#if FM_BETA
+ return (void (*)(void)) ve_reset_type[cpu_id];
+#else
+ return (void (*)(void)) read32(KFSCB_BASE + RST_HANDLER0 + ((cpu_id + (cluster_id << 2)) << 3));
+#endif
+}
+
+unsigned long long *get_powerdown_stack(unsigned cpu_id)
+{
+ return &powerdown_stacks[cpu_id + 1][0];
+}
+
+unsigned get_inbound()
+{
+ return !read_clusterid();
+}
+
+/*
+ * Simple function which will bring our corresponding core out of reset
+ */
+void powerup_ib_core(unsigned cluster_id, unsigned cpu_id)
+{
+ unsigned rst_stat_reg = 0x0;
+ unsigned cpu_mask = 0x0;
+ void (*cold_reset_handler)(void) = 0x0;
+ void (*warm_reset_handler)(void) = (void (*)(void)) &warm_reset;
+
+ if (cold_reset_handler == get_reset_handler(cluster_id, cpu_id)) {
+ set_reset_handler(cluster_id, cpu_id, warm_reset_handler);
+ } else {
+ if (flush_ob_l2) {
+#if FLUSH_L2_FIX
+ set_event(FLUSH_L2, cpu_id);
+#endif
+ }
+
+ /*
+ * The outbound cluster's last cpu sends an event
+ * indicating that it has finished the last switchover.
+ * Wait for it before bringing its cores out of
+ * reset.
+ */
+ wait_for_event(OB_SHUTDOWN, cpu_id);
+ reset_event(OB_SHUTDOWN, cpu_id);
+ }
+
+ write_trace(&lock_tube0, SEC_TUBE0, "Powerup Inbound", read_cntpct(), 0x0, 0x0);
+
+ spin_lock(&lock_ib_kfscb);
+ rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));
+ cpu_mask = 1 << 8 | (1 << 4) << cpu_id | 1 << cpu_id;
+ rst_stat_reg &= ~cpu_mask;
+ write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), rst_stat_reg);
+ spin_unlock(&lock_ib_kfscb);
+
+ return;
+}
+
+/*
+ * Simple function to place a core in the outbound cluster
+ * in reset.
+ */
+void powerdown_ob_core(unsigned cluster_id, unsigned cpu_id)
+{
+ unsigned val = 0x0;
+ unsigned mask = 0x0;
+
+ get_bakery_spinlock(cpu_id, &lock_ob_kfscb);
+
+ val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
+ mask = (1 << cpu_id) << 4;
+ val |= mask;
+ write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);
+
+ release_bakery_spinlock(cpu_id, &lock_ob_kfscb);
+
+ return;
+}
+
+/*
+ * Simple function to place the outbound cluster in reset.
+ */
+void powerdown_ob_cluster(unsigned cluster_id, unsigned cpu_id)
+{
+ unsigned val = 0x0;
+ unsigned mask = 0x0;
+
+ get_bakery_spinlock(cpu_id, &lock_ob_kfscb);
+
+ val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
+ mask = 1 << 8;
+ val |= mask;
+ write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);
+
+ release_bakery_spinlock(cpu_id, &lock_ob_kfscb);
+
+ return;
+}
+
+/*
+ * Do not use this function for Read-Modify-Write of KFSCB registers
+ * as it does not hold a lock.
+ */
+unsigned reset_status(unsigned cluster_id, unsigned rst_level,
+ unsigned cpu_mask)
+{
+ unsigned rst_stat_reg = 0x0;
+
+ rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));
+
+ switch (rst_level) {
+ case CLUSTER_RESET:
+ return rst_stat_reg >> 8;
+ case CORE_PORESET:
+ return ((rst_stat_reg >> 4) & 0xf) & cpu_mask;
+ case CORE_RESET:
+ return (rst_stat_reg & 0xf) & cpu_mask;
+ default:
+ return 0;
+ }
+}
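reset_status() and powerup_ib_core() above imply the RST_STAT/RST_HOLD bit layout: bits [3:0] hold the per-core reset, bits [7:4] the per-core power-on reset, and bit 8 the whole-cluster reset. A hedged sketch of that layout (the macro names are invented here for illustration):

    /* Field layout implied by the code above; names are illustrative only. */
    #define RST_CORE(cpu)      (1u << (cpu))          /* bits [3:0]: core reset    */
    #define RST_PORESET(cpu)   (1u << ((cpu) + 4))    /* bits [7:4]: core poreset  */
    #define RST_CLUSTER        (1u << 8)              /* bit 8: cluster reset      */

    /* The mask cleared in RST_HOLD0 when powering up an inbound core: */
    #define RST_ALL_FOR(cpu)   (RST_CLUSTER | RST_PORESET(cpu) | RST_CORE(cpu))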
+
+void powerdown_cluster(void)
+{
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned secondary_mask = 0x0;
+ unsigned first_cpu = find_first_cpu();
+
+ /*
+ * Brute force way of cleaning the L1 and L2 caches of the outbound cluster.
+ * All cpus flush their L1 caches. The 'first_cpu' waits for the others to
+ * finish this operation before flushing the L2
+ */
+ write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush Begin", read_cntpct(), 0x0, 0x0);
+ write_sctlr(read_sctlr() & ~CR_C & ~CR_M);
+ dsb();
+ isb();
+ inv_icache_all();
+ cache_maint_op(L1, CLN_INV);
+ disable_coherency();
+ write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush End", read_cntpct(), 0x0, 0x0);
+ set_event(SEC_L1_DONE, cpu_id);
+
+ if (cpu_id == first_cpu) {
+
+ wait_for_events(SEC_L1_DONE);
+
+ if (flush_ob_l2) {
+#if FLUSH_L2_FIX
+ wait_for_event(FLUSH_L2, cpu_id);
+ reset_event(FLUSH_L2, cpu_id);
+#endif
+ write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush Begin", read_cntpct(), 0x0, 0x0);
+ cache_maint_op(L2, CLN_INV);
+ write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush End", read_cntpct(), 0x0, 0x0);
+
+ /* Turn off CCI snoops & DVM messages */
+ if (cluster_id)
+ write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
+ else
+ write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
+
+ dsb();
+
+ /* Wait for the dust to settle down */
+ while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
+ }
+
+ /********************* RESET HANDLING **************************************
+ * Secondaries place themselves in reset while the 'first_cpu' waits for
+ * them to do so.
+ ***************************************************************************/
+
+ /*
+ * Read the L2 control register to get the number of secondary
+ * cores present on this cluster. Shift the mask left by one so
+ * it covers the secondaries (cpu ids 1..N) rather than the primary.
+ */
+ secondary_mask = (1 << num_secondaries()) - 1;
+ secondary_mask <<= 1;
+
+ /* Wait for other cpus to enter reset */
+ while (secondary_mask !=
+ reset_status(cluster_id, CORE_PORESET, secondary_mask)) ;
+
+ if (flush_ob_l2)
+ powerdown_ob_cluster(cluster_id, cpu_id);
+ else
+ powerdown_ob_core(cluster_id, cpu_id);
+
+ set_events(OB_SHUTDOWN);
+
+ } else {
+ powerdown_ob_core(cluster_id, cpu_id);
+ }
+
+ write_trace(&lock_tube0, SEC_TUBE0, "Reset Initiated", read_cntpct(), 0x0, 0x0);
+ return;
+}
diff --git a/big-little/secure_world/secure_world.h b/big-little/secure_world/secure_world.h
new file mode 100644
index 0000000..9d6db42
--- /dev/null
+++ b/big-little/secure_world/secure_world.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __SECURE_WORLD_H__
+#define __SECURE_WORLD_H__
+
+#include "hyp_types.h"
+#include "virt_helpers.h"
+#include "events.h"
+#include "misc.h"
+#include "gic_registers.h"
+#include "hyp_vmmap.h"
+
+/* Definitions for creating a 4K page table entry */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_ALIGN(x) (x & PAGE_MASK)
+
+#define MB_SHIFT 20
+#define MB_SIZE (1UL << MB_SHIFT)
+#define MB_MASK (~(MB_SIZE-1))
+#define MB_ALIGN(x) (x & MB_MASK)
+
+#define SP_L1_BASE_SHIFT 10
+#define SP_L1_BASE_MASK (~((1UL << SP_L1_BASE_SHIFT) - 1))
+
+/* Definitions for first level small page descriptors */
+#define SMALL_PAGE (1 << 0)
+#define SMALL_PAGE_NS (1 << 3)
+
+/* Definitions for second level small page descriptors */
+#define SP_XN (1 << 0)
+#define SP_SBO (1 << 1)
+#define SP_BUFFERABLE (1 << 2)
+#define SP_CACHEABLE (1 << 3)
+#define SP_AP0 (1 << 4)
+#define SP_AP1 (1 << 5)
+#define SP_AP2 (1 << 9)
+#define SP_TEX0 (1 << 6)
+#define SP_TEX1 (1 << 7)
+#define SP_TEX2 (1 << 8)
+#define SP_SHARED (1 << 10)
+#define SP_GLOBAL (1 << 11)
+
+typedef struct sec_stack {
+ unsigned stack[STACK_SIZE];
+} sec_stack;
+
+typedef struct sec_context {
+ unsigned sctlr;
+ unsigned actlr;
+ unsigned cpacr;
+ unsigned nsacr;
+ unsigned scr;
+ unsigned vbar;
+ unsigned mvbar;
+ unsigned cntfrq;
+ unsigned cci_sar;
+ unsigned vgic_icdisr0;
+ unsigned vgic_iccpmr;
+} sec_context;
+
+extern void enable_caches(void);
+extern void secure_context_restore(void);
+extern void secure_context_save(unsigned);
+
+#endif /* __SECURE_WORLD_H__ */
diff --git a/big-little/secure_world/ve_reset_handler.s b/big-little/secure_world/ve_reset_handler.s
new file mode 100644
index 0000000..1a5b6a1
--- /dev/null
+++ b/big-little/secure_world/ve_reset_handler.s
@@ -0,0 +1,58 @@
+ ;
+ ; Copyright (c) 2011, ARM Limited. All rights reserved.
+ ;
+ ; Redistribution and use in source and binary forms, with
+ ; or without modification, are permitted provided that the
+ ; following conditions are met:
+ ;
+ ; Redistributions of source code must retain the above
+ ; copyright notice, this list of conditions and the
+ ; following disclaimer.
+ ;
+ ; Redistributions in binary form must reproduce the
+ ; above copyright notice, this list of conditions and
+ ; the following disclaimer in the documentation
+ ; and/or other materials provided with the distribution.
+ ;
+ ; Neither the name of ARM nor the names of its
+ ; contributors may be used to endorse or promote products
+ ; derived from this software without specific prior written
+ ; permission.
+ ;
+
+ AREA test, CODE
+
+ ENTRY
+
+ [ FM_BETA
+ IMPORT warm_reset
+ IMPORT ve_reset_type
+
+ MRC p15, 0, r0, c0, c0, 5
+ ANDS r0, r0, #0xf
+ LDR r2, =ve_reset_type
+ ADD r2, r2, r0, lsl #2
+ LDR r1, [r2]
+ CMP r1, #0
+ MOVEQ r1, #1
+ STREQ r1, [r2]
+ LDREQ PC, =0x80000000
+ LDRNE PC, =warm_reset
+
+ |
+
+ MRC p15, 0, r0, c0, c0, 5
+ UBFX r1, r0, #0, #8
+ UBFX r2, r0, #8, #8
+ ADD r3, r1, r2, lsl #2
+ LSL r3, #3
+ LDR r4, =0x60000040
+ LDR r4, [r4, r3]
+ CMP r4, #0
+ BXNE r4
+ LDR pc, =0x80000000
+
+ ]
+
+ END
+
diff --git a/big-little/switcher/context/gic.c b/big-little/switcher/context/gic.c
new file mode 100644
index 0000000..4195346
--- /dev/null
+++ b/big-little/switcher/context/gic.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virt_helpers.h"
+#include "misc.h"
+
+struct set_and_clear_regs {
+ volatile unsigned int set[32], clear[32];
+};
+
+typedef struct {
+ /* 0x000 */ volatile unsigned int control;
+ const unsigned int controller_type;
+ const unsigned int implementer;
+ const char padding1[116];
+ /* 0x080 */ volatile unsigned int security[32];
+ /* 0x100 */ struct set_and_clear_regs enable;
+ /* 0x200 */ struct set_and_clear_regs pending;
+ /* 0x300 */ struct set_and_clear_regs active;
+ /* 0x400 */ volatile unsigned int priority[256];
+ /* 0x800 */ volatile unsigned int target[256];
+ /* 0xC00 */ volatile unsigned int configuration[64];
+ /* 0xD00 */ const char padding3[512];
+ /* 0xF00 */ volatile unsigned int software_interrupt;
+ const char padding4[12];
+ /* 0xF10 */ volatile unsigned int sgi_clr_pending[4];
+ /* 0xF20 */ volatile unsigned int sgi_set_pending[4];
+ const char padding5[176];
+ /* 0xFE0 */ unsigned const int peripheral_id[4];
+ /* 0xFF0 */ unsigned const int primecell_id[4];
+} interrupt_distributor;
+
+typedef struct {
+ /* 0x00 */ volatile unsigned int control;
+ /* 0x04 */ volatile unsigned int priority_mask;
+ /* 0x08 */ volatile unsigned int binary_point;
+ /* 0x0c */ volatile unsigned const int interrupt_ack;
+ /* 0x10 */ volatile unsigned int end_of_interrupt;
+ /* 0x14 */ volatile unsigned const int running_priority;
+ /* 0x18 */ volatile unsigned const int highest_pending;
+} cpu_interface;
+
+/*
+ * Saves the GIC CPU interface context
+ * Requires 3 words of memory
+ */
+void save_gic_interface(unsigned int *pointer, unsigned gic_interface_address)
+{
+ cpu_interface *ci = (cpu_interface *) gic_interface_address;
+
+ pointer[0] = ci->control;
+ pointer[1] = ci->priority_mask;
+ pointer[2] = ci->binary_point;
+
+}
+
+/*
+ * Saves this CPU's banked parts of the distributor
+ * Returns non-zero if an SGI/PPI interrupt is pending (after saving all required context)
+ * Requires 19 words of memory
+ */
+int save_gic_distributor_private(unsigned int *pointer,
+ unsigned gic_distributor_address)
+{
+ interrupt_distributor *id =
+ (interrupt_distributor *) gic_distributor_address;
+ unsigned int *ptr = 0x0;
+
+ *pointer = id->enable.set[0];
+ ++pointer;
+ memcpy((void *) pointer, (const void *) id->priority, 8 << 2);
+ pointer += 8;
+ memcpy((void *) pointer, (const void *) id->target, 8 << 2);
+ pointer += 8;
+
+ /* Save just the PPI configurations (SGIs are not configurable) */
+ *pointer = id->configuration[1];
+ ++pointer;
+
+ /*
+ * Private peripheral interrupts need to be replayed on
+ * the destination cpu interface for consistency. This
+ * is the responsibility of the peripheral driver. When
+ * it sees a pending interrupt while saving its context
+ * it should record enough information to recreate the
+ * interrupt while restoring.
+ * We don't save the Pending/Active status; we simply clear
+ * it so that it does not interfere when we are back.
+ */
+ id->pending.clear[0] = 0xffffffff;
+ id->active.clear[0] = 0xffffffff;
+
+ /*
+ * IPIs are different and can be replayed just by saving
+ * and restoring the set/clear pending registers
+ */
+ ptr = pointer;
+ memcpy((void *) pointer, (const void *) id->sgi_set_pending, 4 << 2);
+ pointer += 8;
+
+ /*
+ * Clear the pending SGIs on this cpuif so that they don't
+ * interfere with the wfi later on.
+ */
+ memcpy((void *) id->sgi_clr_pending, (const void *) ptr, 4 << 2);
+
+ if (*pointer) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+/*
+ * Saves the shared parts of the distributor
+ * Requires 1 word of memory, plus 20 words for each block of 32 SPIs (max 641 words)
+ * Returns non-zero if an SPI interrupt is pending (after saving all required context)
+ */
+int save_gic_distributor_shared(unsigned int *pointer,
+ unsigned gic_distributor_address)
+{
+ int retval = 0;
+ interrupt_distributor *id =
+ (interrupt_distributor *) gic_distributor_address;
+ unsigned num_spis = 0;
+
+ /* Calculate how many SPIs the GIC supports */
+ num_spis = 32 * (id->controller_type & 0x1f);
+
+ /* Save rest of GIC configuration */
+ if (num_spis) {
+ memcpy((void *) pointer, (const void *) (id->target + 8), (num_spis / 4) << 2);
+ pointer += num_spis / 4;
+ }
+
+ /* Save control register */
+ *pointer = id->control;
+ ++pointer;
+
+ return retval;
+}
+
+void restore_gic_interface(unsigned int *pointer,
+ unsigned gic_interface_address)
+{
+ cpu_interface *ci = (cpu_interface *) gic_interface_address;
+
+ ci->priority_mask = pointer[1];
+ ci->binary_point = pointer[2];
+
+ /* Restore control register last */
+ ci->control = pointer[0];
+}
+
+void restore_gic_distributor_private(unsigned int *pointer,
+ unsigned gic_distributor_address)
+{
+ interrupt_distributor *id =
+ (interrupt_distributor *) gic_distributor_address;
+ unsigned ctr, prev_val = 0, prev_ctr = 0;
+
+ id->enable.set[0] = *pointer;
+ ++pointer;
+
+ memcpy((void *) id->priority, (const void *) pointer, 8 << 2);
+ pointer += 8;
+ memcpy((void *) id->target, (const void *) pointer, 8 << 2);
+ pointer += 8;
+
+ /* Restore just the PPI configurations (SGIs are not configurable) */
+ id->configuration[1] = *pointer;
+ ++pointer;
+
+ /*
+ * Clear active and pending PPIs as they will be recreated by the
+ * peripherals.
+ */
+ id->active.clear[0] = 0xffffffff;
+ id->pending.clear[0] = 0xffffffff;
+
+ /*
+ * Restore pending IPIs
+ */
+ for (ctr = 0; ctr < 4; ctr++) {
+ if(!pointer[ctr])
+ continue;
+
+ if(pointer[ctr] == prev_val) {
+ pointer[ctr] = pointer[prev_ctr];
+ } else {
+ prev_val = pointer[ctr];
+ prev_ctr = ctr;
+ remap_cpuif(&pointer[ctr]);
+ }
+ }
+
+ memcpy((void *) id->sgi_set_pending, (const void *) pointer, 4 << 2);
+ pointer += 4;
+
+ id->pending.set[0] = *pointer;
+
+ return;
+}
+
+/*
+ * Optimized routine to restore the shared vgic distributor interface.
+ * Saving on outbound and restoring on inbound is redundant as the
+ * context is non-volatile across a switch. Hence, simply R-M-W on
+ * the inbound and remove the 'save' function from the outbound
+ * critical path.
+ */
+void restore_gic_distributor_shared(unsigned int *pointer,
+ unsigned gic_distributor_address)
+{
+ interrupt_distributor *id =
+ (interrupt_distributor *) gic_distributor_address;
+ unsigned num_spis;
+ unsigned ctr, prev_val = 0, prev_ctr = 0;
+
+ /* Calculate how many SPIs the GIC supports */
+ num_spis = 32 * ((id->controller_type) & 0x1f);
+
+ /* Restore rest of GIC configuration */
+ if (num_spis) {
+
+ memcpy((void *) pointer, (const void *) (id->target + 8), (num_spis / 4) << 2);
+
+ for (ctr = 0; ctr < num_spis / 4; ctr++) {
+ if(!pointer[ctr])
+ continue;
+
+ if(pointer[ctr] == prev_val) {
+ pointer[ctr] = pointer[prev_ctr];
+ } else {
+ prev_val = pointer[ctr];
+ prev_ctr = ctr;
+ remap_cpuif(&pointer[ctr]);
+ }
+ }
+
+ memcpy((void *) (id->target + 8), (const void *) pointer, (num_spis / 4) << 2);
+ }
+
+ return;
+}
diff --git a/big-little/switcher/context/ns_context.c b/big-little/switcher/context/ns_context.c
new file mode 100644
index 0000000..891f5bb
--- /dev/null
+++ b/big-little/switcher/context/ns_context.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virt_helpers.h"
+#include "vgiclib.h"
+#include "gic_registers.h"
+#include "int_master.h"
+#include "context.h"
+#include "bl.h"
+#include "misc.h"
+#include "events.h"
+#include "virtualisor.h"
+#include "helpers.h"
+
+extern void gic_enable_int(unsigned);
+extern void SetupVGIC(unsigned);
+extern unsigned async_switchover;
+extern unsigned hyp_timer_trigger;
+
+/* Bakery locks to serialize access to the tube. */
+static bakery_t lock_tube0 __attribute__ ((section("BL_DV_PAGE"))) = { 0 };
+static bakery_t lock_tube1 __attribute__ ((section("BL_DV_PAGE"))) = { 0 };
+
+/*
+ * Top level structure which encapsulates the context of the entire
+ * Kingfisher system
+ */
+system_context switcher_context = {0};
+
+void stop_generic_timer(generic_timer_context *ctr_ctx)
+{
+ /*
+ * Disable the timer and mask the irq to prevent
+ * spurious interrupts on this cpu interface. It
+ * will bite us when we come back if we don't. It
+ * will be replayed on the inbound cluster.
+ */
+ write_cntp_ctl(TIMER_MASK_IRQ);
+
+
+ /*
+ * If the local timer interrupt was being used as
+ * the asynchronous trigger, then it was disabled
+ * in handle_interrupt() to prevent this level-
+ * triggered interrupt from firing. Now that it has
+ * been acked at the peripheral, we can re-enable it.
+ */
+ if(!hyp_timer_trigger) {
+ if (ctr_ctx->cntp_ctl & TIMER_IRQ_STAT)
+ gic_enable_int(LCL_TIMER_IRQ);
+ }
+
+ return;
+}
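The ordering in save_context() below matters: the timer registers are snapshotted with save_generic_timer() before stop_generic_timer() overwrites CNTP_CTL with TIMER_MASK_IRQ. A small sketch of that pairing, assuming the helper prototype from context.h (the parameter names here are guesses):

    extern void save_generic_timer(unsigned *ctx, unsigned is_cp15);

    void checkpoint_cpu_timer(generic_timer_context *ctr_ctx)
    {
            save_generic_timer((unsigned *) ctr_ctx, 0x1);  /* capture CNTP state first */
            stop_generic_timer(ctr_ctx);                    /* then mask the timer irq  */
    }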
+
+void save_context(unsigned first_cpu)
+{
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ cpu_context *ns_cpu_ctx =
+ &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
+ unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
+ unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
+ unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
+ banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
+ gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
+ generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
+ cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
+
+ write_trace(&lock_tube0, NS_TUBE0, "Context Save Start", read_cntpct(), 0x0, 0x0);
+
+ /*
+ * Good place to bring the inbound cluster out of reset, but first
+ * we need to save the secure world context.
+ */
+ write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save Start", read_cntpct(), 0x0, 0x0);
+ smc(SMC_SEC_SAVE, (unsigned) hyp_warm_reset_handler);
+ write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save End", read_cntpct(), 0x0, 0x0);
+
+ /*
+ * Save the 32-bit Generic timer context & stop them
+ */
+ save_generic_timer((unsigned *) cp15_timer_ctx, 0x1);
+ stop_generic_timer(cp15_timer_ctx);
+
+ /*
+ * Save v7 generic performance monitors
+ * Save cpu general purpose banked registers
+ * Save cp15 context
+ */
+ save_performance_monitors(pmon_context);
+ save_banked_registers(gp_context);
+ save_cp15(cp15_context->cp15_misc_regs);
+ save_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
+ save_mmu(cp15_context->cp15_mmu_regs);
+ save_fault_status((unsigned *) fault_ctx);
+
+ /*
+ * Check if non-secure world has access to the vfp/neon registers
+ * and save them if so.
+ */
+ if (read_nsacr() & (0x3 << 10))
+ save_vfp(vfp_context);
+
+
+ /*
+ * Disable the GIC CPU interface to prevent interrupts from waking
+ * the core from wfi() subsequently.
+ */
+ write32(GIC_IC_PHY_BASE + GICC_CTL, 0x0);
+
+ /* Save vGIC virtual cpu interface (cpu view) context */
+ save_gic_interface(gic_pvt_context->gic_cpu_if_regs, VGIC_VM_PHY_BASE);
+
+ /*
+ * Save the HYP view registers. These registers contain a snapshot
+ * of all the physical interrupts acknowledged till we
+ * entered this HYP mode.
+ */
+ vgic_savestate(cpu_id);
+
+ /*
+ * TODO:
+ * Is it safe for a secondary cpu to save its context
+ * while the GIC distributor is on? It should be, as this
+ * is banked context and the cpu itself is the only one
+ * that can change it. Still have to consider cases, e.g.
+ * SGIs/local timers becoming pending.
+ */
+ save_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
+ GIC_ID_PHY_BASE);
+
+ /* Safe place to save the Virtualisor context */
+ SaveVirtualisor(first_cpu);
+
+ /*
+ * Indicate to the inbound side that the context has been saved and is ready
+ * for pickup.
+ */
+ write_trace(&lock_tube0, NS_TUBE0, "Context Save End", read_cntpct(), 0x0, 0x0);
+ set_event(OB_CONTEXT_DONE, cpu_id);
+
+ /*
+ * Now, we wait for the inbound cluster to signal that it has at least
+ * picked up the saved context.
+ */
+ if (cpu_id == first_cpu) {
+ wait_for_events(IB_CONTEXT_DONE);
+ write_trace(&lock_tube0, NS_TUBE0, "Inbound done", read_cntpct(), 0x0, 0x0);
+ }
+
+ return;
+}
+
+void restore_context(unsigned first_cpu)
+{
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned warm_reset = 1;
+ cpu_context *ns_cpu_ctx =
+ &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
+ global_context *gbl_context = &switcher_context.cluster.ns_cluster_ctx;
+ unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
+ unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
+ unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
+ gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
+ generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
+ banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
+ cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
+ vm_context *src = 0x0;
+ vm_context *dest = 0x0;
+ unsigned dest_cpuif = 0x0;
+ unsigned src_cpuif = 0x0;
+
+ /*
+ * Map cpuids to cpu interface numbers so that cpu interface
+ * specific context can be correctly restored on the external
+ * vGIC.
+ */
+ map_cpuif(cluster_id, cpu_id);
+ SetupVGIC(warm_reset);
+
+ /*
+ * Inbound headstart i.e. the vGIC configuration, secure context
+ * restore & cache invalidation has been done. Now wait for the
+ * outbound to provide the context.
+ */
+ write_trace(&lock_tube1, NS_TUBE1, "Wait for context", read_cntpct(), 0x0, 0x0);
+ wait_for_event(OB_CONTEXT_DONE, cpu_id);
+ reset_event(OB_CONTEXT_DONE, cpu_id);
+
+ /*
+ * First cpu restores the global context while the others take
+ * care of their own.
+ */
+ write_trace(&lock_tube1, NS_TUBE1, "Context Restore Start ", read_cntpct(), 0x0, 0x0);
+ if (cpu_id == first_cpu)
+ restore_gic_distributor_shared(gbl_context->gic_dist_if_regs,
+ GIC_ID_PHY_BASE);
+ restore_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
+ GIC_ID_PHY_BASE);
+ vgic_loadstate(cpu_id);
+
+ SetupVirtualisor(first_cpu);
+
+ /* Restore NS VGIC context */
+ restore_gic_interface(gic_pvt_context->gic_cpu_if_regs,
+ VGIC_VM_PHY_BASE);
+
+ /*
+ * Check if the non-secure world has access to the vfp/neon registers
+ * and restore them if so.
+ */
+ if (read_nsacr() & (0x3 << 10))
+ restore_vfp(vfp_context);
+
+ /*
+ * Restore cp15 context
+ * Restore cpu general purpose banked registers
+ * Restore v7 generic performance monitors
+ * Restore the 32-bit Generic timer context
+ */
+ restore_fault_status((unsigned *) fault_ctx);
+ restore_mmu(cp15_context->cp15_mmu_regs);
+ restore_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
+ restore_cp15(cp15_context->cp15_misc_regs);
+ restore_banked_registers(gp_context);
+ restore_performance_monitors(pmon_context);
+ restore_generic_timer((unsigned *) cp15_timer_ctx, 0x1);
+
+ /*
+ * Paranoid check to ensure that all HYP/Secure context & Virtualisor
+ * state is restored before any core enters non-secure mode to use it.
+ */
+ if (cpu_id == first_cpu) {
+ set_events(HYP_CONTEXT_DONE);
+ }
+ wait_for_event(HYP_CONTEXT_DONE, cpu_id);
+ reset_event(HYP_CONTEXT_DONE, cpu_id);
+
+ /*
+ * Copy back the general purpose registers saved above the HYP mode
+ * stack of our counterpart cpu on the other cluster.
+ */
+ dest_cpuif = get_cpuif(cluster_id, cpu_id);
+ src_cpuif = get_cpuif(!cluster_id, cpu_id);
+ dest = &guestos_state[dest_cpuif].context;
+ src = &guestos_state[src_cpuif].context;
+
+ dest->gp_regs[0] = src->gp_regs[0];
+ dest->gp_regs[1] = src->gp_regs[1];
+ dest->gp_regs[2] = src->gp_regs[2];
+ dest->gp_regs[3] = src->gp_regs[3];
+ dest->gp_regs[4] = src->gp_regs[4];
+ dest->gp_regs[5] = src->gp_regs[5];
+ dest->gp_regs[6] = src->gp_regs[6];
+ dest->gp_regs[7] = src->gp_regs[7];
+ dest->gp_regs[8] = src->gp_regs[8];
+ dest->gp_regs[9] = src->gp_regs[9];
+ dest->gp_regs[10] = src->gp_regs[10];
+ dest->gp_regs[11] = src->gp_regs[11];
+ dest->gp_regs[12] = src->gp_regs[12];
+ dest->gp_regs[13] = src->gp_regs[13];
+ dest->gp_regs[14] = src->gp_regs[14];
+ dest->elr_hyp = src->elr_hyp;
+ dest->spsr = src->spsr;
+ dest->usr_lr = src->usr_lr;
+
+ write_trace(&lock_tube1, NS_TUBE1, "Context Restore End", read_cntpct(), 0x0, 0x0);
+ set_event(IB_CONTEXT_DONE, cpu_id);
+
+ if (async_switchover && cpu_id == first_cpu)
+ enable_trigger(read_cntfrq());
+
+ return;
+}
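+
+/*
+ * Illustrative sketch only, not called anywhere: the handshake used by
+ * save_context()/restore_context() above pairs one producer set_event()
+ * with one consumer wait_for_event()/reset_event() per event id. The
+ * helper below is a simplified per-cpu version of what the functions
+ * above do; it reuses the event API already used in this file, and the
+ * "outbound" flag and the function itself are hypothetical.
+ */
+static void context_handshake_sketch(unsigned outbound, unsigned cpu_id)
+{
+        if (outbound) {
+                /* Outbound: publish the saved context, then wait for pickup */
+                set_event(OB_CONTEXT_DONE, cpu_id);
+                wait_for_event(IB_CONTEXT_DONE, cpu_id);
+                reset_event(IB_CONTEXT_DONE, cpu_id);
+        } else {
+                /* Inbound: wait for the context, consume it, then acknowledge */
+                wait_for_event(OB_CONTEXT_DONE, cpu_id);
+                reset_event(OB_CONTEXT_DONE, cpu_id);
+                set_event(IB_CONTEXT_DONE, cpu_id);
+        }
+}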
diff --git a/big-little/switcher/context/sh_vgic.c b/big-little/switcher/context/sh_vgic.c
new file mode 100644
index 0000000..a13f862
--- /dev/null
+++ b/big-little/switcher/context/sh_vgic.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virt_helpers.h"
+#include "gic_registers.h"
+#include "misc.h"
+#include "context.h"
+
+/*
+ * Private data structure that maps each cpuid in a
+ * multicluster system to its physical cpu interface
+ * id when a shared vGIC is used.
+ */
+static unsigned int cpuif_map[MAX_CLUSTERS][MAX_CORES];
+
+/*
+ * Private data structure that maps each cpu interface
+ * id to the corresponding cpuid & clusterid. In each
+ * entry the top 4 bits store the cluster id while the bottom
+ * 4 store the cpuid.
+ *
+ * TODO:
+ * No real need for this data structure. Should be
+ * possible to get this info from the previous data
+ * structure and the knowledge of number of clusters
+ * and cpus from the KFSCB
+ */
+static unsigned int cpuinfo_map[MAX_CPUIFS];
+
+/*
+ * IPI to use for cpu interface discovery.
+ */
+#define CPUIF_IPI 0xf
+
+/*
+ * In the presence of the Switcher and the shared vGIC
+ * find the mapping between the cpu interface and the
+ * cpu id. This is required to:
+ * a) Set processor targets correctly during context
+ * save & restore and normal operation (IPI handling)
+ * b) Restore the context of pending IPIs on the inbound
+ * cluster.
+ * Ideally a platform defined register should have done the
+ * trick. However, we rely on a software mechanism to obtain
+ * this information.
+ *
+ * Assumptions:
+ * a) Expected to be used only in the "Switching" case when
+ * there is a mismatch between the cpuids and the cpuif ids
+ * on the "other" cluster
+ * b) In the "Switching" case with external vGIC, the distributor
+ * interface should never get disabled.
+ * c) Always called in Secure world
+ *
+ * The idea is that, without disturbing the existing GIC state too
+ * much (the outbound might be doing things with it), we need to
+ * ensure that only the IPI which we choose gets through our
+ * cpu interface. This should not be a problem as the SPIs will
+ * be targeted at the outbound cluster cpus & there will be no
+ * local peripheral interrupts expected. There is paranoia about
+ * getting IPIs from the outbound but this can be dealt with by
+ * manipulating the IPI priorities so that we only see what we
+ * want to see.
+ *
+ * TODO:
+ * Assuming no IPIs will be received at this point of time. So
+ * no changes will be made to the priority mask registers.
+ */
+unsigned map_cpuif(unsigned cluster_id, unsigned cpu_id)
+{
+ unsigned cpuif_id = 0;
+
+ cpuif_id = bitindex(read32(GIC_ID_PHY_BASE + GICD_CPUS) & 0xff);
+ cpuif_map[cluster_id][cpu_id] = cpuif_id;
+ cpuinfo_map[cpuif_id] = (cluster_id << 4) | cpu_id;
+
+ return 0;
+}
+
+/*
+ * Given a cpu and cluster id find the cpu interface it maps to.
+ */
+unsigned get_cpuif(unsigned cluster_id, unsigned cpu_id)
+{
+ return cpuif_map[cluster_id][cpu_id];
+}
+
+/*
+ * Given a cpu interface id, find what cpu and cluster id it maps to.
+ */
+unsigned get_cpuinfo(unsigned cpuif)
+{
+ return cpuinfo_map[cpuif];
+}
+
+/*
+ * Given a cpu interface mask, find the corresponding cpuid mask on that cluster.
+ */
+unsigned get_cpu_mask(unsigned cpuif_mask)
+{
+ unsigned num_bytes = sizeof(unsigned int) / sizeof(unsigned char), ctr;
+ unsigned cpuif = 0, clusterid = read_clusterid(), cpu_mask = 0;
+ unsigned cpuid = 0;
+
+ for (ctr = 0; ctr < num_bytes; ctr++) { /* Iterate through the cpuif_mask byte wise */
+ unsigned byte = 0;
+ unsigned char lz = 0;
+
+ byte = (cpuif_mask >> (ctr << 3)) & 0xff;
+ while ((lz = __clz(byte)) != 0x20) {
+ cpuif = 31 - lz;
+ byte &= ~(1 << cpuif); /* Clear the bit just discovered */
+ cpuid = get_cpuinfo(cpuif) & 0xf;
+ cpu_mask |= (1 << cpuid) << (ctr << 3);
+ }
+ }
+
+ return cpu_mask;
+}
+
+/*
+ * Given a cpu mask, find the corresponding cpu interface mask on that cluster.
+ */
+unsigned get_cpuif_mask(unsigned cpu_mask)
+{
+ unsigned num_bytes = sizeof(unsigned int) / sizeof(unsigned char), ctr;
+ unsigned cpuif = 0, clusterid = read_clusterid(), cpuif_mask = 0;
+ unsigned cpuid = 0;
+
+ for (ctr = 0; ctr < num_bytes; ctr++) { /* Iterate through the cpu_mask byte wise */
+ unsigned byte = 0;
+ unsigned char lz = 0;
+
+ byte = (cpu_mask >> (ctr << 3)) & 0xff;
+ while ((lz = __clz(byte)) != 0x20) {
+ cpuid = 31 - lz;
+ byte &= ~(1 << cpuid); /* Clear the bit just discovered */
+ cpuif = get_cpuif(clusterid, cpuid);
+ cpuif_mask |= (1 << cpuif) << (ctr << 3);
+ }
+ }
+
+ return cpuif_mask;
+}
+
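+/*
+ * Self-contained illustration of the byte-wise walk used by get_cpu_mask()
+ * and get_cpuif_mask() above, reusing the __clz() intrinsic used there.
+ * The identity translation stands in for get_cpuif()/get_cpuinfo() and is
+ * an assumption of this sketch only.
+ */
+static unsigned translate_mask_sketch(unsigned in_mask)
+{
+        unsigned out_mask = 0, ctr;
+
+        for (ctr = 0; ctr < sizeof(unsigned int) / sizeof(unsigned char); ctr++) {
+                unsigned byte = (in_mask >> (ctr << 3)) & 0xff;
+
+                while (byte) {
+                        unsigned bit = 31 - __clz(byte);
+
+                        byte &= ~(1 << bit);    /* Clear the bit just discovered */
+                        /* A real caller would translate 'bit' via get_cpuif() here */
+                        out_mask |= (1 << bit) << (ctr << 3);
+                }
+        }
+
+        return out_mask;
+}
+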
+/*
+ * Given a cpu interface mask, find its corresponding mask on the other cluster
+ * NOTE: Creates the new mask in-place.
+ */
+#if 1
+/*
+ * This is the fast version of remapping cpu interface ids to cpuids. Instead of
+ * remapping each bit (target interface) in the arg passed, it simply shifts all
+ * the bits by the number of cpus available.
+ */
+unsigned remap_cpuif(unsigned *cpuif_mask)
+{
+ unsigned cluster_id = read_clusterid(), num_cpus = num_secondaries() + 1;
+
+
+ if(cluster_id == EAGLE)
+ *cpuif_mask = *cpuif_mask >> num_cpus;
+ else
+ *cpuif_mask = *cpuif_mask << num_cpus;
+
+ return 0;
+}
+#else
+unsigned remap_cpuif(unsigned *cpuif_mask)
+{
+ unsigned ib_cpuif_mask = 0, ob_cpuif = 0, ib_cpuif = 0, ob_cpuid =
+ 0, ob_clusterid = 0, ib_cpuid = 0, ib_clusterid = 0;
+ unsigned num_bytes = sizeof(unsigned int) / sizeof(unsigned char), ctr;
+
+ for (ctr = 0; ctr < num_bytes; ctr++) {
+ unsigned byte = 0;
+ unsigned char lz = 0;
+
+ byte = (*cpuif_mask >> (ctr << 3)) & 0xff;
+
+ while ((lz = __clz(byte)) != 0x20) {
+ ob_cpuif = 31 - lz;
+ byte &= ~(1 << ob_cpuif); /* Clear the bit just discovered */
+ ob_cpuid = get_cpuinfo(ob_cpuif) & 0xf;
+ ob_clusterid = (get_cpuinfo(ob_cpuif) >> 4) & 0xf;
+
+ /*
+ * TODO: Can we assume that the inbound and outbound clusters will
+ * always be logical complements of each other
+ */
+ ib_clusterid = !ob_clusterid;
+
+ /*
+ * TODO: Assuming that the cpuids have a 1:1 mapping i.e. cpuX on
+ * one cluster will always map to cpuX on the other cluster.
+ */
+ ib_cpuid = ob_cpuid;
+ ib_cpuif = get_cpuif(ib_clusterid, ib_cpuid);
+ ib_cpuif_mask |= (1 << ib_cpuif) << (ctr << 3);
+ }
+ }
+
+ *cpuif_mask = ib_cpuif_mask;
+ return 0;
+}
+#endif
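+
+/*
+ * Worked example of the shift-based remap above: a sketch assuming two
+ * cpus per cluster, with the EAGLE cpu interfaces occupying the upper two
+ * bits of the mask and the other cluster's interfaces the lower two.
+ */
+static void remap_cpuif_example(void)
+{
+        unsigned mask = 0xc;            /* cpuifs 2-3, i.e. both EAGLE cpus */
+        unsigned num_cpus = 2;
+
+        /* On EAGLE the mask is shifted down onto the other cluster's cpuifs */
+        mask >>= num_cpus;              /* now 0x3: cpuifs 0-1 */
+        (void) mask;
+}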
diff --git a/big-little/switcher/trigger/async_switchover.c b/big-little/switcher/trigger/async_switchover.c
new file mode 100644
index 0000000..056c8a1
--- /dev/null
+++ b/big-little/switcher/trigger/async_switchover.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virt_helpers.h"
+#include "misc.h"
+#include "stdlib.h"
+#include "gic_registers.h"
+
+extern void gic_enable_int(unsigned);
+extern void gic_disable_int(unsigned);
+extern void gic_send_ipi(unsigned, unsigned);
+extern void gic_eoi_int(unsigned);
+extern void gic_deactivate_int(unsigned);
+extern int __rand_r(struct _rand_state *);
+/*
+ * Set of flags used by the interrupt handling code
+ * to distinguish between IPIs sent by the big-little
+ * code and the payload software.
+ * TODO: Assumes only one cpu will send an IPI at a
+ * time rather than multiple cpus sending the same
+ * IPI to each other at the same time from within the
+ * HYP mode.
+ */
+static unsigned lock_ipi_check;
+static unsigned hyp_ipi_check[16];
+static unsigned timer_count;
+/* Support for the switchover interval randomly but sanely */
+static unsigned rand_async_switches = RAND_ASYNC;
+/* Use HYP timer for async switches */
+unsigned hyp_timer_trigger = USE_HYP_TIMERS;
+
+/*
+ * Returns the id of the first IPI that is not pending on
+ * our cpu interface or the first IPI that is pending but
+ * was not generated by us. Returns 16 if no such IPI is
+ * found
+ */
+static unsigned get_free_ipi(void)
+{
+ unsigned ctr, shift, cpu_if_bit, cpu_id = read_cpuid(), cluster_id =
+ read_clusterid();
+
+ cpu_if_bit = 1 << get_cpuif(cluster_id, cpu_id);
+
+ /* Find the register offset */
+ for (ctr = 0; ctr < 4; ctr++)
+ /* Check whether IPI<shift> has already been generated by us */
+ for (shift = 0; shift < 4; shift++) {
+ if (read32
+ (GIC_ID_PHY_BASE + GICD_SPENDSGIR +
+ (ctr << 2)) & (cpu_if_bit << (shift << 3)))
+ continue;
+
+ return (ctr << 2) + shift;
+ }
+
+ return 16;
+}
+
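+/*
+ * Sketch of the GICD_SPENDSGIRn layout that get_free_ipi() above walks:
+ * each 32-bit register covers four SGIs, one byte per SGI, one bit per
+ * source cpu interface inside that byte. The helper reuses read32() and
+ * the register names from this file but is illustrative only.
+ */
+static unsigned sgi_pending_from_cpuif(unsigned ipi_no, unsigned cpuif)
+{
+        unsigned reg_offset = GICD_SPENDSGIR + ((ipi_no >> 2) << 2);
+        unsigned byte_shift = (ipi_no & 0x3) << 3;
+
+        return read32(GIC_ID_PHY_BASE + reg_offset) & ((1 << cpuif) << byte_shift);
+}
+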
+static void ack_trigger(void)
+{
+ unsigned ctl = 0;
+
+ ctl = read_cnthp_ctl();
+ if (ctl & TIMER_IRQ_STAT) {
+ /* Disable timer and mask interrupt */
+ write_cnthp_ctl(TIMER_MASK_IRQ);
+ } else {
+ printf("Spurious HYP timer irq \n");
+ panic();
+ }
+
+ return;
+}
+
+/*
+ * Broadcast first available IPI so that all cpus can start switching to
+ * the other cluster.
+ */
+void signal_switchover(void)
+{
+ unsigned ipi_no = 0x0;
+
+ /* If x is the no. of cpus then corresponding mask would be (1 << x) - 1 */
+ unsigned cpu_mask = (1 << (num_secondaries() + 1)) - 1;
+ /*
+ * Map the target cpuids to their cpu interfaces as the 1:1 mapping
+ * no longer exists with the external vGIC.
+ */
+ unsigned cpuif_mask = get_cpuif_mask(cpu_mask);
+
+ /*
+ * Send an ipi to all the cpus in the cluster including ourselves
+ * to start a switch to the inbound cluster. First choose a non-
+ * pending IPI to avoid a clash with the OS.
+ */
+ ipi_no = get_free_ipi();
+
+ /*
+ * For this IPI, set the mask in our global variable. We do this; the
+ * payload software does not. But first wait for any earlier use of this
+ * IPI to be fully acked.
+ */
+ while (hyp_ipi_check[ipi_no]) ;
+ spin_lock(&lock_ipi_check);
+ hyp_ipi_check[ipi_no] = cpuif_mask;
+ dsb();
+ spin_unlock(&lock_ipi_check);
+
+ /* Send the IPI to the cpu_mask */
+ gic_send_ipi(cpuif_mask, ipi_no);
+
+ return;
+}
+
+unsigned check_switchover_ipi(unsigned cpu_if, unsigned ipi_no)
+{
+ unsigned rc = FALSE;
+
+ spin_lock(&lock_ipi_check);
+ /*
+ * If this IPI was sent by the big-little code then our cpu_if bit must have
+ * been set in the ipi_check flag. Reset the bit and indicate that it is an
+ * internal IPI.
+ */
+ if (hyp_ipi_check[ipi_no] & (1 << cpu_if)) {
+ rc = TRUE;
+ hyp_ipi_check[ipi_no] &= ~(1 << cpu_if);
+ dsb();
+ }
+ spin_unlock(&lock_ipi_check);
+
+ return rc;
+}
+
+unsigned check_trigger(unsigned int_id, unsigned int_ack)
+{
+ unsigned cpuid = read_cpuid();
+ unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;
+
+ /*
+ * If we are not using HYP mode timers for triggering a switchover
+ * then check whether this is a suitable local timer interrupt to
+ * switch
+ */
+ if (hyp_timer_trigger == FALSE) {
+ /*
+ * We need to hijack every 128th timer interrupt on cpu0 and
+ * use it as a stimulus to switchover
+ */
+ if (cpuid == 0 && int_id == LCL_TIMER_IRQ)
+ timer_count++;
+
+ if (timer_count & LCL_TIMER_FREQ)
+ return FALSE;
+ }
+ /*
+ * Trigger a switchover upon getting a HYP timer IRQ. It is
+ * targeted only at cpu0.
+ */
+ else if (int_id != HYP_TIMER_IRQ)
+ return FALSE;
+
+ /*
+ * Do the needful now that it is confirmed that we need to move
+ * to the other cluster
+ */
+
+ /* Indicator on emulation that switches are actually taking place */
+ if (platform != 0x1)
+ printf("%d", read_clusterid());
+
+ /*
+ * Send an IPI to all the cores in this cluster to start
+ * a switchover.
+ */
+ signal_switchover();
+
+ if (hyp_timer_trigger)
+ ack_trigger();
+ else
+ /*
+ * Complete handling of the local timer interrupt at the physical gic
+ * level. It is disabled as it is level-triggered and will reassert as
+ * soon as we leave this function, since it has not been cleared at the
+ * peripheral just yet. The local timer context is saved and this irq
+ * cleared in "save_hyp_context". The interrupt is re-enabled then.
+ */
+ gic_disable_int(int_id);
+
+ /* Finish handling this interrupt */
+ gic_eoi_int(int_ack);
+ if (read32(GIC_IC_PHY_BASE + GICC_CTL) & 0x200)
+ gic_deactivate_int(int_ack);
+
+ return TRUE;
+}
+
+void keep_trigger_alive(void)
+{
+ /*
+ * The OS might have disabled the HYP timer interrupt
+ * while setting up its view of the vGIC. So enable
+ * it if disabled upon receiving any other interrupt.
+ * Better than virtualising vGIC accesses on the TARGET
+ * CPU.
+ */
+ if (hyp_timer_trigger)
+ if (!
+ (read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
+ (1 << HYP_TIMER_IRQ)))
+ gic_enable_int(HYP_TIMER_IRQ);
+
+ return;
+}
+
+void enable_trigger(unsigned tval)
+{
+ unsigned ctl = TIMER_ENABLE;
+ unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;
+
+ /*
+ * No need to lock this as it is accessed by only one cpu
+ * per cluster and that too one at a time.
+ */
+ static unsigned int rand_no = 0xdeadbeef;
+ static struct _rand_state buffer;
+
+ /*
+ * Nothing needs to be done if physical local timers
+ * are being used for doing a switchover.
+ */
+ if (hyp_timer_trigger == TRUE) {
+ if (rand_async_switches) {
+ _srand_r(&buffer, rand_no);
+ rand_no = (unsigned) _rand_r(&buffer);
+ }
+
+ /* Enable timer and unmask interrupt */
+ write_cnthp_ctl(ctl);
+
+ if (rand_async_switches) {
+ unsigned interval;
+
+ /*
+ * TODO: Assuming that the tval is always 12000000.
+ * Increment or decrement the timer value randomly
+ * but never by more than a factor of 10
+ */
+ if (rand_no % 2)
+ interval = tval * (rand_no % 10);
+ else
+ interval = tval / (rand_no % 10);
+
+ write_cnthp_tval(interval);
+
+ } else {
+ /*
+ * Program the timer to fire every 12000000 instructions
+ * on the FastModel and every 1500000 cycles on the Emulator
+ */
+ if (platform == 0x1)
+ write_cnthp_tval(tval);
+ else
+ write_cnthp_tval(tval >> 3);
+ }
+
+ gic_enable_int(HYP_TIMER_IRQ);
+ }
+
+ return;
+}
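+
+/*
+ * Sketch of the randomised interval calculation in enable_trigger() above,
+ * written out as a pure function. The explicit guard against a zero
+ * (rand_no % 10) factor is an assumption of this sketch, not behaviour of
+ * the code above.
+ */
+static unsigned pick_switch_interval(unsigned tval, unsigned rand_no)
+{
+        unsigned factor = rand_no % 10;
+
+        if (factor == 0)
+                factor = 1;
+
+        /* Scale the nominal interval up or down, never by more than 10x */
+        return (rand_no % 2) ? tval * factor : tval / factor;
+}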
diff --git a/big-little/switcher/trigger/handle_switchover.s b/big-little/switcher/trigger/handle_switchover.s
new file mode 100644
index 0000000..d18ba21
--- /dev/null
+++ b/big-little/switcher/trigger/handle_switchover.s
@@ -0,0 +1,61 @@
+ ;
+ ; Copyright (c) 2011, ARM Limited. All rights reserved.
+ ;
+ ; Redistribution and use in source and binary forms, with
+ ; or without modification, are permitted provided that the
+ ; following conditions are met:
+ ;
+ ; Redistributions of source code must retain the above
+ ; copyright notice, this list of conditions and the
+ ; following disclaimer.
+ ;
+ ; Redistributions in binary form must reproduce the
+ ; above copyright notice, this list of conditions and
+ ; the following disclaimer in the documentation
+ ; and/or other materials provided with the distribution.
+ ;
+ ; Neither the name of ARM nor the names of its
+ ; contributors may be used to endorse or promote products
+ ; derived from this software without specific prior written
+ ; permission.
+ ;
+
+ AREA SwitchoverCode, CODE, READONLY, ALIGN=5
+
+ PRESERVE8
+
+ IMPORT save_context
+ IMPORT smc
+ EXPORT switch_cluster
+
+SMC_SEC_SHUTDOWN EQU 0x2
+
+ ; ----------------------------------------------------
+ ; This function directs the switchover to the inbound
+ ; cluster. The context is first saved, stacks switched
+ ; & the cluster is powered down.
+ ; We need to switch stacks from being resident in normal
+ ; WBWA/S memory to SO memory to prevent potential stack
+ ; corruption after turning off the C bit in the HSCTLR.
+ ; Subsequent accesses will be SO while there will be
+ ; valid cache lines of the stack from prior accesses
+ ; ----------------------------------------------------
+switch_cluster FUNCTION
+ ; ----------------------------------------------------
+ ; We don't push any registers on the stack as we are
+ ; not going to return from this function
+ ; ----------------------------------------------------
+ MOV r4, r0
+ BL save_context
+ ; ----------------------------------------------------
+ ; We are now through with saving the context and the
+ ; inbound cluster has started picking it up. Switch to
+ ; the secure world to clean the caches and power down
+ ; the cluster
+ ; ----------------------------------------------------
+ MOV r0, #SMC_SEC_SHUTDOWN
+ BL smc
+ ENDFUNC
+
+ END
+
diff --git a/big-little/switcher/trigger/sync_switchover.c b/big-little/switcher/trigger/sync_switchover.c
new file mode 100644
index 0000000..ad257bc
--- /dev/null
+++ b/big-little/switcher/trigger/sync_switchover.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "misc.h"
+#include "virt_helpers.h"
+#include "bl.h"
+
+extern void signal_switchover(void);
+
+unsigned is_hvc()
+{
+ return ((read_hsr() >> 26) == 0x12 ? TRUE : FALSE);
+}
+
+unsigned HandleHVC(vm_context * context)
+{
+ unsigned opcode = read_hsr() & 0xffff;
+ unsigned rc = FALSE;
+
+ switch(opcode) {
+
+ /*
+ * HVC call to switch to the other cluster. This is done
+ * by sending a switchover IPI to all the cores in the cluster.
+ */
+ case SYNC_SWITCHOVER:
+ signal_switchover();
+ rc = TRUE;
+ break;
+
+ /*
+ * HVC call to return the physical MPIDR
+ */
+ case READ_MPIDR:
+ context->gp_regs[0] = read_mpidr();
+ rc = TRUE;
+ break;
+
+ default:
+ break;
+
+ }
+
+ return rc;
+}
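+
+/*
+ * Illustrative caller side of the interface above: payload software at
+ * non-secure PL1 issues "HVC #imm16" and HandleHVC() reads the immediate
+ * back from the HSR ISS field. The immediate below is a placeholder for
+ * the real SYNC_SWITCHOVER value from misc.h, and the sketch assumes a
+ * GCC-style inline assembler rather than this project's toolchain.
+ */
+static inline void request_switchover_sketch(void)
+{
+        __asm__ volatile ("hvc #0" : : : "memory");     /* placeholder immediate */
+}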
diff --git a/big-little/virtualisor/cache_geom.c b/big-little/virtualisor/cache_geom.c
new file mode 100644
index 0000000..4138eeb
--- /dev/null
+++ b/big-little/virtualisor/cache_geom.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virtualisor.h"
+#include "virt_helpers.h"
+#include "misc.h"
+#include "context.h"
+#include "cache_geom.h"
+#include "events.h"
+
+unsigned cmop_debug = CMOP_DEBUG;
+cache_stats cm_op_stats[NUM_CPUS][MAX_CACHE_LEVELS];
+static unsigned tc_prev_line[NUM_CPUS][MAX_CACHE_LEVELS] = { 0 };
+static unsigned cm_line_cnt[NUM_CPUS][MAX_CACHE_LEVELS] = { 0xffffffff };
+static unsigned hc_line_cnt[NUM_CPUS][MAX_CACHE_LEVELS] = { 0 };
+static unsigned cm_ignline_cnt[NUM_CPUS][MAX_CACHE_LEVELS] = { 0 };
+static unsigned cm_extline_cnt[NUM_CPUS][MAX_CACHE_LEVELS] = { 0 };
+
+/*
+ * Iterate through all the implemented cache
+ * levels and save the geometry at each level.
+ *
+ */
+void find_cache_geometry(cache_geometry *cg_ptr)
+{
+ unsigned ctr, clidr, ccsidr, csselr, old_csselr;
+
+ /* Save Cache size selection register */
+ old_csselr = read_csselr();
+ clidr = read_clidr();
+ cg_ptr->clidr = clidr;
+
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+ unsigned cache_type = get_cache_type(clidr, ctr);
+
+ /* Only separate and unified caches */
+ if (cache_type >= 0x3) {
+ /*
+ * Choose the cache level & Data or Unified cache
+ * as there are no set/way operations on the ICache
+ */
+ csselr = ctr << 1;
+ write_csselr(csselr);
+
+ isb();
+
+ /*
+ * Read the CCSIDR to record information about this
+ * cache level.
+ */
+ ccsidr = read_ccsidr();
+ cg_ptr->ccsidr[ctr] = ccsidr;
+
+ } else {
+ /*
+ * Stop scanning at the first invalid/unsupported
+ * cache level
+ */
+ break;
+ }
+ }
+
+ /* Restore Cache size selection register */
+ write_csselr(old_csselr);
+ return;
+}
+
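+/*
+ * The "cache_type >= 0x3" test in find_cache_geometry() above relies on
+ * the CLIDR Ctype encoding: 0 = no cache, 1 = instruction only, 2 = data
+ * only, 3 = separate I and D, 4 = unified. A one-line restatement of that
+ * check, for illustration only:
+ */
+static unsigned level_has_setway_cache(unsigned clidr, unsigned level)
+{
+        return ((clidr >> (level * 3)) & 0x7) >= 0x3;
+}
+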
+/*
+ * Given two cache geometries, find out how they differ
+ */
+void find_cache_diff(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr, cache_diff *cd_ptr)
+{
+ unsigned tc_size = 0, hc_size = 0, tc_linelen = 0, hc_linelen = 0;
+ unsigned tc_assoc = 0, hc_assoc = 0, tc_numsets = 0, hc_numsets = 0;
+ unsigned ctr;
+
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+
+ /* Break at the first unimplemented cache level */
+ if (get_cache_type(hcg_ptr->clidr, ctr) == 0)
+ break;
+
+ /* Cache associativity */
+ tc_assoc = get_cache_assoc(tcg_ptr, ctr) + 1;
+ hc_assoc = get_cache_assoc(hcg_ptr, ctr) + 1;
+
+ /* Number of the sets in the cache */
+ tc_numsets = get_cache_numsets(tcg_ptr, ctr) + 1;
+ hc_numsets = get_cache_numsets(hcg_ptr, ctr) + 1;
+
+ /* Cache line length in words */
+ tc_linelen = 1 << (get_cache_linesz(tcg_ptr, ctr) + 2);
+ hc_linelen = 1 << (get_cache_linesz(hcg_ptr, ctr) + 2);
+
+ /* Cache size in words */
+ tc_size = tc_assoc * tc_numsets * tc_linelen;
+ hc_size = hc_assoc * hc_numsets * hc_linelen;
+
+ /*
+ * Find the factor by which the cache line sizes differ.
+ * The target cache line number will have to be
+ * multiplied or divided by this factor to get the absolute
+ * cache line number. Then, find the number of absolute
+ * cache lines in each cache.
+ */
+ if (tc_linelen >= hc_linelen) {
+ cd_ptr[ctr].tcline_factor =
+ tc_linelen / hc_linelen;
+ cd_ptr[ctr].tnumabs_clines =
+ tc_assoc * tc_numsets *
+ cd_ptr[ctr].tcline_factor;
+ cd_ptr[ctr].hnumabs_clines =
+ hc_assoc * hc_numsets;
+ } else {
+ cd_ptr[ctr].hcline_factor =
+ hc_linelen / tc_linelen;
+ cd_ptr[ctr].hnumabs_clines =
+ hc_assoc * hc_numsets *
+ cd_ptr[ctr].hcline_factor;
+ cd_ptr[ctr].tnumabs_clines =
+ tc_assoc * tc_numsets;
+ }
+
+ /*
+ * Find if the cache sizes differ. If so, then set a flag
+ * to indicate whether some set/way operations need to be
+ * extended on the host cpu or ignored on the target cpu
+ */
+ if (tc_size > hc_size)
+ cd_ptr[ctr].csize_diff = TCSZ_BIG;
+ else if (tc_size == hc_size)
+ cd_ptr[ctr].csize_diff = TCSZ_EQUAL;
+ else
+ cd_ptr[ctr].csize_diff = TCSZ_SMALL;
+ }
+
+ return;
+}
+
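+/*
+ * Worked example for the factors computed above, using made-up geometries:
+ * target L1 = 32KB, 2-way, 64-byte lines; host L1 = 32KB, 4-way, 32-byte
+ * lines. The line-size factor is 2, so every target set/way op maps onto
+ * two host lines even though the total sizes are equal.
+ */
+static void cache_diff_example(void)
+{
+        unsigned tc_linelen = 64 / 4, hc_linelen = 32 / 4;      /* line length in words */
+        unsigned tc_assoc = 2, tc_numsets = 256;                /* 2 * 256 * 64B = 32KB */
+        unsigned hc_assoc = 4, hc_numsets = 256;                /* 4 * 256 * 32B = 32KB */
+        unsigned tcline_factor = tc_linelen / hc_linelen;       /* 2 */
+        unsigned tnumabs_clines = tc_assoc * tc_numsets * tcline_factor;
+        unsigned hnumabs_clines = hc_assoc * hc_numsets;
+
+        /* Both come to 1024 absolute (32-byte) lines, so csize_diff is TCSZ_EQUAL */
+        (void) tnumabs_clines;
+        (void) hnumabs_clines;
+}
+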
+unsigned map_cache_geometries(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr, cache_diff *cd_ptr)
+{
+ unsigned rc = 0, cpu_id = read_cpuid();
+ unsigned hcr = 0, cluster_id = read_clusterid(), sibling_cpuid = 0;
+ unsigned abs_cpuid = 0;
+
+ if (!switcher) {
+ sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
+ abs_cpuid = abs_cpuid(cpu_id, cluster_id);
+ }
+
+ if (cluster_id == host_cluster) {
+
+ /* Find host cache topology */
+ find_cache_geometry(hcg_ptr);
+
+ /*
+ * Wait for the target cpu to send an event indicating that
+ * it has discovered its cache topology.
+ */
+ if (!switcher) {
+ wait_for_event(CACHE_GEOM_DONE, abs_cpuid);
+ reset_event(CACHE_GEOM_DONE, abs_cpuid);
+ }
+
+ /*
+ * Assuming that only the no. of sets, ways and cache line
+ * size will be different across the target and host
+ * cpu caches. Hence the CLIDRs should look the same.
+ * Support for absence of cache levels and memory
+ * mapped caches will be added later.
+ * Also this check ensures that the target cpu is
+ * always run before the host, else the cache geometry
+ * will have to be hardcoded.
+ */
+ if (hcg_ptr->clidr != tcg_ptr->clidr) {
+ printf("%s: Host CLIDR=0x%x : Target CLIDR=0x%x \n",
+ __FUNCTION__, hcg_ptr->clidr, tcg_ptr->clidr);
+ rc = 1;
+ goto out;
+ }
+
+ find_cache_diff(hcg_ptr, tcg_ptr, cd_ptr);
+
+ /*
+ * Enable bit for trapping set/way operations &
+ * Cache identification regs
+ */
+ hcr = read_hcr();
+ hcr |= HCR_TSW | HCR_TID2;
+ write_hcr(hcr);
+ dsb();
+ isb();
+
+ } else {
+
+ /* Find the cache geometry on the target cpu */
+ find_cache_geometry(tcg_ptr);
+
+ /*
+ * Send an event to the host cpu indicating that we have
+ * discovered our cache topology
+ */
+ if(!switcher) {
+ set_event(CACHE_GEOM_DONE, sibling_cpuid);
+ }
+ }
+ out:
+ return rc;
+}
+
+/*
+ * Given two cache geometries and the difference between them
+ * handle a cache maintenance operation by set/way
+ */
+void handle_cm_op(unsigned reg,
+ void (*op_handler) (unsigned),
+ cache_geometry *hcg_ptr,
+ cache_geometry *tcg_ptr,
+ cache_diff *cd_ptr)
+{
+ unsigned clvl = 0, cpu_id = read_cpuid();
+ unsigned tc_assoc = 0, tc_numsets = 0, tc_linesz = 0;
+ unsigned hc_assoc = 0, hc_numsets = 0, hc_linesz = 0;
+ unsigned lineno = 0, setno = 0, wayno = 0, abs_lineno = 0;
+
+ /*
+ * If the target cache line size is greater than the host's then
+ * each maintenance op has to be performed on more than one line on
+ * the host. It does not matter if the line sizes are equal.
+ */
+ unsigned ctr = cd_ptr[clvl].tcline_factor;
+
+ /*
+ * Find out the cache level for which the set/way operation was invoked.
+ * Use this to find the cache geometry in target cache to ascertain the
+ * set & way number from the argument. Use this info to calculate the
+ * target cache line number.
+ */
+ clvl = get_cache_level(reg);
+ tc_linesz = get_cache_linesz(tcg_ptr, clvl);
+ tc_assoc = get_cache_assoc(tcg_ptr, clvl);
+ tc_numsets = get_cache_numsets(tcg_ptr, clvl);
+
+ wayno = (reg >> __clz(tc_assoc)) & tc_assoc;
+ setno = (reg >> (tc_linesz + 4)) & tc_numsets;
+ lineno = (setno * (tc_assoc + 1)) + wayno;
+
+ if(cmop_debug) {
+ /*
+ * tc_prev_line is initialised to -1 (unsigned). We can never have so many
+ * cache lines. This helps determine when to record the start of a cm op.
+ * If count != lineno then either we are not counting or have been counting
+ * and now are out of sync. In either case, a new cm op is started
+ */
+ if (tc_prev_line[cpu_id][clvl] != lineno) {
+ tc_prev_line[cpu_id][clvl] = lineno;
+ /* All ops start out as partial ops */
+ cm_op_stats[cpu_id][clvl].part_cmop_cnt++;
+
+ /* Reset all our counters */
+ cm_ignline_cnt[cpu_id][clvl] = 0;
+ cm_extline_cnt[cpu_id][clvl] = 0;
+ hc_line_cnt[cpu_id][clvl] = 0;
+ cm_line_cnt[cpu_id][clvl] = 0;
+ }
+
+ tc_prev_line[cpu_id][clvl]--;
+ cm_line_cnt[cpu_id][clvl]++;
+ }
+
+ /* Convert target cache line no. to absolute cache line no. */
+ if (cd_ptr[clvl].tcline_factor)
+ abs_lineno = lineno * cd_ptr[clvl].tcline_factor;
+
+ /* Convert absolute cache line no. to host cache line no. */
+ if (cd_ptr[clvl].hcline_factor)
+ lineno = abs_lineno / cd_ptr[clvl].hcline_factor;
+
+ /*
+ * Find out the set & way no. on the host cache corresponding to the
+ * cache line no. calculated on the target cache.
+ */
+ hc_linesz = get_cache_linesz(hcg_ptr, clvl);
+ hc_assoc = get_cache_assoc(hcg_ptr, clvl);
+ hc_numsets = get_cache_numsets(hcg_ptr, clvl);
+
+ switch (cd_ptr[clvl].csize_diff) {
+ case TCSZ_BIG:
+ {
+ if (abs_lineno <
+ cd_ptr[clvl].hnumabs_clines) {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg =
+ get_setway_reg(wayno, hc_assoc,
+ setno, hc_linesz,
+ clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if(cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+ } else {
+ /* Ignore */
+ if(cmop_debug)
+ cm_ignline_cnt[cpu_id][clvl]++;
+
+ }
+ }
+ break;
+ case TCSZ_EQUAL:
+ {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg =
+ get_setway_reg(wayno, hc_assoc, setno,
+ hc_linesz, clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if(cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+ }
+ break;
+
+ case TCSZ_SMALL:
+ {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg =
+ get_setway_reg(wayno, hc_assoc, setno,
+ hc_linesz, clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if(cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+
+ /*
+ * If the target cache is smaller than the host cache then we
+ * need to extend the maintenance operation to the rest of the host
+ * cache.
+ */
+ if ((abs_lineno +
+ (1 * cd_ptr[clvl].tcline_factor)) ==
+ cd_ptr[clvl].tnumabs_clines) {
+
+ /*
+ * TODO: Temp hack. Due to the cache line factor we end up incrementing
+ * the lineno and miss one line.
+ */
+ lineno--;
+ for (lineno++;
+ lineno < (hc_numsets + 1) * (hc_assoc + 1);
+ lineno++) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+
+ /* Create new register value for operation on host cache */
+ reg =
+ get_setway_reg(wayno, hc_assoc,
+ setno, hc_linesz,
+ clvl);
+ /* Perform the operation */
+ op_handler(reg);
+
+ if(cmop_debug)
+ cm_extline_cnt[cpu_id][clvl]++;
+
+ }
+ } else {
+ /* Ignore */
+ }
+ break;
+ }
+ }
+
+
+ if(cmop_debug) {
+ /*
+ * If the op cnt has reached the maximum cache line number then
+ * print the statistics collected so far
+ *
+ * NOTE: We don't reset the counter. It will be done at the start
+ * of the next cm op automatically. Its value now is one more
+ * than the maximum valid target cache line number.
+ */
+ if (cm_line_cnt[cpu_id][clvl] == (tc_assoc + 1) * (tc_numsets + 1)) {
+
+ printf("%s", __FUNCTION__);
+ printf(" : TC Lines=0x%x ", cm_line_cnt[cpu_id][clvl]);
+ printf(" : HC Lines=0x%x ", hc_line_cnt[cpu_id][clvl]);
+ printf(" : Ign Lines=0x%x ", cm_ignline_cnt[cpu_id][clvl]);
+ printf(" : Extra Lines=0x%x ", cm_extline_cnt[cpu_id][clvl]);
+ printf("\n");
+
+ /* Register this as a complete set/way operation */
+ cm_op_stats[cpu_id][clvl].part_cmop_cnt--;
+ cm_op_stats[cpu_id][clvl].cmpl_cmop_cnt++;
+ }
+ }
+
+ return;
+}
+
diff --git a/big-little/virtualisor/cpus/a15/a15.c b/big-little/virtualisor/cpus/a15/a15.c
new file mode 100644
index 0000000..92bebac
--- /dev/null
+++ b/big-little/virtualisor/cpus/a15/a15.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "bl.h"
+#include "virtualisor.h"
+#include "a15.h"
+
+/* Forward declaration */
+static virt_descriptor a15_virt_desc;
+
+/*
+ * Dummy functions for setting up any cpu
+ * specific traps.
+ */
+unsigned a15_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
+{
+ return 0;
+}
+
+unsigned a15_trap_save(unsigned first_cpu, unsigned sibling_cpu)
+{
+ return 0;
+}
+
+unsigned a15_trap_restore(unsigned first_cpu, unsigned sibling_cpu)
+{
+ return 0;
+}
+
+unsigned a15_trap_setup(unsigned first_cpu, unsigned sibling_cpu)
+{
+ if (switcher) {
+
+ } else {
+ /* Always on */
+ }
+
+ /*
+ * Indicate that cpu specific virtualisor setup
+ * has been done. Restore context instead on next
+ * invocation
+ */
+ a15_virt_desc.init[read_cpuid()] = 1;
+ return 0;
+}
+
+static virt_descriptor a15_virt_desc __attribute__ ((section("virt_desc_section"))) = {
+ A15,
+ {0},
+ a15_trap_setup,
+ a15_trap_handle,
+ a15_trap_save,
+ a15_trap_restore,
+};
diff --git a/big-little/virtualisor/cpus/a15/include/a15.h b/big-little/virtualisor/cpus/a15/include/a15.h
new file mode 100644
index 0000000..554e401
--- /dev/null
+++ b/big-little/virtualisor/cpus/a15/include/a15.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __A15_H__
+#define __A15_H__
+
+#endif /* __A15_H__ */
diff --git a/big-little/virtualisor/cpus/a7/a7.c b/big-little/virtualisor/cpus/a7/a7.c
new file mode 100644
index 0000000..9c3cef5
--- /dev/null
+++ b/big-little/virtualisor/cpus/a7/a7.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "bl.h"
+#include "virtualisor.h"
+#include "a7.h"
+
+/* Forward declaration */
+static virt_descriptor a7_virt_desc;
+
+/*
+ * Dummy functions for setting up any cpu
+ * specific traps.
+ */
+unsigned a7_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
+{
+ return 0;
+}
+
+unsigned a7_trap_save(unsigned first_cpu, unsigned sibling_cpu)
+{
+ return 0;
+}
+
+unsigned a7_trap_restore(unsigned first_cpu, unsigned sibling_cpu)
+{
+ return 0;
+}
+
+unsigned a7_trap_setup(unsigned first_cpu, unsigned sibling_cpu)
+{
+ if (switcher) {
+
+ } else {
+ /* Always on */
+ }
+
+ /*
+ * Indicate that cpu specific virtualisor setup
+ * has been done. Restore context instead on next
+ * invocation
+ */
+ a7_virt_desc.init[read_cpuid()] = 1;
+ return 0;
+}
+
+static virt_descriptor a7_virt_desc __attribute__ ((section("virt_desc_section"))) = {
+ A7,
+ {0},
+ a7_trap_setup,
+ a7_trap_handle,
+ a7_trap_save,
+ a7_trap_restore,
+};
diff --git a/big-little/virtualisor/cpus/a7/include/a7.h b/big-little/virtualisor/cpus/a7/include/a7.h
new file mode 100644
index 0000000..5e2b62f
--- /dev/null
+++ b/big-little/virtualisor/cpus/a7/include/a7.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __A7_H__
+#define __A7_H__
+
+#endif /* __A7_H__ */
diff --git a/big-little/virtualisor/include/cache_geom.h b/big-little/virtualisor/include/cache_geom.h
new file mode 100644
index 0000000..642e0e9
--- /dev/null
+++ b/big-little/virtualisor/include/cache_geom.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __CACHE_GEOM_H__
+#define __CACHE_GEOM_H__
+
+#define MAX_CACHE_LEVELS 0x8
+
+/* Target cpu cache relative to host cpu cache size */
+#define TCSZ_EQUAL 0x0
+#define TCSZ_SMALL 0x1
+#define TCSZ_BIG 0x2
+
+#define get_setway_reg(a, b, c, d, e) ((a << __clz(b)) | (c << (d + 4)) | (e << 1))
+#define get_cache_type(clidr, lvl) ((clidr >> (lvl * 0x3)) & 0x7)
+#define get_cache_level(reg) ((reg >> 1) & 0x7)
+#define get_cache_linesz(cg, lvl) (cg->ccsidr[lvl] & 0x7)
+#define get_cache_assoc(cg, lvl) ((cg->ccsidr[lvl] >> 3) & 0x3ff)
+#define get_cache_numsets(cg, lvl) ((cg->ccsidr[lvl] >> 13) & 0x7fff)
+
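+/*
+ * Function form of get_setway_reg() above, spelled out for readability. It
+ * mirrors the DC CSW/CISW operand layout: way in the top bits (position
+ * derived from the associativity field via CLZ), set starting at bit
+ * (log2 of the line size in bytes), cache level in bits [3:1]. Purely an
+ * illustrative sketch; the macro above is what the code uses.
+ */
+static unsigned setway_reg_sketch(unsigned way, unsigned assoc_field,
+                                  unsigned set, unsigned linesz_field,
+                                  unsigned level)
+{
+        return (way << __clz(assoc_field)) |
+               (set << (linesz_field + 4)) |
+               (level << 1);
+}
+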
+/*
+ * Data structure that stores the foreseeable differences
+ * between the host and target caches at each implemented
+ * cache level.
+ * Absolute cache line numbers are calculated relative to
+ * the cache line size of the smaller cache to get the
+ * maximum granularity.
+ */
+typedef struct cache_diff {
+ /* Stores whether target cache is =,<,> host cache */
+ unsigned csize_diff;
+ /*
+ * Stores factor by which target cache line
+ * has to be multiplied to get absolute line
+ * no.
+ */
+ unsigned tcline_factor;
+ /*
+ * Stores factor by which absolute cache line
+ * no. has to be divided to get host cache line
+ * no.
+ */
+ unsigned hcline_factor;
+ /* Max absolute target cpu cache line number */
+ unsigned tnumabs_clines;
+ /* Max absolute host cpu cache line number */
+ unsigned hnumabs_clines;
+} cache_diff;
+
+/*
+ * Data structure that defines the cache topology of a cpu
+ */
+typedef struct cache_geom {
+ unsigned clidr;
+ /*
+ * One for each cpu to store the cache level
+ * the OS thinks its operating on.
+ */
+ unsigned ccselr;
+ /* One for each cache level */
+ unsigned ccsidr[MAX_CACHE_LEVELS];
+} cache_geometry;
+
+/*
+ * Data structure to hold cache virtualisation statistics.
+ * Reset for each switchover.
+ */
+typedef struct cache_stats {
+ /* Number of cm ops which did not cover the whole cache */
+ unsigned part_cmop_cnt;
+ /* Number of cm ops which spanned the entire cache */
+ unsigned cmpl_cmop_cnt;
+} cache_stats;
+
+extern unsigned map_cache_geometries(cache_geometry *,
+ cache_geometry *,
+ cache_diff *);
+extern void find_cache_geometry(cache_geometry *);
+extern void find_cache_diff(cache_geometry *,
+ cache_geometry *,
+ cache_diff *);
+extern void handle_cm_op(unsigned,
+ void (*) (unsigned),
+ cache_geometry *,
+ cache_geometry *,
+ cache_diff *);
+
+#endif /* __CACHE_GEOM_H__ */
diff --git a/big-little/virtualisor/include/mem_trap.h b/big-little/virtualisor/include/mem_trap.h
new file mode 100644
index 0000000..4c4a200
--- /dev/null
+++ b/big-little/virtualisor/include/mem_trap.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __MEM_TRAP_H__
+#define __MEM_TRAP_H__
+
+/*
+ * Data structure that holds info about all traps populated
+ * in the 2nd stage translation tables. It does not need to
+ * interpret the traps but simply save and restore them.
+ * This should prevent the usage of trap specific save/restore
+ * routines.
+ */
+typedef struct trap_data {
+ /* Does this structure contain valid data */
+ unsigned valid;
+ /* Which cluster to save/restore this trap on */
+ unsigned cluster_id;
+ /* Translation table address */
+ unsigned long long table;
+ /* Index corresponding to mapping */
+ unsigned index;
+ /* TODO: Revisit why we need two variables here */
+ /* Original Descriptor */
+ unsigned long long prev_desc;
+ /* Current Descriptor */
+ unsigned long long cur_desc;
+} mem_trap_data;
+
+extern unsigned mem_trap_setup(unsigned, mem_trap_data *);
+extern mem_trap_data s2_trap_section$$Base;
+extern unsigned s2_trap_section$$Length;
+
+#endif /* __MEM_TRAP_H__ */
diff --git a/big-little/virtualisor/include/virtualisor.h b/big-little/virtualisor/include/virtualisor.h
new file mode 100644
index 0000000..c3bf2c1
--- /dev/null
+++ b/big-little/virtualisor/include/virtualisor.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __VIRTUALISOR_H__
+#define __VIRTUALISOR_H__
+
+#include "misc.h"
+#include "virt_helpers.h"
+
+/*
+ * Data structure that holds a copy of the virtualized regs
+ */
+typedef struct virt_regs {
+ unsigned cluster_id;
+ unsigned mpidr;
+ unsigned midr;
+} virt_reg_data;
+
+/*
+ * Data structure that holds all the trap registers exported
+ * by the Virtualisation Extensions.
+ */
+typedef struct trap_regs {
+ unsigned hcr;
+ unsigned hdcr;
+ unsigned hcptr;
+ unsigned hstr;
+} reg_trap_data;
+
+typedef struct gp_regs {
+ unsigned r[15];
+} gp_regs;
+
+/*
+ * Descriptor exported by each processor describing
+ * which traps it wants to implement along with
+ * handlers for saving and restoring each
+ * configured trap.
+ */
+typedef struct virt_desc {
+ /* cpu midr contents */
+ unsigned cpu_no;
+ /*
+ * Bitmask to indicate that Virtualisor setup has been
+ * done on both host & target cpus.
+ */
+ unsigned char init[NUM_CPUS];
+ unsigned (*trap_setup) (unsigned, unsigned);
+ unsigned (*trap_handle) (gp_regs * regs, unsigned, unsigned);
+ unsigned (*trap_save) (unsigned, unsigned);
+ unsigned (*trap_restore) (unsigned, unsigned);
+} virt_descriptor;
+
+extern void SetupVirtualisor(unsigned);
+extern void SaveVirtualisor(unsigned);
+extern void RestoreVirtualisor(unsigned);
+extern void HandleVirtualisor(gp_regs *);
+extern void handle_vgic_distif_abort(unsigned, unsigned *, unsigned);
+extern unsigned find_sibling_cpu(void);
+extern virt_descriptor virt_desc_section$$Base;
+extern unsigned virt_desc_section$$Length;
+extern unsigned host_cluster;
+extern unsigned switcher;
+
+#endif /* __VIRTUALISOR_H__ */
diff --git a/big-little/virtualisor/mem_trap.c b/big-little/virtualisor/mem_trap.c
new file mode 100644
index 0000000..a3a2de8
--- /dev/null
+++ b/big-little/virtualisor/mem_trap.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virtualisor.h"
+#include "misc.h"
+#include "virt_helpers.h"
+#include "mem_trap.h"
+
+/*
+ * Generic call to make accesses to a peripheral trap into the
+ * HYP mode by invalidating its descriptor in the 2nd stage
+ * translation tables
+ */
+unsigned mem_trap_setup(unsigned periph_addr, mem_trap_data *periph_trap_data)
+{
+ unsigned rc = 0x0, four_kb_index = 0;
+ unsigned one_gb_index = 0, two_mb_index = 0;
+ unsigned long long vtcr = 0x0, hcr = 0x0, level = 0;
+ unsigned long long pagetable_base = 0x0, l2_desc = 0;
+ unsigned long long l3_desc = 0, l3_table = 0;
+
+ /* Check if 2nd stage translations are enabled */
+ hcr = read_hcr();
+ if (!(hcr & HCR_VM)) {
+ printf("%s: 2nd Stage translations not enabled \n", __FUNCTION__);
+ rc = 0x1;
+ goto out;
+ }
+
+ /* Check what level of tables we need to start at */
+ vtcr = read_vtcr();
+ level = (vtcr >> 6) & 0x3;
+
+ /* Read the page table base address. */
+ pagetable_base = read_vttbr();
+
+ /* Calculate the table indices */
+ one_gb_index = periph_addr >> 30;
+
+ /* Each GB contains (1 << 9) or 512 2MBs */
+ two_mb_index = (periph_addr >> 21) - ((1 << 9) * one_gb_index);
+
+ /* Each 2MB block contains (1 << 9) or 512 4KBs */
+ four_kb_index = (periph_addr >> 12) - ((1 << 9) * (periph_addr >> 21));
+
+ /* For either starting level find out the level 2 desc */
+ switch (level) {
+
+ case 0x1:
+ {
+ /* Start from first level */
+ unsigned long long l1_desc = 0;
+ unsigned long long l2_table = 0;
+
+ l1_desc = ((unsigned long long *)((unsigned)(&pagetable_base)[0]))[one_gb_index];
+ if ((l1_desc & 0x3) != TABLE_MAPPING) {
+ printf("%s: Invalid 1st level desc : 0x%llu \n", __FUNCTION__, l1_desc);
+ rc = 0x1;
+ goto out;
+ }
+
+ l2_table = l1_desc & 0xfffffff000UL;
+ l2_desc = ((unsigned long long *)((unsigned)(&l2_table)[0]))[two_mb_index];
+ break;
+ }
+
+ case 0x0:
+ {
+ /* Start from second level */
+ l2_desc = ((unsigned long long *)((unsigned)(&pagetable_base)[0]))[two_mb_index];
+ break;
+ }
+
+ default:
+ printf("%s: Invalid Pagetable level \n", __FUNCTION__);
+ rc = 0x1;
+ }
+
+ /* Validate the 2nd level descriptor */
+ if ((l2_desc & 0x3) != TABLE_MAPPING) {
+ printf("%s: Invalid 2nd level desc : 0x%llu \n",
+ __FUNCTION__, l2_desc);
+ rc = 0x1;
+ goto out;
+ }
+
+ l3_table = l2_desc & 0xfffffff000UL;
+ l3_desc = ((unsigned long long *)((unsigned)(&l3_table)[0]))[four_kb_index];
+
+ /*
+ * Validate the 3rd level descriptor. An invalid descriptor here means
+ * the mapping is already invalid and is not one we have touched.
+ */
+ if ((l3_desc & 0x3) != VALID_MAPPING) {
+ printf("%s: Invalid 3rd level desc : 0x%llu \n",
+ __FUNCTION__, l3_desc);
+ rc = 0x1;
+ goto out;
+ }
+
+ /* Save the info gathered so far */
+ periph_trap_data->table = l3_table;
+ periph_trap_data->index = four_kb_index;
+ periph_trap_data->prev_desc = l3_desc;
+ periph_trap_data->cluster_id = read_clusterid();
+ periph_trap_data->valid = 1;
+
+ /* Invalidate the peripheral page table entry */
+ ((unsigned long long *)((unsigned)(&l3_table)[0]))[four_kb_index] = 0x0;
+
+ out:
+ return rc;
+}
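+
+/*
+ * The index arithmetic above is equivalent to extracting 9-bit fields of
+ * the address (long-descriptor format, 4KB granule). A compact restatement,
+ * for illustration only:
+ */
+static void s2_index_sketch(unsigned pa)
+{
+        unsigned one_gb_index = pa >> 30;               /* level 1: 1GB entries */
+        unsigned two_mb_index = (pa >> 21) & 0x1ff;     /* level 2: 2MB entries */
+        unsigned four_kb_index = (pa >> 12) & 0x1ff;    /* level 3: 4KB entries */
+
+        (void) one_gb_index;
+        (void) two_mb_index;
+        (void) four_kb_index;
+}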
diff --git a/big-little/virtualisor/vgic_trap_handler.c b/big-little/virtualisor/vgic_trap_handler.c
new file mode 100644
index 0000000..77ac14c
--- /dev/null
+++ b/big-little/virtualisor/vgic_trap_handler.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virtualisor.h"
+#include "gic_registers.h"
+#include "misc.h"
+#include "virt_helpers.h"
+
+/*
+ * Whether A15 or A7, the distributor accesses are virtualised in
+ * exactly the same manner.
+ */
+void handle_vgic_distif_abort(unsigned pa, unsigned *data, unsigned write)
+{
+ unsigned value = 0, reg_offset = pa & 0xfff;
+
+ switch (reg_offset >> 7) {
+
+ /* Access to Processor Target registers */
+ case (GICD_CPUS >> 7):
+ if (write) {
+ /*
+ * OS is trying to reprogram the processor targets register.
+ * Find out the cpu interface mask for this cluster and use
+ * that instead to program the register.
+ */
+ value = get_cpuif_mask(*data);
+ write32(pa, value);
+ } else {
+ value = read32(pa);
+ *data = get_cpu_mask(value);
+ }
+
+ break;
+
+ /* Access to Software generated interrupt register */
+ case (GICD_SW >> 7):
+ if (write) {
+ /* Get the updated cpu interface mask */
+ value = get_cpuif_mask((*data >> 16) & 0xff) << 16;
+ value |= *data & ~(0xff << 16);
+ /*
+ * Clear the old cpu interface mask & update
+ * value with new cpu interface mask
+ */
+ write32(pa, value);
+ } else {
+ /* Cannot possibly have a read from SGI generation register */
+ }
+
+ break;
+
+ default:
+ if (write) {
+ write32(pa, *data);
+ } else {
+ *data = read32(pa);
+ }
+ }
+
+ return;
+}
+
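+/*
+ * Worked example for the GICD_SW (SGI generation) path above: a sketch
+ * assuming the OS's cpus 0-1 currently sit on cpu interfaces 2-3, so a
+ * write targeting cpus 0-1 is rewritten to reach the right physical
+ * interfaces.
+ */
+static void sgir_remap_example(void)
+{
+        unsigned data = (0x3 << 16) | 0x5;      /* OS value: target cpus 0-1, SGI 5 */
+        unsigned cpuif_mask = 0xc;              /* cpus 0-1 -> cpuifs 2-3 (assumed) */
+        unsigned value = (cpuif_mask << 16) | (data & ~(0xff << 16));
+
+        /* value is (0xc << 16) | 0x5: the same SGI, now aimed at cpuifs 2-3 */
+        (void) value;
+}
+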
diff --git a/big-little/virtualisor/virt_context.c b/big-little/virtualisor/virt_context.c
new file mode 100644
index 0000000..80a79b2
--- /dev/null
+++ b/big-little/virtualisor/virt_context.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virtualisor.h"
+#include "misc.h"
+#include "virt_helpers.h"
+#include "cache_geom.h"
+#include "mem_trap.h"
+extern virt_reg_data host_virt_regs[];
+extern reg_trap_data host_trap_regs[];
+extern unsigned cmop_debug;
+extern cache_stats cm_op_stats[NUM_CPUS][MAX_CACHE_LEVELS];
+
+/*
+ * Save/Restore of the Virtualisor should be done only on the host cpu
+ * & host cluster, unlike setup which is done on both. The cluster check
+ * is needed for cases where both clusters have the same cpu type and
+ * one cluster does not use the Virtualisor.
+ */
+void SaveVirtualisor(unsigned first_cpu)
+{
+ unsigned len = 0, ctr = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
+ unsigned cluster_id = read_clusterid(), index = 0, vd_len = 0, rc = 0;
+ mem_trap_data *s2_td = &s2_trap_section$$Base;
+ unsigned long long *cd_ptr = 0x0;
+ unsigned *periph_addr = 0x0;
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ if (cluster_id == host_cluster) {
+		/*
+		 * Since there is only one second stage translation table, it's
+		 * safe to assume that only one cpu (first_cpu) should save &
+		 * restore the context.
+		 */
+ len = (unsigned)&s2_trap_section$$Length;
+ if (cpu_id == first_cpu) {
+ /* Iterate through the array of 2nd stage translation traps */
+ for (ctr = 0; ctr < (len / sizeof(mem_trap_data)); ctr++) {
+ if (s2_td[ctr].valid
+ && s2_td[ctr].cluster_id == cluster_id) {
+
+ /*
+ * Save the current descriptor and restore the
+ * previous. Need not worry about synchronisation
+ * issues, as the existing entry was causing
+ * translation faults. The TLB never caches fault
+ * generating translations.
+ */
+					cd_ptr = &((unsigned long long *)
+						   ((unsigned)(&s2_td[ctr].table)[0]))[s2_td[ctr].index];
+ s2_td[ctr].cur_desc = *cd_ptr;
+ *cd_ptr = s2_td[ctr].prev_desc;
+ periph_addr = (unsigned *) cd_ptr;
+ dsb();
+ inv_tlb_mva((unsigned *) periph_addr[0]);
+ inv_bpred_all();
+ }
+ }
+ }
+
+ /* Save the HYP trap registers for this cpu */
+ host_trap_regs[cpu_id].hcr = read_hcr();
+ host_trap_regs[cpu_id].hdcr = read_hdcr();
+ host_trap_regs[cpu_id].hcptr = read_hcptr();
+ host_trap_regs[cpu_id].hstr = read_hstr();
+
+ if(cmop_debug) {
+ /* Print Cache maintenance statistics */
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+ printf("Cache Level %d", ctr);
+ printf(" : Partial ops=0x%x",
+ cm_op_stats[cpu_id][ctr].part_cmop_cnt);
+ printf(" : Complete ops=0x%x",
+ cm_op_stats[cpu_id][ctr].cmpl_cmop_cnt);
+ printf("\n");
+ }
+ }
+
+ }
+
+ /*
+ * Call any cpu specific save routines (if any)
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
+
+ if (cpu_no == vd_array[index].cpu_no) {
+ handler = vd_array[index].trap_save;
+ if(handler) {
+ rc = handler(first_cpu, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__,
+ cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
+
+ out:
+ if (rc) {
+ printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
+
+ return;
+}
+
+/*
+ * TODO: this pass is not strictly required; the cpu restore function
+ * could be invoked directly from SetupVirtualisor instead of walking
+ * the descriptor array again.
+ */
+void RestoreVirtualisor(unsigned first_cpu)
+{
+ unsigned len = 0, ctr = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
+ unsigned cluster_id = read_clusterid(), index = 0, vd_len = 0, rc = 0;
+ mem_trap_data *s2_td = &s2_trap_section$$Base;
+ unsigned long long *cd_ptr = 0x0;
+ unsigned *periph_addr = 0x0;
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ if (cluster_id == host_cluster) {
+		/*
+		 * Since there is only one second stage translation table, it's
+		 * safe to assume that only one cpu (first_cpu) should save &
+		 * restore the context.
+		 */
+ len = (unsigned)&s2_trap_section$$Length;
+ if (cpu_id == first_cpu) {
+ /* Iterate through the array of 2nd stage translation traps */
+ for (ctr = 0; ctr < (len / sizeof(mem_trap_data)); ctr++) {
+ if (s2_td[ctr].valid
+ && s2_td[ctr].cluster_id == cluster_id) {
+ /*
+ * Restore the current descriptor and save the previous
+ */
+					cd_ptr = &((unsigned long long *)
+						   ((unsigned)((&s2_td[ctr].table)[0])))[s2_td[ctr].index];
+ s2_td[ctr].prev_desc = *cd_ptr;
+ *cd_ptr = s2_td[ctr].cur_desc;
+ periph_addr = (unsigned *) cd_ptr;
+ dsb();
+ inv_tlb_mva((unsigned *) periph_addr[0]);
+ inv_bpred_all();
+ }
+ }
+ }
+
+ /* Now restore the virtualised ID registers for this cpu */
+ write_vmidr(host_virt_regs[cpu_id].midr);
+ write_vmpidr(host_virt_regs[cpu_id].mpidr);
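+		/*
+		 * VMIDR & VMPIDR supply the values returned for non-secure PL1
+		 * reads of MIDR & MPIDR, so the guest keeps seeing the target
+		 * cluster's identity while running on the host cluster.
+		 */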
+
+ /* Restore the HYP trap registers for this cpu */
+ write_hcr(host_trap_regs[cpu_id].hcr);
+ write_hdcr(host_trap_regs[cpu_id].hdcr);
+ write_hcptr(host_trap_regs[cpu_id].hcptr);
+ write_hstr(host_trap_regs[cpu_id].hstr);
+
+ if(cmop_debug) {
+ /* Resetting Cache maintenance statistics */
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+ cm_op_stats[cpu_id][ctr].part_cmop_cnt = 0;
+ cm_op_stats[cpu_id][ctr].cmpl_cmop_cnt = 0;
+ }
+ }
+ }
+
+ /*
+ * Call any cpu specific restore routines (if any)
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
+
+ if (cpu_no == vd_array[index].cpu_no) {
+ handler = vd_array[index].trap_restore;
+ if(handler) {
+ rc = handler(first_cpu, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__,
+ cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
+
+ out:
+ if (rc) {
+ printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
+
+ return;
+}
diff --git a/big-little/virtualisor/virt_handle.c b/big-little/virtualisor/virt_handle.c
new file mode 100644
index 0000000..2cfb2cc
--- /dev/null
+++ b/big-little/virtualisor/virt_handle.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virtualisor.h"
+#include "virt_helpers.h"
+#include "hyp_types.h"
+#include "cache_geom.h"
+#include "mem_trap.h"
+#include "gic_registers.h"
+#include "bl.h"
+
+extern cache_geometry host_cache_geometry[];
+extern cache_geometry target_cache_geometry[];
+extern cache_diff cache_delta[NUM_CPUS][MAX_CACHE_LEVELS];
+
+void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs)
+{
+ unsigned Op1, Op2, CRn, CRm, Rt, write, cpu_id = read_cpuid();
+
+ Op2 = (hsr >> 17) & 0x7;
+ Op1 = (hsr >> 14) & 0x7;
+ CRn = (hsr >> 10) & 0xf;
+ Rt = (hsr >> 5) & 0xf;
+ CRm = (hsr >> 1) & 0xf;
+ write = !(hsr & 0x1);
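+	/*
+	 * The ISS fields above follow the MCR/MRC encoding, e.g. a trapped
+	 * "MRC p15, 0, r2, c0, c0, 5" (an MPIDR read) decodes as Op1=0,
+	 * CRn=0, CRm=0, Op2=5 and Rt=2, with bit 0 of hsr set to indicate
+	 * a read, hence write == 0.
+	 */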
+
+ switch (CRn) {
+ case CRN_C0:
+ switch (Op1) {
+ case 0:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case MIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_vmidr();
+ break;
+ case CTR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_ctr();
+ break;
+ case TCMTR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_tcmtr();
+ break;
+ case TLBTR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_tlbtr();
+ break;
+ case MPIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_vmpidr();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 1:
+ switch (Op2) {
+ case ID_PFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_pfr0();
+ break;
+ case ID_PFR1:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_pfr1();
+ break;
+ case ID_DFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_dfr0();
+ break;
+ case ID_AFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_afr0();
+ break;
+ case ID_MMFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr0();
+ break;
+ case ID_MMFR1:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr1();
+ break;
+ case ID_MMFR2:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr2();
+ break;
+ case ID_MMFR3:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr3();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 2:
+ switch (Op2) {
+ case ID_ISAR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar0();
+ break;
+ case ID_ISAR1:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar1();
+ break;
+ case ID_ISAR2:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar2();
+ break;
+ case ID_ISAR3:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar3();
+ break;
+ case ID_ISAR4:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar4();
+ break;
+ case ID_ISAR5:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar5();
+ break;
+ default:
+ /* RAZ */
+ regs->r[Rt] = 0x0;
+ }
+ break;
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ if (write)
+ goto error;
+ /* RAZ */
+ regs->r[Rt] = 0x0;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 1:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case CCSIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] =
+ target_cache_geometry[cpu_id].
+ ccsidr[get_cache_level
+ (target_cache_geometry[cpu_id].
+ ccselr)];
+ break;
+ case CLIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] =
+ target_cache_geometry[cpu_id].clidr;
+ break;
+ case AIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_aidr();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 2:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case CSSELR:
+ if (write)
+ target_cache_geometry[cpu_id].
+ ccselr = regs->r[Rt];
+ else
+ regs->r[Rt] =
+ target_cache_geometry[cpu_id].
+ ccselr;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
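+	/*
+	 * Set/way cache maintenance operations are trapped so that an
+	 * operation issued with the target cluster's cache geometry in
+	 * mind can be applied to the host cluster's (possibly different)
+	 * geometry by handle_cm_op(), using the precomputed host/target
+	 * cache geometry delta.
+	 */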
+ case CRN_C7:
+ switch (Op1) {
+ case 0:
+ switch (CRm) {
+ case 6:
+ switch (Op2) {
+ case DCISW:
+ {
+ if (!write)
+ goto error;
+ handle_cm_op(regs->r[Rt],
+ dcisw,
+ &host_cache_geometry[cpu_id],
+ &target_cache_geometry[cpu_id],
+ &cache_delta[cpu_id][0]);
+ break;
+ }
+ default:
+ goto error;
+ }
+ break;
+ case 10:
+ switch (Op2) {
+ case DCCSW:
+ {
+ if (!write)
+ goto error;
+ handle_cm_op(regs->r[Rt],
+ dccsw,
+ &host_cache_geometry[cpu_id],
+ &target_cache_geometry[cpu_id],
+ &cache_delta[cpu_id][0]);
+ break;
+ }
+ default:
+ goto error;
+ }
+ break;
+ case 14:
+ switch (Op2) {
+ case DCCISW:
+ {
+ if (!write)
+ goto error;
+ handle_cm_op(regs->r[Rt],
+							     dccisw,
+ &host_cache_geometry[cpu_id],
+ &target_cache_geometry[cpu_id],
+ &cache_delta[cpu_id][0]);
+ break;
+ }
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case CRN_C9:
+ switch (Op1) {
+ case 1:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case 2:
+ /*
+ * A write to the L2CTLR register means trouble
+ * as the A7 version does not have all the fields
+ * that the A15 has. Handling needs more thought
+ */
+ if (write) {
+ printf("%s: Unexpected L2CTLR write \n",
+ __FUNCTION__);
+ goto error;
+ }
+
+ /*
+ * A read of the L2CTLR should return the total number
+ * of cpus across both the clusters in the "always on"
+ * configuration. Since there are only 2 bits for the
+ * number of cpus in the L2CTLR we need to flag any
+ * system with > 4 cpus.
+ */
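+				/*
+				 * For example, assuming a 2+2 A15/A7 system in
+				 * the "always on" configuration, num_cpus is 4
+				 * and bits [25:24] of the returned value read
+				 * as 3.
+				 */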
+ if (!switcher) {
+ unsigned num_cpus = CLUSTER_CPU_COUNT(host_cluster)
+ + CLUSTER_CPU_COUNT(!host_cluster);
+
+ if (num_cpus > 4) {
+ printf("%s: Unexpected L2CTLR read \n",
+ __FUNCTION__);
+ goto error;
+ }
+
+						/* Return the physical L2CTLR with the cpu count field overridden */
+						regs->r[Rt] = read_l2ctlr();
+						regs->r[Rt] &= ~(0x3 << 24);
+						regs->r[Rt] |= (num_cpus - 1) << 24;
+ } else {
+ regs->r[Rt] = read_l2ctlr();
+ }
+ break;
+ case 3:
+ /*
+ * A write to the L2ECTLR register means trouble
+ * as it does not exist on A7. Handling needs more
+ * thought
+ */
+ if (write) {
+ printf("%s: Unexpected L2ECTLR write \n",
+ __FUNCTION__);
+ goto error;
+ } else {
+ regs->r[Rt] = read_l2ectlr();
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+	/*
+	 * Support for accesses to the PMON register space. It has not been
+	 * verified whether all the registers are readable & writable, but
+	 * execution will never reach here if a register is inaccessible:
+	 * such an access takes an undefined instruction abort instead.
+	 */
+ case 0:
+ switch (CRm) {
+ case 14:
+ switch (Op2) {
+ case 0:
+ if(write)
+ write_pmuserenr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmuserenr();
+ break;
+ case 1:
+ if(write)
+ write_pmintenset(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmintenset();
+ break;
+ case 2:
+ if(write)
+ write_pmintenclr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmintenclr();
+ break;
+ case 3:
+ if(write)
+ write_pmovsset(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmovsset();
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+ case 13:
+ switch (Op2) {
+ case 0:
+ if(write)
+ write_pmccntr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmccntr();
+ break;
+ case 1:
+ if(write)
+ write_pmxevtyper(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmxevtyper();
+ break;
+ case 2:
+ if(write)
+ write_pmxevcntr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmxevcntr();
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+ case 12:
+ switch (Op2) {
+ case 0:
+ if(write)
+ write_pmcr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmcr();
+ break;
+ case 1:
+ if(write)
+ write_pmcntenset(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmcntenset();
+ break;
+ case 2:
+ if(write)
+ write_pmcntenclr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmcntenclr();
+ break;
+ case 3:
+ if(write)
+ write_pmovsr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmovsr();
+ break;
+ case 4:
+ if(write)
+ write_pmswinc(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmswinc();
+ break;
+ case 5:
+ if(write)
+ write_pmselr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmselr();
+ break;
+ case 6:
+ if(write)
+ write_pmceid0(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmceid0();
+ break;
+ case 7:
+ if(write)
+ write_pmceid1(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmceid1();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+
+ return;
+
+ error:
+ printf("%s: Unexpected cp15 instruction", __FUNCTION__);
+ printf(" : %s", write ? "MCR p15" : "MRC p15");
+ printf(", %d, %d, %d, %d, %d \n", Op1, Rt, CRn, CRm, Op2);
+ panic();
+
+}
+
+void trap_dabort_handle(unsigned hsr, gp_regs * regs)
+{
+ unsigned hdfar = 0x0, hpfar = 0x0, pa = 0x0, *data = 0x0;
+ unsigned write = 0x0;
+
+ hdfar = read_hdfar();
+ hpfar = read_hpfar();
+
+ pa = ((hpfar >> 4) << 12) + (hdfar & 0xfff);
+ data = &regs->r[(hsr >> 16) & 0xf];
+ write = (hsr >> 6) & 0x1;
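+	/*
+	 * HPFAR[31:4] holds bits [39:12] of the faulting IPA and HDFAR
+	 * supplies the offset within the 4KB page. For example, assuming
+	 * GIC_ID_PHY_BASE is 0x2C001000, an aborted access to GICD_ITARGETSR
+	 * at 0x2C001820 arrives with hpfar == 0x2C0010 and
+	 * (hdfar & 0xfff) == 0x820, reconstructing pa == 0x2C001820.
+	 */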
+
+ /* Only distributor accesses are virtualised at the moment */
+ if ((pa & ~0xfff) == GIC_ID_PHY_BASE) {
+ handle_vgic_distif_abort(pa, data, write);
+ }
+
+ return;
+}
+
+void HandleVirtualisor(gp_regs * regs)
+{
+ unsigned cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr()), rc = 0;
+ unsigned hsr = read_hsr(), elr = 0, vd_len = 0, index = 0;
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (gp_regs *, unsigned, unsigned) = 0x0, sibling;
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ /*
+ * Perform the generic trap handling
+ */
+ switch (hsr >> 26) {
+ case TRAP_DABORT:
+ trap_dabort_handle(hsr, regs);
+ break;
+ case TRAP_CP15_32:
+ trap_cp15_mrc_mcr_handle(hsr, regs);
+ break;
+ default:
+ printf("%s: Unexpected trap", __FUNCTION__);
+ printf(": HSR=0x%x Regs=0x%x \n", hsr, (unsigned) regs);
+ panic();
+ }
+
+ /*
+ * Do any cpu specific trap handling.
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
+
+ if (cpu_no == vd_array[index].cpu_no) {
+ handler = vd_array[index].trap_handle;
+ if(handler) {
+ rc = handler(regs, hsr, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__,
+ cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
+
+	/*
+	 * This is a trap of the kind where we simply move on to the
+	 * next instruction in the guest. HSR.IL (bit 25) gives the
+	 * length of the trapped instruction: step by 4 bytes for a
+	 * 32-bit instruction and by 2 bytes for a 16-bit Thumb one.
+	 */
+ elr = ((vm_context *) regs)->elr_hyp;
+ if (hsr & (1 << 25))
+ elr += 4;
+ else
+ elr += 2;
+ ((vm_context *) regs)->elr_hyp = elr;
+
+ out:
+ if (rc) {
+ printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
+
+ return;
+}
diff --git a/big-little/virtualisor/virt_setup.c b/big-little/virtualisor/virt_setup.c
new file mode 100644
index 0000000..c2dd75e
--- /dev/null
+++ b/big-little/virtualisor/virt_setup.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virt_helpers.h"
+#include "virtualisor.h"
+#include "events.h"
+#include "misc.h"
+#include "cache_geom.h"
+#include "mem_trap.h"
+#include "gic_registers.h"
+
+virt_reg_data host_virt_regs[NUM_CPUS];
+reg_trap_data host_trap_regs[NUM_CPUS];
+cache_geometry host_cache_geometry[NUM_CPUS];
+cache_geometry target_cache_geometry[NUM_CPUS];
+
+/* Cache geometry differences for each cpu at each level */
+cache_diff cache_delta[NUM_CPUS][MAX_CACHE_LEVELS];
+static mem_trap_data svgic_distif_trap
+__attribute__ ((section("s2_trap_section"))) = {
+ 0, 0x0, 0x0, 0x0, 0x0, 0x0,
+};
+
+
+/*
+ * Flags which indicate whether the cpu independent
+ * functionality of the Virtualisor has been set up
+ * on both the host and target clusters.
+ */
+static unsigned virt_init[NUM_CPUS];
+
+/*
+ * Detect the type of dual cluster system we are running on: read
+ * our cpu type and then use the KFS_ID register to determine the
+ * type of cpu on the other cluster.
+ */
+unsigned find_sibling_cpu()
+{
+ unsigned cpu_no = PART_NO(read_midr());
+
+ switch (DC_SYSTYPE) {
+ case A15_A15:
+ if(cpu_no == A15)
+ return cpu_no;
+ break;
+ case A7_A15:
+ case A15_A7:
+ if(cpu_no == A15)
+ return A7;
+ else if(cpu_no == A7)
+ return A15;
+ else
+ break;
+ }
+
+ printf("Unsupported Dual cluster system : 0x%x\n", DC_SYSTYPE);
+ panic();
+
+ return 0;
+}
+
+void SetupVirtualisor(unsigned first_cpu)
+{
+ unsigned rc = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
+ unsigned vd_len = 0, index = 0, cluster_id = read_clusterid();
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
+ unsigned sibling_cpuid = 0, abs_cpuid = 0;
+
+ if (!switcher) {
+ sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
+ abs_cpuid = abs_cpuid(cpu_id, cluster_id);
+ }
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ /*
+ * Do the generic trap setup
+ */
+ if (virt_init[cpu_id] == FALSE) {
+
+		/*
+		 * In the "always-on" configuration, both clusters have to
+		 * ensure that the L2CTLR register includes the cpu count of
+		 * both clusters while reporting the number of secondary
+		 * cpus. So set up the necessary trap.
+		 */
+ if (!switcher) {
+ /*
+ * Enable traps to CRn = 9 cp15 space
+ */
+ write_hstr(read_hstr() | (1 << 9));
+ }
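+		/*
+		 * HSTR.T9 causes non-secure PL1 MCR/MRC accesses to cp15
+		 * registers with CRn == 9 (which includes L2CTLR & the PMU
+		 * registers) to trap to HYP, where they are emulated by
+		 * trap_cp15_mrc_mcr_handle().
+		 */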
+
+		/*
+		 * Cache geometry of each cpu on the host cluster needs to be
+		 * virtualised if the cpu type is different from that on the
+		 * target cluster. This can be done generically.
+		 */
+ if (cpu_no != sibling) {
+ rc = map_cache_geometries(&host_cache_geometry[cpu_id],
+ &target_cache_geometry[cpu_id],
+ &cache_delta[cpu_id][0]);
+ if (rc) {
+ printf("%s: Failed to map cache geometries \n", __FUNCTION__);
+ rc = 1;
+ goto out;
+ }
+
+ }
+
+
+ /*
+ * Irrespective of what cpu types are present in the
+ * dual cluster system, the host cluster has to trap
+ * accesses to the vgic distributor when switching.
+ */
+ if (switcher && cluster_id == host_cluster) {
+ if (cpu_id == first_cpu) {
+ rc = mem_trap_setup(GIC_ID_PHY_BASE, &svgic_distif_trap);
+ if (rc) {
+ printf("%s: svgic distif trap setup failed \n",
+ __FUNCTION__);
+ goto out;
+ }
+ }
+ }
+
+
+ /*
+ * If the two clusters have different cpu types, then the
+ * target saves its midr and the host uses the value to
+ * virtualise its midr.
+ * mpidr is virtualised on the host cluster whether we are
+ * running "always on" or "switching". The latter cares
+ * about the cluster id while the former cares about the
+ * cpu ids as well.
+ */
+ if (cluster_id != host_cluster) {
+ host_virt_regs[cpu_id].mpidr = read_mpidr();
+ if (cpu_no != sibling)
+ host_virt_regs[cpu_id].midr = read_midr();
+ if (!switcher) {
+				/*
+				 * Send a signal to the host to indicate
+				 * that the registers are ready to be read.
+				 * The cpu id is the absolute cpu number
+				 * across clusters.
+				 */
+ set_event(VID_REGS_DONE, sibling_cpuid);
+ }
+ } else {
+ if (!switcher) {
+				/*
+				 * Wait for the target cpu to save its
+				 * registers before using them.
+				 */
+ wait_for_event(VID_REGS_DONE, abs_cpuid);
+ reset_event(VID_REGS_DONE, abs_cpuid);
+
+ /*
+ * Add number of cpus in the target cluster to
+ * the cpuid of this cpu.
+ */
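+				/*
+				 * For example, assuming 2 cpus on the target
+				 * cluster, host cpu0's virtual MPIDR reports
+				 * cpu id 2, keeping cpu ids unique across the
+				 * combined system.
+				 */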
+ host_virt_regs[cpu_id].mpidr += CLUSTER_CPU_COUNT(!host_cluster);
+ }
+ write_vmpidr(host_virt_regs[cpu_id].mpidr);
+ if (cpu_no != sibling)
+ write_vmidr(host_virt_regs[cpu_id].midr);
+ }
+
+ if (cluster_id == host_cluster) {
+ /*
+ * Assuming that with the switcher, the host always
+ * runs after the target. So, if we are here then
+ * the target must have completed its initialisation
+ *
+ * In the other case, if we are here after exchanging
+ * the events above, then the target has finished
+ * initialising.
+ */
+ virt_init[cpu_id] = 1;
+ }
+
+ } else {
+ if (switcher)
+ RestoreVirtualisor(first_cpu);
+ }
+
+
+ /*
+ * Do the cpu specific initialisation (if any)
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
+
+ if (cpu_no == vd_array[index].cpu_no) {
+			/* If not initialised then set up, else restore */
+ if (vd_array[index].init[cpu_id] == 0)
+ handler = vd_array[index].trap_setup;
+ else
+ handler = vd_array[index].trap_restore;
+
+ if(handler) {
+ rc = handler(first_cpu, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__,
+ cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
+
+ out:
+ if (rc) {
+ printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
+
+ return;
+}