path: root/ArmPkg
author     Harry Liebel <Harry.Liebel@arm.com>    2013-07-18 18:07:46 +0000
committer  oliviermartin <oliviermartin@6f19259b-4bc3-4df7-8a09-765794883524>    2013-07-18 18:07:46 +0000
commit     25402f5d0660acde3ee382a36b065945251990dc (patch)
tree       293fd0d8b229479d4d7cd540e034a10e08c18058 /ArmPkg
parent     8477cb6e159008d1703381b9f6159e8c90ccc2bf (diff)
ArmPkg: Added Aarch64 support
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Harry Liebel <Harry.Liebel@arm.com>
Signed-off-by: Olivier Martin <olivier.martin@arm.com>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@14486 6f19259b-4bc3-4df7-8a09-765794883524
Diffstat (limited to 'ArmPkg')
-rw-r--r--  ArmPkg/ArmPkg.dec | 9
-rw-r--r--  ArmPkg/ArmPkg.dsc | 45
-rw-r--r--  ArmPkg/Drivers/ArmCpuLib/ArmCortexA15Lib/ArmCortexA15Lib.c | 2
-rw-r--r--  ArmPkg/Drivers/ArmCpuLib/ArmCortexA5xLib/ArmCortexA5xLib.c | 50
-rw-r--r--  ArmPkg/Drivers/ArmCpuLib/ArmCortexA5xLib/ArmCortexA5xLib.inf | 35
-rw-r--r--  ArmPkg/Drivers/ArmCpuLib/ArmCortexAEMv8Lib/ArmCortexAEMv8Lib.c | 42
-rw-r--r--  ArmPkg/Drivers/ArmCpuLib/ArmCortexAEMv8Lib/ArmCortexAEMv8Lib.inf | 38
-rw-r--r--  ArmPkg/Drivers/CpuDxe/AArch64/Exception.c | 153
-rw-r--r--  ArmPkg/Drivers/CpuDxe/AArch64/ExceptionSupport.S | 381
-rw-r--r--  ArmPkg/Drivers/CpuDxe/AArch64/Mmu.c | 189
-rw-r--r--  ArmPkg/Drivers/CpuDxe/CpuDxe.h | 9
-rw-r--r--  ArmPkg/Drivers/CpuDxe/CpuDxe.inf | 4
-rw-r--r--  ArmPkg/Drivers/TimerDxe/TimerDxe.c | 2
-rw-r--r--  ArmPkg/Filesystem/SemihostFs/SemihostFs.inf | 4
-rw-r--r--  ArmPkg/Include/AsmMacroIoLibV8.h | 200
-rw-r--r--  ArmPkg/Include/Chipset/AArch64.h | 179
-rw-r--r--  ArmPkg/Include/Chipset/AArch64Mmu.h | 208
-rw-r--r--  ArmPkg/Include/Chipset/ArmAemV8.h | 21
-rw-r--r--  ArmPkg/Include/Chipset/ArmArchTimer.h (renamed from ArmPkg/Include/Chipset/ArmV7ArchTimer.h) | 9
-rw-r--r--  ArmPkg/Include/Chipset/ArmCortexA5x.h | 23
-rw-r--r--  ArmPkg/Include/Chipset/ArmV7.h | 2
-rw-r--r--  ArmPkg/Include/Library/ArmArchTimerLib.h (renamed from ArmPkg/Include/Library/ArmV7ArchTimerLib.h) | 10
-rw-r--r--  ArmPkg/Include/Library/ArmLib.h | 13
-rw-r--r--  ArmPkg/Library/ArmArchTimerLib/ArmArchTimerLib.c | 7
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimer.c | 275
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimerSupport.S | 140
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c | 263
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h | 98
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64Lib.inf | 44
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64LibPrePi.inf | 48
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64LibSec.inf | 43
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c | 644
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64Support.S | 503
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/ArmLib.c | 53
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/ArmLibPrivate.h | 82
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/ArmLibSupportV8.S | 127
-rw-r--r--  ArmPkg/Library/ArmLib/ArmV7/ArmV7ArchTimer.c | 2
-rw-r--r--  ArmPkg/Library/ArmLib/Null/NullArmLib.inf | 3
-rw-r--r--  ArmPkg/Library/ArmSmcLib/AArch64/ArmSmc.S | 78
-rw-r--r--  ArmPkg/Library/ArmSmcLib/ArmSmcLib.inf | 3
-rw-r--r--  ArmPkg/Library/ArmSmcLibNull/AArch64/ArmSmcNull.S | 32
-rw-r--r--  ArmPkg/Library/ArmSmcLibNull/ArmSmcLibNull.inf | 3
-rw-r--r--  ArmPkg/Library/BaseMemoryLibStm/AArch64/CopyMem.c | 146
-rw-r--r--  ArmPkg/Library/BaseMemoryLibStm/AArch64/SetMem.c | 84
-rwxr-xr-x  ArmPkg/Library/BaseMemoryLibStm/BaseMemoryLibStm.inf | 6
-rw-r--r--  ArmPkg/Library/BasePeCoffLib/AArch64/PeCoffLoaderEx.c | 129
-rwxr-xr-x  ArmPkg/Library/BasePeCoffLib/BasePeCoffLib.inf | 6
-rw-r--r--  ArmPkg/Library/BdsLib/BdsLinuxFdt.c | 2
-rw-r--r--  ArmPkg/Library/CompilerIntrinsicsLib/AArch64/memcpy.S | 125
-rw-r--r--  ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf | 5
-rw-r--r--  ArmPkg/Library/DebugAgentSymbolsBaseLib/AArch64/DebugAgentException.S | 93
-rw-r--r--  ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.c | 2
-rw-r--r--  ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.inf | 3
-rw-r--r--  ArmPkg/Library/DefaultExceptionHandlerLib/AArch64/DefaultExceptionHandler.c | 112
-rw-r--r--  ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLib.inf | 6
-rw-r--r--  ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLibBase.inf | 3
-rw-r--r--  ArmPkg/Library/SemihostLib/AArch64/GccSemihost.S | 23
-rw-r--r--  ArmPkg/Library/SemihostLib/SemihostLib.inf | 8
58 files changed, 4786 insertions, 43 deletions
diff --git a/ArmPkg/ArmPkg.dec b/ArmPkg/ArmPkg.dec
index 145c6b08f..101232901 100644
--- a/ArmPkg/ArmPkg.dec
+++ b/ArmPkg/ArmPkg.dec
@@ -181,3 +181,12 @@
gArmTokenSpaceGuid.PcdArmLinuxFdtMaxOffset|0x4000|UINT32|0x00000023
# The FDT blob must be loaded at a 64bit aligned address.
gArmTokenSpaceGuid.PcdArmLinuxFdtAlignment|0x8|UINT32|0x00000026
+
+[PcdsFixedAtBuild.AARCH64]
+  # By default we transition to EL2 non-secure mode with the EL2 stack.
+  #   Mode          Description                     Bits
+  #   NS EL2 SP2    all interrupts disabled       = 0x3c9
+  #   NS EL1 SP1    all interrupts disabled       = 0x3c5
+  # Other modes include using SP0 or switching to AArch32, but these are
+  # not currently supported.
+ gArmTokenSpaceGuid.PcdArmNonSecModeTransition|0x3c9|UINT32|0x0000003E
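For reference, the two supported values decompose as follows, assuming the standard AArch64 SPSR layout (DAIF interrupt masks in bits 9:6, mode field M[3:0] in bits 3:0); the macro names below are purely illustrative and are not part of this patch:

  #define SPSR_DAIF_MASKED   (0xFUL << 6)   // 0x3C0: Debug, SError, IRQ and FIQ all masked
  #define SPSR_MODE_EL2H     0x9UL          // EL2 using SP_EL2
  #define SPSR_MODE_EL1H     0x5UL          // EL1 using SP_EL1

  // (SPSR_DAIF_MASKED | SPSR_MODE_EL2H) == 0x3C9   -> NS EL2 SP2
  // (SPSR_DAIF_MASKED | SPSR_MODE_EL1H) == 0x3C5   -> NS EL1 SP1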
diff --git a/ArmPkg/ArmPkg.dsc b/ArmPkg/ArmPkg.dsc
index 1b8cff110..638054d32 100644
--- a/ArmPkg/ArmPkg.dsc
+++ b/ArmPkg/ArmPkg.dsc
@@ -25,7 +25,7 @@
PLATFORM_VERSION = 0.1
DSC_SPECIFICATION = 0x00010005
OUTPUT_DIRECTORY = Build/Arm
- SUPPORTED_ARCHITECTURES = ARM
+ SUPPORTED_ARCHITECTURES = ARM|AARCH64
BUILD_TARGETS = DEBUG|RELEASE
SKUID_IDENTIFIER = DEFAULT
@@ -61,8 +61,8 @@
DxeServicesTableLib|MdePkg/Library/DxeServicesTableLib/DxeServicesTableLib.inf
DefaultExceptionHandlerLib|ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLib.inf
- ArmLib|ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.inf
CpuLib|MdePkg/Library/BaseCpuLib/BaseCpuLib.inf
+ ArmGicLib|ArmPkg/Drivers/PL390Gic/PL390GicLib.inf
ArmSmcLib|ArmPkg/Library/ArmSmcLib/ArmSmcLib.inf
ArmDisassemblerLib|ArmPkg/Library/ArmDisassemblerLib/ArmDisassemblerLib.inf
DmaLib|ArmPkg/Library/ArmDmaLib/ArmDmaLib.inf
@@ -76,6 +76,12 @@
IoLib|MdePkg/Library/BaseIoLibIntrinsic/BaseIoLibIntrinsic.inf
+[LibraryClasses.ARM]
+ ArmLib|ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.inf
+
+[LibraryClasses.AARCH64]
+ ArmLib|ArmPkg/Library/ArmLib/AArch64/AArch64Lib.inf
+
[LibraryClasses.common.PEIM]
HobLib|MdePkg/Library/PeiHobLib/PeiHobLib.inf
PeimEntryPoint|MdePkg/Library/PeimEntryPoint/PeimEntryPoint.inf
@@ -89,20 +95,15 @@
[LibraryClasses.ARM]
NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf
+[LibraryClasses.AARCH64]
+ NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf
+
[Components.common]
ArmPkg/Library/ArmCacheMaintenanceLib/ArmCacheMaintenanceLib.inf
ArmPkg/Library/ArmDisassemblerLib/ArmDisassemblerLib.inf
ArmPkg/Library/ArmDmaLib/ArmDmaLib.inf
-# ArmPkg/Library/ArmLib/Arm11/Arm11ArmLib.inf
-# ArmPkg/Library/ArmLib/Arm11/Arm11ArmLibPrePi.inf
-# ArmPkg/Library/ArmLib/Arm9/Arm9ArmLib.inf
-# ArmPkg/Library/ArmLib/Arm9/Arm9ArmLibPrePi.inf
- ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.inf
- ArmPkg/Library/ArmLib/ArmV7/ArmV7LibPrePi.inf
- ArmPkg/Library/ArmLib/ArmV7/ArmV7LibSec.inf
ArmPkg/Library/ArmLib/Null/NullArmLib.inf
ArmPkg/Library/BaseMemoryLibStm/BaseMemoryLibStm.inf
- ArmPkg/Library/BaseMemoryLibVstm/BaseMemoryLibVstm.inf
ArmPkg/Library/BasePeCoffLib/BasePeCoffLib.inf
ArmPkg/Library/BdsLib/BdsLib.inf
ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf
@@ -116,9 +117,6 @@
ArmPkg/Library/SemihostLib/SemihostLib.inf
ArmPkg/Library/UncachedMemoryAllocationLib/UncachedMemoryAllocationLib.inf
- ArmPkg/Drivers/ArmCpuLib/ArmCortexA8Lib/ArmCortexA8Lib.inf
- ArmPkg/Drivers/ArmCpuLib/ArmCortexA9Lib/ArmCortexA9Lib.inf
- ArmPkg/Drivers/ArmCpuLib/ArmCortexA15Lib/ArmCortexA15Lib.inf
ArmPkg/Drivers/CpuDxe/CpuDxe.inf
ArmPkg/Drivers/CpuPei/CpuPei.inf
ArmPkg/Drivers/PL390Gic/PL390GicDxe.inf
@@ -133,3 +131,24 @@
ArmPkg/Application/LinuxLoader/LinuxAtagLoader.inf
ArmPkg/Application/LinuxLoader/LinuxFdtLoader.inf
+
+[Components.ARM]
+ ArmPkg/Library/BaseMemoryLibVstm/BaseMemoryLibVstm.inf
+
+ ArmPkg/Drivers/ArmCpuLib/ArmCortexA8Lib/ArmCortexA8Lib.inf
+ ArmPkg/Drivers/ArmCpuLib/ArmCortexA9Lib/ArmCortexA9Lib.inf
+ ArmPkg/Drivers/ArmCpuLib/ArmCortexA15Lib/ArmCortexA15Lib.inf
+
+# ArmPkg/Library/ArmLib/Arm11/Arm11ArmLib.inf
+# ArmPkg/Library/ArmLib/Arm11/Arm11ArmLibPrePi.inf
+# ArmPkg/Library/ArmLib/Arm9/Arm9ArmLib.inf
+# ArmPkg/Library/ArmLib/Arm9/Arm9ArmLibPrePi.inf
+ ArmPkg/Library/ArmLib/ArmV7/ArmV7LibSec.inf
+ ArmPkg/Library/ArmLib/ArmV7/ArmV7LibPrePi.inf
+
+[Components.AARCH64]
+ ArmPkg/Drivers/ArmCpuLib/ArmCortexAEMv8Lib/ArmCortexAEMv8Lib.inf
+ ArmPkg/Drivers/ArmCpuLib/ArmCortexA5xLib/ArmCortexA5xLib.inf
+
+ ArmPkg/Library/ArmLib/AArch64/AArch64LibSec.inf
+ ArmPkg/Library/ArmLib/AArch64/AArch64LibPrePi.inf
diff --git a/ArmPkg/Drivers/ArmCpuLib/ArmCortexA15Lib/ArmCortexA15Lib.c b/ArmPkg/Drivers/ArmCpuLib/ArmCortexA15Lib/ArmCortexA15Lib.c
index b15597893..2e62a4f77 100644
--- a/ArmPkg/Drivers/ArmCpuLib/ArmCortexA15Lib/ArmCortexA15Lib.c
+++ b/ArmPkg/Drivers/ArmCpuLib/ArmCortexA15Lib/ArmCortexA15Lib.c
@@ -15,7 +15,7 @@
#include <Base.h>
#include <Library/ArmLib.h>
#include <Library/ArmCpuLib.h>
-#include <Library/ArmV7ArchTimerLib.h>
+#include <Library/ArmArchTimerLib.h>
#include <Library/DebugLib.h>
#include <Library/IoLib.h>
#include <Library/PcdLib.h>
diff --git a/ArmPkg/Drivers/ArmCpuLib/ArmCortexA5xLib/ArmCortexA5xLib.c b/ArmPkg/Drivers/ArmCpuLib/ArmCortexA5xLib/ArmCortexA5xLib.c
new file mode 100644
index 000000000..d13c7fd6f
--- /dev/null
+++ b/ArmPkg/Drivers/ArmCpuLib/ArmCortexA5xLib/ArmCortexA5xLib.c
@@ -0,0 +1,50 @@
+/** @file
+
+ Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Base.h>
+#include <Library/ArmLib.h>
+#include <Library/ArmCpuLib.h>
+#include <Library/ArmArchTimerLib.h>
+#include <Library/DebugLib.h>
+#include <Library/IoLib.h>
+#include <Library/PcdLib.h>
+
+#include <Chipset/ArmCortexA5x.h>
+
+VOID
+ArmCpuSetup (
+ IN UINTN MpId
+ )
+{
+  // Check that the Architectural Timer frequency is a valid number (i.e. not 0)
+ ASSERT (PcdGet32 (PcdArmArchTimerFreqInHz));
+ ASSERT (ArmIsArchTimerImplemented () != 0);
+
+ // Note: System Counter frequency can only be set in Secure privileged mode,
+ // if security extensions are implemented.
+ ArmArchTimerSetTimerFreq (PcdGet32 (PcdArmArchTimerFreqInHz));
+
+ if (ArmIsMpCore ()) {
+ // Turn on SMP coherency
+ ArmSetAuxCrBit (A5X_FEATURE_SMP);
+ }
+
+}
+
+VOID
+ArmCpuSetupSmpNonSecure (
+ IN UINTN MpId
+ )
+{
+}
diff --git a/ArmPkg/Drivers/ArmCpuLib/ArmCortexA5xLib/ArmCortexA5xLib.inf b/ArmPkg/Drivers/ArmCpuLib/ArmCortexA5xLib/ArmCortexA5xLib.inf
new file mode 100644
index 000000000..486aba74a
--- /dev/null
+++ b/ArmPkg/Drivers/ArmCpuLib/ArmCortexA5xLib/ArmCortexA5xLib.inf
@@ -0,0 +1,35 @@
+#/* @file
+# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#*/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = ArmCortexA5xLib
+ FILE_GUID = 08107938-85d8-4967-ba65-b673f708fcb2
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmCpuLib
+
+[Packages]
+ MdePkg/MdePkg.dec
+ ArmPkg/ArmPkg.dec
+
+[LibraryClasses]
+ ArmLib
+ IoLib
+ PcdLib
+
+[Sources.common]
+ ArmCortexA5xLib.c
+
+[FixedPcd]
+ gArmTokenSpaceGuid.PcdArmArchTimerFreqInHz
diff --git a/ArmPkg/Drivers/ArmCpuLib/ArmCortexAEMv8Lib/ArmCortexAEMv8Lib.c b/ArmPkg/Drivers/ArmCpuLib/ArmCortexAEMv8Lib/ArmCortexAEMv8Lib.c
new file mode 100644
index 000000000..d38723f4b
--- /dev/null
+++ b/ArmPkg/Drivers/ArmCpuLib/ArmCortexAEMv8Lib/ArmCortexAEMv8Lib.c
@@ -0,0 +1,42 @@
+/** @file
+
+ Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Base.h>
+#include <Library/ArmLib.h>
+#include <Library/ArmCpuLib.h>
+#include <Library/ArmGicLib.h>
+#include <Library/ArmArchTimerLib.h>
+#include <Library/IoLib.h>
+#include <Library/PcdLib.h>
+
+#include <Chipset/ArmAemV8.h>
+
+VOID
+ArmCpuSetup (
+ IN UINTN MpId
+ )
+{
+ // Note: System Counter frequency can only be set in Secure privileged mode,
+ // if security extensions are implemented.
+ ArmArchTimerSetTimerFreq (PcdGet32 (PcdArmArchTimerFreqInHz));
+}
+
+
+VOID
+ArmCpuSetupSmpNonSecure (
+ IN UINTN MpId
+ )
+{
+ // Nothing to do
+}
diff --git a/ArmPkg/Drivers/ArmCpuLib/ArmCortexAEMv8Lib/ArmCortexAEMv8Lib.inf b/ArmPkg/Drivers/ArmCpuLib/ArmCortexAEMv8Lib/ArmCortexAEMv8Lib.inf
new file mode 100644
index 000000000..0b21991a5
--- /dev/null
+++ b/ArmPkg/Drivers/ArmCpuLib/ArmCortexAEMv8Lib/ArmCortexAEMv8Lib.inf
@@ -0,0 +1,38 @@
+#/* @file
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#*/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = ArmCortexAEMv8Lib
+ FILE_GUID = 8ab5a7e3-86b1-4dd3-a092-09ee801e774b
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmCpuLib
+
+[Packages]
+ MdePkg/MdePkg.dec
+ ArmPkg/ArmPkg.dec
+
+[LibraryClasses]
+ ArmLib
+ IoLib
+ PcdLib
+
+[Sources.common]
+ ArmCortexAEMv8Lib.c
+
+[FixedPcd]
+ gArmTokenSpaceGuid.PcdArmPrimaryCoreMask
+ gArmTokenSpaceGuid.PcdArmPrimaryCore
+
+ gArmTokenSpaceGuid.PcdArmArchTimerFreqInHz
diff --git a/ArmPkg/Drivers/CpuDxe/AArch64/Exception.c b/ArmPkg/Drivers/CpuDxe/AArch64/Exception.c
new file mode 100644
index 000000000..1aac6a5a9
--- /dev/null
+++ b/ArmPkg/Drivers/CpuDxe/AArch64/Exception.c
@@ -0,0 +1,153 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ Portions Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "CpuDxe.h"
+
+#include <Chipset/AArch64.h>
+
+VOID
+ExceptionHandlersStart (
+ VOID
+ );
+
+VOID
+ExceptionHandlersEnd (
+ VOID
+ );
+
+VOID
+CommonExceptionEntry (
+ VOID
+ );
+
+VOID
+AsmCommonExceptionEntry (
+ VOID
+ );
+
+
+EFI_EXCEPTION_CALLBACK gExceptionHandlers[MAX_ARM_EXCEPTION + 1];
+EFI_EXCEPTION_CALLBACK gDebuggerExceptionHandlers[MAX_ARM_EXCEPTION + 1];
+
+
+
+/**
+ This function registers and enables the handler specified by InterruptHandler for a processor
+ interrupt or exception type specified by InterruptType. If InterruptHandler is NULL, then the
+ handler for the processor interrupt or exception type specified by InterruptType is uninstalled.
+ The installed handler is called once for each processor interrupt or exception.
+
+  @param InterruptType    Defines which interrupt or exception to hook.
+ @param InterruptHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER that is called
+ when a processor interrupt occurs. If this parameter is NULL, then the handler
+ will be uninstalled.
+
+ @retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled.
+ @retval EFI_ALREADY_STARTED InterruptHandler is not NULL, and a handler for InterruptType was
+ previously installed.
+ @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not
+ previously installed.
+ @retval EFI_UNSUPPORTED The interrupt specified by InterruptType is not supported.
+
+**/
+EFI_STATUS
+RegisterInterruptHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_CPU_INTERRUPT_HANDLER InterruptHandler
+ )
+{
+ if (InterruptType > MAX_ARM_EXCEPTION) {
+ return EFI_UNSUPPORTED;
+ }
+
+ if ((InterruptHandler != NULL) && (gExceptionHandlers[InterruptType] != NULL)) {
+ return EFI_ALREADY_STARTED;
+ }
+
+ gExceptionHandlers[InterruptType] = InterruptHandler;
+
+ return EFI_SUCCESS;
+}
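DXE drivers normally reach this routine through the CPU Architectural Protocol rather than by calling it directly. A minimal, hypothetical caller might look like the sketch below (IrqHandler and HookIrq are illustrative names, not part of this patch):

  #include <Uefi.h>
  #include <Library/UefiBootServicesTableLib.h>
  #include <Protocol/Cpu.h>

  STATIC
  VOID
  EFIAPI
  IrqHandler (
    IN EFI_EXCEPTION_TYPE   InterruptType,
    IN EFI_SYSTEM_CONTEXT   SystemContext
    )
  {
    // Acknowledge and dispatch the interrupt here
  }

  EFI_STATUS
  HookIrq (
    VOID
    )
  {
    EFI_CPU_ARCH_PROTOCOL  *Cpu;
    EFI_STATUS             Status;

    Status = gBS->LocateProtocol (&gEfiCpuArchProtocolGuid, NULL, (VOID **)&Cpu);
    if (EFI_ERROR (Status)) {
      return Status;
    }

    // Passing NULL as the handler uninstalls a previously registered one
    return Cpu->RegisterInterruptHandler (Cpu, EXCEPT_AARCH64_IRQ, IrqHandler);
  }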
+
+
+
+VOID
+EFIAPI
+CommonCExceptionHandler (
+ IN EFI_EXCEPTION_TYPE ExceptionType,
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ if (ExceptionType <= MAX_AARCH64_EXCEPTION) {
+ if (gExceptionHandlers[ExceptionType]) {
+ gExceptionHandlers[ExceptionType] (ExceptionType, SystemContext);
+ return;
+ }
+ } else {
+ DEBUG ((EFI_D_ERROR, "Unknown exception type %d from %016lx\n", ExceptionType, SystemContext.SystemContextAArch64->ELR));
+ ASSERT (FALSE);
+ }
+
+ DefaultExceptionHandler (ExceptionType, SystemContext);
+}
+
+
+
+EFI_STATUS
+InitializeExceptions (
+ IN EFI_CPU_ARCH_PROTOCOL *Cpu
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN IrqEnabled;
+ BOOLEAN FiqEnabled;
+
+ Status = EFI_SUCCESS;
+  ZeroMem (gExceptionHandlers, sizeof (gExceptionHandlers));
+
+ //
+ // Disable interrupts
+ //
+ Cpu->GetInterruptState (Cpu, &IrqEnabled);
+ Cpu->DisableInterrupt (Cpu);
+
+ //
+ // EFI does not use the FIQ, but a debugger might so we must disable
+ // as we take over the exception vectors.
+ //
+ FiqEnabled = ArmGetFiqState ();
+ ArmDisableFiq ();
+
+  // On AArch64 the vector table must be 2KB aligned (bottom 11 bits zero).
+ //DEBUG ((EFI_D_ERROR, "vbar set addr: 0x%016lx\n",(UINTN)ExceptionHandlersStart));
+ //ASSERT(((UINTN)ExceptionHandlersStart & ((1 << 11)-1)) == 0);
+
+ // We do not copy the Exception Table at PcdGet32(PcdCpuVectorBaseAddress). We just set Vector Base Address to point into CpuDxe code.
+ ArmWriteVBar ((UINTN)ExceptionHandlersStart);
+
+ if (FiqEnabled) {
+ ArmEnableFiq ();
+ }
+
+ if (IrqEnabled) {
+ //
+ // Restore interrupt state
+ //
+ Status = Cpu->EnableInterrupt (Cpu);
+ }
+
+ return Status;
+}
diff --git a/ArmPkg/Drivers/CpuDxe/AArch64/ExceptionSupport.S b/ArmPkg/Drivers/CpuDxe/AArch64/ExceptionSupport.S
new file mode 100644
index 000000000..981ffd5c3
--- /dev/null
+++ b/ArmPkg/Drivers/CpuDxe/AArch64/ExceptionSupport.S
@@ -0,0 +1,381 @@
+//
+// Copyright (c) 2011 - 2013 ARM LTD. All rights reserved.<BR>
+//
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD License
+// which accompanies this distribution. The full text of the license may be found at
+// http://opensource.org/licenses/bsd-license.php
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+//
+//------------------------------------------------------------------------------
+
+#include <Library/PcdLib.h>
+#include <AsmMacroIoLibV8.h>
+
+/*
+ This is the stack constructed by the exception handler (low address to high address).
+ X0 to FAR makes up the EFI_SYSTEM_CONTEXT for AArch64.
+
+ UINT64 X0; 0x000
+ UINT64 X1; 0x008
+ UINT64 X2; 0x010
+ UINT64 X3; 0x018
+ UINT64 X4; 0x020
+ UINT64 X5; 0x028
+ UINT64 X6; 0x030
+ UINT64 X7; 0x038
+ UINT64 X8; 0x040
+ UINT64 X9; 0x048
+ UINT64 X10; 0x050
+ UINT64 X11; 0x058
+ UINT64 X12; 0x060
+ UINT64 X13; 0x068
+ UINT64 X14; 0x070
+ UINT64 X15; 0x078
+ UINT64 X16; 0x080
+ UINT64 X17; 0x088
+ UINT64 X18; 0x090
+ UINT64 X19; 0x098
+ UINT64 X20; 0x0a0
+ UINT64 X21; 0x0a8
+ UINT64 X22; 0x0b0
+ UINT64 X23; 0x0b8
+ UINT64 X24; 0x0c0
+ UINT64 X25; 0x0c8
+ UINT64 X26; 0x0d0
+ UINT64 X27; 0x0d8
+ UINT64 X28; 0x0e0
+ UINT64 FP; 0x0e8 // x29 - Frame Pointer
+ UINT64 LR; 0x0f0 // x30 - Link Register
+ UINT64 SP; 0x0f8 // x31 - Stack Pointer
+
+ // FP/SIMD Registers. 128bit if used as Q-regs.
+ UINT64 V0[2]; 0x100
+ UINT64 V1[2]; 0x110
+ UINT64 V2[2]; 0x120
+ UINT64 V3[2]; 0x130
+ UINT64 V4[2]; 0x140
+ UINT64 V5[2]; 0x150
+ UINT64 V6[2]; 0x160
+ UINT64 V7[2]; 0x170
+ UINT64 V8[2]; 0x180
+ UINT64 V9[2]; 0x190
+ UINT64 V10[2]; 0x1a0
+ UINT64 V11[2]; 0x1b0
+ UINT64 V12[2]; 0x1c0
+ UINT64 V13[2]; 0x1d0
+ UINT64 V14[2]; 0x1e0
+ UINT64 V15[2]; 0x1f0
+ UINT64 V16[2]; 0x200
+ UINT64 V17[2]; 0x210
+ UINT64 V18[2]; 0x220
+ UINT64 V19[2]; 0x230
+ UINT64 V20[2]; 0x240
+ UINT64 V21[2]; 0x250
+ UINT64 V22[2]; 0x260
+ UINT64 V23[2]; 0x270
+ UINT64 V24[2]; 0x280
+ UINT64 V25[2]; 0x290
+ UINT64 V26[2]; 0x2a0
+ UINT64 V27[2]; 0x2b0
+ UINT64 V28[2]; 0x2c0
+ UINT64 V29[2]; 0x2d0
+ UINT64 V30[2]; 0x2e0
+ UINT64 V31[2]; 0x2f0
+
+ // System Context
+ UINT64 ELR; 0x300 // Exception Link Register
+ UINT64 SPSR; 0x308 // Saved Processor Status Register
+ UINT64 FPSR; 0x310 // Floating Point Status Register
+  UINT64 ESR;    0x318  // Exception Syndrome Register
+  UINT64 FAR;    0x320  // Fault Address Register
+ UINT64 Padding;0x328 // Required for stack alignment
+*/
+
+ASM_GLOBAL ASM_PFX(ExceptionHandlersStart)
+ASM_GLOBAL ASM_PFX(ExceptionHandlersEnd)
+ASM_GLOBAL ASM_PFX(CommonExceptionEntry)
+ASM_GLOBAL ASM_PFX(AsmCommonExceptionEntry)
+ASM_GLOBAL ASM_PFX(CommonCExceptionHandler)
+
+.text
+.align 11
+
+#define GP_CONTEXT_SIZE (32 * 8)
+#define FP_CONTEXT_SIZE (32 * 16)
+#define SYS_CONTEXT_SIZE ( 6 * 8) // 5 SYS regs + Alignment requirement (ie: the stack must be aligned on 0x10)
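As a cross-check against the stack layout sketched above (a worked sum, not new code): GP_CONTEXT_SIZE is 32 * 8 = 0x100, FP_CONTEXT_SIZE is 32 * 16 = 0x200 and SYS_CONTEXT_SIZE is 6 * 8 = 0x30, so the system registers start at offset 0x100 + 0x200 = 0x300 (matching ELR above) and the frame ends at 0x330, which stays 16-byte aligned thanks to the padding slot at 0x328.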
+
+// Cannot str x31 directly
+#define ALL_GP_REGS \
+ REG_PAIR (x0, x1, 0x000, GP_CONTEXT_SIZE); \
+ REG_PAIR (x2, x3, 0x010, GP_CONTEXT_SIZE); \
+ REG_PAIR (x4, x5, 0x020, GP_CONTEXT_SIZE); \
+ REG_PAIR (x6, x7, 0x030, GP_CONTEXT_SIZE); \
+ REG_PAIR (x8, x9, 0x040, GP_CONTEXT_SIZE); \
+ REG_PAIR (x10, x11, 0x050, GP_CONTEXT_SIZE); \
+ REG_PAIR (x12, x13, 0x060, GP_CONTEXT_SIZE); \
+ REG_PAIR (x14, x15, 0x070, GP_CONTEXT_SIZE); \
+ REG_PAIR (x16, x17, 0x080, GP_CONTEXT_SIZE); \
+ REG_PAIR (x18, x19, 0x090, GP_CONTEXT_SIZE); \
+ REG_PAIR (x20, x21, 0x0a0, GP_CONTEXT_SIZE); \
+ REG_PAIR (x22, x23, 0x0b0, GP_CONTEXT_SIZE); \
+ REG_PAIR (x24, x25, 0x0c0, GP_CONTEXT_SIZE); \
+ REG_PAIR (x26, x27, 0x0d0, GP_CONTEXT_SIZE); \
+ REG_PAIR (x28, x29, 0x0e0, GP_CONTEXT_SIZE); \
+ REG_ONE (x30, 0x0f0, GP_CONTEXT_SIZE);
+
+// In order to save the SP we need to put it somewhere else first.
+// STR only works with XZR/WZR directly
+#define SAVE_SP \
+ add x1, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE; \
+ REG_ONE (x1, 0x0f8, GP_CONTEXT_SIZE);
+
+#define ALL_FP_REGS \
+ REG_PAIR (q0, q1, 0x000, FP_CONTEXT_SIZE); \
+ REG_PAIR (q2, q3, 0x020, FP_CONTEXT_SIZE); \
+ REG_PAIR (q4, q5, 0x040, FP_CONTEXT_SIZE); \
+ REG_PAIR (q6, q7, 0x060, FP_CONTEXT_SIZE); \
+ REG_PAIR (q8, q9, 0x080, FP_CONTEXT_SIZE); \
+ REG_PAIR (q10, q11, 0x0a0, FP_CONTEXT_SIZE); \
+ REG_PAIR (q12, q13, 0x0c0, FP_CONTEXT_SIZE); \
+ REG_PAIR (q14, q15, 0x0e0, FP_CONTEXT_SIZE); \
+ REG_PAIR (q16, q17, 0x100, FP_CONTEXT_SIZE); \
+ REG_PAIR (q18, q19, 0x120, FP_CONTEXT_SIZE); \
+ REG_PAIR (q20, q21, 0x140, FP_CONTEXT_SIZE); \
+ REG_PAIR (q22, q23, 0x160, FP_CONTEXT_SIZE); \
+ REG_PAIR (q24, q25, 0x180, FP_CONTEXT_SIZE); \
+ REG_PAIR (q26, q27, 0x1a0, FP_CONTEXT_SIZE); \
+ REG_PAIR (q28, q29, 0x1c0, FP_CONTEXT_SIZE); \
+ REG_PAIR (q30, q31, 0x1e0, FP_CONTEXT_SIZE);
+
+#define ALL_SYS_REGS \
+ REG_PAIR (x1, x2, 0x000, SYS_CONTEXT_SIZE); \
+ REG_PAIR (x3, x4, 0x010, SYS_CONTEXT_SIZE); \
+ REG_ONE (x5, 0x020, SYS_CONTEXT_SIZE);
+
+//
+// This code gets copied to the ARM vector table
+// VectorTableStart - VectorTableEnd gets copied
+//
+ASM_PFX(ExceptionHandlersStart):
+
+//
+// Current EL with SP0 : 0x0 - 0x180
+//
+.align 7
+ASM_PFX(SynchronousExceptionSP0):
+ b ASM_PFX(SynchronousExceptionEntry)
+
+.align 7
+ASM_PFX(IrqSP0):
+ b ASM_PFX(IrqEntry)
+
+.align 7
+ASM_PFX(FiqSP0):
+ b ASM_PFX(FiqEntry)
+
+.align 7
+ASM_PFX(SErrorSP0):
+ b ASM_PFX(SErrorEntry)
+
+//
+// Current EL with SPx: 0x200 - 0x380
+//
+.align 7
+ASM_PFX(SynchronousExceptionSPx):
+ b ASM_PFX(SynchronousExceptionEntry)
+
+.align 7
+ASM_PFX(IrqSPx):
+ b ASM_PFX(IrqEntry)
+
+.align 7
+ASM_PFX(FiqSPx):
+ b ASM_PFX(FiqEntry)
+
+.align 7
+ASM_PFX(SErrorSPx):
+ b ASM_PFX(SErrorEntry)
+
+//
+// Lower EL using AArch64 : 0x400 - 0x580
+//
+.align 7
+ASM_PFX(SynchronousExceptionA64):
+ b ASM_PFX(SynchronousExceptionEntry)
+
+.align 7
+ASM_PFX(IrqA64):
+ b ASM_PFX(IrqEntry)
+
+.align 7
+ASM_PFX(FiqA64):
+ b ASM_PFX(FiqEntry)
+
+.align 7
+ASM_PFX(SErrorA64):
+ b ASM_PFX(SErrorEntry)
+
+//
+// Lower EL using AArch32 : 0x0 - 0x180
+//
+.align 7
+ASM_PFX(SynchronousExceptionA32):
+ b ASM_PFX(SynchronousExceptionEntry)
+
+.align 7
+ASM_PFX(IrqA32):
+ b ASM_PFX(IrqEntry)
+
+.align 7
+ASM_PFX(FiqA32):
+ b ASM_PFX(FiqEntry)
+
+.align 7
+ASM_PFX(SErrorA32):
+ b ASM_PFX(SErrorEntry)
+
+
+#undef REG_PAIR
+#undef REG_ONE
+#define REG_PAIR(REG1, REG2, OFFSET, CONTEXT_SIZE) stp REG1, REG2, [sp, #(OFFSET-CONTEXT_SIZE)]
+#define REG_ONE(REG1, OFFSET, CONTEXT_SIZE) str REG1, [sp, #(OFFSET-CONTEXT_SIZE)]
+
+ASM_PFX(SynchronousExceptionEntry):
+ // Move the stackpointer so we can reach our structure with the str instruction.
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+
+ // Save all the General regs before touching x0 and x1.
+ // This does not save r31(SP) as it is special. We do that later.
+ ALL_GP_REGS
+
+  // Record the type of exception that occurred.
+ mov x0, #EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS
+
+ // Jump to our general handler to deal with all the common parts and process the exception.
+ ldr x1, ASM_PFX(CommonExceptionEntry)
+ br x1
+
+ASM_PFX(IrqEntry):
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+ ALL_GP_REGS
+ mov x0, #EXCEPT_AARCH64_IRQ
+ ldr x1, ASM_PFX(CommonExceptionEntry)
+ br x1
+
+ASM_PFX(FiqEntry):
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+ ALL_GP_REGS
+ mov x0, #EXCEPT_AARCH64_FIQ
+ ldr x1, ASM_PFX(CommonExceptionEntry)
+ br x1
+
+ASM_PFX(SErrorEntry):
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+ ALL_GP_REGS
+ mov x0, #EXCEPT_AARCH64_SERROR
+ ldr x1, ASM_PFX(CommonExceptionEntry)
+ br x1
+
+
+//
+// This gets patched by the C code that patches in the vector table
+//
+.align 3
+ASM_PFX(CommonExceptionEntry):
+ .dword ASM_PFX(AsmCommonExceptionEntry)
+
+ASM_PFX(ExceptionHandlersEnd):
+
+
+
+//
+// This code runs from CpuDxe driver loaded address. It is patched into
+// CommonExceptionEntry.
+//
+ASM_PFX(AsmCommonExceptionEntry):
+ /* NOTE:
+     We have to break up the save code because the immediate value used with
+     the SP is too big to do it all in one step, so we need to shuffle the SP
+     along as we go (we only have 9 bits of immediate to work with). */
+
+ // Save the current Stack pointer before we start modifying it.
+ SAVE_SP
+
+  // Read the system registers from EL1 or EL2, depending on the current Exception Level
+  EL1_OR_EL2(x1)
+1:mrs x1, elr_el1 // Exception Link Register
+ mrs x2, spsr_el1 // Saved Processor Status Register 32bit
+ mrs x3, fpsr // Floating point Status Register 32bit
+ mrs x4, esr_el1 // EL1 Exception syndrome register 32bit
+ mrs x5, far_el1 // EL1 Fault Address Register
+ b 3f
+
+2:mrs x1, elr_el2 // Exception Link Register
+ mrs x2, spsr_el2 // Saved Processor Status Register 32bit
+ mrs x3, fpsr // Floating point Status Register 32bit
+  mrs x4, esr_el2  // EL2 Exception Syndrome Register 32bit
+  mrs x5, far_el2  // EL2 Fault Address Register
+
+ // Adjust SP to save next set
+3:add sp, sp, FP_CONTEXT_SIZE
+
+ // Push FP regs to Stack.
+ ALL_FP_REGS
+
+ // Adjust SP to save next set
+ add sp, sp, SYS_CONTEXT_SIZE
+
+ // Save the SYS regs
+ ALL_SYS_REGS
+
+ // Point to top of struct after all regs saved
+ sub sp, sp, GP_CONTEXT_SIZE + FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+
+ // x0 still holds the exception type.
+ // Set x1 to point to the top of our struct on the Stack
+ mov x1, sp
+
+// CommonCExceptionHandler (
+// IN EFI_EXCEPTION_TYPE ExceptionType, R0
+// IN OUT EFI_SYSTEM_CONTEXT SystemContext R1
+// )
+
+ // Call the handler as defined above
+
+ // For now we spin in the handler if we received an abort of some kind.
+ // We do not try to recover.
+ bl ASM_PFX(CommonCExceptionHandler) // Call exception handler
+
+
+// Defines for popping from stack
+
+#undef REG_PAIR
+#undef REG_ONE
+#define REG_PAIR(REG1, REG2, OFFSET, CONTEXT_SIZE) ldp REG1, REG2, [sp, #(OFFSET-CONTEXT_SIZE)]
+
+#define REG_ONE(REG1, OFFSET, CONTEXT_SIZE) ldr REG1, [sp, #(OFFSET-CONTEXT_SIZE)]
+
+
+ // pop all regs and return from exception.
+ add sp, sp, GP_CONTEXT_SIZE
+ ALL_GP_REGS
+
+ // Adjust SP to pop next set
+ add sp, sp, FP_CONTEXT_SIZE
+  // Pop FP regs from the Stack.
+ ALL_FP_REGS
+
+ // Adjust SP to be where we started from when we came into the handler.
+ // The handler can not change the SP.
+ add sp, sp, SYS_CONTEXT_SIZE
+
+ eret
+
+#undef REG_PAIR
+#undef REG_ONE
+
+dead:
+ b dead
diff --git a/ArmPkg/Drivers/CpuDxe/AArch64/Mmu.c b/ArmPkg/Drivers/CpuDxe/AArch64/Mmu.c
new file mode 100644
index 000000000..9043afaff
--- /dev/null
+++ b/ArmPkg/Drivers/CpuDxe/AArch64/Mmu.c
@@ -0,0 +1,189 @@
+/*++
+
+Copyright (c) 2009, Hewlett-Packard Company. All rights reserved.<BR>
+Portions copyright (c) 2010, Apple Inc. All rights reserved.<BR>
+Portions copyright (c) 2011-2013, ARM Ltd. All rights reserved.<BR>
+
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+
+--*/
+
+#include "CpuDxe.h"
+
+#define TT_ATTR_INDX_INVALID ((UINT32)~0)
+
+STATIC
+UINT64
+GetFirstPageAttribute (
+ IN UINT64 *FirstLevelTableAddress,
+ IN UINTN TableLevel
+ )
+{
+ UINT64 FirstEntry;
+
+ // Get the first entry of the table
+ FirstEntry = *FirstLevelTableAddress;
+
+ if ((FirstEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
+ // Only valid for Levels 0, 1 and 2
+ ASSERT (TableLevel < 3);
+
+ // Get the attribute of the subsequent table
+ return GetFirstPageAttribute ((UINT64*)(FirstEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE), TableLevel + 1);
+ } else if (((FirstEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) ||
+ ((TableLevel == 3) && ((FirstEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3)))
+ {
+ return FirstEntry & TT_ATTR_INDX_MASK;
+ } else {
+ return TT_ATTR_INDX_INVALID;
+ }
+}
+
+STATIC
+UINT64
+GetNextEntryAttribute (
+ IN UINT64 *TableAddress,
+ IN UINTN EntryCount,
+ IN UINTN TableLevel,
+ IN UINT64 BaseAddress,
+ IN OUT UINT32 *PrevEntryAttribute,
+ IN OUT UINT64 *StartGcdRegion
+ )
+{
+ UINTN Index;
+ UINT64 Entry;
+ UINT32 EntryAttribute;
+ UINT32 EntryType;
+ EFI_STATUS Status;
+ UINTN NumberOfDescriptors;
+ EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;
+
+ // Get the memory space map from GCD
+ MemorySpaceMap = NULL;
+ Status = gDS->GetMemorySpaceMap (&NumberOfDescriptors, &MemorySpaceMap);
+ ASSERT_EFI_ERROR (Status);
+
+  // There cannot be more than a 3-level page table
+ ASSERT (TableLevel <= 3);
+
+  // While the top-level table might not contain TT_ENTRY_COUNT entries,
+  // the subsequent ones should be fully populated
+ for (Index = 0; Index < EntryCount; Index++) {
+ Entry = TableAddress[Index];
+ EntryType = Entry & TT_TYPE_MASK;
+ EntryAttribute = Entry & TT_ATTR_INDX_MASK;
+
+    // If the Entry is a Block descriptor (or a Level-3 page descriptor), check whether it starts a new region
+ if ((EntryType == TT_TYPE_BLOCK_ENTRY) ||
+ ((TableLevel == 3) && (EntryType == TT_TYPE_BLOCK_ENTRY_LEVEL3))) {
+ if ((*PrevEntryAttribute == TT_ATTR_INDX_INVALID) || (EntryAttribute != *PrevEntryAttribute)) {
+ if (*PrevEntryAttribute != TT_ATTR_INDX_INVALID) {
+ // Update GCD with the last region
+ SetGcdMemorySpaceAttributes (MemorySpaceMap, NumberOfDescriptors,
+ *StartGcdRegion,
+ (BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel)) - 1) - *StartGcdRegion,
+ PageAttributeToGcdAttribute (EntryAttribute));
+ }
+
+ // Start of the new region
+ *StartGcdRegion = BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel));
+ *PrevEntryAttribute = EntryAttribute;
+ } else {
+ continue;
+ }
+ } else if (EntryType == TT_TYPE_TABLE_ENTRY) {
+ // Table Entry type is only valid for Level 0, 1, 2
+ ASSERT (TableLevel < 3);
+
+ // Increase the level number and scan the sub-level table
+ GetNextEntryAttribute ((UINT64*)(Entry & TT_ADDRESS_MASK_DESCRIPTION_TABLE),
+ TT_ENTRY_COUNT, TableLevel + 1,
+ (BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel))),
+ PrevEntryAttribute, StartGcdRegion);
+ } else {
+ if (*PrevEntryAttribute != TT_ATTR_INDX_INVALID) {
+ // Update GCD with the last region
+ SetGcdMemorySpaceAttributes (MemorySpaceMap, NumberOfDescriptors,
+ *StartGcdRegion,
+ (BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel)) - 1) - *StartGcdRegion,
+ PageAttributeToGcdAttribute (EntryAttribute));
+
+ // Start of the new region
+ *StartGcdRegion = BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel));
+ *PrevEntryAttribute = TT_ATTR_INDX_INVALID;
+ }
+ }
+ }
+
+ return BaseAddress + (EntryCount * TT_ADDRESS_AT_LEVEL(TableLevel));
+}
+
+EFI_STATUS
+SyncCacheConfig (
+ IN EFI_CPU_ARCH_PROTOCOL *CpuProtocol
+ )
+{
+ EFI_STATUS Status;
+ UINT32 PageAttribute = 0;
+ UINT64 *FirstLevelTableAddress;
+ UINTN TableLevel;
+ UINTN TableCount;
+ UINTN NumberOfDescriptors;
+ EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;
+ UINTN Tcr;
+ UINTN T0SZ;
+ UINT64 BaseAddressGcdRegion;
+ UINT64 EndAddressGcdRegion;
+
+  // This code assumes the MMU is enabled and the translation tables are populated
+ ASSERT (ArmMmuEnabled ());
+
+ //
+ // Get the memory space map from GCD
+ //
+ MemorySpaceMap = NULL;
+ Status = gDS->GetMemorySpaceMap (&NumberOfDescriptors, &MemorySpaceMap);
+ ASSERT_EFI_ERROR (Status);
+
+ // The GCD implementation maintains its own copy of the state of memory space attributes. GCD needs
+ // to know what the initial memory space attributes are. The CPU Arch. Protocol does not provide a
+ // GetMemoryAttributes function for GCD to get this so we must resort to calling GCD (as if we were
+ // a client) to update its copy of the attributes. This is bad architecture and should be replaced
+ // with a way for GCD to query the CPU Arch. driver of the existing memory space attributes instead.
+
+ // Obtain page table base
+ FirstLevelTableAddress = (UINT64*)(ArmGetTTBR0BaseAddress ());
+
+ // Get Translation Control Register value
+ Tcr = ArmGetTCR ();
+ // Get Address Region Size
+ T0SZ = Tcr & TCR_T0SZ_MASK;
+
+ // Get the level of the first table for the indicated Address Region Size
+ GetRootTranslationTableInfo (T0SZ, &TableLevel, &TableCount);
+
+ // First Attribute of the Page Tables
+ PageAttribute = GetFirstPageAttribute (FirstLevelTableAddress, TableLevel);
+
+ // We scan from the start of the memory map (ie: at the address 0x0)
+ BaseAddressGcdRegion = 0x0;
+ EndAddressGcdRegion = GetNextEntryAttribute (FirstLevelTableAddress,
+ TableCount, TableLevel,
+ BaseAddressGcdRegion,
+ &PageAttribute, &BaseAddressGcdRegion);
+
+ // Update GCD with the last region
+ SetGcdMemorySpaceAttributes (MemorySpaceMap, NumberOfDescriptors,
+ BaseAddressGcdRegion,
+ EndAddressGcdRegion - BaseAddressGcdRegion,
+ PageAttributeToGcdAttribute (PageAttribute));
+
+ return EFI_SUCCESS;
+}
diff --git a/ArmPkg/Drivers/CpuDxe/CpuDxe.h b/ArmPkg/Drivers/CpuDxe/CpuDxe.h
index 7f59ca653..075f2a18b 100644
--- a/ArmPkg/Drivers/CpuDxe/CpuDxe.h
+++ b/ArmPkg/Drivers/CpuDxe/CpuDxe.h
@@ -148,9 +148,16 @@ SetMemoryAttributes (
IN EFI_PHYSICAL_ADDRESS VirtualMask
);
+VOID
+GetRootTranslationTableInfo (
+ IN UINTN T0SZ,
+ OUT UINTN *TableLevel,
+ OUT UINTN *TableEntryCount
+ );
+
EFI_STATUS
SetGcdMemorySpaceAttributes (
- IN EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap,
+ IN EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap,
IN UINTN NumberOfDescriptors,
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
diff --git a/ArmPkg/Drivers/CpuDxe/CpuDxe.inf b/ArmPkg/Drivers/CpuDxe/CpuDxe.inf
index 6068a0fce..c30cec609 100644
--- a/ArmPkg/Drivers/CpuDxe/CpuDxe.inf
+++ b/ArmPkg/Drivers/CpuDxe/CpuDxe.inf
@@ -46,6 +46,10 @@
ArmV6/ExceptionSupport.asm | RVCT
ArmV6/ExceptionSupport.S | GCC
+[Sources.AARCH64]
+ AArch64/Mmu.c
+ AArch64/Exception.c
+ AArch64/ExceptionSupport.S | GCC
[Packages]
ArmPkg/ArmPkg.dec
diff --git a/ArmPkg/Drivers/TimerDxe/TimerDxe.c b/ArmPkg/Drivers/TimerDxe/TimerDxe.c
index dabcc8313..a43a10f48 100644
--- a/ArmPkg/Drivers/TimerDxe/TimerDxe.c
+++ b/ArmPkg/Drivers/TimerDxe/TimerDxe.c
@@ -24,7 +24,7 @@
#include <Library/UefiLib.h>
#include <Library/PcdLib.h>
#include <Library/IoLib.h>
-#include <Library/ArmV7ArchTimerLib.h>
+#include <Library/ArmArchTimerLib.h>
#include <Protocol/Timer.h>
#include <Protocol/HardwareInterrupt.h>
diff --git a/ArmPkg/Filesystem/SemihostFs/SemihostFs.inf b/ArmPkg/Filesystem/SemihostFs/SemihostFs.inf
index 30ce6665f..fdf669be5 100644
--- a/ArmPkg/Filesystem/SemihostFs/SemihostFs.inf
+++ b/ArmPkg/Filesystem/SemihostFs/SemihostFs.inf
@@ -2,6 +2,8 @@
# Support a Semi Host file system over a debuggers JTAG
#
# Copyright (c) 2009, Apple Inc. All rights reserved.<BR>
+# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.
+#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -21,7 +23,7 @@
ENTRY_POINT = SemihostFsEntryPoint
-[Sources.ARM]
+[Sources.ARM, Sources.AARCH64]
Arm/SemihostFs.c
[Packages]
diff --git a/ArmPkg/Include/AsmMacroIoLibV8.h b/ArmPkg/Include/AsmMacroIoLibV8.h
new file mode 100644
index 000000000..933ef7043
--- /dev/null
+++ b/ArmPkg/Include/AsmMacroIoLibV8.h
@@ -0,0 +1,200 @@
+/** @file
+ Macros to work around lack of Apple support for LDR register, =expr
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+
+#ifndef __MACRO_IO_LIBV8_H__
+#define __MACRO_IO_LIBV8_H__
+
+#if defined (__GNUC__)
+
+#define MmioWrite32(Address, Data) \
+ ldr x1, =Address ; \
+ ldr x0, =Data ; \
+ str x0, [x1]
+
+#define MmioOr32(Address, OrData) \
+ ldr x1, =Address ; \
+ ldr x2, =OrData ; \
+ ldr x0, [x1] ; \
+ orr x0, x0, x2 ; \
+ str x0, [x1]
+
+#define MmioAnd32(Address, AndData) \
+ ldr x1, =Address ; \
+ ldr x2, =AndData ; \
+ ldr x0, [x1] ; \
+ and x0, x0, x2 ; \
+ str x0, [x1]
+
+#define MmioAndThenOr32(Address, AndData, OrData) \
+ ldr x1, =Address ; \
+ ldr x0, [x1] ; \
+ ldr x2, =AndData ; \
+ and x0, x0, x2 ; \
+ ldr x2, =OrData ; \
+ orr x0, x0, x2 ; \
+ str x0, [x1]
+
+#define MmioWriteFromReg32(Address, Reg) \
+ ldr x1, =Address ; \
+ str Reg, [x1]
+
+#define MmioRead32(Address) \
+ ldr x1, =Address ; \
+ ldr x0, [x1]
+
+#define MmioReadToReg32(Address, Reg) \
+ ldr x1, =Address ; \
+ ldr Reg, [x1]
+
+#define LoadConstant(Data) \
+ ldr x0, =Data
+
+#define LoadConstantToReg(Data, Reg) \
+ ldr Reg, =Data
+
+#define SetPrimaryStack(StackTop, GlobalSize, Tmp, Tmp1) \
+ ands Tmp, GlobalSize, #15 ; \
+ mov Tmp1, #16 ; \
+ sub Tmp1, Tmp1, Tmp ; \
+ csel Tmp, Tmp1, Tmp, ne ; \
+ add GlobalSize, GlobalSize, Tmp ; \
+ sub sp, StackTop, GlobalSize ; \
+ ; \
+ mov Tmp, sp ; \
+ mov GlobalSize, #0x0 ; \
+_SetPrimaryStackInitGlobals: ; \
+ cmp Tmp, StackTop ; \
+ b.eq _SetPrimaryStackEnd ; \
+ str GlobalSize, [Tmp], #8 ; \
+ b _SetPrimaryStackInitGlobals ; \
+_SetPrimaryStackEnd:
+
+// Initialize the Global Variable with '0'
+#define InitializePrimaryStack(GlobalSize, Tmp1, Tmp2) \
+ and Tmp1, GlobalSize, #15 ; \
+ mov Tmp2, #16 ; \
+ sub Tmp2, Tmp2, Tmp1 ; \
+ add GlobalSize, GlobalSize, Tmp2 ; \
+ ; \
+ mov Tmp1, sp ; \
+ sub sp, sp, GlobalSize ; \
+ mov GlobalSize, #0x0 ; \
+_InitializePrimaryStackLoop: ; \
+ mov Tmp2, sp ; \
+ cmp Tmp1, Tmp2 ; \
+ bls _InitializePrimaryStackEnd ; \
+ str GlobalSize, [Tmp1, #-8]! ; \
+ b _InitializePrimaryStackLoop ; \
+_InitializePrimaryStackEnd:
+
+// CurrentEL : 0xC = EL3; 8 = EL2; 4 = EL1
+// This only selects between EL1 and EL2, else we die.
+// Provide the Macro with a safe temp xreg to use.
+#define EL1_OR_EL2(SAFE_XREG) \
+ mrs SAFE_XREG, CurrentEL ;\
+ cmp SAFE_XREG, #0x4 ;\
+ b.eq 1f ;\
+ cmp SAFE_XREG, #0x8 ;\
+ b.eq 2f ;\
+ b dead ;// We should never get here.
+
+// CurrentEL : 0xC = EL3; 8 = EL2; 4 = EL1
+// This only selects between EL1 and EL2 and EL3, else we die.
+// Provide the Macro with a safe temp xreg to use.
+#define EL1_OR_EL2_OR_EL3(SAFE_XREG) \
+ mrs SAFE_XREG, CurrentEL ;\
+ cmp SAFE_XREG, #0x4 ;\
+ b.eq 1f ;\
+ cmp SAFE_XREG, #0x8 ;\
+ b.eq 2f ;\
+ cmp SAFE_XREG, #0xC ;\
+ b.eq 3f ;\
+ b dead ;// We should never get here.
+
+#else
+
+//
+// Use ARM assembly macros, from armasm
+//
+// Less magic in the macros if ldr reg, =expr works
+//
+
+// returns _Data in X0 and _Address in X1
+
+
+
+#define MmioWrite32(Address, Data) MmioWrite32Macro Address, Data
+
+
+
+
+// returns Data in X0 and Address in X1, and OrData in X2
+#define MmioOr32(Address, OrData) MmioOr32Macro Address, OrData
+
+
+// returns _Data in X0 and _Address in X1, and _OrData in X2
+
+
+#define MmioAnd32(Address, AndData) MmioAnd32Macro Address, AndData
+
+// returns result in X0, _Address in X1, and _OrData in X2
+
+
+#define MmioAndThenOr32(Address, AndData, OrData) MmioAndThenOr32Macro Address, AndData, OrData
+
+
+// returns _Data in _Reg and _Address in X1
+
+
+#define MmioWriteFromReg32(Address, Reg) MmioWriteFromReg32Macro Address, Reg
+
+// returns _Data in X0 and _Address in X1
+
+
+#define MmioRead32(Address) MmioRead32Macro Address
+
+// returns _Data in Reg and _Address in X1
+
+
+#define MmioReadToReg32(Address, Reg) MmioReadToReg32Macro Address, Reg
+
+
+// load X0 with _Data
+
+
+#define LoadConstant(Data) LoadConstantMacro Data
+
+// load _Reg with _Data
+
+
+#define LoadConstantToReg(Data, Reg) LoadConstantToRegMacro Data, Reg
+
+// conditional load testing eq flag
+#define LoadConstantToRegIfEq(Data, Reg) LoadConstantToRegIfEqMacro Data, Reg
+
+#define SetPrimaryStack(StackTop,GlobalSize,Tmp, Tmp1) SetPrimaryStack StackTop, GlobalSize, Tmp, Tmp1
+
+#define InitializePrimaryStack(GlobalSize, Tmp1, Tmp2) InitializePrimaryStack GlobalSize, Tmp1, Tmp2
+
+#define EL1_OR_EL2(SAFE_XREG) EL1_OR_EL2 SAFE_XREG
+
+#define EL1_OR_EL2_OR_EL3(SAFE_XREG) EL1_OR_EL2_OR_EL3 SAFE_XREG
+
+#endif
+
+#endif // __MACRO_IO_LIBV8_H__
+
diff --git a/ArmPkg/Include/Chipset/AArch64.h b/ArmPkg/Include/Chipset/AArch64.h
new file mode 100644
index 000000000..7f1f44ccc
--- /dev/null
+++ b/ArmPkg/Include/Chipset/AArch64.h
@@ -0,0 +1,179 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef __AARCH64_H__
+#define __AARCH64_H__
+
+#include <Chipset/AArch64Mmu.h>
+#include <Chipset/ArmArchTimer.h>
+
+// ARM Interrupt ID in Exception Table
+#define ARM_ARCH_EXCEPTION_IRQ EXCEPT_AARCH64_IRQ
+
+// CPACR - Coprocessor Access Control Register definitions
+#define CPACR_TTA_EN (1UL << 28)
+#define CPACR_FPEN_EL1 (1UL << 20)
+#define CPACR_FPEN_FULL (3UL << 20)
+#define CPACR_CP_FULL_ACCESS 0x300000
+
+// Coprocessor Trap Register (CPTR)
+#define AARCH64_CPTR_TFP (1 << 10)
+
+// ID_AA64PFR0 - AArch64 Processor Feature Register 0 definitions
+#define AARCH64_PFR0_FP (0xF << 16)
+
+// NSACR - Non-Secure Access Control Register definitions
+#define NSACR_CP(cp) ((1 << (cp)) & 0x3FFF)
+#define NSACR_NSD32DIS (1 << 14)
+#define NSACR_NSASEDIS (1 << 15)
+#define NSACR_PLE (1 << 16)
+#define NSACR_TL (1 << 17)
+#define NSACR_NS_SMP (1 << 18)
+#define NSACR_RFR (1 << 19)
+
+// SCR - Secure Configuration Register definitions
+#define SCR_NS (1 << 0)
+#define SCR_IRQ (1 << 1)
+#define SCR_FIQ (1 << 2)
+#define SCR_EA (1 << 3)
+#define SCR_FW (1 << 4)
+#define SCR_AW (1 << 5)
+
+// MIDR - Main ID Register definitions
+#define ARM_CPU_TYPE_MASK 0xFFF
+#define ARM_CPU_TYPE_AEMv8 0xD0F
+#define ARM_CPU_TYPE_A15 0xC0F
+#define ARM_CPU_TYPE_A9 0xC09
+#define ARM_CPU_TYPE_A5 0xC05
+
+// Hypervisor Configuration Register
+#define ARM_HCR_FMO BIT3
+#define ARM_HCR_IMO BIT4
+#define ARM_HCR_AMO BIT5
+#define ARM_HCR_TGE BIT27
+
+// AArch64 Exception Level
+#define AARCH64_EL3 0xC
+#define AARCH64_EL2 0x8
+#define AARCH64_EL1 0x4
+
+#define ARM_VECTOR_TABLE_ALIGNMENT ((1 << 11)-1)
+
+VOID
+EFIAPI
+ArmEnableSWPInstruction (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmReadCbar (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmReadTpidrurw (
+ VOID
+ );
+
+VOID
+EFIAPI
+ArmWriteTpidrurw (
+ UINTN Value
+ );
+
+UINTN
+EFIAPI
+ArmIsArchTimerImplemented (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmReadIdPfr0 (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmReadIdPfr1 (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmGetTCR (
+ VOID
+ );
+
+VOID
+EFIAPI
+ArmSetTCR (
+ UINTN Value
+ );
+
+UINTN
+EFIAPI
+ArmGetMAIR (
+ VOID
+ );
+
+VOID
+EFIAPI
+ArmSetMAIR (
+ UINTN Value
+ );
+
+VOID
+EFIAPI
+ArmDisableAlignmentCheck (
+ VOID
+ );
+
+
+VOID
+EFIAPI
+ArmEnableAlignmentCheck (
+ VOID
+ );
+
+VOID
+EFIAPI
+ArmDisableAllExceptions (
+ VOID
+ );
+
+VOID
+ArmWriteHcr (
+ IN UINTN Hcr
+ );
+
+UINTN
+ArmReadCurrentEL (
+ VOID
+ );
+
+UINT64
+PageAttributeToGcdAttribute (
+ IN UINT64 PageAttributes
+ );
+
+UINT64
+GcdAttributeToPageAttribute (
+ IN UINT64 GcdAttributes
+ );
+
+#endif // __AARCH64_H__
diff --git a/ArmPkg/Include/Chipset/AArch64Mmu.h b/ArmPkg/Include/Chipset/AArch64Mmu.h
new file mode 100644
index 000000000..0799734a1
--- /dev/null
+++ b/ArmPkg/Include/Chipset/AArch64Mmu.h
@@ -0,0 +1,208 @@
+/** @file
+*
+* Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+*
+* This program and the accompanying materials
+* are licensed and made available under the terms and conditions of the BSD License
+* which accompanies this distribution. The full text of the license may be found at
+* http://opensource.org/licenses/bsd-license.php
+*
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+*
+**/
+
+#ifndef __AARCH64_MMU_H_
+#define __AARCH64_MMU_H_
+
+//
+// Memory Attribute Indirection register Definitions
+//
+#define MAIR_ATTR_DEVICE_MEMORY 0x0ULL
+#define MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE 0x44ULL
+#define MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH 0xBBULL
+#define MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK 0xFFULL
+
+#define MAIR_ATTR(n,value) ((value) << (((n) >> 2)*8))
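As a worked example of the MAIR_ATTR() encoding (the TT_ATTR_INDX_* values defined further down are pre-shifted by 2, so (n >> 2) recovers the attribute index and selects the corresponding byte of MAIR):

  // TT_ATTR_INDX_MEMORY_WRITE_BACK is (0x3 << 2), so (n >> 2) == 3 and the
  // attribute value lands in MAIR bits 31:24:
  //   MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
  //     == 0xFFULL << 24 == 0xFF000000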
+
+//
+// Long-descriptor Translation Table format
+//
+
+// Return the smallest offset from the table level.
+// The first offset starts at 12bit. There are 4 levels of 9-bit address range from level 3 to level 0
+#define TT_ADDRESS_OFFSET_AT_LEVEL(TableLevel) (12 + ((3 - (TableLevel)) * 9))
+
+#define TT_BLOCK_ENTRY_SIZE_AT_LEVEL(Level) (1 << TT_ADDRESS_OFFSET_AT_LEVEL(Level))
+
+// Get the associated entry in the given Translation Table
+#define TT_GET_ENTRY_FOR_ADDRESS(TranslationTable, Level, Address) \
+ ((UINTN)(TranslationTable) + ((((Address) >> TT_ADDRESS_OFFSET_AT_LEVEL(Level)) & (BIT9-1)) * sizeof(UINT64)))
+
+// Return the smallest address granularity from the table level.
+// The first offset starts at 12bit. There are 4 levels of 9-bit address range from level 3 to level 0
+#define TT_ADDRESS_AT_LEVEL(TableLevel) (1 << TT_ADDRESS_OFFSET_AT_LEVEL(TableLevel))
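As a quick sanity check of the level arithmetic for the 4KB granule (worked values, not part of the patch):

  // TT_ADDRESS_OFFSET_AT_LEVEL(3) == 12  ->  4KB pages
  // TT_ADDRESS_OFFSET_AT_LEVEL(2) == 21  ->  2MB blocks
  // TT_ADDRESS_OFFSET_AT_LEVEL(1) == 30  ->  1GB blocks
  // TT_ADDRESS_OFFSET_AT_LEVEL(0) == 39  ->  512GB covered per level-0 entry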
+
+// There are 512 entries per table when 4K Granularity
+#define TT_ENTRY_COUNT 512
+#define TT_ALIGNMENT_BLOCK_ENTRY BIT12
+#define TT_ALIGNMENT_DESCRIPTION_TABLE BIT12
+
+#define TT_ADDRESS_MASK_BLOCK_ENTRY (0xFFFFFFFULL << 12)
+#define TT_ADDRESS_MASK_DESCRIPTION_TABLE (0xFFFFFFFULL << 12)
+
+#define TT_TYPE_MASK 0x3
+#define TT_TYPE_TABLE_ENTRY 0x3
+#define TT_TYPE_BLOCK_ENTRY 0x1
+#define TT_TYPE_BLOCK_ENTRY_LEVEL3 0x3
+
+#define TT_ATTR_INDX_MASK (0x7 << 2)
+#define TT_ATTR_INDX_DEVICE_MEMORY (0x0 << 2)
+#define TT_ATTR_INDX_MEMORY_NON_CACHEABLE (0x1 << 2)
+#define TT_ATTR_INDX_MEMORY_WRITE_THROUGH (0x2 << 2)
+#define TT_ATTR_INDX_MEMORY_WRITE_BACK (0x3 << 2)
+
+#define TT_AP_MASK (0x3UL << 6)
+#define TT_AP_NO_RW (0x0UL << 6)
+#define TT_AP_RW_RW (0x1UL << 6)
+#define TT_AP_NO_RO (0x2UL << 6)
+#define TT_AP_RO_RO (0x3UL << 6)
+
+#define TT_NS BIT5
+#define TT_AF BIT10
+
+#define TT_PXN_MASK BIT53
+#define TT_UXN_MASK BIT54
+
+#define TT_ATTRIBUTES_MASK ((0xFFFULL << 52) | (0x3FFULL << 2))
+
+#define TT_TABLE_PXN BIT59
+#define TT_TABLE_XN BIT60
+#define TT_TABLE_NS BIT63
+
+#define TT_TABLE_AP_MASK (BIT62 | BIT61)
+#define TT_TABLE_AP_NO_PERMISSION (0x0ULL << 61)
+#define TT_TABLE_AP_EL0_NO_ACCESS (0x1ULL << 61)
+#define TT_TABLE_AP_NO_WRITE_ACCESS (0x2ULL << 61)
+
+//
+// Translation Control Register
+//
+#define TCR_T0SZ_MASK 0x3F
+
+#define TCR_PS_4GB (0 << 16)
+#define TCR_PS_64GB (1 << 16)
+#define TCR_PS_1TB (2 << 16)
+#define TCR_PS_4TB (3 << 16)
+#define TCR_PS_16TB (4 << 16)
+#define TCR_PS_256TB (5 << 16)
+
+#define TCR_TG0_4KB (0 << 14)
+
+#define TCR_IPS_4GB (0UL << 32)
+#define TCR_IPS_64GB (1UL << 32)
+#define TCR_IPS_1TB (2UL << 32)
+#define TCR_IPS_4TB (3UL << 32)
+#define TCR_IPS_16TB (4UL << 32)
+#define TCR_IPS_256TB (5UL << 32)
+
+
+#define TTBR_ASID_FIELD (48)
+#define TTBR_ASID_MASK (0xFF << TTBR_ASID_FIELD)
+#define TTBR_BADDR_MASK (0xFFFFFFFFFFFF ) // The width of this field depends on the values in TxSZ. Addr occupies bottom 48bits
+
+#define TCR_EL1_T0SZ_FIELD (0)
+#define TCR_EL1_EPD0_FIELD (7)
+#define TCR_EL1_IRGN0_FIELD (8)
+#define TCR_EL1_ORGN0_FIELD (10)
+#define TCR_EL1_SH0_FIELD (12)
+#define TCR_EL1_TG0_FIELD (14)
+#define TCR_EL1_T1SZ_FIELD (16)
+#define TCR_EL1_A1_FIELD (22)
+#define TCR_EL1_EPD1_FIELD (23)
+#define TCR_EL1_IRGN1_FIELD (24)
+#define TCR_EL1_ORGN1_FIELD (26)
+#define TCR_EL1_SH1_FIELD (28)
+#define TCR_EL1_TG1_FIELD (30)
+#define TCR_EL1_IPS_FIELD (32)
+#define TCR_EL1_AS_FIELD (36)
+#define TCR_EL1_TBI0_FIELD (37)
+#define TCR_EL1_TBI1_FIELD (38)
+#define TCR_EL1_T0SZ_MASK (0x1F << TCR_EL1_T0SZ_FIELD)
+#define TCR_EL1_EPD0_MASK (0x1 << TCR_EL1_EPD0_FIELD)
+#define TCR_EL1_IRGN0_MASK (0x3 << TCR_EL1_IRGN0_FIELD)
+#define TCR_EL1_ORGN0_MASK (0x3 << TCR_EL1_ORGN0_FIELD)
+#define TCR_EL1_SH0_MASK (0x3 << TCR_EL1_SH0_FIELD)
+#define TCR_EL1_TG0_MASK (0x1 << TCR_EL1_TG0_FIELD)
+#define TCR_EL1_T1SZ_MASK (0x1F << TCR_EL1_T1SZ_FIELD)
+#define TCR_EL1_A1_MASK (0x1 << TCR_EL1_A1_FIELD)
+#define TCR_EL1_EPD1_MASK (0x1 << TCR_EL1_EPD1_FIELD)
+#define TCR_EL1_IRGN1_MASK (0x3 << TCR_EL1_IRGN1_FIELD)
+#define TCR_EL1_ORGN1_MASK (0x3 << TCR_EL1_ORGN1_FIELD)
+#define TCR_EL1_SH1_MASK (0x3 << TCR_EL1_SH1_FIELD)
+#define TCR_EL1_TG1_MASK (0x1 << TCR_EL1_TG1_FIELD)
+#define TCR_EL1_IPS_MASK (0x7 << TCR_EL1_IPS_FIELD)
+#define TCR_EL1_AS_MASK (0x1 << TCR_EL1_AS_FIELD)
+#define TCR_EL1_TBI0_MASK (0x1 << TCR_EL1_TBI0_FIELD)
+#define TCR_EL1_TBI1_MASK (0x1 << TCR_EL1_TBI1_FIELD)
+
+
+#define VTCR_EL23_T0SZ_FIELD (0)
+#define VTCR_EL23_IRGN0_FIELD (8)
+#define VTCR_EL23_ORGN0_FIELD (10)
+#define VTCR_EL23_SH0_FIELD (12)
+#define TCR_EL23_TG0_FIELD (14)
+#define VTCR_EL23_PS_FIELD (16)
+#define TCR_EL23_T0SZ_MASK (0x1F << VTCR_EL23_T0SZ_FIELD)
+#define TCR_EL23_IRGN0_MASK (0x3 << VTCR_EL23_IRGN0_FIELD)
+#define TCR_EL23_ORGN0_MASK (0x3 << VTCR_EL23_ORGN0_FIELD)
+#define TCR_EL23_SH0_MASK (0x3 << VTCR_EL23_SH0_FIELD)
+#define TCR_EL23_TG0_MASK (0x1 << TCR_EL23_TG0_FIELD)
+#define TCR_EL23_PS_MASK (0x7 << VTCR_EL23_PS_FIELD)
+
+
+#define VTCR_EL2_T0SZ_FIELD (0)
+#define VTCR_EL2_SL0_FIELD (6)
+#define VTCR_EL2_IRGN0_FIELD (8)
+#define VTCR_EL2_ORGN0_FIELD (10)
+#define VTCR_EL2_SH0_FIELD (12)
+#define VTCR_EL2_TG0_FIELD (14)
+#define VTCR_EL2_PS_FIELD (16)
+#define VTCR_EL2_T0SZ_MASK (0x1F << VTCR_EL2_T0SZ_FIELD)
+#define VTCR_EL2_SL0_MASK (0x1F << VTCR_EL2_SL0_FIELD)
+#define VTCR_EL2_IRGN0_MASK (0x3 << VTCR_EL2_IRGN0_FIELD)
+#define VTCR_EL2_ORGN0_MASK (0x3 << VTCR_EL2_ORGN0_FIELD)
+#define VTCR_EL2_SH0_MASK (0x3 << VTCR_EL2_SH0_FIELD)
+#define VTCR_EL2_TG0_MASK (0x1 << VTCR_EL2_TG0_FIELD)
+#define VTCR_EL2_PS_MASK (0x7 << VTCR_EL2_PS_FIELD)
+
+
+#define TCR_RGN_OUTER_NON_CACHEABLE (0x0 << 10)
+#define TCR_RGN_OUTER_WRITE_BACK_ALLOC (0x1 << 10)
+#define TCR_RGN_OUTER_WRITE_THROUGH (0x2 << 10)
+#define TCR_RGN_OUTER_WRITE_BACK_NO_ALLOC (0x3 << 10)
+
+#define TCR_RGN_INNER_NON_CACHEABLE (0x0 << 8)
+#define TCR_RGN_INNER_WRITE_BACK_ALLOC (0x1 << 8)
+#define TCR_RGN_INNER_WRITE_THROUGH (0x2 << 8)
+#define TCR_RGN_INNER_WRITE_BACK_NO_ALLOC (0x3 << 8)
+
+#define TCR_SH_NON_SHAREABLE (0x0 << 12)
+#define TCR_SH_OUTER_SHAREABLE (0x2 << 12)
+#define TCR_SH_INNER_SHAREABLE (0x3 << 12)
+
+#define TCR_PASZ_32BITS_4GB (0x0)
+#define TCR_PASZ_36BITS_64GB (0x1)
+#define TCR_PASZ_40BITS_1TB (0x2)
+#define TCR_PASZ_42BITS_4TB (0x3)
+#define TCR_PASZ_44BITS_16TB (0x4)
+#define TCR_PASZ_48BITS_256TB (0x5)
+
+// The value written to the T*SZ fields is defined as 2^(64-T*SZ), so a 39-bit
+// virtual address range (512GB of virtual space) sets T*SZ to 25
+#define INPUT_ADDRESS_SIZE_TO_TxSZ(a) (64 - a)
+
+// Uses LPAE Page Table format
+
+#endif // __AARCH64_MMU_H_
+
diff --git a/ArmPkg/Include/Chipset/ArmAemV8.h b/ArmPkg/Include/Chipset/ArmAemV8.h
new file mode 100644
index 000000000..a64a92124
--- /dev/null
+++ b/ArmPkg/Include/Chipset/ArmAemV8.h
@@ -0,0 +1,21 @@
+/** @file
+
+ Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef __ARM_AEM_V8_H__
+#define __ARM_AEM_V8_H__
+
+#include <Chipset/AArch64.h>
+
+#endif //__ARM_AEM_V8_H__
+
diff --git a/ArmPkg/Include/Chipset/ArmV7ArchTimer.h b/ArmPkg/Include/Chipset/ArmArchTimer.h
index 734c8855e..fcc03ca92 100644
--- a/ArmPkg/Include/Chipset/ArmV7ArchTimer.h
+++ b/ArmPkg/Include/Chipset/ArmArchTimer.h
@@ -1,6 +1,6 @@
/** @file
*
-* Copyright (c) 2011, ARM Limited. All rights reserved.
+* Copyright (c) 2011-2013, ARM Limited. All rights reserved.
*
* This program and the accompanying materials
* are licensed and made available under the terms and conditions of the BSD License
@@ -12,8 +12,8 @@
*
**/
-#ifndef __ARMV7_ARCH_TIMER_H_
-#define __ARMV7_ARCH_TIMER_H_
+#ifndef __ARM_ARCH_TIMER_H_
+#define __ARM_ARCH_TIMER_H_
UINTN
EFIAPI
@@ -135,4 +135,5 @@ ArmWriteCntvOff (
UINT64 Val
);
-#endif
+#endif // __ARM_ARCH_TIMER_H_
+
diff --git a/ArmPkg/Include/Chipset/ArmCortexA5x.h b/ArmPkg/Include/Chipset/ArmCortexA5x.h
new file mode 100644
index 000000000..e2217e3f3
--- /dev/null
+++ b/ArmPkg/Include/Chipset/ArmCortexA5x.h
@@ -0,0 +1,23 @@
+/** @file
+
+ Copyright (c) 2012-2013, ARM Limited. All rights reserved.
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef __ARM_CORTEX_A5x_H__
+#define __ARM_CORTEX_A5x_H__
+
+//
+// Cortex A5x feature bit definitions
+//
+#define A5X_FEATURE_SMP (1 << 6)
+
+#endif
diff --git a/ArmPkg/Include/Chipset/ArmV7.h b/ArmPkg/Include/Chipset/ArmV7.h
index e64deb141..479e8d05e 100644
--- a/ArmPkg/Include/Chipset/ArmV7.h
+++ b/ArmPkg/Include/Chipset/ArmV7.h
@@ -17,7 +17,7 @@
#define __ARM_V7_H__
#include <Chipset/ArmV7Mmu.h>
-#include <Chipset/ArmV7ArchTimer.h>
+#include <Chipset/ArmArchTimer.h>
// ARM Interrupt ID in Exception Table
#define ARM_ARCH_EXCEPTION_IRQ EXCEPT_ARM_IRQ
diff --git a/ArmPkg/Include/Library/ArmV7ArchTimerLib.h b/ArmPkg/Include/Library/ArmArchTimerLib.h
index 983184810..1ecada383 100644
--- a/ArmPkg/Include/Library/ArmV7ArchTimerLib.h
+++ b/ArmPkg/Include/Library/ArmArchTimerLib.h
@@ -1,6 +1,6 @@
/** @file
- Copyright (c) 2011, ARM Ltd. All rights reserved.<BR>
+ Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
@@ -12,8 +12,8 @@
**/
-#ifndef __ARM_V7_ARCH_TIMER_LIB_H__
-#define __ARM_V7_ARCH_TIMER_LIB_H__
+#ifndef __ARM_ARCH_TIMER_LIB_H__
+#define __ARM_ARCH_TIMER_LIB_H__
#define ARM_ARCH_TIMER_ENABLE (1 << 0)
#define ARM_ARCH_TIMER_IMASK (1 << 1)
@@ -36,7 +36,7 @@ typedef enum {
CnthpCtl,
CnthpCval,
RegMaximum
-}ARM_ARCH_TIMER_REGS;
+} ARM_ARCH_TIMER_REGS;
VOID
EFIAPI
@@ -112,4 +112,4 @@ ArmArchTimerSetCompareVal (
IN UINT64 Val
);
-#endif // __ARM_V7_ARCH_TIMER_LIB_H__
+#endif // __ARM_ARCH_TIMER_LIB_H__
diff --git a/ArmPkg/Include/Library/ArmLib.h b/ArmPkg/Include/Library/ArmLib.h
index 8174845c6..1f1fd9e51 100644
--- a/ArmPkg/Include/Library/ArmLib.h
+++ b/ArmPkg/Include/Library/ArmLib.h
@@ -18,10 +18,16 @@
#include <Uefi/UefiBaseType.h>
-#ifdef ARM_CPU_ARMv6
-#include <Chipset/ARM1176JZ-S.h>
+#ifdef MDE_CPU_ARM
+ #ifdef ARM_CPU_ARMv6
+ #include <Chipset/ARM1176JZ-S.h>
+ #else
+ #include <Chipset/ArmV7.h>
+ #endif
+#elif defined(MDE_CPU_AARCH64)
+ #include <Chipset/AArch64.h>
#else
-#include <Chipset/ArmV7.h>
+ #error "Unknown chipset."
#endif
typedef enum {
@@ -501,6 +507,7 @@ ArmCallWFE (
VOID
EFIAPI
ArmCallWFI (
+
VOID
);
diff --git a/ArmPkg/Library/ArmArchTimerLib/ArmArchTimerLib.c b/ArmPkg/Library/ArmArchTimerLib/ArmArchTimerLib.c
index 8c1fe415d..970bde34c 100644
--- a/ArmPkg/Library/ArmArchTimerLib/ArmArchTimerLib.c
+++ b/ArmPkg/Library/ArmArchTimerLib/ArmArchTimerLib.c
@@ -19,7 +19,7 @@
#include <Library/TimerLib.h>
#include <Library/DebugLib.h>
#include <Library/PcdLib.h>
-#include <Library/ArmV7ArchTimerLib.h>
+#include <Library/ArmArchTimerLib.h>
#include <Chipset/ArmV7.h>
#define TICKS_PER_MICRO_SEC (PcdGet32 (PcdArmArchTimerFreqInHz)/1000000U)
@@ -43,15 +43,18 @@ TimerConstructor (
// manual lower bound of the frequency is in the range of 1-10MHz
ASSERT (TICKS_PER_MICRO_SEC);
+#ifdef MDE_CPU_ARM
+ // Only set the frequency for ARMv7; on other architectures we expect the secure firmware to have already done it
// If the security extensions are not implemented set Timer Frequency
if ((ArmReadIdPfr1 () & 0xF0) == 0x0) {
ArmArchTimerSetTimerFreq (PcdGet32 (PcdArmArchTimerFreqInHz));
}
+#endif
// Architectural Timer Frequency must be set in the Secure privileged(if secure extensions are supported) mode.
// If the reset value (0) is returned just ASSERT.
TimerFreq = ArmArchTimerGetTimerFreq ();
- ASSERT (TimerFreq);
+ ASSERT (TimerFreq != 0);
} else {
DEBUG ((EFI_D_ERROR, "ARM Architectural Timer is not available in the CPU, hence this library can not be used.\n"));
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimer.c b/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimer.c
new file mode 100644
index 000000000..fa4f7c741
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimer.c
@@ -0,0 +1,275 @@
+/** @file
+*
+* Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+*
+* This program and the accompanying materials
+* are licensed and made available under the terms and conditions of the BSD License
+* which accompanies this distribution. The full text of the license may be found at
+* http://opensource.org/licenses/bsd-license.php
+*
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+*
+**/
+
+#include <Uefi.h>
+#include <Chipset/AArch64.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/ArmLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include "AArch64Lib.h"
+#include "ArmLibPrivate.h"
+#include <Library/ArmArchTimerLib.h>
+
+VOID
+EFIAPI
+ArmArchTimerReadReg (
+ IN ARM_ARCH_TIMER_REGS Reg,
+ OUT VOID *DstBuf
+ )
+{
+ // Check if the Generic/Architecture timer is implemented
+ if (ArmIsArchTimerImplemented ()) {
+
+ switch (Reg) {
+
+ case CntFrq:
+ *((UINTN *)DstBuf) = ArmReadCntFrq ();
+ break;
+
+ case CntPct:
+ *((UINT64 *)DstBuf) = ArmReadCntPct ();
+ break;
+
+ case CntkCtl:
+ *((UINTN *)DstBuf) = ArmReadCntkCtl();
+ break;
+
+ case CntpTval:
+ *((UINTN *)DstBuf) = ArmReadCntpTval ();
+ break;
+
+ case CntpCtl:
+ *((UINTN *)DstBuf) = ArmReadCntpCtl ();
+ break;
+
+ case CntvTval:
+ *((UINTN *)DstBuf) = ArmReadCntvTval ();
+ break;
+
+ case CntvCtl:
+ *((UINTN *)DstBuf) = ArmReadCntvCtl ();
+ break;
+
+ case CntvCt:
+ *((UINT64 *)DstBuf) = ArmReadCntvCt ();
+ break;
+
+ case CntpCval:
+ *((UINT64 *)DstBuf) = ArmReadCntpCval ();
+ break;
+
+ case CntvCval:
+ *((UINT64 *)DstBuf) = ArmReadCntvCval ();
+ break;
+
+ case CntvOff:
+ *((UINT64 *)DstBuf) = ArmReadCntvOff ();
+ break;
+
+ case CnthCtl:
+ case CnthpTval:
+ case CnthpCtl:
+ case CnthpCval:
+ DEBUG ((EFI_D_ERROR, "The register is related to Hypervisor Mode. Can't perform requested operation\n "));
+ break;
+
+ default:
+ DEBUG ((EFI_D_ERROR, "Unknown ARM Generic Timer register %x. \n ", Reg));
+ }
+ } else {
+ DEBUG ((EFI_D_ERROR, "Attempt to read ARM Generic Timer registers. But ARM Generic Timer extension is not implemented \n "));
+ ASSERT (0);
+ }
+}
+
+VOID
+EFIAPI
+ArmArchTimerWriteReg (
+ IN ARM_ARCH_TIMER_REGS Reg,
+ IN VOID *SrcBuf
+ )
+{
+ // Check if the Generic/Architecture timer is implemented
+ if (ArmIsArchTimerImplemented ()) {
+
+ switch (Reg) {
+
+ case CntFrq:
+ ArmWriteCntFrq (*((UINTN *)SrcBuf));
+ break;
+
+ case CntPct:
+ DEBUG ((EFI_D_ERROR, "Can't write to Read Only Register: CNTPCT \n"));
+ break;
+
+ case CntkCtl:
+ ArmWriteCntkCtl (*((UINTN *)SrcBuf));
+ break;
+
+ case CntpTval:
+ ArmWriteCntpTval (*((UINTN *)SrcBuf));
+ break;
+
+ case CntpCtl:
+ ArmWriteCntpCtl (*((UINTN *)SrcBuf));
+ break;
+
+ case CntvTval:
+ ArmWriteCntvTval (*((UINTN *)SrcBuf));
+ break;
+
+ case CntvCtl:
+ ArmWriteCntvCtl (*((UINTN *)SrcBuf));
+ break;
+
+ case CntvCt:
+ DEBUG ((EFI_D_ERROR, "Can't write to Read Only Register: CNTVCT \n"));
+ break;
+
+ case CntpCval:
+ ArmWriteCntpCval (*((UINT64 *)SrcBuf) );
+ break;
+
+ case CntvCval:
+ ArmWriteCntvCval (*((UINT64 *)SrcBuf) );
+ break;
+
+ case CntvOff:
+ ArmWriteCntvOff (*((UINT64 *)SrcBuf));
+ break;
+
+ case CnthCtl:
+ case CnthpTval:
+ case CnthpCtl:
+ case CnthpCval:
+ DEBUG ((EFI_D_ERROR, "The register is related to Hypervisor Mode. Can't perform requested operation\n "));
+ break;
+
+ default:
+ DEBUG ((EFI_D_ERROR, "Unknown ARM Generic Timer register %x. \n ", Reg));
+ }
+ } else {
+ DEBUG ((EFI_D_ERROR, "Attempt to write to ARM Generic Timer registers. But ARM Generic Timer extension is not implemented \n "));
+ ASSERT (0);
+ }
+}
+
+VOID
+EFIAPI
+ArmArchTimerEnableTimer (
+ VOID
+ )
+{
+ UINTN TimerCtrlReg;
+
+ ArmArchTimerReadReg (CntpCtl, (VOID *)&TimerCtrlReg);
+ TimerCtrlReg |= ARM_ARCH_TIMER_ENABLE;
+ ArmArchTimerWriteReg (CntpCtl, (VOID *)&TimerCtrlReg);
+}
+
+VOID
+EFIAPI
+ArmArchTimerDisableTimer (
+ VOID
+ )
+{
+ UINTN TimerCtrlReg;
+
+ ArmArchTimerReadReg (CntpCtl, (VOID *)&TimerCtrlReg);
+ TimerCtrlReg &= ~ARM_ARCH_TIMER_ENABLE;
+ ArmArchTimerWriteReg (CntpCtl, (VOID *)&TimerCtrlReg);
+}
+
+VOID
+EFIAPI
+ArmArchTimerSetTimerFreq (
+ IN UINTN FreqInHz
+ )
+{
+ ArmArchTimerWriteReg (CntFrq, (VOID *)&FreqInHz);
+}
+
+UINTN
+EFIAPI
+ArmArchTimerGetTimerFreq (
+ VOID
+ )
+{
+ UINTN ArchTimerFreq = 0;
+ ArmArchTimerReadReg (CntFrq, (VOID *)&ArchTimerFreq);
+ return ArchTimerFreq;
+}
+
+UINTN
+EFIAPI
+ArmArchTimerGetTimerVal (
+ VOID
+ )
+{
+ UINTN ArchTimerVal;
+ ArmArchTimerReadReg (CntpTval, (VOID *)&ArchTimerVal);
+ return ArchTimerVal;
+}
+
+
+VOID
+EFIAPI
+ArmArchTimerSetTimerVal (
+ IN UINTN Val
+ )
+{
+ ArmArchTimerWriteReg (CntpTval, (VOID *)&Val);
+}
+
+UINT64
+EFIAPI
+ArmArchTimerGetSystemCount (
+ VOID
+ )
+{
+ UINT64 SystemCount;
+ ArmArchTimerReadReg (CntPct, (VOID *)&SystemCount);
+ return SystemCount;
+}
+
+UINTN
+EFIAPI
+ArmArchTimerGetTimerCtrlReg (
+ VOID
+ )
+{
+ UINTN Val;
+ ArmArchTimerReadReg (CntpCtl, (VOID *)&Val);
+ return Val;
+}
+
+VOID
+EFIAPI
+ArmArchTimerSetTimerCtrlReg (
+ UINTN Val
+ )
+{
+ ArmArchTimerWriteReg (CntpCtl, (VOID *)&Val);
+}
+
+VOID
+EFIAPI
+ArmArchTimerSetCompareVal (
+ IN UINT64 Val
+ )
+{
+ ArmArchTimerWriteReg (CntpCval, (VOID *)&Val);
+}
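ArmArchTimerReadReg/ArmArchTimerWriteReg above funnel every generic-timer access through a single register enum, and the remaining helpers are thin wrappers over them. A hedged sketch of how a caller could arm the EL1 physical timer for a number of microseconds using only those wrappers; MicroSecondsToTicks() is a hypothetical helper introduced here for the example and is not part of the patch:

// Illustrative sketch only, built on the helpers defined in the file above.
#include <Uefi.h>
#include <Library/ArmArchTimerLib.h>

STATIC
UINTN
MicroSecondsToTicks (
  IN UINTN MicroSeconds
  )
{
  // CNTFRQ (read through ArmArchTimerGetTimerFreq) holds the counter frequency in Hz.
  return (ArmArchTimerGetTimerFreq () / 1000000U) * MicroSeconds;
}

VOID
ExampleArmTimerFireAfter (
  IN UINTN MicroSeconds
  )
{
  // CNTP_TVAL is a down-counter: the timer condition is met once it reaches zero.
  ArmArchTimerSetTimerVal (MicroSecondsToTicks (MicroSeconds));
  // Set the enable bit in CNTP_CTL (the interrupt mask bit is left as configured).
  ArmArchTimerEnableTimer ();
}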
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimerSupport.S b/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimerSupport.S
new file mode 100644
index 000000000..c6087aa61
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimerSupport.S
@@ -0,0 +1,140 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+.text
+.align 2
+
+ASM_GLOBAL ASM_PFX(ArmReadCntFrq)
+ASM_GLOBAL ASM_PFX(ArmWriteCntFrq)
+ASM_GLOBAL ASM_PFX(ArmReadCntPct)
+ASM_GLOBAL ASM_PFX(ArmReadCntkCtl)
+ASM_GLOBAL ASM_PFX(ArmWriteCntkCtl)
+ASM_GLOBAL ASM_PFX(ArmReadCntpTval)
+ASM_GLOBAL ASM_PFX(ArmWriteCntpTval)
+ASM_GLOBAL ASM_PFX(ArmReadCntpCtl)
+ASM_GLOBAL ASM_PFX(ArmWriteCntpCtl)
+ASM_GLOBAL ASM_PFX(ArmReadCntvTval)
+ASM_GLOBAL ASM_PFX(ArmWriteCntvTval)
+ASM_GLOBAL ASM_PFX(ArmReadCntvCtl)
+ASM_GLOBAL ASM_PFX(ArmWriteCntvCtl)
+ASM_GLOBAL ASM_PFX(ArmReadCntvCt)
+ASM_GLOBAL ASM_PFX(ArmReadCntpCval)
+ASM_GLOBAL ASM_PFX(ArmWriteCntpCval)
+ASM_GLOBAL ASM_PFX(ArmReadCntvCval)
+ASM_GLOBAL ASM_PFX(ArmWriteCntvCval)
+ASM_GLOBAL ASM_PFX(ArmReadCntvOff)
+ASM_GLOBAL ASM_PFX(ArmWriteCntvOff)
+
+ASM_PFX(ArmReadCntFrq):
+ mrs x0, cntfrq_el0 // Read CNTFRQ
+ ret
+
+
+# NOTE - CNTFRQ can only be written at the highest implemented Exception level (EL3 on the model); it is read-only at lower levels (EL2, EL1, EL0)
+ASM_PFX(ArmWriteCntFrq):
+ msr cntfrq_el0, x0 // Write to CNTFRQ
+ ret
+
+
+ASM_PFX(ArmReadCntPct):
+ mrs x0, cntpct_el0 // Read CNTPCT (Physical counter register)
+ ret
+
+
+ASM_PFX(ArmReadCntkCtl):
+ mrs x0, cntkctl_el1 // Read CNTK_CTL (Timer PL1 Control Register)
+ ret
+
+
+ASM_PFX(ArmWriteCntkCtl):
+ msr cntkctl_el1, x0 // Write to CNTK_CTL (Timer PL1 Control Register)
+ ret
+
+
+ASM_PFX(ArmReadCntpTval):
+ mrs x0, cntp_tval_el0 // Read CNTP_TVAL (PL1 physical timer value register)
+ ret
+
+
+ASM_PFX(ArmWriteCntpTval):
+ msr cntp_tval_el0, x0 // Write to CNTP_TVAL (PL1 physical timer value register)
+ ret
+
+
+ASM_PFX(ArmReadCntpCtl):
+ mrs x0, cntp_ctl_el0 // Read CNTP_CTL (PL1 Physical Timer Control Register)
+ ret
+
+
+ASM_PFX(ArmWriteCntpCtl):
+ msr cntp_ctl_el0, x0 // Write to CNTP_CTL (PL1 Physical Timer Control Register)
+ ret
+
+
+ASM_PFX(ArmReadCntvTval):
+ mrs x0, cntv_tval_el0 // Read CNTV_TVAL (Virtual Timer Value register)
+ ret
+
+
+ASM_PFX(ArmWriteCntvTval):
+ msr cntv_tval_el0, x0 // Write to CNTV_TVAL (Virtual Timer Value register)
+ ret
+
+
+ASM_PFX(ArmReadCntvCtl):
+ mrs x0, cntv_ctl_el0 // Read CNTV_CTL (Virtual Timer Control Register)
+ ret
+
+
+ASM_PFX(ArmWriteCntvCtl):
+ msr cntv_ctl_el0, x0 // Write to CNTV_CTL (Virtual Timer Control Register)
+ ret
+
+
+ASM_PFX(ArmReadCntvCt):
+ mrs x0, cntvct_el0 // Read CNTVCT (Virtual Count Register)
+ ret
+
+
+ASM_PFX(ArmReadCntpCval):
+ mrs x0, cntp_cval_el0 // Read CNTP_CTVAL (Physical Timer Compare Value Register)
+ ret
+
+
+ASM_PFX(ArmWriteCntpCval):
+ msr cntp_cval_el0, x0 // Write to CNTP_CTVAL (Physical Timer Compare Value Register)
+ ret
+
+
+ASM_PFX(ArmReadCntvCval):
+ mrs x0, cntv_cval_el0 // Read CNTV_CTVAL (Virtual Timer Compare Value Register)
+ ret
+
+
+ASM_PFX(ArmWriteCntvCval):
+ msr cntv_cval_el0, x0 // write to CNTV_CTVAL (Virtual Timer Compare Value Register)
+ ret
+
+
+ASM_PFX(ArmReadCntvOff):
+ mrs x0, cntvoff_el2 // Read CNTVOFF (virtual Offset register)
+ ret
+
+
+ASM_PFX(ArmWriteCntvOff):
+ msr cntvoff_el2, x0 // Write to CNTVOFF (Virtual Offset register)
+ ret
+
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c
new file mode 100644
index 000000000..fd7f14f9c
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c
@@ -0,0 +1,263 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Uefi.h>
+#include <Chipset/AArch64.h>
+#include <Library/ArmLib.h>
+#include <Library/BaseLib.h>
+#include <Library/IoLib.h>
+#include "AArch64Lib.h"
+#include "ArmLibPrivate.h"
+
+ARM_CACHE_TYPE
+EFIAPI
+ArmCacheType (
+ VOID
+ )
+{
+ return ARM_CACHE_TYPE_WRITE_BACK;
+}
+
+ARM_CACHE_ARCHITECTURE
+EFIAPI
+ArmCacheArchitecture (
+ VOID
+ )
+{
+ UINT32 CLIDR = ReadCLIDR ();
+
+ return (ARM_CACHE_ARCHITECTURE)CLIDR; // BugBug Fix Me
+}
+
+BOOLEAN
+EFIAPI
+ArmDataCachePresent (
+ VOID
+ )
+{
+ UINT32 CLIDR = ReadCLIDR ();
+
+ if ((CLIDR & 0x2) == 0x2) {
+ // Data cache exists
+ return TRUE;
+ }
+ if ((CLIDR & 0x7) == 0x4) {
+ // Unified cache
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+UINTN
+EFIAPI
+ArmDataCacheSize (
+ VOID
+ )
+{
+ UINT32 NumSets;
+ UINT32 Associativity;
+ UINT32 LineSize;
+ UINT32 CCSIDR = ReadCCSIDR (0);
+
+ LineSize = (1 << ((CCSIDR & 0x7) + 2));
+ Associativity = ((CCSIDR >> 3) & 0x3ff) + 1;
+ NumSets = ((CCSIDR >> 13) & 0x7fff) + 1;
+
+ // LineSize is in words (4 byte chunks)
+ return NumSets * Associativity * LineSize * 4;
+}
+
+UINTN
+EFIAPI
+ArmDataCacheAssociativity (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (0);
+
+ return ((CCSIDR >> 3) & 0x3ff) + 1;
+}
+
+UINTN
+ArmDataCacheSets (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (0);
+
+ return ((CCSIDR >> 13) & 0x7fff) + 1;
+}
+
+UINTN
+EFIAPI
+ArmDataCacheLineLength (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (0) & 7;
+
+ // * 4 converts to bytes
+ return (1 << (CCSIDR + 2)) * 4;
+}
+
+BOOLEAN
+EFIAPI
+ArmInstructionCachePresent (
+ VOID
+ )
+{
+ UINT32 CLIDR = ReadCLIDR ();
+
+ if ((CLIDR & 1) == 1) {
+ // Instruction cache exists
+ return TRUE;
+ }
+ if ((CLIDR & 0x7) == 0x4) {
+ // Unified cache
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+UINTN
+EFIAPI
+ArmInstructionCacheSize (
+ VOID
+ )
+{
+ UINT32 NumSets;
+ UINT32 Associativity;
+ UINT32 LineSize;
+ UINT32 CCSIDR = ReadCCSIDR (1);
+
+ LineSize = (1 << ((CCSIDR & 0x7) + 2));
+ Associativity = ((CCSIDR >> 3) & 0x3ff) + 1;
+ NumSets = ((CCSIDR >> 13) & 0x7fff) + 1;
+
+ // LineSize is in words (4 byte chunks)
+ return NumSets * Associativity * LineSize * 4;
+}
+
+UINTN
+EFIAPI
+ArmInstructionCacheAssociativity (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (1);
+
+ return ((CCSIDR >> 3) & 0x3ff) + 1;
+}
+
+UINTN
+EFIAPI
+ArmInstructionCacheSets (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (1);
+
+ return ((CCSIDR >> 13) & 0x7fff) + 1;
+}
+
+UINTN
+EFIAPI
+ArmInstructionCacheLineLength (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (1) & 7;
+
+ // * 4 converts to bytes
+ return (1 << (CCSIDR + 2)) * 4;
+}
+
+
+VOID
+AArch64DataCacheOperation (
+ IN AARCH64_CACHE_OPERATION DataCacheOperation
+ )
+{
+ UINTN SavedInterruptState;
+
+ SavedInterruptState = ArmGetInterruptState ();
+ ArmDisableInterrupts();
+
+ AArch64AllDataCachesOperation (DataCacheOperation);
+
+ ArmDrainWriteBuffer ();
+
+ if (SavedInterruptState) {
+ ArmEnableInterrupts ();
+ }
+}
+
+
+VOID
+AArch64PoUDataCacheOperation (
+ IN AARCH64_CACHE_OPERATION DataCacheOperation
+ )
+{
+ UINTN SavedInterruptState;
+
+ SavedInterruptState = ArmGetInterruptState ();
+ ArmDisableInterrupts ();
+
+ AArch64PerformPoUDataCacheOperation (DataCacheOperation);
+
+ ArmDrainWriteBuffer ();
+
+ if (SavedInterruptState) {
+ ArmEnableInterrupts ();
+ }
+}
+
+VOID
+EFIAPI
+ArmInvalidateDataCache (
+ VOID
+ )
+{
+ AArch64DataCacheOperation (ArmInvalidateDataCacheEntryBySetWay);
+}
+
+VOID
+EFIAPI
+ArmCleanInvalidateDataCache (
+ VOID
+ )
+{
+ AArch64DataCacheOperation (ArmCleanInvalidateDataCacheEntryBySetWay);
+}
+
+VOID
+EFIAPI
+ArmCleanDataCache (
+ VOID
+ )
+{
+ AArch64DataCacheOperation (ArmCleanDataCacheEntryBySetWay);
+}
+
+VOID
+EFIAPI
+ArmCleanDataCacheToPoU (
+ VOID
+ )
+{
+ AArch64PoUDataCacheOperation (ArmCleanDataCacheEntryBySetWay);
+}
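The cache-geometry helpers above all decode the same three CCSIDR fields: line size in bits [2:0], associativity in bits [12:3] and number of sets in bits [27:13], the total size being sets x ways x line bytes. A stand-alone worked example of that decoding (the CCSIDR value is constructed here purely for illustration):

#include <stdio.h>

int main (void)
{
  /* Illustrative CCSIDR encoding: line-size field 2 (64-byte lines),
     associativity field 3 (4 ways), set field 127 (128 sets). */
  unsigned Ccsidr        = (127u << 13) | (3u << 3) | 2u;

  unsigned LineLenWords  = 1u << ((Ccsidr & 0x7) + 2);      /* words per line */
  unsigned Associativity = ((Ccsidr >> 3) & 0x3FF) + 1;     /* number of ways */
  unsigned NumSets       = ((Ccsidr >> 13) & 0x7FFF) + 1;   /* number of sets */
  unsigned SizeBytes     = NumSets * Associativity * LineLenWords * 4;

  /* Prints: line=64 bytes, ways=4, sets=128, size=32 KB */
  printf ("line=%u bytes, ways=%u, sets=%u, size=%u KB\n",
          LineLenWords * 4, Associativity, NumSets, SizeBytes / 1024);
  return 0;
}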
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h
new file mode 100644
index 000000000..04e3be042
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h
@@ -0,0 +1,98 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ Portions Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef __AARCH64_LIB_H__
+#define __AARCH64_LIB_H__
+
+typedef VOID (*AARCH64_CACHE_OPERATION)(UINTN);
+
+VOID
+EFIAPI
+ArmDrainWriteBuffer (
+ VOID
+ );
+
+VOID
+EFIAPI
+ArmInvalidateDataCacheEntryBySetWay (
+ IN UINTN SetWayFormat
+ );
+
+VOID
+EFIAPI
+ArmCleanDataCacheEntryBySetWay (
+ IN UINTN SetWayFormat
+ );
+
+VOID
+EFIAPI
+ArmCleanDataCacheToPoUEntryBySetWay (
+ IN UINTN SetWayFormat
+ );
+
+VOID
+EFIAPI
+ArmCleanInvalidateDataCacheEntryBySetWay (
+ IN UINTN SetWayFormat
+ );
+
+VOID
+EFIAPI
+ArmEnableAsynchronousAbort (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmDisableAsynchronousAbort (
+ VOID
+ );
+
+VOID
+EFIAPI
+ArmEnableIrq (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmDisableIrq (
+ VOID
+ );
+
+VOID
+EFIAPI
+ArmEnableFiq (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmDisableFiq (
+ VOID
+ );
+
+VOID
+AArch64PerformPoUDataCacheOperation (
+ IN AARCH64_CACHE_OPERATION DataCacheOperation
+ );
+
+VOID
+AArch64AllDataCachesOperation (
+ IN AARCH64_CACHE_OPERATION DataCacheOperation
+ );
+
+#endif // __AARCH64_LIB_H__
+
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.inf b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.inf
new file mode 100644
index 000000000..dca0b22dd
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.inf
@@ -0,0 +1,44 @@
+#/** @file
+#
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
+# Portions copyright (c) 2011-2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#
+#**/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = AArch64Lib
+ FILE_GUID = ef20ddf5-b334-47b3-94cf-52ff44c29138
+ MODULE_TYPE = DXE_DRIVER
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmLib
+
+[Sources.AARCH64]
+ AArch64Lib.c
+ AArch64Mmu.c
+ AArch64ArchTimer.c
+ ArmLibSupportV8.S | GCC
+ ../Common/AArch64/ArmLibSupport.S | GCC
+ AArch64Support.S | GCC
+ AArch64ArchTimerSupport.S | GCC
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ MdePkg/MdePkg.dec
+
+[LibraryClasses]
+ MemoryAllocationLib
+
+[Protocols]
+ gEfiCpuArchProtocolGuid
+
+[FixedPcd]
+ gArmTokenSpaceGuid.PcdArmCacheOperationThreshold
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64LibPrePi.inf b/ArmPkg/Library/ArmLib/AArch64/AArch64LibPrePi.inf
new file mode 100644
index 000000000..42f7e5628
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64LibPrePi.inf
@@ -0,0 +1,48 @@
+#/** @file
+#
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
+# Portions copyright (c) 2011-2013, ARM Ltd. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#
+#**/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = AArch64LibPrePi
+ FILE_GUID = fd72688d-dbd8-4cf2-91a3-15171dea7816
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmLib
+
+[Sources.common]
+ ArmLibSupportV8.S | GCC
+ AArch64Support.S | GCC
+
+ ../Common/AArch64/ArmLibSupport.S | GCC
+ ../Common/ArmLib.c
+
+ AArch64Lib.c
+ AArch64Mmu.c
+
+ AArch64ArchTimer.c
+ AArch64ArchTimerSupport.S | GCC
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ MdePkg/MdePkg.dec
+
+[LibraryClasses]
+ PrePiLib
+
+[Protocols]
+ gEfiCpuArchProtocolGuid
+
+[FixedPcd]
+ gArmTokenSpaceGuid.PcdArmCacheOperationThreshold
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64LibSec.inf b/ArmPkg/Library/ArmLib/AArch64/AArch64LibSec.inf
new file mode 100644
index 000000000..9bb0bd21d
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64LibSec.inf
@@ -0,0 +1,43 @@
+#/* @file
+#
+# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#*/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = AArch64Lib
+ FILE_GUID = eb7441e4-3ddf-48b8-a009-14f428b19e49
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmLib
+
+[Sources.common]
+ ArmLibSupportV8.S | GCC
+ AArch64Support.S | GCC
+ ArmLib.c
+
+ ../Common/AArch64/ArmLibSupport.S | GCC
+
+ AArch64Lib.c
+
+ AArch64ArchTimer.c
+ AArch64ArchTimerSupport.S | GCC
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ MdePkg/MdePkg.dec
+
+[Protocols]
+ gEfiCpuArchProtocolGuid
+
+[FixedPcd]
+ gArmTokenSpaceGuid.PcdArmCacheOperationThreshold
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c b/ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c
new file mode 100644
index 000000000..8abc73a25
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c
@@ -0,0 +1,644 @@
+/** @file
+* File managing the MMU for ARMv8 architecture
+*
+* Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+*
+* This program and the accompanying materials
+* are licensed and made available under the terms and conditions of the BSD License
+* which accompanies this distribution. The full text of the license may be found at
+* http://opensource.org/licenses/bsd-license.php
+*
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+*
+**/
+
+#include <Uefi.h>
+#include <Chipset/AArch64.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/ArmLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include "AArch64Lib.h"
+#include "ArmLibPrivate.h"
+
+// We use this index definition to define an invalid block entry
+#define TT_ATTR_INDX_INVALID ((UINT32)~0)
+
+STATIC
+UINT64
+ArmMemoryAttributeToPageAttribute (
+ IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
+ )
+{
+ switch (Attributes) {
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK;
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
+ return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
+ return TT_ATTR_INDX_DEVICE_MEMORY;
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
+ return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
+ return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
+ return TT_ATTR_INDX_DEVICE_MEMORY;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
+ return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
+ default:
+ ASSERT(0);
+ return TT_ATTR_INDX_DEVICE_MEMORY;
+ }
+}
+
+UINT64
+PageAttributeToGcdAttribute (
+ IN UINT64 PageAttributes
+ )
+{
+ UINT64 GcdAttributes;
+
+ switch (PageAttributes & TT_ATTR_INDX_MASK) {
+ case TT_ATTR_INDX_DEVICE_MEMORY:
+ GcdAttributes = EFI_MEMORY_UC;
+ break;
+ case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
+ GcdAttributes = EFI_MEMORY_WC;
+ break;
+ case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
+ GcdAttributes = EFI_MEMORY_WT;
+ break;
+ case TT_ATTR_INDX_MEMORY_WRITE_BACK:
+ GcdAttributes = EFI_MEMORY_WB;
+ break;
+ default:
+ DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
+ ASSERT (0);
+ // The Global Coherency Domain (GCD) value is defined as a bit set.
+ // Returning 0 means no attribute has been set.
+ GcdAttributes = 0;
+ }
+
+ // Determine protection attributes
+ if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
+ // Read only cases map to write-protect
+ GcdAttributes |= EFI_MEMORY_WP;
+ }
+
+ // Process eXecute Never attribute
+ if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
+ GcdAttributes |= EFI_MEMORY_XP;
+ }
+
+ return GcdAttributes;
+}
+
+UINT64
+GcdAttributeToPageAttribute (
+ IN UINT64 GcdAttributes
+ )
+{
+ UINT64 PageAttributes;
+
+ switch (GcdAttributes & 0xFF) {
+ case EFI_MEMORY_UC:
+ PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
+ break;
+ case EFI_MEMORY_WC:
+ PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
+ break;
+ case EFI_MEMORY_WT:
+ PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
+ break;
+ case EFI_MEMORY_WB:
+ PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK;
+ break;
+ default:
+ DEBUG ((EFI_D_ERROR, "GcdAttributeToPageAttribute: 0x%X attributes is not supported.\n", GcdAttributes));
+ ASSERT (0);
+ // If no match has been found then we mark the memory as device memory.
+ // The only side effect of using device memory should be a slowdown in performance.
+ PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
+ }
+
+ // Determine protection attributes
+ if (GcdAttributes & EFI_MEMORY_WP) {
+ // Read only cases map to write-protect
+ PageAttributes |= TT_AP_RO_RO;
+ }
+
+ // Process eXecute Never attribute
+ if (GcdAttributes & EFI_MEMORY_XP) {
+ PageAttributes |= (TT_PXN_MASK | TT_UXN_MASK);
+ }
+
+ return PageAttributes;
+}
+
+ARM_MEMORY_REGION_ATTRIBUTES
+GcdAttributeToArmAttribute (
+ IN UINT64 GcdAttributes
+ )
+{
+ switch (GcdAttributes & 0xFF) {
+ case EFI_MEMORY_UC:
+ return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
+ case EFI_MEMORY_WC:
+ return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
+ case EFI_MEMORY_WT:
+ return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
+ case EFI_MEMORY_WB:
+ return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
+ default:
+ DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes is not supported.\n", GcdAttributes));
+ ASSERT (0);
+ return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
+ }
+}
+
+// Describe the T0SZ values for each translation table level
+typedef struct {
+ UINTN MinT0SZ;
+ UINTN MaxT0SZ;
+ UINTN LargestT0SZ; // Generally (MaxT0SZ == LargestT0SZ) but at the Level3 Table
+ // the MaxT0SZ is not at the boundary of the table
+} T0SZ_DESCRIPTION_PER_LEVEL;
+
+// Map table for the corresponding Level of Table
+STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
+ { 16, 24, 24 }, // Table Level 0
+ { 25, 33, 33 }, // Table Level 1
+ { 34, 39, 42 } // Table Level 2
+};
+
+VOID
+GetRootTranslationTableInfo (
+ IN UINTN T0SZ,
+ OUT UINTN *TableLevel,
+ OUT UINTN *TableEntryCount
+ )
+{
+ UINTN Index;
+
+ // Identify the level of the root table from the given T0SZ
+ for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
+ if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
+ break;
+ }
+ }
+
+ // If we have not found the corresponding maximum T0SZ then we use the last one
+ if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
+ Index--;
+ }
+
+ // Get the level of the root table
+ if (TableLevel) {
+ *TableLevel = Index;
+ }
+
+ // The number of entries in the root table is 2^(LargestT0SZ - T0SZ + 1)
+ if (TableEntryCount) {
+ *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
+ }
+}
+
+STATIC
+VOID
+LookupAddresstoRootTable (
+ IN UINT64 MaxAddress,
+ OUT UINTN *T0SZ,
+ OUT UINTN *TableEntryCount
+ )
+{
+ UINTN TopBit;
+
+ // Check the parameters are not NULL
+ ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
+
+ // Look for the highest bit set in MaxAddress
+ for (TopBit = 63; TopBit != 0; TopBit--) {
+ if ((1ULL << TopBit) & MaxAddress) {
+ // MaxAddress top bit is found
+ TopBit = TopBit + 1;
+ break;
+ }
+ }
+ ASSERT (TopBit != 0);
+
+ // Calculate T0SZ from the top bit of the MaxAddress
+ *T0SZ = 64 - TopBit;
+
+ // Get the Table info from T0SZ
+ GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
+}
+
+STATIC
+UINT64*
+GetBlockEntryListFromAddress (
+ IN UINT64 *RootTable,
+ IN UINT64 RegionStart,
+ OUT UINTN *TableLevel,
+ IN OUT UINT64 *BlockEntrySize,
+ IN OUT UINT64 **LastBlockEntry
+ )
+{
+ UINTN RootTableLevel;
+ UINTN RootTableEntryCount;
+ UINT64 *TranslationTable;
+ UINT64 *BlockEntry;
+ UINT64 BlockEntryAddress;
+ UINTN BaseAddressAlignment;
+ UINTN PageLevel;
+ UINTN Index;
+ UINTN IndexLevel;
+ UINTN T0SZ;
+ UINT64 Attributes;
+ UINT64 TableAttributes;
+
+ // Initialize variable
+ BlockEntry = NULL;
+
+ // Ensure the parameters are valid
+ ASSERT (TableLevel && BlockEntrySize && LastBlockEntry);
+
+ // Ensure the Region is aligned on 4KB boundary
+ ASSERT ((RegionStart & (SIZE_4KB - 1)) == 0);
+
+ // Ensure the required size is aligned on 4KB boundary
+ ASSERT ((*BlockEntrySize & (SIZE_4KB - 1)) == 0);
+
+ //
+ // Calculate LastBlockEntry from T0SZ
+ //
+ T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
+ // Get the Table info from T0SZ
+ GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
+ // The last block of the root table depends on the number of entries in this table
+ *LastBlockEntry = (UINT64*)((UINTN)RootTable + (RootTableEntryCount * sizeof(UINT64)));
+
+ // If the start address is 0x0 then we use the size of the region to identify the alignment
+ if (RegionStart == 0) {
+ // Identify the highest possible alignment for the Region Size
+ for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
+ if ((1 << BaseAddressAlignment) & *BlockEntrySize) {
+ break;
+ }
+ }
+ } else {
+ // Identify the highest possible alignment for the Base Address
+ for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
+ if ((1 << BaseAddressAlignment) & RegionStart) {
+ break;
+ }
+ }
+ }
+
+ // Identify the Page Level the RegionStart must belong to
+ PageLevel = 3 - ((BaseAddressAlignment - 12) / 9);
+
+ // If the required size is smaller than the current block size then we need to use a finer page level.
+ if (*BlockEntrySize < TT_ADDRESS_AT_LEVEL(PageLevel)) {
+ // It does not fit, so move down to the next (finer) page level
+ PageLevel++;
+ }
+
+ // Expose the found PageLevel to the caller
+ *TableLevel = PageLevel;
+
+ // Now, we have the Table Level we can get the Block Size associated to this table
+ *BlockEntrySize = TT_ADDRESS_AT_LEVEL(PageLevel);
+
+ //
+ // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
+ //
+
+ TranslationTable = RootTable;
+ for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
+ BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
+
+ if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
+ // Go to the next table
+ TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
+
+ // If we are at the last level then update the output
+ if (IndexLevel == PageLevel) {
+ // And get the appropriate BlockEntry at the next level
+ BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel + 1, RegionStart);
+
+ // Set the last block for this new table
+ *LastBlockEntry = (UINT64*)((UINTN)TranslationTable + (TT_ENTRY_COUNT * sizeof(UINT64)));
+ }
+ } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
+ // If we are not at the last level then we need to split this BlockEntry
+ if (IndexLevel != PageLevel) {
+ // Retrieve the attributes from the block entry
+ Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
+
+ // Convert the block entry attributes into Table descriptor attributes
+ TableAttributes = TT_TABLE_AP_NO_PERMISSION;
+ if (Attributes & TT_PXN_MASK) {
+ TableAttributes = TT_TABLE_PXN;
+ }
+ if (Attributes & TT_UXN_MASK) {
+ TableAttributes = TT_TABLE_XN;
+ }
+ if (Attributes & TT_NS) {
+ TableAttributes = TT_TABLE_NS;
+ }
+
+ // Get the address corresponding at this entry
+ BlockEntryAddress = RegionStart;
+ BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
+ // Shift back left to zero the bits below the block boundary
+ BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
+
+ // Set the correct entry type
+ if (IndexLevel + 1 == 3) {
+ Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
+ } else {
+ Attributes |= TT_TYPE_BLOCK_ENTRY;
+ }
+
+ // Create a new translation table
+ TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
+ if (TranslationTable == NULL) {
+ return NULL;
+ }
+ TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
+
+ // Fill the new BlockEntry with the TranslationTable
+ *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY;
+
+ // Populate the newly created lower level table
+ BlockEntry = TranslationTable;
+ for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
+ *BlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
+ BlockEntry++;
+ }
+ // Block Entry points at the beginning of the Translation Table
+ BlockEntry = TranslationTable;
+ }
+ } else {
+ // Case of an invalid entry while we are at a page level above the one targeted.
+ if (IndexLevel != PageLevel) {
+ // Create a new translation table
+ TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
+ if (TranslationTable == NULL) {
+ return NULL;
+ }
+ TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
+
+ ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
+
+ // Fill the new BlockEntry with the TranslationTable
+ *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
+ }
+ }
+ }
+
+ return BlockEntry;
+}
+
+STATIC
+RETURN_STATUS
+FillTranslationTable (
+ IN UINT64 *RootTable,
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
+ )
+{
+ UINT64 Attributes;
+ UINT32 Type;
+ UINT64 RegionStart;
+ UINT64 RemainingRegionLength;
+ UINT64 *BlockEntry;
+ UINT64 *LastBlockEntry;
+ UINT64 BlockEntrySize;
+ UINTN TableLevel;
+
+ // Ensure the Length is aligned on 4KB boundary
+ ASSERT ((MemoryRegion->Length > 0) && ((MemoryRegion->Length & (SIZE_4KB - 1)) == 0));
+
+ // Variable initialization
+ Attributes = ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF;
+ RemainingRegionLength = MemoryRegion->Length;
+ RegionStart = MemoryRegion->VirtualBase;
+
+ do {
+ // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
+ // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
+ BlockEntrySize = RemainingRegionLength;
+ BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
+ if (BlockEntry == NULL) {
+ // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
+ return RETURN_OUT_OF_RESOURCES;
+ }
+
+ if (TableLevel != 3) {
+ Type = TT_TYPE_BLOCK_ENTRY;
+ } else {
+ Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
+ }
+
+ do {
+ // Fill the Block Entry with attribute and output block address
+ *BlockEntry = (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
+
+ // Go to the next BlockEntry
+ RegionStart += BlockEntrySize;
+ RemainingRegionLength -= BlockEntrySize;
+ BlockEntry++;
+ } while ((RemainingRegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
+ } while (RemainingRegionLength != 0);
+
+ return RETURN_SUCCESS;
+}
+
+RETURN_STATUS
+SetMemoryAttributes (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ IN EFI_PHYSICAL_ADDRESS VirtualMask
+ )
+{
+ RETURN_STATUS Status;
+ ARM_MEMORY_REGION_DESCRIPTOR MemoryRegion;
+ UINT64 *TranslationTable;
+
+ MemoryRegion.PhysicalBase = BaseAddress;
+ MemoryRegion.VirtualBase = BaseAddress;
+ MemoryRegion.Length = Length;
+ MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);
+
+ TranslationTable = ArmGetTTBR0BaseAddress ();
+
+ Status = FillTranslationTable (TranslationTable, &MemoryRegion);
+ if (RETURN_ERROR (Status)) {
+ return Status;
+ }
+
+ // Flush d-cache so descriptors make it back to uncached memory for subsequent table walks
+ // flush and invalidate pages
+ ArmCleanInvalidateDataCache ();
+
+ ArmInvalidateInstructionCache ();
+
+ // Invalidate all TLB entries so changes are synced
+ ArmInvalidateTlb ();
+
+ return RETURN_SUCCESS;
+}
+
+RETURN_STATUS
+EFIAPI
+ArmConfigureMmu (
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
+ OUT VOID **TranslationTableBase OPTIONAL,
+ OUT UINTN *TranslationTableSize OPTIONAL
+ )
+{
+ VOID* TranslationTable;
+ UINTN TranslationTablePageCount;
+ UINT32 TranslationTableAttribute;
+ ARM_MEMORY_REGION_DESCRIPTOR *MemoryTableEntry;
+ UINT64 MaxAddress;
+ UINT64 TopAddress;
+ UINTN T0SZ;
+ UINTN RootTableEntryCount;
+ UINT64 TCR;
+ RETURN_STATUS Status;
+
+ ASSERT (MemoryTable != NULL);
+
+ // Identify the highest address of the memory table
+ MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
+ MemoryTableEntry = MemoryTable;
+ while (MemoryTableEntry->Length != 0) {
+ TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
+ if (TopAddress > MaxAddress) {
+ MaxAddress = TopAddress;
+ }
+ MemoryTableEntry++;
+ }
+
+ // Lookup the Table Level to get the information
+ LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);
+
+ //
+ // Set TCR that allows us to retrieve T0SZ in the subsequent functions
+ //
+ if ((ArmReadCurrentEL () == AARCH64_EL2) || (ArmReadCurrentEL () == AARCH64_EL3)) {
+ // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2 and TCR_EL3
+ TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
+
+ // Set the Physical Address Size using MaxAddress
+ if (MaxAddress < SIZE_4GB) {
+ TCR |= TCR_PS_4GB;
+ } else if (MaxAddress < SIZE_64GB) {
+ TCR |= TCR_PS_64GB;
+ } else if (MaxAddress < SIZE_1TB) {
+ TCR |= TCR_PS_1TB;
+ } else if (MaxAddress < SIZE_4TB) {
+ TCR |= TCR_PS_4TB;
+ } else if (MaxAddress < SIZE_16TB) {
+ TCR |= TCR_PS_16TB;
+ } else if (MaxAddress < SIZE_256TB) {
+ TCR |= TCR_PS_256TB;
+ } else {
+ DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU support.\n", MaxAddress));
+ ASSERT (0); // Address spaces bigger than 48 bits are not supported
+ return RETURN_UNSUPPORTED;
+ }
+ } else {
+ ASSERT (0); // Only EL2 and EL3 are handled by this MMU configuration code
+ return RETURN_UNSUPPORTED;
+ }
+
+ // Set TCR
+ ArmSetTCR (TCR);
+
+ // Allocate pages for translation table
+ TranslationTablePageCount = EFI_SIZE_TO_PAGES((RootTableEntryCount * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE);
+ TranslationTable = AllocatePages (TranslationTablePageCount);
+ if (TranslationTable == NULL) {
+ return RETURN_OUT_OF_RESOURCES;
+ }
+ TranslationTable = (VOID*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
+ // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
+ // functions without needing to pass this value across the functions. The MMU is only enabled
+ // after the translation tables are populated.
+ ArmSetTTBR0 (TranslationTable);
+
+ if (TranslationTableBase != NULL) {
+ *TranslationTableBase = TranslationTable;
+ }
+
+ if (TranslationTableSize != NULL) {
+ *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
+ }
+
+ ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
+
+ // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
+ ArmDisableMmu ();
+ ArmDisableDataCache ();
+ ArmDisableInstructionCache ();
+
+ // Make sure nothing sneaked into the cache
+ ArmCleanInvalidateDataCache ();
+ ArmInvalidateInstructionCache ();
+
+ TranslationTableAttribute = TT_ATTR_INDX_INVALID;
+ while (MemoryTable->Length != 0) {
+ // Find the memory attribute for the Translation Table
+ if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
+ ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
+ TranslationTableAttribute = MemoryTable->Attributes;
+ }
+
+ Status = FillTranslationTable (TranslationTable, MemoryTable);
+ if (RETURN_ERROR (Status)) {
+ goto FREE_TRANSLATION_TABLE;
+ }
+ MemoryTable++;
+ }
+
+ // Translate the Memory Attributes into Translation Table Register Attributes
+ if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
+ TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
+ } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
+ TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
+ } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
+ TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
+ } else {
+ // If we failed to find a mapping that contains the root translation table then it probably means the translation table
+ // is not mapped in the given memory map.
+ ASSERT (0);
+ Status = RETURN_UNSUPPORTED;
+ goto FREE_TRANSLATION_TABLE;
+ }
+
+ ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC
+ MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
+ MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
+ MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB
+
+ ArmDisableAlignmentCheck ();
+ ArmEnableInstructionCache ();
+ ArmEnableDataCache ();
+
+ ArmEnableMmu ();
+ return RETURN_SUCCESS;
+
+FREE_TRANSLATION_TABLE:
+ FreePages (TranslationTable, TranslationTablePageCount);
+ return Status;
+}
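GetRootTranslationTableInfo() above derives the root translation table level and entry count from T0SZ using the T0SZPerTableLevel map. A stand-alone sketch of the same lookup for a few input address widths, so the relationship is easy to check outside the firmware (the table values are copied from the code above; everything else is illustrative):

#include <stdio.h>

typedef struct {
  unsigned MinT0SZ;
  unsigned MaxT0SZ;
  unsigned LargestT0SZ;
} T0SZ_DESC;

/* Same map as AArch64Mmu.c: root table at level 0, 1 or 2 depending on T0SZ. */
static const T0SZ_DESC Levels[] = {
  { 16, 24, 24 },   /* root at level 0 */
  { 25, 33, 33 },   /* root at level 1 */
  { 34, 39, 42 },   /* root at level 2 */
};

static void RootTableInfo (unsigned T0SZ, unsigned *Level, unsigned *Entries)
{
  unsigned Index;

  for (Index = 0; Index < 3; Index++) {
    if (T0SZ <= Levels[Index].MaxT0SZ) {
      break;
    }
  }
  if (Index == 3) {
    Index--;
  }
  *Level   = Index;
  *Entries = 1u << (Levels[Index].LargestT0SZ - T0SZ + 1);
}

int main (void)
{
  unsigned Widths[] = { 32, 36, 40 };   /* input address sizes in bits */
  unsigned i, Level, Entries;

  for (i = 0; i < 3; i++) {
    unsigned T0SZ = 64 - Widths[i];
    RootTableInfo (T0SZ, &Level, &Entries);
    printf ("%u-bit VA: T0SZ=%u root level=%u entries=%u\n",
            Widths[i], T0SZ, Level, Entries);
  }
  return 0;
}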
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S b/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
new file mode 100644
index 000000000..c45e33d6b
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
@@ -0,0 +1,503 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+#include <Chipset/AArch64.h>
+#include <AsmMacroIoLibV8.h>
+
+.text
+.align 3
+
+GCC_ASM_EXPORT (ArmInvalidateInstructionCache)
+GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)
+GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)
+GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)
+GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)
+GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)
+GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)
+GCC_ASM_EXPORT (ArmDrainWriteBuffer)
+GCC_ASM_EXPORT (ArmEnableMmu)
+GCC_ASM_EXPORT (ArmDisableMmu)
+GCC_ASM_EXPORT (ArmDisableCachesAndMmu)
+GCC_ASM_EXPORT (ArmMmuEnabled)
+GCC_ASM_EXPORT (ArmEnableDataCache)
+GCC_ASM_EXPORT (ArmDisableDataCache)
+GCC_ASM_EXPORT (ArmEnableInstructionCache)
+GCC_ASM_EXPORT (ArmDisableInstructionCache)
+GCC_ASM_EXPORT (ArmDisableAlignmentCheck)
+GCC_ASM_EXPORT (ArmEnableAlignmentCheck)
+GCC_ASM_EXPORT (ArmEnableBranchPrediction)
+GCC_ASM_EXPORT (ArmDisableBranchPrediction)
+GCC_ASM_EXPORT (AArch64AllDataCachesOperation)
+GCC_ASM_EXPORT (AArch64PerformPoUDataCacheOperation)
+GCC_ASM_EXPORT (ArmDataMemoryBarrier)
+GCC_ASM_EXPORT (ArmDataSyncronizationBarrier)
+GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
+GCC_ASM_EXPORT (ArmWriteVBar)
+GCC_ASM_EXPORT (ArmVFPImplemented)
+GCC_ASM_EXPORT (ArmEnableVFP)
+GCC_ASM_EXPORT (ArmCallWFI)
+GCC_ASM_EXPORT (ArmInvalidateInstructionAndDataTlb)
+GCC_ASM_EXPORT (ArmReadMpidr)
+GCC_ASM_EXPORT (ArmReadTpidrurw)
+GCC_ASM_EXPORT (ArmWriteTpidrurw)
+GCC_ASM_EXPORT (ArmIsArchTimerImplemented)
+GCC_ASM_EXPORT (ArmReadIdPfr0)
+GCC_ASM_EXPORT (ArmReadIdPfr1)
+GCC_ASM_EXPORT (ArmWriteHcr)
+GCC_ASM_EXPORT (ArmReadCurrentEL)
+
+.set CTRL_M_BIT, (1 << 0)
+.set CTRL_A_BIT, (1 << 1)
+.set CTRL_C_BIT, (1 << 2)
+.set CTRL_I_BIT, (1 << 12)
+.set CTRL_V_BIT, (1 << 12)
+.set CPACR_VFP_BITS, (3 << 20)
+
+ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
+ dc ivac, x0 // Invalidate single data cache line
+ dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmCleanDataCacheEntryByMVA):
+ dc cvac, x0 // Clean single data cache line
+ dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
+ dc civac, x0 // Clean and invalidate single data cache line
+ dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
+ dc isw, x0 // Invalidate this line
+ dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
+ dc cisw, x0 // Clean and Invalidate this line
+ dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmCleanDataCacheEntryBySetWay):
+ dc csw, x0 // Clean this line
+ dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmInvalidateInstructionCache):
+ ic iallu // Invalidate entire instruction cache
+ dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmEnableMmu):
+ EL1_OR_EL2_OR_EL3(x1)
+1: mrs x0, sctlr_el1 // Read System control register EL1
+ b 4f
+2: mrs x0, sctlr_el2 // Read System control register EL2
+ b 4f
+3: mrs x0, sctlr_el3 // Read System control register EL3
+4: orr x0, x0, #CTRL_M_BIT // Set MMU enable bit
+ EL1_OR_EL2_OR_EL3(x1)
+1: tlbi alle1
+ isb
+ msr sctlr_el1, x0 // Write back
+ b 4f
+2: tlbi alle2
+ isb
+ msr sctlr_el2, x0 // Write back
+ b 4f
+3: tlbi alle3
+ isb
+ msr sctlr_el3, x0 // Write back
+4: dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmDisableMmu):
+ EL1_OR_EL2_OR_EL3(x1)
+1: mrs x0, sctlr_el1 // Read System Control Register EL1
+ b 4f
+2: mrs x0, sctlr_el2 // Read System Control Register EL2
+ b 4f
+3: mrs x0, sctlr_el3 // Read System Control Register EL3
+4: bic x0, x0, #CTRL_M_BIT // Clear MMU enable bit
+ EL1_OR_EL2_OR_EL3(x1)
+1: msr sctlr_el1, x0 // Write back
+ tlbi alle1
+ b 4f
+2: msr sctlr_el2, x0 // Write back
+ tlbi alle2
+ b 4f
+3: msr sctlr_el3, x0 // Write back
+ tlbi alle3
+4: dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmDisableCachesAndMmu):
+ EL1_OR_EL2_OR_EL3(x1)
+1: mrs x0, sctlr_el1 // Get control register EL1
+ b 4f
+2: mrs x0, sctlr_el2 // Get control register EL2
+ b 4f
+3: mrs x0, sctlr_el3 // Get control register EL3
+4: bic x0, x0, #CTRL_M_BIT // Disable MMU
+ bic x0, x0, #CTRL_C_BIT // Disable D Cache
+ bic x0, x0, #CTRL_I_BIT // Disable I Cache
+ EL1_OR_EL2_OR_EL3(x1)
+1: msr sctlr_el1, x0 // Write back control register
+ b 4f
+2: msr sctlr_el2, x0 // Write back control register
+ b 4f
+3: msr sctlr_el3, x0 // Write back control register
+4: dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmMmuEnabled):
+ EL1_OR_EL2_OR_EL3(x1)
+1: mrs x0, sctlr_el1 // Get control register EL1
+ b 4f
+2: mrs x0, sctlr_el2 // Get control register EL2
+ b 4f
+3: mrs x0, sctlr_el3 // Get control register EL3
+4: and x0, x0, #CTRL_M_BIT
+ ret
+
+
+ASM_PFX(ArmEnableDataCache):
+ EL1_OR_EL2_OR_EL3(x1)
+1: mrs x0, sctlr_el1 // Get control register EL1
+ b 4f
+2: mrs x0, sctlr_el2 // Get control register EL2
+ b 4f
+3: mrs x0, sctlr_el3 // Get control register EL3
+4: orr x0, x0, #CTRL_C_BIT // Set C bit
+ EL1_OR_EL2_OR_EL3(x1)
+1: msr sctlr_el1, x0 // Write back control register
+ b 4f
+2: msr sctlr_el2, x0 // Write back control register
+ b 4f
+3: msr sctlr_el3, x0 // Write back control register
+4: dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmDisableDataCache):
+ EL1_OR_EL2_OR_EL3(x1)
+1: mrs x0, sctlr_el1 // Get control register EL1
+ b 4f
+2: mrs x0, sctlr_el2 // Get control register EL2
+ b 4f
+3: mrs x0, sctlr_el3 // Get control register EL3
+4: bic x0, x0, #CTRL_C_BIT // Clear C bit
+ EL1_OR_EL2_OR_EL3(x1)
+1: msr sctlr_el1, x0 // Write back control register
+ b 4f
+2: msr sctlr_el2, x0 // Write back control register
+ b 4f
+3: msr sctlr_el3, x0 // Write back control register
+4: dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmEnableInstructionCache):
+ EL1_OR_EL2_OR_EL3(x1)
+1: mrs x0, sctlr_el1 // Get control register EL1
+ b 4f
+2: mrs x0, sctlr_el2 // Get control register EL2
+ b 4f
+3: mrs x0, sctlr_el3 // Get control register EL3
+4: orr x0, x0, #CTRL_I_BIT // Set I bit
+ EL1_OR_EL2_OR_EL3(x1)
+1: msr sctlr_el1, x0 // Write back control register
+ b 4f
+2: msr sctlr_el2, x0 // Write back control register
+ b 4f
+3: msr sctlr_el3, x0 // Write back control register
+4: dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmDisableInstructionCache):
+ EL1_OR_EL2_OR_EL3(x1)
+1: mrs x0, sctlr_el1 // Get control register EL1
+ b 4f
+2: mrs x0, sctlr_el2 // Get control register EL2
+ b 4f
+3: mrs x0, sctlr_el3 // Get control register EL3
+4: bic x0, x0, #CTRL_I_BIT // Clear I bit
+ EL1_OR_EL2_OR_EL3(x1)
+1: msr sctlr_el1, x0 // Write back control register
+ b 4f
+2: msr sctlr_el2, x0 // Write back control register
+ b 4f
+3: msr sctlr_el3, x0 // Write back control register
+4: dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmEnableAlignmentCheck):
+ EL1_OR_EL2(x1)
+1: mrs x0, sctlr_el1 // Get control register EL1
+ b 3f
+2: mrs x0, sctlr_el2 // Get control register EL2
+3: orr x0, x0, #CTRL_A_BIT // Set A (alignment check) bit
+ EL1_OR_EL2(x1)
+1: msr sctlr_el1, x0 // Write back control register
+ b 3f
+2: msr sctlr_el2, x0 // Write back control register
+3: dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmDisableAlignmentCheck):
+ EL1_OR_EL2_OR_EL3(x1)
+1: mrs x0, sctlr_el1 // Get control register EL1
+ b 4f
+2: mrs x0, sctlr_el2 // Get control register EL2
+ b 4f
+3: mrs x0, sctlr_el3 // Get control register EL3
+4: bic x0, x0, #CTRL_A_BIT // Clear A (alignment check) bit
+ EL1_OR_EL2_OR_EL3(x1)
+1: msr sctlr_el1, x0 // Write back control register
+ b 4f
+2: msr sctlr_el2, x0 // Write back control register
+ b 4f
+3: msr sctlr_el3, x0 // Write back control register
+4: dsb sy
+ isb
+ ret
+
+
+// Always turned on in AArch64. Else implementation specific. Leave in for C compatibility for now
+ASM_PFX(ArmEnableBranchPrediction):
+ ret
+
+
+// Always turned on in AArch64. Else implementation specific. Leave in for C compatibility for now.
+ASM_PFX(ArmDisableBranchPrediction):
+ ret
+
+
+ASM_PFX(AArch64AllDataCachesOperation):
+// We can use regs 0-7 and 9-15 without having to save/restore.
+// Save our link register on the stack.
+ str x30, [sp, #-0x10]!
+ mov x1, x0 // Save Function call in x1
+ mrs x6, clidr_el1 // Read EL1 CLIDR
+ and x3, x6, #0x7000000 // Mask out all but Level of Coherency (LoC)
+ lsr x3, x3, #23 // Left align cache level value
+ cbz x3, L_Finished // No need to clean if LoC is 0
+ mov x10, #0 // Start clean at cache level 0
+ b Loop1
+
+ASM_PFX(AArch64PerformPoUDataCacheOperation):
+// We can use regs 0-7 and 9-15 without having to save/restore.
+// Save our link register on the stack.
+ str x30, [sp, #-0x10]!
+ mov x1, x0 // Save Function call in x1
+ mrs x6, clidr_el1 // Read EL1 CLIDR
+ and x3, x6, #0x38000000 // Mask out all but Point of Unification (PoU)
+ lsr x3, x3, #26 // Left align cache level value
+ cbz x3, L_Finished // No need to clean if PoU is 0
+ mov x10, #0 // Start clean at cache level 0
+
+Loop1:
+ add x2, x10, x10, lsr #1 // Work out 3x cachelevel for cache info
+ lsr x12, x6, x2 // bottom 3 bits are the Cache type for this level
+ and x12, x12, #7 // get those 3 bits alone
+ cmp x12, #2 // what cache at this level?
+ b.lt L_Skip // no cache or only instruction cache at this level
+ msr csselr_el1, x10 // write the Cache Size selection register with current level (CSSELR)
+ isb // isb to sync the change to the CacheSizeID reg
+ mrs x12, ccsidr_el1 // reads current Cache Size ID register (CCSIDR)
+ and x2, x12, #0x7 // extract the line length field
+ add x2, x2, #4 // add 4 for the line length offset (log2 16 bytes)
+ mov x4, #0x400
+ sub x4, x4, #1
+ and x4, x4, x12, lsr #3 // x4 is the max number of the way size (right aligned)
+ clz w5, w4 // w5 is the bit position of the way size increment
+ mov x7, #0x00008000
+ sub x7, x7, #1
+ and x7, x7, x12, lsr #13 // x7 is the max number of the index size (right aligned)
+
+Loop2:
+ mov x9, x4 // x9 working copy of the max way size (right aligned)
+
+Loop3:
+ lsl x11, x9, x5
+ orr x0, x10, x11 // factor in the way number and cache number
+ lsl x11, x7, x2
+ orr x0, x0, x11 // factor in the index number
+
+ blr x1 // Goto requested cache operation
+
+ subs x9, x9, #1 // decrement the way number
+ b.ge Loop3
+ subs x7, x7, #1 // decrement the index
+ b.ge Loop2
+L_Skip:
+ add x10, x10, #2 // increment the cache number
+ cmp x3, x10
+ b.gt Loop1
+
+L_Finished:
+ dsb sy
+ isb
+ ldr x30, [sp], #0x10
+ ret
+
+
+ASM_PFX(ArmDataMemoryBarrier):
+ dmb sy
+ ret
+
+
+ASM_PFX(ArmDataSyncronizationBarrier):
+ASM_PFX(ArmDrainWriteBuffer):
+ dsb sy
+ ret
+
+
+ASM_PFX(ArmInstructionSynchronizationBarrier):
+ isb
+ ret
+
+
+ASM_PFX(ArmWriteVBar):
+ EL1_OR_EL2_OR_EL3(x1)
+1: msr vbar_el1, x0 // Set the Address of the EL1 Vector Table in the VBAR register
+ b 4f
+2: msr vbar_el2, x0 // Set the Address of the EL2 Vector Table in the VBAR register
+ b 4f
+3: msr vbar_el3, x0 // Set the Address of the EL3 Vector Table in the VBAR register
+4: isb
+ ret
+
+ASM_PFX(ArmEnableVFP):
+ // Check whether floating-point is implemented in the processor.
+ mov x1, x30 // Save LR
+ bl ArmReadIdPfr0 // Read EL1 Processor Feature Register (PFR0)
+ mov x30, x1 // Restore LR
+ ands x0, x0, #AARCH64_PFR0_FP // Extract bits indicating VFP implementation
+ cmp x0, #0 // VFP is implemented if '0'.
+ b.ne 4f // Exit if VFP not implemented.
+ // VFP is implemented.
+ // Make sure VFP exceptions are not trapped (to any exception level).
+ mrs x0, cpacr_el1 // Read EL1 Coprocessor Access Control Register (CPACR)
+ orr x0, x0, #CPACR_VFP_BITS // Disable VFP traps to EL1
+ msr cpacr_el1, x0 // Write back EL1 Coprocessor Access Control Register (CPACR)
+ mov x1, #AARCH64_CPTR_TFP // TFP Bit for trapping VFP Exceptions
+ EL1_OR_EL2_OR_EL3(x2)
+1: ret // Not configurable in EL1
+2: mrs x0, cptr_el2 // Disable VFP traps to EL2
+ bic x0, x0, x1
+ msr cptr_el2, x0
+ ret
+3: mrs x0, cptr_el3 // Disable VFP traps to EL3
+ bic x0, x0, x1
+ msr cptr_el3, x0
+4: ret
+
+
+ASM_PFX(ArmCallWFI):
+ wfi
+ ret
+
+
+ASM_PFX(ArmInvalidateInstructionAndDataTlb):
+ EL1_OR_EL2_OR_EL3(x0)
+1: tlbi alle1
+ b 4f
+2: tlbi alle2
+ b 4f
+3: tlbi alle3
+4: dsb sy
+ isb
+ ret
+
+
+ASM_PFX(ArmReadMpidr):
+ mrs x0, mpidr_el1 // read EL1 MPIDR
+ ret
+
+
+// Keep old function names for C compatibility for now. Change later?
+ASM_PFX(ArmReadTpidrurw):
+ mrs x0, tpidr_el0 // read tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
+ ret
+
+
+// Keep old function names for C compatibility for now. Change later?
+ASM_PFX(ArmWriteTpidrurw):
+ msr tpidr_el0, x0 // write tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
+ ret
+
+
+// Arch timers are mandatory on AArch64
+ASM_PFX(ArmIsArchTimerImplemented):
+ mov x0, #1
+ ret
+
+
+ASM_PFX(ArmReadIdPfr0):
+ mrs x0, id_aa64pfr0_el1 // Read ID_AA64PFR0 Register
+ ret
+
+
+// Q: id_aa64pfr1_el1 is not defined yet. What does this function want to access?
+// A: It is used to set up the arch timer. Check whether we have security extensions and permission to set things up.
+// See: ArmPkg/Library/ArmArchTimerLib/AArch64/ArmArchTimerLib.c
+// Not defined yet, but keep the accessor here for now; it should read as all zeros.
+ASM_PFX(ArmReadIdPfr1):
+ mrs x0, id_aa64pfr1_el1 // Read ID_AA64PFR1 Register
+ ret
+
+// VOID ArmWriteHcr(UINTN Hcr)
+ASM_PFX(ArmWriteHcr):
+ msr hcr_el2, x0 // Write the passed HCR value
+ ret
+
+// UINTN ArmReadCurrentEL(VOID)
+ASM_PFX(ArmReadCurrentEL):
+ mrs x0, CurrentEL
+ ret
+
+dead:
+ b dead
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED
diff --git a/ArmPkg/Library/ArmLib/AArch64/ArmLib.c b/ArmPkg/Library/ArmLib/AArch64/ArmLib.c
new file mode 100644
index 000000000..fa95d352d
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/ArmLib.c
@@ -0,0 +1,53 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Base.h>
+
+#include <Library/ArmLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PcdLib.h>
+
+#include "ArmLibPrivate.h"
+
+VOID
+EFIAPI
+ArmCacheInformation (
+ OUT ARM_CACHE_INFO *CacheInfo
+ )
+{
+ if (CacheInfo != NULL) {
+ CacheInfo->Type = ArmCacheType();
+ CacheInfo->Architecture = ArmCacheArchitecture();
+ CacheInfo->DataCachePresent = ArmDataCachePresent();
+ CacheInfo->DataCacheSize = ArmDataCacheSize();
+ CacheInfo->DataCacheAssociativity = ArmDataCacheAssociativity();
+ CacheInfo->DataCacheLineLength = ArmDataCacheLineLength();
+ CacheInfo->InstructionCachePresent = ArmInstructionCachePresent();
+ CacheInfo->InstructionCacheSize = ArmInstructionCacheSize();
+ CacheInfo->InstructionCacheAssociativity = ArmInstructionCacheAssociativity();
+ CacheInfo->InstructionCacheLineLength = ArmInstructionCacheLineLength();
+ }
+}
+
+VOID
+EFIAPI
+ArmSetAuxCrBit (
+ IN UINT32 Bits
+ )
+{
+ UINT32 val = ArmReadAuxCr();
+ val |= Bits;
+ ArmWriteAuxCr(val);
+}
diff --git a/ArmPkg/Library/ArmLib/AArch64/ArmLibPrivate.h b/ArmPkg/Library/ArmLib/AArch64/ArmLibPrivate.h
new file mode 100644
index 000000000..d2804fc10
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/ArmLibPrivate.h
@@ -0,0 +1,82 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef __ARM_LIB_PRIVATE_H__
+#define __ARM_LIB_PRIVATE_H__
+
+#define CACHE_SIZE_4_KB (3UL)
+#define CACHE_SIZE_8_KB (4UL)
+#define CACHE_SIZE_16_KB (5UL)
+#define CACHE_SIZE_32_KB (6UL)
+#define CACHE_SIZE_64_KB (7UL)
+#define CACHE_SIZE_128_KB (8UL)
+
+#define CACHE_ASSOCIATIVITY_DIRECT (0UL)
+#define CACHE_ASSOCIATIVITY_4_WAY (2UL)
+#define CACHE_ASSOCIATIVITY_8_WAY (3UL)
+
+#define CACHE_PRESENT (0UL)
+#define CACHE_NOT_PRESENT (1UL)
+
+#define CACHE_LINE_LENGTH_32_BYTES (2UL)
+
+#define SIZE_FIELD_TO_CACHE_SIZE(x) (((x) >> 6) & 0x0F)
+#define SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(x) (((x) >> 3) & 0x07)
+#define SIZE_FIELD_TO_CACHE_PRESENCE(x) (((x) >> 2) & 0x01)
+#define SIZE_FIELD_TO_CACHE_LINE_LENGTH(x) (((x) >> 0) & 0x03)
+
+#define DATA_CACHE_SIZE_FIELD(x) (((x) >> 12) & 0x0FFF)
+#define INSTRUCTION_CACHE_SIZE_FIELD(x) (((x) >> 0) & 0x0FFF)
+
+#define DATA_CACHE_SIZE(x) (SIZE_FIELD_TO_CACHE_SIZE(DATA_CACHE_SIZE_FIELD(x)))
+#define DATA_CACHE_ASSOCIATIVITY(x) (SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(DATA_CACHE_SIZE_FIELD(x)))
+#define DATA_CACHE_PRESENT(x) (SIZE_FIELD_TO_CACHE_PRESENCE(DATA_CACHE_SIZE_FIELD(x)))
+#define DATA_CACHE_LINE_LENGTH(x) (SIZE_FIELD_TO_CACHE_LINE_LENGTH(DATA_CACHE_SIZE_FIELD(x)))
+
+#define INSTRUCTION_CACHE_SIZE(x) (SIZE_FIELD_TO_CACHE_SIZE(INSTRUCTION_CACHE_SIZE_FIELD(x)))
+#define INSTRUCTION_CACHE_ASSOCIATIVITY(x) (SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(INSTRUCTION_CACHE_SIZE_FIELD(x)))
+#define INSTRUCTION_CACHE_PRESENT(x) (SIZE_FIELD_TO_CACHE_PRESENCE(INSTRUCTION_CACHE_SIZE_FIELD(x)))
+#define INSTRUCTION_CACHE_LINE_LENGTH(x) (SIZE_FIELD_TO_CACHE_LINE_LENGTH(INSTRUCTION_CACHE_SIZE_FIELD(x)))
+
+#define CACHE_TYPE(x) (((x) >> 25) & 0x0F)
+#define CACHE_TYPE_WRITE_BACK (0x0EUL)
+
+#define CACHE_ARCHITECTURE(x) (((x) >> 24) & 0x01)
+#define CACHE_ARCHITECTURE_UNIFIED (0UL)
+#define CACHE_ARCHITECTURE_SEPARATE (1UL)
+
+
+VOID
+CPSRMaskInsert (
+ IN UINT32 Mask,
+ IN UINT32 Value
+ );
+
+UINT32
+CPSRRead (
+ VOID
+ );
+
+UINT32
+ReadCCSIDR (
+ IN UINT32 CSSELR
+ );
+
+UINT32
+ReadCLIDR (
+ VOID
+ );
+
+#endif // __ARM_LIB_PRIVATE_H__
diff --git a/ArmPkg/Library/ArmLib/AArch64/ArmLibSupportV8.S b/ArmPkg/Library/ArmLib/AArch64/ArmLibSupportV8.S
new file mode 100644
index 000000000..aee3dc118
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/ArmLibSupportV8.S
@@ -0,0 +1,127 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+#include <AsmMacroIoLib.h>
+
+.text
+.align 3
+
+GCC_ASM_EXPORT (ArmIsMpCore)
+GCC_ASM_EXPORT (ArmEnableAsynchronousAbort)
+GCC_ASM_EXPORT (ArmDisableAsynchronousAbort)
+GCC_ASM_EXPORT (ArmEnableIrq)
+GCC_ASM_EXPORT (ArmDisableIrq)
+GCC_ASM_EXPORT (ArmEnableFiq)
+GCC_ASM_EXPORT (ArmDisableFiq)
+GCC_ASM_EXPORT (ArmEnableInterrupts)
+GCC_ASM_EXPORT (ArmDisableInterrupts)
+GCC_ASM_EXPORT (ArmDisableAllExceptions)
+GCC_ASM_EXPORT (ReadCCSIDR)
+GCC_ASM_EXPORT (ReadCLIDR)
+
+#------------------------------------------------------------------------------
+
+.set MPIDR_U_BIT, (30)
+.set MPIDR_U_MASK, (1 << MPIDR_U_BIT)
+.set DAIF_FIQ_BIT, (1 << 0)
+.set DAIF_IRQ_BIT, (1 << 1)
+.set DAIF_ABORT_BIT, (1 << 2)
+.set DAIF_DEBUG_BIT, (1 << 3)
+.set DAIF_INT_BITS, (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
+.set DAIF_ALL, (DAIF_DEBUG_BIT | DAIF_ABORT_BIT | DAIF_INT_BITS)
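+
+// The DAIF_* values above match the 4-bit immediate taken by msr daifset/daifclr
+// (bit 3 = D, bit 2 = A, bit 1 = I, bit 0 = F), so they are used directly as that
+// immediate below rather than as PSTATE.DAIF bit positions.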
+
+
+ASM_PFX(ArmIsMpCore):
+ mrs x0, mpidr_el1 // Read EL1 Multiprocessor Affinity Reg (MPIDR)
+ and x0, x0, #MPIDR_U_MASK // U Bit clear, the processor is part of a multiprocessor system
+ lsr x0, x0, #MPIDR_U_BIT
+ eor x0, x0, #1
+ ret
+
+
+ASM_PFX(ArmEnableAsynchronousAbort):
+ msr daifclr, #DAIF_ABORT_BIT
+ isb
+ ret
+
+
+ASM_PFX(ArmDisableAsynchronousAbort):
+ msr daifset, #DAIF_ABORT_BIT
+ isb
+ ret
+
+
+ASM_PFX(ArmEnableIrq):
+ msr daifclr, #DAIF_IRQ_BIT
+ isb
+ ret
+
+
+ASM_PFX(ArmDisableIrq):
+ msr daifset, #DAIF_IRQ_BIT
+ isb
+ ret
+
+
+ASM_PFX(ArmEnableFiq):
+ msr daifclr, #DAIF_FIQ_BIT
+ isb
+ ret
+
+
+ASM_PFX(ArmDisableFiq):
+ msr daifset, #DAIF_FIQ_BIT
+ isb
+ ret
+
+
+ASM_PFX(ArmEnableInterrupts):
+ msr daifclr, #DAIF_INT_BITS
+ isb
+ ret
+
+
+ASM_PFX(ArmDisableInterrupts):
+ msr daifset, #DAIF_INT_BITS
+ isb
+ ret
+
+
+ASM_PFX(ArmDisableAllExceptions):
+ msr daifset, #DAIF_ALL
+ isb
+ ret
+
+
+// UINT32
+// ReadCCSIDR (
+// IN UINT32 CSSELR
+// )
+ASM_PFX(ReadCCSIDR):
+ msr csselr_el1, x0 // Write Cache Size Selection Register (CSSELR)
+ isb
+ mrs x0, ccsidr_el1 // Read current Cache Size ID Register (CCSIDR)
+ ret
+
+
+// UINT32
+// ReadCLIDR (
+// VOID
+// )
+ASM_PFX(ReadCLIDR):
+ mrs x0, clidr_el1 // Read Cache Level ID Register
+ ret
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED
diff --git a/ArmPkg/Library/ArmLib/ArmV7/ArmV7ArchTimer.c b/ArmPkg/Library/ArmLib/ArmV7/ArmV7ArchTimer.c
index 478a5da69..d04211b29 100644
--- a/ArmPkg/Library/ArmLib/ArmV7/ArmV7ArchTimer.c
+++ b/ArmPkg/Library/ArmLib/ArmV7/ArmV7ArchTimer.c
@@ -21,7 +21,7 @@
#include <Library/DebugLib.h>
#include "ArmV7Lib.h"
#include "ArmLibPrivate.h"
-#include <Library/ArmV7ArchTimerLib.h>
+#include <Library/ArmArchTimerLib.h>
VOID
EFIAPI
diff --git a/ArmPkg/Library/ArmLib/Null/NullArmLib.inf b/ArmPkg/Library/ArmLib/Null/NullArmLib.inf
index d43719810..f6d8b1942 100644
--- a/ArmPkg/Library/ArmLib/Null/NullArmLib.inf
+++ b/ArmPkg/Library/ArmLib/Null/NullArmLib.inf
@@ -31,6 +31,9 @@
../Common/Arm/ArmLibSupport.S | GCC
../Common/Arm/ArmLibSupport.asm | RVCT
+[Sources.AARCH64]
+ ../Common/AArch64/ArmLibSupport.S | GCC
+
[Packages]
ArmPkg/ArmPkg.dec
MdePkg/MdePkg.dec
diff --git a/ArmPkg/Library/ArmSmcLib/AArch64/ArmSmc.S b/ArmPkg/Library/ArmSmcLib/AArch64/ArmSmc.S
new file mode 100644
index 000000000..579a73e96
--- /dev/null
+++ b/ArmPkg/Library/ArmSmcLib/AArch64/ArmSmc.S
@@ -0,0 +1,78 @@
+//
+// Copyright (c) 2012-2013, ARM Limited. All rights reserved.
+//
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD License
+// which accompanies this distribution. The full text of the license may be found at
+// http://opensource.org/licenses/bsd-license.php
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+//
+//
+
+.text
+.align 3
+
+GCC_ASM_EXPORT(ArmCallSmc)
+GCC_ASM_EXPORT(ArmCallSmcArg1)
+GCC_ASM_EXPORT(ArmCallSmcArg2)
+GCC_ASM_EXPORT(ArmCallSmcArg3)
+
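+// Each of these helpers receives pointers to the SMC arguments. The values are
+// loaded into the SMC argument registers, the call is issued, and the (possibly
+// updated) results are written back through the same pointers.
+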
+ASM_PFX(ArmCallSmc):
+ str x1, [sp, #-0x10]!
+ mov x1, x0
+ ldr x0,[x1]
+ smc #0
+ str x0,[x1]
+ ldr x1, [sp], #0x10
+ ret
+
+ASM_PFX(ArmCallSmcArg1):
+ stp x2, x3, [sp, #-0x10]!
+ mov x2, x0
+ mov x3, x1
+ ldr x0,[x2]
+ ldr x1,[x3]
+ smc #0
+ str x0,[x2]
+ str x1,[x3]
+ ldp x2, x3, [sp], #0x10
+ ret
+
+ASM_PFX(ArmCallSmcArg2):
+ stp x3, x4, [sp, #-0x10]!
+ str x5, [sp, #-8]!
+ mov x3, x0
+ mov x4, x1
+ mov x5, x2
+ ldr x0,[x3]
+ ldr x1,[x4]
+ ldr x2,[x5]
+ smc #0
+ str x0,[x3]
+ str x1,[x4]
+ str x2,[x5]
+ ldr x5, [sp], #8
+ ldp x3, x4, [sp], #0x10
+ ret
+
+ASM_PFX(ArmCallSmcArg3):
+ stp x4, x5, [sp, #-0x10]!
+ stp x6, x7, [sp, #-0x10]!
+ mov x4, x0
+ mov x5, x1
+ mov x6, x2
+ mov x7, x3
+ ldr x0,[x4]
+ ldr x1,[x5]
+ ldr x2,[x6]
+ ldr x3,[x7]
+ smc #0
+ str x0,[x4]
+ str x1,[x5]
+ str x2,[x6]
+ str x3,[x7]
+ ldp x4, x5, [sp], #0x10
+ ldp x6, x7, [sp], #0x10
+ ret
diff --git a/ArmPkg/Library/ArmSmcLib/ArmSmcLib.inf b/ArmPkg/Library/ArmSmcLib/ArmSmcLib.inf
index a3e12d26d..f07a569dd 100644
--- a/ArmPkg/Library/ArmSmcLib/ArmSmcLib.inf
+++ b/ArmPkg/Library/ArmSmcLib/ArmSmcLib.inf
@@ -23,6 +23,9 @@
Arm/ArmSmc.asm | RVCT
Arm/ArmSmc.S | GCC
+[Sources.AARCH64]
+ AArch64/ArmSmc.S | GCC
+
[Packages]
MdePkg/MdePkg.dec
ArmPkg/ArmPkg.dec
diff --git a/ArmPkg/Library/ArmSmcLibNull/AArch64/ArmSmcNull.S b/ArmPkg/Library/ArmSmcLibNull/AArch64/ArmSmcNull.S
new file mode 100644
index 000000000..ec6e067ac
--- /dev/null
+++ b/ArmPkg/Library/ArmSmcLibNull/AArch64/ArmSmcNull.S
@@ -0,0 +1,32 @@
+//
+// Copyright (c) 2012-2013, ARM Limited. All rights reserved.
+//
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD License
+// which accompanies this distribution. The full text of the license may be found at
+// http://opensource.org/licenses/bsd-license.php
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+//
+//
+
+.text
+.align 3
+
+GCC_ASM_EXPORT(ArmCallSmc)
+GCC_ASM_EXPORT(ArmCallSmcArg1)
+GCC_ASM_EXPORT(ArmCallSmcArg2)
+GCC_ASM_EXPORT(ArmCallSmcArg3)
+
+ASM_PFX(ArmCallSmc):
+ ret
+
+ASM_PFX(ArmCallSmcArg1):
+ ret
+
+ASM_PFX(ArmCallSmcArg2):
+ ret
+
+ASM_PFX(ArmCallSmcArg3):
+ ret
diff --git a/ArmPkg/Library/ArmSmcLibNull/ArmSmcLibNull.inf b/ArmPkg/Library/ArmSmcLibNull/ArmSmcLibNull.inf
index fed6e95dd..51006e78b 100644
--- a/ArmPkg/Library/ArmSmcLibNull/ArmSmcLibNull.inf
+++ b/ArmPkg/Library/ArmSmcLibNull/ArmSmcLibNull.inf
@@ -26,6 +26,9 @@
Arm/ArmSmcNull.asm | RVCT
Arm/ArmSmcNull.S | GCC
+[Sources.AARCH64]
+ AArch64/ArmSmcNull.S | GCC
+
[Packages]
MdePkg/MdePkg.dec
ArmPkg/ArmPkg.dec
diff --git a/ArmPkg/Library/BaseMemoryLibStm/AArch64/CopyMem.c b/ArmPkg/Library/BaseMemoryLibStm/AArch64/CopyMem.c
new file mode 100644
index 000000000..71ae02a03
--- /dev/null
+++ b/ArmPkg/Library/BaseMemoryLibStm/AArch64/CopyMem.c
@@ -0,0 +1,146 @@
+/** @file
+
+ Copyright (c) 2012-2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "MemLibInternals.h"
+
+/**
+ Copy Length bytes from Source to Destination.
+
+ @param DestinationBuffer Target of copy
+ @param SourceBuffer Place to copy from
+ @param Length Number of bytes to copy
+
+ @return Destination
+
+**/
+VOID *
+EFIAPI
+InternalMemCopyMem (
+ OUT VOID *DestinationBuffer,
+ IN CONST VOID *SourceBuffer,
+ IN UINTN Length
+ )
+{
+ //
+ // Declare the local variables that actually move the data elements as
+ // volatile to prevent the optimizer from replacing this function with
+ // the intrinsic memcpy()
+ //
+ volatile UINT8 *Destination8;
+ CONST UINT8 *Source8;
+ volatile UINT32 *Destination32;
+ CONST UINT32 *Source32;
+ volatile UINT64 *Destination64;
+ CONST UINT64 *Source64;
+ UINTN Alignment;
+
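+ // Copy forwards when the source is above the destination and backwards when
+ // it is below, so each path stays correct for overlapping buffers.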
+ if ((((UINTN)DestinationBuffer & 0x7) == 0) && (((UINTN)SourceBuffer & 0x7) == 0) && (Length >= 8)) {
+ if (SourceBuffer > DestinationBuffer) {
+ Destination64 = (UINT64*)DestinationBuffer;
+ Source64 = (CONST UINT64*)SourceBuffer;
+ while (Length >= 8) {
+ *(Destination64++) = *(Source64++);
+ Length -= 8;
+ }
+
+ // Finish if there are still some bytes to copy
+ Destination8 = (UINT8*)Destination64;
+ Source8 = (CONST UINT8*)Source64;
+ while (Length-- != 0) {
+ *(Destination8++) = *(Source8++);
+ }
+ } else if (SourceBuffer < DestinationBuffer) {
+ Destination64 = (UINT64*)((UINTN)DestinationBuffer + Length);
+ Source64 = (CONST UINT64*)((UINTN)SourceBuffer + Length);
+
+ // Destination64 and Source64 were aligned on a 64-bit boundary
+ // but if length is not a multiple of 8 bytes then they won't be
+ // anymore.
+
+ Alignment = Length & 0x7;
+ if (Alignment != 0) {
+ Destination8 = (UINT8*)Destination64;
+ Source8 = (CONST UINT8*)Source64;
+
+ while (Alignment-- != 0) {
+ *(--Destination8) = *(--Source8);
+ --Length;
+ }
+ Destination64 = (UINT64*)Destination8;
+ Source64 = (CONST UINT64*)Source8;
+ }
+
+ while (Length > 0) {
+ *(--Destination64) = *(--Source64);
+ Length -= 8;
+ }
+ }
+ } else if ((((UINTN)DestinationBuffer & 0x3) == 0) && (((UINTN)SourceBuffer & 0x3) == 0) && (Length >= 4)) {
+ if (SourceBuffer > DestinationBuffer) {
+ Destination32 = (UINT32*)DestinationBuffer;
+ Source32 = (CONST UINT32*)SourceBuffer;
+ while (Length >= 4) {
+ *(Destination32++) = *(Source32++);
+ Length -= 4;
+ }
+
+ // Finish if there are still some bytes to copy
+ Destination8 = (UINT8*)Destination32;
+ Source8 = (CONST UINT8*)Source32;
+ while (Length-- != 0) {
+ *(Destination8++) = *(Source8++);
+ }
+ } else if (SourceBuffer < DestinationBuffer) {
+ Destination32 = (UINT32*)((UINTN)DestinationBuffer + Length);
+ Source32 = (CONST UINT32*)((UINTN)SourceBuffer + Length);
+
+ // Destination32 and Source32 were aligned on a 32-bit boundary
+ // but if length is not a multiple of 4 bytes then they won't be
+ // anymore.
+
+ Alignment = Length & 0x3;
+ if (Alignment != 0) {
+ Destination8 = (UINT8*)Destination32;
+ Source8 = (CONST UINT8*)Source32;
+
+ while (Alignment-- != 0) {
+ *(--Destination8) = *(--Source8);
+ --Length;
+ }
+ Destination32 = (UINT32*)Destination8;
+ Source32 = (CONST UINT32*)Source8;
+ }
+
+ while (Length > 0) {
+ *(--Destination32) = *(--Source32);
+ Length -= 4;
+ }
+ }
+ } else {
+ if (SourceBuffer > DestinationBuffer) {
+ Destination8 = (UINT8*)DestinationBuffer;
+ Source8 = (CONST UINT8*)SourceBuffer;
+ while (Length-- != 0) {
+ *(Destination8++) = *(Source8++);
+ }
+ } else if (SourceBuffer < DestinationBuffer) {
+ Destination8 = (UINT8*)DestinationBuffer + Length;
+ Source8 = (CONST UINT8*)SourceBuffer + Length;
+ while (Length-- != 0) {
+ *(--Destination8) = *(--Source8);
+ }
+ }
+ }
+ return DestinationBuffer;
+}
diff --git a/ArmPkg/Library/BaseMemoryLibStm/AArch64/SetMem.c b/ArmPkg/Library/BaseMemoryLibStm/AArch64/SetMem.c
new file mode 100644
index 000000000..368a819e5
--- /dev/null
+++ b/ArmPkg/Library/BaseMemoryLibStm/AArch64/SetMem.c
@@ -0,0 +1,84 @@
+/** @file
+
+ Copyright (c) 2012-2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "MemLibInternals.h"
+
+/**
+ Set Buffer to Value for Size bytes.
+
+ @param Buffer Memory to set.
+ @param Length Number of bytes to set
+ @param Value Value of the set operation.
+
+ @return Buffer
+
+**/
+VOID *
+EFIAPI
+InternalMemSetMem (
+ OUT VOID *Buffer,
+ IN UINTN Length,
+ IN UINT8 Value
+ )
+{
+ //
+ // Declare the local variables that actually move the data elements as
+ // volatile to prevent the optimizer from replacing this function with
+ // the intrinsic memset()
+ //
+ volatile UINT8 *Pointer8;
+ volatile UINT32 *Pointer32;
+ volatile UINT64 *Pointer64;
+ UINT32 Value32;
+ UINT64 Value64;
+
+ if ((((UINTN)Buffer & 0x7) == 0) && (Length >= 8)) {
+ // Generate the 64bit value
+ Value32 = (Value << 24) | (Value << 16) | (Value << 8) | Value;
+ Value64 = (((UINT64)Value32) << 32) | Value32;
+
+ Pointer64 = (UINT64*)Buffer;
+ while (Length >= 8) {
+ *(Pointer64++) = Value64;
+ Length -= 8;
+ }
+
+ // Finish with bytes if needed
+ Pointer8 = (UINT8*)Pointer64;
+ while (Length-- > 0) {
+ *(Pointer8++) = Value;
+ }
+ } else if ((((UINTN)Buffer & 0x3) == 0) && (Length >= 4)) {
+ // Generate the 32bit value
+ Value32 = (Value << 24) | (Value << 16) | (Value << 8) | Value;
+
+ Pointer32 = (UINT32*)Buffer;
+ while (Length >= 4) {
+ *(Pointer32++) = Value32;
+ Length -= 4;
+ }
+
+ // Finish with bytes if needed
+ Pointer8 = (UINT8*)Pointer32;
+ while (Length-- > 0) {
+ *(Pointer8++) = Value;
+ }
+ } else {
+ Pointer8 = (UINT8*)Buffer;
+ while (Length-- > 0) {
+ *(Pointer8++) = Value;
+ }
+ }
+ return Buffer;
+}
diff --git a/ArmPkg/Library/BaseMemoryLibStm/BaseMemoryLibStm.inf b/ArmPkg/Library/BaseMemoryLibStm/BaseMemoryLibStm.inf
index 7d82b12ab..a4d28036b 100755
--- a/ArmPkg/Library/BaseMemoryLibStm/BaseMemoryLibStm.inf
+++ b/ArmPkg/Library/BaseMemoryLibStm/BaseMemoryLibStm.inf
@@ -7,6 +7,7 @@
#
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
# Portions copyright (c) 2010, Apple Inc. All rights reserved.<BR>
+# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -28,7 +29,7 @@
#
-# VALID_ARCHITECTURES = ARM
+# VALID_ARCHITECTURES = ARM AARCH64
#
@@ -54,6 +55,9 @@
Arm/SetMem.asm
Arm/SetMem.S
+[Sources.AARCH64]
+ AArch64/CopyMem.c
+ AArch64/SetMem.c
[Packages]
MdePkg/MdePkg.dec
diff --git a/ArmPkg/Library/BasePeCoffLib/AArch64/PeCoffLoaderEx.c b/ArmPkg/Library/BasePeCoffLib/AArch64/PeCoffLoaderEx.c
new file mode 100644
index 000000000..8a1829a26
--- /dev/null
+++ b/ArmPkg/Library/BasePeCoffLib/AArch64/PeCoffLoaderEx.c
@@ -0,0 +1,129 @@
+/** @file
+ Specific relocation fixups for the AArch64 architecture.
+
+ Copyright (c) 2006 - 2009, Intel Corporation. All rights reserved.<BR>
+ Portions copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
+ Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php.
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "BasePeCoffLibInternals.h"
+#include <Library/BaseLib.h>
+
+//
+// Note: Currently only large memory model is supported by UEFI relocation code.
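+// In that model the only base relocation the code below applies is
+// EFI_IMAGE_REL_BASED_DIR64, i.e. a 64-bit absolute address rebased by the load
+// offset; any other relocation type is rejected as unsupported.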
+//
+
+/**
+ Performs an AArch64-specific relocation fixup and is a no-op on other
+ instruction sets.
+
+ @param Reloc The pointer to the relocation record.
+ @param Fixup The pointer to the address to fix up.
+ @param FixupData The pointer to a buffer to log the fixups.
+ @param Adjust The offset to adjust the fixup.
+
+ @return Status code.
+
+**/
+RETURN_STATUS
+PeCoffLoaderRelocateImageEx (
+ IN UINT16 **Reloc,
+ IN OUT CHAR8 *Fixup,
+ IN OUT CHAR8 **FixupData,
+ IN UINT64 Adjust
+ )
+{
+ UINT64 *F64;
+
+ switch ((**Reloc) >> 12) {
+
+ case EFI_IMAGE_REL_BASED_DIR64:
+ F64 = (UINT64 *) Fixup;
+ *F64 = *F64 + (UINT64) Adjust;
+ if (*FixupData != NULL) {
+ *FixupData = ALIGN_POINTER(*FixupData, sizeof(UINT64));
+ *(UINT64 *)(*FixupData) = *F64;
+ *FixupData = *FixupData + sizeof(UINT64);
+ }
+ break;
+
+ default:
+ return RETURN_UNSUPPORTED;
+ }
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ Returns TRUE if the machine type of the PE/COFF image is supported. Supported
+ does not mean the image can be executed; it means the PE/COFF loader supports
+ loading and relocating images of this type. It is up to the caller to support
+ the entry point.
+
+ @param Machine Machine type from the PE Header.
+
+ @return TRUE if this PE/COFF loader can load the image
+
+**/
+BOOLEAN
+PeCoffLoaderImageFormatSupported (
+ IN UINT16 Machine
+ )
+{
+ if ((Machine == IMAGE_FILE_MACHINE_AARCH64) || (Machine == IMAGE_FILE_MACHINE_EBC)) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/**
+ Performs an AArch64-specific re-relocation fixup and is a no-op on other
+ instruction sets. This is used to re-relocate the image into the EFI virtual
+ address space for runtime calls.
+
+ @param Reloc The pointer to the relocation record.
+ @param Fixup The pointer to the address to fix up.
+ @param FixupData The pointer to a buffer to log the fixups.
+ @param Adjust The offset to adjust the fixup.
+
+ @return Status code.
+
+**/
+RETURN_STATUS
+PeHotRelocateImageEx (
+ IN UINT16 **Reloc,
+ IN OUT CHAR8 *Fixup,
+ IN OUT CHAR8 **FixupData,
+ IN UINT64 Adjust
+ )
+{
+ UINT64 *Fixup64;
+
+ switch ((**Reloc) >> 12) {
+ case EFI_IMAGE_REL_BASED_DIR64:
+ Fixup64 = (UINT64 *) Fixup;
+ *FixupData = ALIGN_POINTER (*FixupData, sizeof (UINT64));
+ if (*(UINT64 *) (*FixupData) == *Fixup64) {
+ *Fixup64 = *Fixup64 + (UINT64) Adjust;
+ }
+
+ *FixupData = *FixupData + sizeof (UINT64);
+ break;
+
+ default:
+ DEBUG ((EFI_D_ERROR, "PeHotRelocateEx: unknown fixup type\n"));
+ return RETURN_UNSUPPORTED;
+ }
+
+ return RETURN_SUCCESS;
+}
diff --git a/ArmPkg/Library/BasePeCoffLib/BasePeCoffLib.inf b/ArmPkg/Library/BasePeCoffLib/BasePeCoffLib.inf
index 7236db791..929d789cb 100755
--- a/ArmPkg/Library/BasePeCoffLib/BasePeCoffLib.inf
+++ b/ArmPkg/Library/BasePeCoffLib/BasePeCoffLib.inf
@@ -6,6 +6,7 @@
#
# Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR>
# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -27,7 +28,7 @@
#
-# VALID_ARCHITECTURES = IA32 X64 IPF EBC ARM
+# VALID_ARCHITECTURES = IA32 X64 IPF EBC ARM AARCH64
#
[Sources]
@@ -43,6 +44,9 @@
[Sources.ARM]
Arm/PeCoffLoaderEx.c
+[Sources.AARCH64]
+ AArch64/PeCoffLoaderEx.c
+
[Packages]
MdePkg/MdePkg.dec
diff --git a/ArmPkg/Library/BdsLib/BdsLinuxFdt.c b/ArmPkg/Library/BdsLib/BdsLinuxFdt.c
index 1927de726..57d8dcc90 100644
--- a/ArmPkg/Library/BdsLib/BdsLinuxFdt.c
+++ b/ArmPkg/Library/BdsLib/BdsLinuxFdt.c
@@ -336,7 +336,7 @@ PrepareFdt (
INTN err;
INTN node;
INTN cpu_node;
- INTN lenp;
+ INT32 lenp;
CONST VOID* BootArg;
CONST VOID* Method;
EFI_PHYSICAL_ADDRESS InitrdImageStart;
diff --git a/ArmPkg/Library/CompilerIntrinsicsLib/AArch64/memcpy.S b/ArmPkg/Library/CompilerIntrinsicsLib/AArch64/memcpy.S
new file mode 100644
index 000000000..18433b3d5
--- /dev/null
+++ b/ArmPkg/Library/CompilerIntrinsicsLib/AArch64/memcpy.S
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2011 - 2013, ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+.text
+.align 2
+
+
+ASM_GLOBAL ASM_PFX(memcpy)
+
+
+// Taken from Newlib BSD implementation.
+ASM_PFX(memcpy):
+ // Copy dst to x6, so we can preserve return value.
+ mov x6, x0
+
+ // NOTE: although size_t is unsigned, this code uses signed
+ // comparisons on x2 so relies on nb never having its top bit
+ // set. In practice this is not going to be a real problem.
+
+ // Require at least 64 bytes to be worth aligning.
+ cmp x2, #64
+ blt qwordcopy
+
+ // Compute offset to align destination to 16 bytes.
+ neg x3, x0
+ and x3, x3, 15
+
+ cbz x3, blockcopy // offset == 0 is likely
+
+ // We know there is at least 64 bytes to be done, so we
+ // do a 16 byte misaligned copy at first and then later do
+ // all 16-byte aligned copies. Some bytes will be copied
+ // twice, but there's no harm in that since memcpy does not
+ // guarantee correctness on overlap.
+
+ sub x2, x2, x3 // nb -= offset
+ ldp x4, x5, [x1]
+ add x1, x1, x3
+ stp x4, x5, [x6]
+ add x6, x6, x3
+
+ // The destination pointer is now qword (16 byte) aligned.
+ // (The source pointer may or may not be.)
+
+blockcopy:
+ // Copy 64 bytes at a time.
+ subs x2, x2, #64
+ blt 3f
+2: subs x2, x2, #64
+ ldp x4, x5, [x1,#0]
+ ldp x8, x9, [x1,#16]
+ ldp x10,x11,[x1,#32]
+ ldp x12,x13,[x1,#48]
+ add x1, x1, #64
+ stp x4, x5, [x6,#0]
+ stp x8, x9, [x6,#16]
+ stp x10,x11,[x6,#32]
+ stp x12,x13,[x6,#48]
+ add x6, x6, #64
+ bge 2b
+
+ // Unwind pre-decrement
+3: add x2, x2, #64
+
+qwordcopy:
+ // Copy 0-48 bytes, 16 bytes at a time.
+ subs x2, x2, #16
+ blt tailcopy
+2: ldp x4, x5, [x1],#16
+ subs x2, x2, #16
+ stp x4, x5, [x6],#16
+ bge 2b
+
+ // No need to unwind the pre-decrement, it would not change
+ // the low 4 bits of the count. But how likely is it for the
+ // byte count to be a multiple of 16? Is it worth the overhead
+ // of testing for x2 == -16?
+
+tailcopy:
+ // Copy trailing 0-15 bytes.
+ tbz x2, #3, 1f
+ ldr x4, [x1],#8 // copy 8 bytes
+ str x4, [x6],#8
+1:
+ tbz x2, #2, 1f
+ ldr w4, [x1],#4 // copy 4 bytes
+ str w4, [x6],#4
+1:
+ tbz x2, #1, 1f
+ ldrh w4, [x1],#2 // copy 2 bytes
+ strh w4, [x6],#2
+1:
+ tbz x2, #0, return
+ ldrb w4, [x1] // copy 1 byte
+ strb w4, [x6]
+
+return:
+ // This is the only return point of memcpy.
+ ret
diff --git a/ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf b/ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf
index 53b1b15c7..3e95105cd 100644
--- a/ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf
+++ b/ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf
@@ -22,9 +22,8 @@
VERSION_STRING = 1.0
LIBRARY_CLASS = CompilerIntrinsicsLib
-
-[Sources.common]
-
+[Sources.AARCH64]
+ AArch64/memcpy.S | GCC
[Sources.ARM]
Arm/mullu.asm | RVCT
diff --git a/ArmPkg/Library/DebugAgentSymbolsBaseLib/AArch64/DebugAgentException.S b/ArmPkg/Library/DebugAgentSymbolsBaseLib/AArch64/DebugAgentException.S
new file mode 100644
index 000000000..022e2796c
--- /dev/null
+++ b/ArmPkg/Library/DebugAgentSymbolsBaseLib/AArch64/DebugAgentException.S
@@ -0,0 +1,93 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+GCC_ASM_EXPORT(DebugAgentVectorTable)
+GCC_ASM_IMPORT(DefaultExceptionHandler)
+
+.text
+ASM_PFX(DebugAgentVectorTable):
+
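+// The vector table must be 2KB aligned (.align 11); each of its 16 entries is
+// spaced 0x80 bytes apart (.align 7). These stub handlers simply spin in place.
+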
+//
+// Current EL with SP0 : 0x0 - 0x180
+//
+.align 11
+ASM_PFX(SynchronousExceptionSP0):
+ b ASM_PFX(SynchronousExceptionSP0)
+
+.align 7
+ASM_PFX(IrqSP0):
+ b ASM_PFX(IrqSP0)
+
+.align 7
+ASM_PFX(FiqSP0):
+ b ASM_PFX(FiqSP0)
+
+.align 7
+ASM_PFX(SErrorSP0):
+ b ASM_PFX(SErrorSP0)
+
+//
+// Current EL with SPx: 0x200 - 0x380
+//
+.align 7
+ASM_PFX(SynchronousExceptionSPx):
+ b ASM_PFX(SynchronousExceptionSPx)
+
+.align 7
+ASM_PFX(IrqSPx):
+ b ASM_PFX(IrqSPx)
+
+.align 7
+ASM_PFX(FiqSPx):
+ b ASM_PFX(FiqSPx)
+
+.align 7
+ASM_PFX(SErrorSPx):
+ b ASM_PFX(SErrorSPx)
+
+/* Lower EL using AArch64 : 0x400 - 0x580 */
+.align 7
+ASM_PFX(SynchronousExceptionA64):
+ b ASM_PFX(SynchronousExceptionA64)
+
+.align 7
+ASM_PFX(IrqA64):
+ b ASM_PFX(IrqA64)
+
+.align 7
+ASM_PFX(FiqA64):
+ b ASM_PFX(FiqA64)
+
+.align 7
+ASM_PFX(SErrorA64):
+ b ASM_PFX(SErrorA64)
+
+//
+// Lower EL using AArch32 : 0x0 - 0x180
+//
+.align 7
+ASM_PFX(SynchronousExceptionA32):
+ b ASM_PFX(SynchronousExceptionA32)
+
+.align 7
+ASM_PFX(IrqA32):
+ b ASM_PFX(IrqA32)
+
+.align 7
+ASM_PFX(FiqA32):
+ b ASM_PFX(FiqA32)
+
+.align 7
+ASM_PFX(SErrorA32):
+ b ASM_PFX(SErrorA32)
diff --git a/ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.c b/ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.c
index 25a904e3f..6da648d99 100644
--- a/ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.c
+++ b/ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.c
@@ -285,7 +285,7 @@ InitializeDebugAgent (
// Now we've got UART, make the check:
// - The Vector table must be 32-byte aligned
- ASSERT(((UINTN)DebugAgentVectorTable & ARM_VECTOR_TABLE_ALIGNMENT) == 0);
+ // Need to fix BaseTools: ASSERT(((UINTN)DebugAgentVectorTable & ARM_VECTOR_TABLE_ALIGNMENT) == 0);
ArmWriteVBar ((UINTN)DebugAgentVectorTable);
// We use InitFlag to know if DebugAgent has been intialized from
diff --git a/ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.inf b/ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.inf
index f6a0ec293..f7dfa01d5 100644
--- a/ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.inf
+++ b/ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.inf
@@ -26,6 +26,9 @@
Arm/DebugAgentException.asm | RVCT
Arm/DebugAgentException.S | GCC
+[Sources.AARCH64]
+ AArch64/DebugAgentException.S | GCC
+
[Packages]
MdePkg/MdePkg.dec
MdeModulePkg/MdeModulePkg.dec
diff --git a/ArmPkg/Library/DefaultExceptionHandlerLib/AArch64/DefaultExceptionHandler.c b/ArmPkg/Library/DefaultExceptionHandlerLib/AArch64/DefaultExceptionHandler.c
new file mode 100644
index 000000000..27a09044b
--- /dev/null
+++ b/ArmPkg/Library/DefaultExceptionHandlerLib/AArch64/DefaultExceptionHandler.c
@@ -0,0 +1,112 @@
+/** @file
+ Default exception handler
+
+ Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
+ Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Uefi.h>
+#include <Library/UefiLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PeCoffGetEntryPointLib.h>
+#include <Library/PrintLib.h>
+#include <Library/ArmDisassemblerLib.h>
+#include <Library/SerialPortLib.h>
+
+#include <Guid/DebugImageInfoTable.h>
+#include <Protocol/DebugSupport.h>
+#include <Protocol/LoadedImage.h>
+
+EFI_DEBUG_IMAGE_INFO_TABLE_HEADER *gDebugImageTableHeader = NULL;
+
+STATIC CHAR8 *gExceptionTypeString[] = {
+ "Synchronous",
+ "IRQ",
+ "FIQ",
+ "SError"
+};
+
+CHAR8 *
+GetImageName (
+ IN UINTN FaultAddress,
+ OUT UINTN *ImageBase,
+ OUT UINTN *PeCoffSizeOfHeaders
+ );
+
+/**
+ This is the default action to take on an unexpected exception
+
+ Since this is exception context, don't do anything crazy like trying to allocate memory.
+
+ @param ExceptionType Type of the exception
+ @param SystemContext Register state at the time of the Exception
+
+**/
+VOID
+DefaultExceptionHandler (
+ IN EFI_EXCEPTION_TYPE ExceptionType,
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ CHAR8 Buffer[100];
+ UINTN CharCount;
+
+ CharCount = AsciiSPrint (Buffer,sizeof (Buffer),"\n\n%a Exception: \n", gExceptionTypeString[ExceptionType]);
+ SerialPortWrite ((UINT8 *) Buffer, CharCount);
+
+ DEBUG_CODE_BEGIN ();
+ CHAR8 *Pdb;
+ UINTN ImageBase;
+ UINTN PeCoffSizeOfHeader;
+ Pdb = GetImageName (SystemContext.SystemContextAArch64->ELR, &ImageBase, &PeCoffSizeOfHeader);
+ if (Pdb != NULL) {
+ DEBUG ((EFI_D_ERROR, "%a loaded at 0x%016lx \n", Pdb, ImageBase));
+ }
+ DEBUG_CODE_END ();
+
+ DEBUG ((EFI_D_ERROR, "\n X0 0x%016lx X1 0x%016lx X2 0x%016lx X3 0x%016lx\n", SystemContext.SystemContextAArch64->X0, SystemContext.SystemContextAArch64->X1, SystemContext.SystemContextAArch64->X2, SystemContext.SystemContextAArch64->X3));
+ DEBUG ((EFI_D_ERROR, " X4 0x%016lx X5 0x%016lx X6 0x%016lx X7 0x%016lx\n", SystemContext.SystemContextAArch64->X4, SystemContext.SystemContextAArch64->X5, SystemContext.SystemContextAArch64->X6, SystemContext.SystemContextAArch64->X7));
+ DEBUG ((EFI_D_ERROR, " X8 0x%016lx X9 0x%016lx X10 0x%016lx X11 0x%016lx\n", SystemContext.SystemContextAArch64->X8, SystemContext.SystemContextAArch64->X9, SystemContext.SystemContextAArch64->X10, SystemContext.SystemContextAArch64->X11));
+ DEBUG ((EFI_D_ERROR, " X12 0x%016lx X13 0x%016lx X14 0x%016lx X15 0x%016lx\n", SystemContext.SystemContextAArch64->X12, SystemContext.SystemContextAArch64->X13, SystemContext.SystemContextAArch64->X14, SystemContext.SystemContextAArch64->X15));
+ DEBUG ((EFI_D_ERROR, " X16 0x%016lx X17 0x%016lx X18 0x%016lx X19 0x%016lx\n", SystemContext.SystemContextAArch64->X16, SystemContext.SystemContextAArch64->X17, SystemContext.SystemContextAArch64->X18, SystemContext.SystemContextAArch64->X19));
+ DEBUG ((EFI_D_ERROR, " X20 0x%016lx X21 0x%016lx X22 0x%016lx X23 0x%016lx\n", SystemContext.SystemContextAArch64->X20, SystemContext.SystemContextAArch64->X21, SystemContext.SystemContextAArch64->X22, SystemContext.SystemContextAArch64->X23));
+ DEBUG ((EFI_D_ERROR, " X24 0x%016lx X25 0x%016lx X26 0x%016lx X27 0x%016lx\n", SystemContext.SystemContextAArch64->X24, SystemContext.SystemContextAArch64->X25, SystemContext.SystemContextAArch64->X26, SystemContext.SystemContextAArch64->X27));
+ DEBUG ((EFI_D_ERROR, " X28 0x%016lx FP 0x%016lx LR 0x%016lx \n", SystemContext.SystemContextAArch64->X28, SystemContext.SystemContextAArch64->FP, SystemContext.SystemContextAArch64->LR));
+
+ /* We save these as 128bit numbers, but have to print them as two 64bit numbers,
+ so swap the 64bit words to correctly represent a 128bit number. */
+ DEBUG ((EFI_D_ERROR, "\n V0 0x%016lx %016lx V1 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V0[1], SystemContext.SystemContextAArch64->V0[0], SystemContext.SystemContextAArch64->V1[1], SystemContext.SystemContextAArch64->V1[0]));
+ DEBUG ((EFI_D_ERROR, " V2 0x%016lx %016lx V3 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V2[1], SystemContext.SystemContextAArch64->V2[0], SystemContext.SystemContextAArch64->V3[1], SystemContext.SystemContextAArch64->V3[0]));
+ DEBUG ((EFI_D_ERROR, " V4 0x%016lx %016lx V5 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V4[1], SystemContext.SystemContextAArch64->V4[0], SystemContext.SystemContextAArch64->V5[1], SystemContext.SystemContextAArch64->V5[0]));
+ DEBUG ((EFI_D_ERROR, " V6 0x%016lx %016lx V7 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V6[1], SystemContext.SystemContextAArch64->V6[0], SystemContext.SystemContextAArch64->V7[1], SystemContext.SystemContextAArch64->V7[0]));
+ DEBUG ((EFI_D_ERROR, " V8 0x%016lx %016lx V9 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V8[1], SystemContext.SystemContextAArch64->V8[0], SystemContext.SystemContextAArch64->V9[1], SystemContext.SystemContextAArch64->V9[0]));
+ DEBUG ((EFI_D_ERROR, " V10 0x%016lx %016lx V11 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V10[1], SystemContext.SystemContextAArch64->V10[0], SystemContext.SystemContextAArch64->V11[1], SystemContext.SystemContextAArch64->V11[0]));
+ DEBUG ((EFI_D_ERROR, " V12 0x%016lx %016lx V13 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V12[1], SystemContext.SystemContextAArch64->V12[0], SystemContext.SystemContextAArch64->V13[1], SystemContext.SystemContextAArch64->V13[0]));
+ DEBUG ((EFI_D_ERROR, " V14 0x%016lx %016lx V15 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V14[1], SystemContext.SystemContextAArch64->V14[0], SystemContext.SystemContextAArch64->V15[1], SystemContext.SystemContextAArch64->V15[0]));
+ DEBUG ((EFI_D_ERROR, " V16 0x%016lx %016lx V17 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V16[1], SystemContext.SystemContextAArch64->V16[0], SystemContext.SystemContextAArch64->V17[1], SystemContext.SystemContextAArch64->V17[0]));
+ DEBUG ((EFI_D_ERROR, " V18 0x%016lx %016lx V19 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V18[1], SystemContext.SystemContextAArch64->V18[0], SystemContext.SystemContextAArch64->V19[1], SystemContext.SystemContextAArch64->V19[0]));
+ DEBUG ((EFI_D_ERROR, " V20 0x%016lx %016lx V21 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V20[1], SystemContext.SystemContextAArch64->V20[0], SystemContext.SystemContextAArch64->V21[1], SystemContext.SystemContextAArch64->V21[0]));
+ DEBUG ((EFI_D_ERROR, " V22 0x%016lx %016lx V23 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V22[1], SystemContext.SystemContextAArch64->V22[0], SystemContext.SystemContextAArch64->V23[1], SystemContext.SystemContextAArch64->V23[0]));
+ DEBUG ((EFI_D_ERROR, " V24 0x%016lx %016lx V25 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V24[1], SystemContext.SystemContextAArch64->V24[0], SystemContext.SystemContextAArch64->V25[1], SystemContext.SystemContextAArch64->V25[0]));
+ DEBUG ((EFI_D_ERROR, " V26 0x%016lx %016lx V27 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V26[1], SystemContext.SystemContextAArch64->V26[0], SystemContext.SystemContextAArch64->V27[1], SystemContext.SystemContextAArch64->V27[0]));
+ DEBUG ((EFI_D_ERROR, " V28 0x%016lx %016lx V29 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V28[1], SystemContext.SystemContextAArch64->V28[0], SystemContext.SystemContextAArch64->V29[1], SystemContext.SystemContextAArch64->V29[0]));
+ DEBUG ((EFI_D_ERROR, " V30 0x%016lx %016lx V31 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V30[1], SystemContext.SystemContextAArch64->V30[0], SystemContext.SystemContextAArch64->V31[1], SystemContext.SystemContextAArch64->V31[0]));
+
+ DEBUG ((EFI_D_ERROR, "\n SP 0x%016lx ELR 0x%016lx SPSR 0x%08lx FPSR 0x%08lx\n ESR 0x%08lx FAR 0x%016lx\n", SystemContext.SystemContextAArch64->SP, SystemContext.SystemContextAArch64->ELR, SystemContext.SystemContextAArch64->SPSR, SystemContext.SystemContextAArch64->FPSR, SystemContext.SystemContextAArch64->ESR, SystemContext.SystemContextAArch64->FAR));
+
+ DEBUG ((EFI_D_ERROR, "\n ESR : EC 0x%02x IL 0x%x ISS 0x%08x\n", (SystemContext.SystemContextAArch64->ESR & 0xFC000000) >> 26, (SystemContext.SystemContextAArch64->ESR >> 25) & 0x1, SystemContext.SystemContextAArch64->ESR & 0x1FFFFFF ));
+
+ DEBUG ((EFI_D_ERROR, "\n"));
+ ASSERT (FALSE);
+}
+
diff --git a/ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLib.inf b/ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLib.inf
index 61dabf484..da8190bd3 100644
--- a/ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLib.inf
+++ b/ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLib.inf
@@ -1,6 +1,7 @@
#/** @file
#
-# Copyright (c) 2008, Apple Inc. All rights reserved.<BR>
+# Copyright (c) 2008, Apple Inc. All rights reserved.<BR>
+# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -27,6 +28,9 @@
[Sources.ARM]
Arm/DefaultExceptionHandler.c
+[Sources.AARCH64]
+ AArch64/DefaultExceptionHandler.c
+
[Packages]
MdePkg/MdePkg.dec
ArmPkg/ArmPkg.dec
diff --git a/ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLibBase.inf b/ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLibBase.inf
index 39b2775fc..8f5b3e101 100644
--- a/ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLibBase.inf
+++ b/ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLibBase.inf
@@ -26,6 +26,9 @@
[Sources.ARM]
Arm/DefaultExceptionHandler.c
+[Sources.AARCH64]
+ AArch64/DefaultExceptionHandler.c
+
[Packages]
MdePkg/MdePkg.dec
ArmPkg/ArmPkg.dec
diff --git a/ArmPkg/Library/SemihostLib/AArch64/GccSemihost.S b/ArmPkg/Library/SemihostLib/AArch64/GccSemihost.S
new file mode 100644
index 000000000..42211cf4f
--- /dev/null
+++ b/ArmPkg/Library/SemihostLib/AArch64/GccSemihost.S
@@ -0,0 +1,23 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
+# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+.text
+.align 2
+
+.globl ASM_PFX(GccSemihostCall)
+
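+// HLT #0xF000 is the AArch64 semihosting call: the host debugger or model
+// intercepts it and services the request described by the registers set up
+// by the C caller.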
+ASM_PFX(GccSemihostCall):
+ hlt #0xf000
+ ret
diff --git a/ArmPkg/Library/SemihostLib/SemihostLib.inf b/ArmPkg/Library/SemihostLib/SemihostLib.inf
index a0a687178..78727780a 100644
--- a/ArmPkg/Library/SemihostLib/SemihostLib.inf
+++ b/ArmPkg/Library/SemihostLib/SemihostLib.inf
@@ -2,6 +2,7 @@
# Semihosting JTAG lib
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
+# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -25,7 +26,7 @@
#
# The following information is for reference only and not required by the build tools.
#
-# VALID_ARCHITECTURES = ARM
+# VALID_ARCHITECTURES = ARM AARCH64
#
[Sources.common]
SemihostLib.c
@@ -33,10 +34,13 @@
[Sources.ARM]
Arm/GccSemihost.S | GCC
+[Sources.AARCH64]
+ AArch64/GccSemihost.S | GCC
+
[Packages]
MdePkg/MdePkg.dec
ArmPkg/ArmPkg.dec
[LibraryClasses]
BaseLib
-
+