Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 257
-rw-r--r--  arch/arm64/Kconfig.debug | 25
-rw-r--r--  arch/arm64/Makefile | 16
-rw-r--r--  arch/arm64/boot/dts/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/apm-mustang.dts | 30
-rw-r--r--  arch/arm64/boot/dts/apm-storm.dtsi | 398
-rw-r--r--  arch/arm64/boot/dts/clcd-panels.dtsi | 52
-rw-r--r--  arch/arm64/boot/dts/foundation-v8.dts | 2
-rw-r--r--  arch/arm64/boot/dts/fvp-base-gicv2-psci.dts | 294
-rw-r--r--  arch/arm64/boot/dts/juno.dts | 498
-rw-r--r--  arch/arm64/boot/dts/rtsm_ve-aemv8a.dts | 51
-rw-r--r--  arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi | 9
-rw-r--r--  arch/arm64/configs/defconfig | 23
-rw-r--r--  arch/arm64/crypto/Kconfig | 53
-rw-r--r--  arch/arm64/crypto/Makefile | 38
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-core.S | 222
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c | 297
-rw-r--r--  arch/arm64/crypto/aes-ce-cipher.c | 155
-rw-r--r--  arch/arm64/crypto/aes-ce.S | 133
-rw-r--r--  arch/arm64/crypto/aes-glue.c | 446
-rw-r--r--  arch/arm64/crypto/aes-modes.S | 532
-rw-r--r--  arch/arm64/crypto/aes-neon.S | 382
-rw-r--r--  arch/arm64/crypto/ghash-ce-core.S | 95
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c | 155
-rw-r--r--  arch/arm64/crypto/sha1-ce-core.S | 153
-rw-r--r--  arch/arm64/crypto/sha1-ce-glue.c | 174
-rw-r--r--  arch/arm64/crypto/sha2-ce-core.S | 156
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c | 255
-rw-r--r--  arch/arm64/include/asm/Kbuild | 4
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 65
-rw-r--r--  arch/arm64/include/asm/assembler.h | 31
-rw-r--r--  arch/arm64/include/asm/atomic.h | 55
-rw-r--r--  arch/arm64/include/asm/bL_switcher.h | 54
-rw-r--r--  arch/arm64/include/asm/barrier.h | 65
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 26
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 54
-rw-r--r--  arch/arm64/include/asm/compat.h | 25
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h | 70
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 29
-rw-r--r--  arch/arm64/include/asm/cpuidle.h | 13
-rw-r--r--  arch/arm64/include/asm/cputype.h | 63
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 111
-rw-r--r--  arch/arm64/include/asm/device.h | 3
-rw-r--r--  arch/arm64/include/asm/dma-contiguous.h | 28
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 27
-rw-r--r--  arch/arm64/include/asm/efi.h | 14
-rw-r--r--  arch/arm64/include/asm/elf.h | 21
-rw-r--r--  arch/arm64/include/asm/esr.h | 2
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 67
-rw-r--r--  arch/arm64/include/asm/ftrace.h | 59
-rw-r--r--  arch/arm64/include/asm/futex.h | 11
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 2
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 117
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h | 1
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 20
-rw-r--r--  arch/arm64/include/asm/insn.h | 113
-rw-r--r--  arch/arm64/include/asm/io.h | 11
-rw-r--r--  arch/arm64/include/asm/irq.h | 1
-rw-r--r--  arch/arm64/include/asm/irqflags.h | 23
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 52
-rw-r--r--  arch/arm64/include/asm/kgdb.h | 84
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 259
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 141
-rw-r--r--  arch/arm64/include/asm/kvm_coproc.h | 57
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 268
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 251
-rw-r--r--  arch/arm64/include/asm/kvm_mmio.h | 59
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 157
-rw-r--r--  arch/arm64/include/asm/kvm_psci.h (renamed from arch/arm64/include/asm/sigcontext.h) | 26
-rw-r--r--  arch/arm64/include/asm/memory.h | 22
-rw-r--r--  arch/arm64/include/asm/mmu.h | 6
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 6
-rw-r--r--  arch/arm64/include/asm/page.h | 9
-rw-r--r--  arch/arm64/include/asm/percpu.h | 49
-rw-r--r--  arch/arm64/include/asm/pgtable-2level-hwdef.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable-2level-types.h | 2
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 38
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 297
-rw-r--r--  arch/arm64/include/asm/proc-fns.h | 5
-rw-r--r--  arch/arm64/include/asm/processor.h | 9
-rw-r--r--  arch/arm64/include/asm/psci.h | 19
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 13
-rw-r--r--  arch/arm64/include/asm/smp.h | 15
-rw-r--r--  arch/arm64/include/asm/smp_plat.h | 13
-rw-r--r--  arch/arm64/include/asm/sparsemem.h | 2
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 92
-rw-r--r--  arch/arm64/include/asm/spinlock_types.h | 10
-rw-r--r--  arch/arm64/include/asm/stackprotector.h | 38
-rw-r--r--  arch/arm64/include/asm/string.h | 15
-rw-r--r--  arch/arm64/include/asm/suspend.h | 27
-rw-r--r--  arch/arm64/include/asm/syscall.h | 7
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 60
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 26
-rw-r--r--  arch/arm64/include/asm/timex.h | 6
-rw-r--r--  arch/arm64/include/asm/tlb.h | 13
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 70
-rw-r--r--  arch/arm64/include/asm/topology.h | 70
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 29
-rw-r--r--  arch/arm64/include/asm/unistd.h | 2
-rw-r--r--  arch/arm64/include/asm/virt.h | 7
-rw-r--r--  arch/arm64/include/asm/word-at-a-time.h | 94
-rw-r--r--  arch/arm64/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/uapi/asm/byteorder.h | 4
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 7
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 201
-rw-r--r--  arch/arm64/include/uapi/asm/perf_regs.h | 40
-rw-r--r--  arch/arm64/kernel/Makefile | 21
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c | 15
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 57
-rw-r--r--  arch/arm64/kernel/cpu_ops.c | 99
-rw-r--r--  arch/arm64/kernel/cpuidle.c | 31
-rw-r--r--  arch/arm64/kernel/cputable.c | 2
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 166
-rw-r--r--  arch/arm64/kernel/early_printk.c | 8
-rw-r--r--  arch/arm64/kernel/efi-entry.S | 109
-rw-r--r--  arch/arm64/kernel/efi-stub.c | 79
-rw-r--r--  arch/arm64/kernel/efi.c | 469
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 218
-rw-r--r--  arch/arm64/kernel/entry.S | 56
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 65
-rw-r--r--  arch/arm64/kernel/ftrace.c | 177
-rw-r--r--  arch/arm64/kernel/head.S | 308
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 205
-rw-r--r--  arch/arm64/kernel/hyp-stub.S | 1
-rw-r--r--  arch/arm64/kernel/insn.c | 304
-rw-r--r--  arch/arm64/kernel/irq.c | 61
-rw-r--r--  arch/arm64/kernel/jump_label.c | 58
-rw-r--r--  arch/arm64/kernel/kgdb.c | 336
-rw-r--r--  arch/arm64/kernel/kuser32.S | 55
-rw-r--r--  arch/arm64/kernel/module.c | 156
-rw-r--r--  arch/arm64/kernel/perf_event.c | 204
-rw-r--r--  arch/arm64/kernel/perf_regs.c | 46
-rw-r--r--  arch/arm64/kernel/process.c | 92
-rw-r--r--  arch/arm64/kernel/psci-call.S | 28
-rw-r--r--  arch/arm64/kernel/psci.c | 375
-rw-r--r--  arch/arm64/kernel/ptrace.c | 218
-rw-r--r--  arch/arm64/kernel/return_address.c | 55
-rw-r--r--  arch/arm64/kernel/setup.c | 307
-rw-r--r--  arch/arm64/kernel/signal.c | 29
-rw-r--r--  arch/arm64/kernel/signal32.c | 42
-rw-r--r--  arch/arm64/kernel/sleep.S | 184
-rw-r--r--  arch/arm64/kernel/smp.c | 282
-rw-r--r--  arch/arm64/kernel/smp_psci.c | 53
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 60
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 13
-rw-r--r--  arch/arm64/kernel/suspend.c | 140
-rw-r--r--  arch/arm64/kernel/sys32.S | 22
-rw-r--r--  arch/arm64/kernel/sys_compat.c | 6
-rw-r--r--  arch/arm64/kernel/time.c | 9
-rw-r--r--  arch/arm64/kernel/topology.c | 590
-rw-r--r--  arch/arm64/kernel/traps.c | 16
-rw-r--r--  arch/arm64/kernel/vdso.c | 85
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 8
-rw-r--r--  arch/arm64/kernel/vdso/gettimeofday.S | 7
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 72
-rw-r--r--  arch/arm64/kvm/Kconfig | 63
-rw-r--r--  arch/arm64/kvm/Makefile | 27
-rw-r--r--  arch/arm64/kvm/emulate.c | 158
-rw-r--r--  arch/arm64/kvm/guest.c | 359
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 133
-rw-r--r--  arch/arm64/kvm/hyp-init.S | 116
-rw-r--r--  arch/arm64/kvm/hyp.S | 1274
-rw-r--r--  arch/arm64/kvm/inject_fault.c | 203
-rw-r--r--  arch/arm64/kvm/regmap.c | 168
-rw-r--r--  arch/arm64/kvm/reset.c | 112
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 1528
-rw-r--r--  arch/arm64/kvm/sys_regs.h | 140
-rw-r--r--  arch/arm64/kvm/sys_regs_generic_v8.c | 100
-rw-r--r--  arch/arm64/kvm/vgic-v2-switch.S | 133
-rw-r--r--  arch/arm64/kvm/vgic-v3-switch.S | 267
-rw-r--r--  arch/arm64/lib/Makefile | 9
-rw-r--r--  arch/arm64/lib/bitops.S | 3
-rw-r--r--  arch/arm64/lib/clear_user.S | 2
-rw-r--r--  arch/arm64/lib/memcmp.S | 258
-rw-r--r--  arch/arm64/lib/memcpy.S | 192
-rw-r--r--  arch/arm64/lib/memmove.S | 190
-rw-r--r--  arch/arm64/lib/memset.S | 207
-rw-r--r--  arch/arm64/lib/strcmp.S | 234
-rw-r--r--  arch/arm64/lib/strlen.S | 126
-rw-r--r--  arch/arm64/lib/strncmp.S | 310
-rw-r--r--  arch/arm64/lib/strncpy_from_user.S | 50
-rw-r--r--  arch/arm64/lib/strnlen.S | 171
-rw-r--r--  arch/arm64/lib/strnlen_user.S | 47
-rw-r--r--  arch/arm64/mm/Makefile | 3
-rw-r--r--  arch/arm64/mm/cache.S | 104
-rw-r--r--  arch/arm64/mm/copypage.c | 2
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 436
-rw-r--r--  arch/arm64/mm/fault.c | 84
-rw-r--r--  arch/arm64/mm/flush.c | 38
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 74
-rw-r--r--  arch/arm64/mm/init.c | 83
-rw-r--r--  arch/arm64/mm/ioremap.c | 106
-rw-r--r--  arch/arm64/mm/mm.h | 1
-rw-r--r--  arch/arm64/mm/mmu.c | 185
-rw-r--r--  arch/arm64/mm/pageattr.c | 96
-rw-r--r--  arch/arm64/mm/pgd.c | 11
-rw-r--r--  arch/arm64/mm/proc-macros.S | 3
-rw-r--r--  arch/arm64/mm/proc.S | 114
-rw-r--r--  arch/arm64/mm/tlb.S | 71
199 files changed, 20902 insertions, 1861 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 56b3f6d447ae..9742106cb51c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,36 +1,67 @@
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_OPP
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
select ARM_AMBA
select ARM_ARCH_TIMER
select ARM_GIC
+ select ARM_GIC_V3
+ select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
+ select CPU_PM if (SUSPEND || CPU_IDLE)
+ select DCACHE_WORD_ACCESS
+ select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select GENERIC_CPU_AUTOPROBE
+ select GENERIC_EARLY_IOREMAP
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
+ select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
+ select HAVE_C_RECORDMCOUNT
+ select HAVE_CC_STACKPROTECTOR
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GENERIC_DMA_COHERENT
select HAVE_GENERIC_HARDIRQS
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_MEMBLOCK
+ select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
+ select OF_RESERVED_MEM
select PERF_USE_VMALLOC
select POWER_RESET
select POWER_SUPPLY
@@ -61,11 +92,7 @@ config LOCKDEP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config GENERIC_LOCKBREAK
- def_bool y
- depends on SMP && PREEMPT
-
-config RWSEM_GENERIC_SPINLOCK
+config RWSEM_XCHGADD_ALGORITHM
def_bool y
config GENERIC_HWEIGHT
@@ -77,7 +104,7 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
-config ZONE_DMA32
+config ZONE_DMA
def_bool y
config ARCH_DMA_ADDR_T_64BIT
@@ -95,6 +122,9 @@ config SWIOTLB
config IOMMU_HELPER
def_bool SWIOTLB
+config FIX_EARLYCON_MEM
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -111,6 +141,11 @@ config ARCH_VEXPRESS
This enables support for the ARMv8 software model (Versatile
Express).
+config ARCH_XGENE
+ bool "AppliedMicro X-Gene SOC Family"
+ help
+ This enables support for the AppliedMicro X-Gene SOC Family.
+
endmenu
menu "Bus support"
@@ -122,6 +157,32 @@ endmenu
menu "Kernel Features"
+menu "ARM errata workarounds"
+
+config ARM64_ERRATUM_845719
+ bool "Cortex-A53: 845719: a load might read incorrect data"
+ depends on COMPAT
+ default n
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 845719 on Cortex-A53 parts up to r0p4.
+
+ When running a compat (AArch32) userspace on an affected Cortex-A53
+ part, a load at EL0 from a virtual address that matches the bottom 32
+ bits of the virtual address used by a recent load at (AArch64) EL1
+ might return incorrect data.
+
+ The workaround is to write the contextidr_el1 register on exception
+ return to a 32-bit task.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
+endmenu
+
+
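To illustrate the workaround described above, here is a minimal C sketch of the idea; it is an editorial illustration only (the function name is invented), since the kernel actually applies the fix as an alternative-patched instruction sequence on the exception-return path rather than as a C helper:

/* Sketch: rewrite contextidr_el1 before returning to a 32-bit task. */
static inline void erratum_845719_workaround(void)
{
	unsigned long ctx;

	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_845719) || !is_compat_task())
		return;

	/* Reading the register and writing the same value back is enough. */
	asm volatile("mrs %0, contextidr_el1\n\t"
		     "msr contextidr_el1, %0"
		     : "=r" (ctx));
}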
config ARM64_64K_PAGES
bool "Enable 64KB pages support"
help
@@ -130,6 +191,11 @@ config ARM64_64K_PAGES
look-up. AArch32 emulation is not available when this feature
is enabled.
+config CPU_BIG_ENDIAN
+ bool "Build big-endian kernel"
+ help
+ Say Y if you plan on running a kernel in big-endian mode.
+
config SMP
bool "Symmetric Multi-Processing"
select USE_GENERIC_SMP_HELPERS
@@ -144,11 +210,131 @@ config SMP
If you don't know what to do here, say N.
+config SCHED_MC
+ bool "Multi-core scheduler support"
+ depends on SMP
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places. If unsure say N here.
+
+config SCHED_SMT
+ bool "SMT scheduler support"
+ depends on SMP
+ help
+ Improves the CPU scheduler's decision making when dealing with
+ MultiThreading at a cost of slightly increased overhead in some
+ places. If unsure say N here.
+
+config DISABLE_CPU_SCHED_DOMAIN_BALANCE
+ bool "(EXPERIMENTAL) Disable CPU level scheduler load-balancing"
+ help
+ Disables scheduler load-balancing at CPU sched domain level.
+
+config SCHED_HMP
+ bool "(EXPERIMENTAL) Heterogenous multiprocessor scheduling"
+ depends on DISABLE_CPU_SCHED_DOMAIN_BALANCE && SCHED_MC && FAIR_GROUP_SCHED && !SCHED_AUTOGROUP
+ help
+ Experimental scheduler optimizations for heterogeneous platforms.
+ Attempts to introspectively select task affinity to optimize power
+ and performance. Basic support for multiple (>2) cpu types is in place,
+ but it has only been tested with two types of cpus.
+ There is currently no support for migration of task groups, hence
+ !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled
+ between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE).
+
+config SCHED_HMP_PRIO_FILTER
+ bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
+ depends on SCHED_HMP
+ help
+ Enables task priority based HMP migration filter. Any task with
+ a NICE value above the threshold will always be on low-power cpus
+ with less compute capacity.
+
+config SCHED_HMP_PRIO_FILTER_VAL
+ int "NICE priority threshold"
+ default 5
+ depends on SCHED_HMP_PRIO_FILTER
+
+config HMP_FAST_CPU_MASK
+ string "HMP scheduler fast CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the fast CPUs in the system as a list string,
+ e.g. cpuid 0+1 should be specified as 0-1.
+
+config HMP_SLOW_CPU_MASK
+ string "HMP scheduler slow CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the slow CPUs in the system as a list string,
+ e.g. cpuid 0+1 should be specified as 0-1.
+
+config HMP_VARIABLE_SCALE
+ bool "Allows changing the load tracking scale through sysfs"
+ depends on SCHED_HMP
+ help
+ When turned on, this option exports the thresholds and load average
+ period value for the load tracking patches through sysfs.
+ The values can be modified to change the rate of load accumulation
+ and the thresholds used for HMP migration.
+ The load_avg_period_ms is the time in ms to reach a load average of
+ 0.5 for an idle task (starting from a load average ratio of 0) that
+ then begins a busy loop.
+ The up_threshold and down_threshold are the values used to decide
+ when a task should move to a faster CPU or back to a slower CPU.
+ The {up,down}_threshold values are divided by 1024 before being compared
+ to the load average.
+ For example, with load_avg_period_ms = 128 and up_threshold = 512,
+ a task that starts with a load of 0 and then runs continuously will be
+ migrated to a bigger CPU after 128ms, because after 128ms its
+ load_avg_ratio is 0.5 and the effective up_threshold is 0.5.
+ This option has the same behaviour as changing the Y of the load
+ average computation to
+ (1002/1024)^(LOAD_AVG_PERIOD/load_avg_period_ms)
+ but it removes intermediate overflows in the computation.
+
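To make the arithmetic above concrete, here is a small stand-alone C program (an editorial sketch, not kernel code) that replays the per-millisecond geometric accumulation described in this help text; LOAD_AVG_PERIOD is assumed to be the kernel's usual 32 ms half-life:

#include <math.h>
#include <stdio.h>

#define LOAD_AVG_PERIOD 32.0	/* assumed default half-life in ms */

int main(void)
{
	double load_avg_period_ms = 128.0, up_threshold = 512.0;
	/* Per-ms decay factor, as given in the help text above. */
	double y = pow(1002.0 / 1024.0, LOAD_AVG_PERIOD / load_avg_period_ms);
	double ratio = 0.0;	/* the task starts idle: load_avg_ratio == 0 */
	int ms;

	for (ms = 1; ms <= 1024; ms++) {
		ratio = ratio * y + (1.0 - y);	/* one fully busy millisecond */
		if (ratio * 1024.0 >= up_threshold) {
			printf("crosses up_threshold after %d ms\n", ms);
			break;
		}
	}
	return 0;
}

Compiled with -lm, this prints 128 ms, matching the example in the help text.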
+config HMP_FREQUENCY_INVARIANT_SCALE
+ bool "(EXPERIMENTAL) Frequency-Invariant Tracked Load for HMP"
+ depends on HMP_VARIABLE_SCALE && CPU_FREQ
+ help
+ Scales the current load contribution in line with the frequency
+ of the CPU that the task was executed on.
+ In this version, we use a simple linear scale derived from the
+ maximum frequency reported by CPUFreq.
+ Restricting tracked load to be scaled by the CPU's frequency
+ represents the consumption of possible compute capacity
+ (rather than consumption of actual instantaneous capacity as
+ normal) and allows the HMP migration's simple threshold
+ migration strategy to interact more predictably with CPUFreq's
+ asynchronous compute capacity changes.
+
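The linear scaling described above amounts to a one-line helper; the sketch below is illustrative only (the names are invented), the real logic lives in the scheduler's load-tracking code:

/* Scale a load contribution by how fast the CPU was actually clocked. */
static inline unsigned long scale_contrib_by_freq(unsigned long contrib,
						  unsigned long curr_freq,
						  unsigned long max_freq)
{
	/* Assumes both frequencies come from CPUFreq and max_freq != 0. */
	return (contrib * curr_freq) / max_freq;
}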
+config SCHED_HMP_LITTLE_PACKING
+ bool "Small task packing for HMP"
+ depends on SCHED_HMP
+ default n
+ help
+ Allows the HMP Scheduler to pack small tasks into CPUs in the
+ smallest HMP domain.
+ Controlled by two sysfs files in /sys/kernel/hmp.
+ packing_enable: 1 to enable, 0 to disable packing. Default 1.
+ packing_limit: runqueue load ratio where a RQ is considered
+ to be full. Default is NICE_0_LOAD * 9/8.
+
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
- default "4"
+ # These have to remain sorted largest to smallest
+ default "8"
+
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP
+ help
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
source kernel/Kconfig.preempt
@@ -180,8 +366,25 @@ config HW_PERF_EVENTS
Enable hardware performance counter support for perf events. If
disabled, perf events will use software events only.
+config SYS_SUPPORTS_HUGETLBFS
+ def_bool y
+
+config ARCH_WANT_GENERAL_HUGETLB
+ def_bool y
+
+config ARCH_WANT_HUGE_PMD_SHARE
+ def_bool y if !ARM64_64K_PAGES
+
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ def_bool y
+
source "mm/Kconfig"
+config FORCE_MAX_ZONEORDER
+ int
+ default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
+ default "11"
+
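The value 14 follows from simple arithmetic, sketched below under the assumption that 64KB pages use 512MB PMD-level sections (PAGE_SHIFT = 16, PMD_SHIFT = 29):

/* A transparent huge page is one PMD section: 2^(29 - 16) base pages. */
#define PAGE_SHIFT_64K	16
#define PMD_SHIFT_64K	29
#define THP_ORDER	(PMD_SHIFT_64K - PAGE_SHIFT_64K)	/* order 13 = 512MB */

/* The buddy allocator serves orders 0..MAX_ORDER-1, so 14 is the minimum. */
_Static_assert(14 - 1 >= THP_ORDER, "FORCE_MAX_ZONEORDER too small for THP");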
endmenu
menu "Boot options"
@@ -202,6 +405,20 @@ config CMDLINE_FORCE
This is useful if you cannot or don't want to change the
command-line options your boot loader passes to the kernel.
+config EFI
+ bool "UEFI runtime support"
+ depends on OF && !CPU_BIG_ENDIAN
+ select LIBFDT
+ select UCS2_STRING
+ select EFI_PARAMS_FROM_FDT
+ default y
+ help
+ This option provides support for runtime services provided
+ by UEFI firmware (such as non-volatile variables, realtime
+ clock, and platform reset). A UEFI stub is also provided to
+ allow the kernel to be booted as an EFI application. This
+ is only useful on systems that have UEFI firmware.
+
endmenu
menu "Userspace binary formats"
@@ -229,16 +446,42 @@ config SYSVIPC_COMPAT
endmenu
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+source "drivers/cpufreq/Kconfig"
+config ARCH_SUSPEND_POSSIBLE
+ def_bool y
+
+config ARM64_CPU_SUSPEND
+ def_bool PM_SLEEP
+
+endmenu
+
+menu "CPU Power Management"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
source "net/Kconfig"
source "drivers/Kconfig"
+source "drivers/firmware/Kconfig"
+
source "fs/Kconfig"
+source "arch/arm64/kvm/Kconfig"
+
source "arch/arm64/Kconfig.debug"
source "security/Kconfig"
source "crypto/Kconfig"
+if CRYPTO
+source "arch/arm64/crypto/Kconfig"
+endif
source "lib/Kconfig"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 1a6bfe954d49..bb55717c8dad 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -13,6 +13,20 @@ config DEBUG_STACK_USAGE
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T output.
+config STRICT_DEVMEM
+ bool "Filter access to /dev/mem"
+ depends on MMU
+ help
+ If this option is disabled, you allow userspace (root) access to all
+ of memory, including kernel and userspace memory. Accidental
+ access to this is obviously disastrous, but specific access can
+ be used by people debugging the kernel.
+
+ If this option is switched on, the /dev/mem file only allows
+ userspace access to memory mapped peripherals.
+
+ If in doubt, say Y.
+
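For illustration, the filter this option enables boils down to a check of the following shape (a sketch only; the exact arm64 hook may differ):

#include <linux/ioport.h>
#include <linux/mm.h>

/* Sketch: allow /dev/mem access to device memory, refuse plain RAM. */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;	/* memory-mapped peripheral: allowed */
	return 0;		/* system RAM: filtered out */
}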
config EARLY_PRINTK
bool "Early printk support"
default y
@@ -30,4 +44,15 @@ config PID_IN_CONTEXTIDR
instructions during context switch. Say Y here only if you are
planning to use hardware trace tools with this kernel.
+config DEBUG_SET_MODULE_RONX
+ bool "Set loadable kernel module data as NX and text as RO"
+ depends on MODULES
+ help
+ This option helps catch unintended modifications to loadable
+ kernel module's text and read-only data. It also prevents execution
+ of module data. Such protection may interfere with run-time code
+ patching and dynamic kernel tracing - and they might also protect
+ against certain classes of kernel exploits.
+ If in doubt, say "N".
+
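In practice the protection reduces to page-permission calls such as the following sketch; the helper and its arguments are hypothetical (the real work is driven generically by the module loader), but set_memory_ro() and set_memory_nx() are the primitives added by arch/arm64/mm/pageattr.c in this series:

/* Sketch: mark a module's text read-only and its data non-executable. */
static void protect_module_mem(unsigned long text_addr, int text_pages,
			       unsigned long data_addr, int data_pages)
{
	set_memory_ro(text_addr, text_pages);	/* catch stray writes to code */
	set_memory_nx(data_addr, data_pages);	/* refuse to execute data */
}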
endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index c95c5cb212fd..3e7882c4e034 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -20,9 +20,15 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_DEFCONFIG := defconfig
KBUILD_CFLAGS += -mgeneral-regs-only
+ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+KBUILD_CPPFLAGS += -mbig-endian
+AS += -EB
+LD += -EB
+else
KBUILD_CPPFLAGS += -mlittle-endian
AS += -EL
LD += -EL
+endif
comma = ,
@@ -32,11 +38,17 @@ CHECKFLAGS += -D__aarch64__
head-y := arch/arm64/kernel/head.o
# The byte offset of the kernel image in RAM from the start of RAM.
+ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
+TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+else
TEXT_OFFSET := 0x00080000
+endif
export TEXT_OFFSET GZFLAGS
core-y += arch/arm64/kernel/ arch/arm64/mm/
+core-$(CONFIG_KVM) += arch/arm64/kvm/
+core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y := arch/arm64/lib/ $(libs-y)
libs-y += $(LIBGCC)
@@ -60,6 +72,10 @@ zinstall install: vmlinux
dtbs: scripts
$(Q)$(MAKE) $(build)=$(boot)/dts dtbs
+PHONY += vdso_install
+vdso_install:
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 68457e9e0975..ef388176116d 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,4 +1,7 @@
-dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
+dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb \
+ fvp-base-gicv2-psci.dtb
+dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb
+dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
targets += dtbs
targets += $(dtb-y)
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
new file mode 100644
index 000000000000..6541962f5d70
--- /dev/null
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -0,0 +1,30 @@
+/*
+ * dts file for AppliedMicro (APM) Mustang Board
+ *
+ * Copyright (C) 2013, Applied Micro Circuits Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/dts-v1/;
+
+/include/ "apm-storm.dtsi"
+
+/ {
+ model = "APM X-Gene Mustang board";
+ compatible = "apm,mustang", "apm,xgene-storm";
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
+ };
+};
+
+&serial0 {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
new file mode 100644
index 000000000000..42edf193d084
--- /dev/null
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -0,0 +1,398 @@
+/*
+ * dts file for AppliedMicro (APM) X-Gene Storm SOC
+ *
+ * Copyright (C) 2013, Applied Micro Circuits Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/ {
+ compatible = "apm,xgene-storm";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@000 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x000>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@001 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x001>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@100 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x100>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@101 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x101>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@200 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x200>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@201 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x201>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@300 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x300>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@301 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x301>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ };
+
+ gic: interrupt-controller@78010000 {
+ compatible = "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x0 0x78010000 0x0 0x1000>, /* GIC Dist */
+ <0x0 0x78020000 0x0 0x1000>, /* GIC CPU */
+ <0x0 0x78040000 0x0 0x2000>, /* GIC VCPU Control */
+ <0x0 0x78060000 0x0 0x2000>; /* GIC VCPU */
+ interrupts = <1 9 0xf04>; /* GIC Maintenance IRQ */
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 0 0xff01>, /* Secure Phys IRQ */
+ <1 13 0xff01>, /* Non-secure Phys IRQ */
+ <1 14 0xff01>, /* Virt IRQ */
+ <1 15 0xff01>; /* Hyp IRQ */
+ clock-frequency = <50000000>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ refclk: refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <1>;
+ clock-frequency = <100000000>;
+ clock-output-names = "refclk";
+ };
+
+ pcppll: pcppll@17000100 {
+ compatible = "apm,xgene-pcppll-clock";
+ #clock-cells = <1>;
+ clocks = <&refclk 0>;
+ clock-names = "pcppll";
+ reg = <0x0 0x17000100 0x0 0x1000>;
+ clock-output-names = "pcppll";
+ type = <0>;
+ };
+
+ socpll: socpll@17000120 {
+ compatible = "apm,xgene-socpll-clock";
+ #clock-cells = <1>;
+ clocks = <&refclk 0>;
+ clock-names = "socpll";
+ reg = <0x0 0x17000120 0x0 0x1000>;
+ clock-output-names = "socpll";
+ type = <1>;
+ };
+
+ socplldiv2: socplldiv2 {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <1>;
+ clocks = <&socpll 0>;
+ clock-names = "socplldiv2";
+ clock-mult = <1>;
+ clock-div = <2>;
+ clock-output-names = "socplldiv2";
+ };
+
+ qmlclk: qmlclk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ clock-names = "qmlclk";
+ reg = <0x0 0x1703C000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "qmlclk";
+ };
+
+ ethclk: ethclk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ clock-names = "ethclk";
+ reg = <0x0 0x17000000 0x0 0x1000>;
+ reg-names = "div-reg";
+ divider-offset = <0x238>;
+ divider-width = <0x9>;
+ divider-shift = <0x0>;
+ clock-output-names = "ethclk";
+ };
+
+ eth8clk: eth8clk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&ethclk 0>;
+ clock-names = "eth8clk";
+ reg = <0x0 0x1702C000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "eth8clk";
+ };
+
+ sataphy1clk: sataphy1clk@1f21c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f21c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sataphy1clk";
+ status = "disabled";
+ csr-offset = <0x4>;
+ csr-mask = <0x00>;
+ enable-offset = <0x0>;
+ enable-mask = <0x06>;
+ };
+
+ sataphy2clk: sataphy1clk@1f22c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f22c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sataphy2clk";
+ status = "ok";
+ csr-offset = <0x4>;
+ csr-mask = <0x3a>;
+ enable-offset = <0x0>;
+ enable-mask = <0x06>;
+ };
+
+ sataphy3clk: sataphy1clk@1f23c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f23c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sataphy3clk";
+ status = "ok";
+ csr-offset = <0x4>;
+ csr-mask = <0x3a>;
+ enable-offset = <0x0>;
+ enable-mask = <0x06>;
+ };
+
+ sata01clk: sata01clk@1f21c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f21c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sata01clk";
+ csr-offset = <0x4>;
+ csr-mask = <0x05>;
+ enable-offset = <0x0>;
+ enable-mask = <0x39>;
+ };
+
+ sata23clk: sata23clk@1f22c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f22c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sata23clk";
+ csr-offset = <0x4>;
+ csr-mask = <0x05>;
+ enable-offset = <0x0>;
+ enable-mask = <0x39>;
+ };
+
+ sata45clk: sata45clk@1f23c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f23c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sata45clk";
+ csr-offset = <0x4>;
+ csr-mask = <0x05>;
+ enable-offset = <0x0>;
+ enable-mask = <0x39>;
+ };
+
+ rtcclk: rtcclk@17000000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x17000000 0x0 0x2000>;
+ reg-names = "csr-reg";
+ csr-offset = <0xc>;
+ csr-mask = <0x2>;
+ enable-offset = <0x10>;
+ enable-mask = <0x2>;
+ clock-output-names = "rtcclk";
+ };
+ };
+
+ serial0: serial@1c020000 {
+ status = "disabled";
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0 0x1c020000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4c 0x4>;
+ };
+
+ serial1: serial@1c021000 {
+ status = "disabled";
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0 0x1c021000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4d 0x4>;
+ };
+
+ serial2: serial@1c022000 {
+ status = "disabled";
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0 0x1c022000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4e 0x4>;
+ };
+
+ serial3: serial@1c023000 {
+ status = "disabled";
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0 0x1c023000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4f 0x4>;
+ };
+
+ phy1: phy@1f21a000 {
+ compatible = "apm,xgene-phy";
+ reg = <0x0 0x1f21a000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&sataphy1clk 0>;
+ status = "disabled";
+ apm,tx-boost-gain = <30 30 30 30 30 30>;
+ apm,tx-eye-tuning = <2 10 10 2 10 10>;
+ };
+
+ phy2: phy@1f22a000 {
+ compatible = "apm,xgene-phy";
+ reg = <0x0 0x1f22a000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&sataphy2clk 0>;
+ status = "ok";
+ apm,tx-boost-gain = <30 30 30 30 30 30>;
+ apm,tx-eye-tuning = <1 10 10 2 10 10>;
+ };
+
+ phy3: phy@1f23a000 {
+ compatible = "apm,xgene-phy";
+ reg = <0x0 0x1f23a000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&sataphy3clk 0>;
+ status = "ok";
+ apm,tx-boost-gain = <31 31 31 31 31 31>;
+ apm,tx-eye-tuning = <2 10 10 2 10 10>;
+ };
+
+ sata1: sata@1a000000 {
+ compatible = "apm,xgene-ahci";
+ reg = <0x0 0x1a000000 0x0 0x1000>,
+ <0x0 0x1f210000 0x0 0x1000>,
+ <0x0 0x1f21d000 0x0 0x1000>,
+ <0x0 0x1f21e000 0x0 0x1000>,
+ <0x0 0x1f217000 0x0 0x1000>;
+ interrupts = <0x0 0x86 0x4>;
+ status = "disabled";
+ clocks = <&sata01clk 0>;
+ phys = <&phy1 0>;
+ phy-names = "sata-phy";
+ };
+
+ sata2: sata@1a400000 {
+ compatible = "apm,xgene-ahci";
+ reg = <0x0 0x1a400000 0x0 0x1000>,
+ <0x0 0x1f220000 0x0 0x1000>,
+ <0x0 0x1f22d000 0x0 0x1000>,
+ <0x0 0x1f22e000 0x0 0x1000>,
+ <0x0 0x1f227000 0x0 0x1000>;
+ interrupts = <0x0 0x87 0x4>;
+ status = "ok";
+ clocks = <&sata23clk 0>;
+ phys = <&phy2 0>;
+ phy-names = "sata-phy";
+ };
+
+ sata3: sata@1a800000 {
+ compatible = "apm,xgene-ahci";
+ reg = <0x0 0x1a800000 0x0 0x1000>,
+ <0x0 0x1f230000 0x0 0x1000>,
+ <0x0 0x1f23d000 0x0 0x1000>,
+ <0x0 0x1f23e000 0x0 0x1000>;
+ interrupts = <0x0 0x88 0x4>;
+ status = "ok";
+ clocks = <&sata45clk 0>;
+ phys = <&phy3 0>;
+ phy-names = "sata-phy";
+ };
+
+ rtc: rtc@10510000 {
+ compatible = "apm,xgene-rtc";
+ reg = <0x0 0x10510000 0x0 0x400>;
+ interrupts = <0x0 0x46 0x4>;
+ #clock-cells = <1>;
+ clocks = <&rtcclk 0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/clcd-panels.dtsi b/arch/arm64/boot/dts/clcd-panels.dtsi
new file mode 100644
index 000000000000..0b0ff6ead4b2
--- /dev/null
+++ b/arch/arm64/boot/dts/clcd-panels.dtsi
@@ -0,0 +1,52 @@
+/*
+ * ARM Ltd. Versatile Express
+ *
+ */
+
+/ {
+ panels {
+ panel@0 {
+ compatible = "panel";
+ mode = "VGA";
+ refresh = <60>;
+ xres = <640>;
+ yres = <480>;
+ pixclock = <39721>;
+ left_margin = <40>;
+ right_margin = <24>;
+ upper_margin = <32>;
+ lower_margin = <11>;
+ hsync_len = <96>;
+ vsync_len = <2>;
+ sync = <0>;
+ vmode = "FB_VMODE_NONINTERLACED";
+
+ tim2 = "TIM2_BCD", "TIM2_IPC";
+ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+ bpp = <16>;
+ };
+
+ panel@1 {
+ compatible = "panel";
+ mode = "XVGA";
+ refresh = <60>;
+ xres = <1024>;
+ yres = <768>;
+ pixclock = <15748>;
+ left_margin = <152>;
+ right_margin = <48>;
+ upper_margin = <23>;
+ lower_margin = <3>;
+ hsync_len = <104>;
+ vsync_len = <4>;
+ sync = <0>;
+ vmode = "FB_VMODE_NONINTERLACED";
+
+ tim2 = "TIM2_BCD", "TIM2_IPC";
+ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+ bpp = <16>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 84fcc5018284..519c4b2c0687 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -6,6 +6,8 @@
/dts-v1/;
+/memreserve/ 0x80000000 0x00010000;
+
/ {
model = "Foundation-v8A";
compatible = "arm,foundation-aarch64", "arm,vexpress";
diff --git a/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts b/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts
new file mode 100644
index 000000000000..ed55571e06dd
--- /dev/null
+++ b/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+/memreserve/ 0x80000000 0x00010000;
+
+/ {
+};
+
+/ {
+ model = "FVP Base";
+ compatible = "arm,vfp-base", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_suspend = <0x84000001>;
+ cpu_off = <0x84000002>;
+ cpu_on = <0xc4000003>;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ idle-states {
+ entry-method = "arm,psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ entry-method-param = <0x0010000>;
+ entry-latency-us = <40>;
+ exit-latency-us = <100>;
+ min-residency-us = <150>;
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ entry-method-param = <0x1010000>;
+ entry-latency-us = <500>;
+ exit-latency-us = <1000>;
+ min-residency-us = <2500>;
+ };
+ };
+
+ big0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ big1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ big2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ big3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ little0: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ little1: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ little2: cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x102>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ little3: cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x103>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&big0>;
+ };
+ core1 {
+ cpu = <&big1>;
+ };
+ core2 {
+ cpu = <&big2>;
+ };
+ core3 {
+ cpu = <&big3>;
+ };
+ };
+ cluster1 {
+ core0 {
+ cpu = <&little0>;
+ };
+ core1 {
+ cpu = <&little1>;
+ };
+ core2 {
+ cpu = <&little2>;
+ };
+ core3 {
+ cpu = <&little3>;
+ };
+ };
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0 0x80000000>,
+ <0x00000008 0x80000000 0 0x80000000>;
+ };
+
+ gic: interrupt-controller@2f000000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0x2f000000 0 0x10000>,
+ <0x0 0x2c000000 0 0x2000>,
+ <0x0 0x2c010000 0 0x2000>,
+ <0x0 0x2c02F000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 0xff01>,
+ <1 14 0xff01>,
+ <1 11 0xff01>,
+ <1 10 0xff01>;
+ clock-frequency = <100000000>;
+ };
+
+ timer@2a810000 {
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x0 0x2a810000 0x0 0x10000>;
+ clock-frequency = <100000000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ frame@2a820000 {
+ frame-number = <0>;
+ interrupts = <0 25 4>;
+ reg = <0x0 0x2a820000 0x0 0x10000>;
+ };
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <0 60 4>,
+ <0 61 4>,
+ <0 62 4>,
+ <0 63 4>;
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm64/boot/dts/juno.dts b/arch/arm64/boot/dts/juno.dts
new file mode 100644
index 000000000000..f260d702041c
--- /dev/null
+++ b/arch/arm64/boot/dts/juno.dts
@@ -0,0 +1,498 @@
+/*
+ * ARM Ltd. Juno Platform
+ *
+ * Fast Models FVP v2 support
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ model = "Juno";
+ compatible = "arm,juno", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &soc_uart0;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ };
+
+ cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ };
+
+ cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x102>;
+ enable-method = "psci";
+ };
+
+ cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x103>;
+ enable-method = "psci";
+ };
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0x0 0x7f000000>,
+ <0x00000008 0x80000000 0x1 0x80000000>;
+ };
+
+ /* memory@14000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x14000000 0x0 0x02000000>;
+ }; */
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0x2c010000 0 0x1000>,
+ <0x0 0x2c02f000 0 0x1000>,
+ <0x0 0x2c04f000 0 0x2000>,
+ <0x0 0x2c06f000 0 0x2000>;
+ interrupts = <GIC_PPI 9 0xf04>;
+ };
+
+ msi0: msi@2c1c0000 {
+ compatible = "arm,gic-msi";
+ reg = <0x0 0x2c1c0000 0 0x10000
+ 0x0 0x2c1d0000 0 0x10000
+ 0x0 0x2c1e0000 0 0x10000
+ 0x0 0x2c1f0000 0 0x10000>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 0xff01>,
+ <GIC_PPI 14 0xff01>,
+ <GIC_PPI 11 0xff01>,
+ <GIC_PPI 10 0xff01>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 60 4>,
+ <GIC_SPI 61 4>,
+ <GIC_SPI 62 4>,
+ <GIC_SPI 63 4>;
+ };
+
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_suspend = <0xC4000001>;
+ cpu_off = <0x84000002>;
+ cpu_on = <0xC4000003>;
+ migrate = <0xC4000005>;
+ };
+
+ pci0: pci@30000000 {
+ compatible = "arm,pcie-xr3";
+ device_type = "pci";
+ reg = <0 0x7ff30000 0 0x1000
+ 0 0x7ff20000 0 0x10000
+ 0 0x40000000 0 0x10000000>;
+ bus-range = <0 255>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x00000000 0x00 0x5ff00000 0x0 0x00100000
+ 0x02000000 0x0 0x00000000 0x40 0x00000000 0x0 0x80000000
+ 0x42000000 0x0 0x80000000 0x40 0x80000000 0x0 0x80000000>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &gic 0 136 4
+ 0 0 0 2 &gic 0 137 4
+ 0 0 0 3 &gic 0 138 4
+ 0 0 0 4 &gic 0 139 4>;
+ };
+
+ scpi: scpi@2b1f0000 {
+ compatible = "arm,scpi-mhu";
+ reg = <0x0 0x2b1f0000 0x0 0x10000>, /* MHU registers */
+ <0x0 0x2e000000 0x0 0x10000>; /* Payload area */
+ interrupts = <0 36 4>, /* low priority interrupt */
+ <0 35 4>, /* high priority interrupt */
+ <0 37 4>; /* secure channel interrupt */
+ #clock-cells = <1>;
+ clock-output-names = "a57", "a53", "gpu", "hdlcd0", "hdlcd1";
+ };
+
+ hdlcd0_osc: scpi_osc@3 {
+ compatible = "arm,scpi-osc";
+ #clock-cells = <0>;
+ clocks = <&scpi 3>;
+ frequency-range = <23000000 210000000>;
+ clock-output-names = "pxlclk0";
+ };
+
+ hdlcd1_osc: scpi_osc@4 {
+ compatible = "arm,scpi-osc";
+ #clock-cells = <0>;
+ clocks = <&scpi 4>;
+ frequency-range = <23000000 210000000>;
+ clock-output-names = "pxlclk1";
+ };
+
+ soc_uartclk: refclk72738khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <7273800>;
+ clock-output-names = "juno:uartclk";
+ };
+
+ soc_refclk24mhz: clk24mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "juno:clk24mhz";
+ };
+
+ mb_eth25mhz: clk25mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "ethclk25mhz";
+ };
+
+ soc_usb48mhz: clk48mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ clock-output-names = "clk48mhz";
+ };
+
+ soc_smc50mhz: clk50mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = "smc_clk";
+ };
+
+ soc_refclk100mhz: refclk100mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "apb_pclk";
+ };
+
+ soc_faxiclk: refclk533mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <533000000>;
+ clock-output-names = "faxi_clk";
+ };
+
+ soc_fixed_3v3: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ memory-controller@7ffd0000 {
+ compatible = "arm,pl354", "arm,primecell";
+ reg = <0 0x7ffd0000 0 0x1000>;
+ interrupts = <0 86 4>,
+ <0 87 4>;
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ chip5-memwidth = <16>;
+ };
+
+ dma0: dma@0x7ff00000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0x7ff00000 0 0x1000>;
+ interrupts = <0 95 4>,
+ <0 88 4>,
+ <0 89 4>,
+ <0 90 4>,
+ <0 91 4>,
+ <0 108 4>,
+ <0 109 4>,
+ <0 110 4>,
+ <0 111 4>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
+ clocks = <&soc_faxiclk>;
+ clock-names = "apb_pclk";
+ };
+
+ soc_uart0: uart@7ff80000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0x7ff80000 0x0 0x1000>;
+ interrupts = <0 83 4>;
+ clocks = <&soc_uartclk>, <&soc_refclk100mhz>;
+ clock-names = "uartclk", "apb_pclk";
+ dmas = <&dma0 1
+ &dma0 2>;
+ dma-names = "rx", "tx";
+ };
+
+ /* this UART is reserved for secure software.
+ soc_uart1: uart@7ff70000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0x7ff70000 0x0 0x1000>;
+ interrupts = <0 84 4>;
+ clocks = <&soc_uartclk>, <&soc_refclk100mhz>;
+ clock-names = "uartclk", "apb_pclk";
+ }; */
+
+ ulpi_phy: phy@0 {
+ compatible = "phy-ulpi-generic";
+ reg = <0x0 0x94 0x0 0x4>;
+ phy-id = <0>;
+ };
+
+ ehci@7ffc0000 {
+ compatible = "snps,ehci-h20ahb";
+ /* compatible = "arm,h20ahb-ehci"; */
+ reg = <0x0 0x7ffc0000 0x0 0x10000>;
+ interrupts = <0 117 4>;
+ clocks = <&soc_usb48mhz>;
+ clock-names = "otg";
+ phys = <&ulpi_phy>;
+ };
+
+ ohci@0x7ffb0000 {
+ compatible = "generic-ohci";
+ reg = <0x0 0x7ffb0000 0x0 0x10000>;
+ interrupts = <0 116 4>;
+ clocks = <&soc_usb48mhz>;
+ clock-names = "otg";
+ };
+
+ i2c@0x7ffa0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0x7ffa0000 0x0 0x1000>;
+ interrupts = <0 104 4>;
+ clock-frequency = <400000>;
+ i2c-sda-hold-time-ns = <500>;
+ clocks = <&soc_smc50mhz>;
+
+ dvi0: dvi-transmitter@70 {
+ compatible = "nxp,tda998x";
+ reg = <0x70>;
+ };
+
+ dvi1: dvi-transmitter@71 {
+ compatible = "nxp,tda998x";
+ reg = <0x71>;
+ };
+ };
+
+ /* mmci@1c050000 {
+ compatible = "arm,pl180", "arm,primecell";
+ reg = <0x0 0x1c050000 0x0 0x1000>;
+ interrupts = <0 73 4>,
+ <0 74 4>;
+ max-frequency = <12000000>;
+ vmmc-supply = <&soc_fixed_3v3>;
+ clocks = <&soc_refclk24mhz>, <&soc_refclk100mhz>;
+ clock-names = "mclk", "apb_pclk";
+ }; */
+
+ hdlcd@7ff60000 {
+ compatible = "arm,hdlcd";
+ reg = <0 0x7ff60000 0 0x1000>;
+ interrupts = <0 85 4>;
+ clocks = <&hdlcd0_osc>;
+ clock-names = "pxlclk";
+ i2c-slave = <&dvi0>;
+
+ /* display-timings {
+ native-mode = <&timing0>;
+ timing0: timing@0 {
+ /* 1024 x 768 framebuffer, standard VGA timings * /
+ clock-frequency = <65000>;
+ hactive = <1024>;
+ vactive = <768>;
+ hfront-porch = <24>;
+ hback-porch = <160>;
+ hsync-len = <136>;
+ vfront-porch = <3>;
+ vback-porch = <29>;
+ vsync-len = <6>;
+ };
+ }; */
+ };
+
+ hdlcd@7ff50000 {
+ compatible = "arm,hdlcd";
+ reg = <0 0x7ff50000 0 0x1000>;
+ interrupts = <0 93 4>;
+ clocks = <&hdlcd1_osc>;
+ clock-names = "pxlclk";
+ i2c-slave = <&dvi1>;
+
+ display-timings {
+ native-mode = <&timing1>;
+ timing1: timing@1 {
+ /* 1024 x 768 framebuffer, standard VGA timings */
+ clock-frequency = <65000>;
+ hactive = <1024>;
+ vactive = <768>;
+ hfront-porch = <24>;
+ hback-porch = <160>;
+ hsync-len = <136>;
+ vfront-porch = <3>;
+ vback-porch = <29>;
+ vsync-len = <6>;
+ };
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 15>;
+ interrupt-map = <0 0 0 &gic 0 68 4>,
+ <0 0 1 &gic 0 69 4>,
+ <0 0 2 &gic 0 70 4>,
+ <0 0 3 &gic 0 160 4>,
+ <0 0 4 &gic 0 161 4>,
+ <0 0 5 &gic 0 162 4>,
+ <0 0 6 &gic 0 163 4>,
+ <0 0 7 &gic 0 164 4>,
+ <0 0 8 &gic 0 165 4>,
+ <0 0 9 &gic 0 166 4>,
+ <0 0 10 &gic 0 167 4>,
+ <0 0 11 &gic 0 168 4>,
+ <0 0 12 &gic 0 169 4>;
+
+ motherboard {
+ model = "V2M-Juno";
+ arm,hbi = <0x252>;
+ arm,vexpress,site = <0>;
+ arm,v2m-memory-map = "rs1";
+ compatible = "arm,vexpress,v2p-p1", "simple-bus";
+ #address-cells = <2>; /* SMB chipselect number and offset */
+ #size-cells = <1>;
+ #interrupt-cells = <1>;
+ ranges;
+
+ usb@5,00000000 {
+ compatible = "nxp,usb-isp1763";
+ reg = <5 0x00000000 0x20000>;
+ bus-width = <16>;
+ interrupts = <4>;
+ };
+
+ ethernet@2,00000000 {
+ compatible = "smsc,lan9118", "smsc,lan9115";
+ reg = <2 0x00000000 0x10000>;
+ interrupts = <3>;
+ phy-mode = "mii";
+ reg-io-width = <4>;
+ smsc,irq-active-high;
+ smsc,irq-push-pull;
+ clocks = <&mb_eth25mhz>;
+ vdd33a-supply = <&soc_fixed_3v3>; /* change this */
+ vddvario-supply = <&soc_fixed_3v3>; /* and this */
+ };
+
+ iofpga@3,00000000 {
+ compatible = "arm,amba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 3 0 0x200000>;
+
+ kmi@060000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x060000 0x1000>;
+ interrupts = <8>;
+ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ kmi@070000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x070000 0x1000>;
+ interrupts = <8>;
+ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ wdt@0f0000 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0f0000 0x10000>;
+ interrupts = <7>;
+ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
+ clock-names = "wdogclk", "apb_pclk";
+ };
+
+ v2m_timer01: timer@110000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x110000 0x10000>;
+ interrupts = <9>;
+ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
+ clock-names = "timclken1", "apb_pclk";
+ };
+
+ v2m_timer23: timer@120000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x120000 0x10000>;
+ interrupts = <9>;
+ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
+ clock-names = "timclken1", "apb_pclk";
+ };
+
+ rtc@170000 {
+ compatible = "arm,pl031", "arm,primecell";
+ reg = <0x170000 0x10000>;
+ interrupts = <0>;
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts
index 572005ea2217..28ed4ba3391a 100644
--- a/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts
@@ -27,37 +27,70 @@
serial3 = &v2m_serial3;
};
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ /*
+ * Function ID usage and compliance with PSCI v0.2 are still
+ * under discussion. Current IDs should be considered
+ * temporary for demonstration purposes.
+ */
+ cpu_suspend = <0x84000001>;
+ cpu_off = <0x84000002>;
+ cpu_on = <0x84000003>;
+ };
+
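As a rough illustration of how the node above is used: every PSCI operation is a secure monitor call with the function ID from the device tree in x0 and up to three arguments in x1-x3. The real kernel helper is assembly (arch/arm64/kernel/psci-call.S in the diffstat above); the C sketch below only illustrates the calling convention:

/* Illustrative only: issue a PSCI call via SMC and return the x0 result. */
static unsigned long psci_smc(unsigned long fn_id, unsigned long arg0,
			      unsigned long arg1, unsigned long arg2)
{
	register unsigned long x0 asm("x0") = fn_id;
	register unsigned long x1 asm("x1") = arg0;
	register unsigned long x2 asm("x2") = arg1;
	register unsigned long x3 asm("x3") = arg2;

	asm volatile("smc #0"
		     : "+r" (x0)
		     : "r" (x1), "r" (x2), "r" (x3)
		     : "memory");
	return x0;	/* PSCI return code */
}

/* e.g. power on a secondary: psci_smc(0x84000003, target_mpidr, entry_point, 0); */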
cpus {
#address-cells = <2>;
#size-cells = <0>;
+ idle-states {
+ entry-method = "arm,psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ entry-method-param = <0x0010000>;
+ entry-latency-us = <40>;
+ exit-latency-us = <100>;
+ min-residency-us = <150>;
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ entry-method-param = <0x1010000>;
+ entry-latency-us = <500>;
+ exit-latency-us = <1000>;
+ min-residency-us = <2500>;
+ };
+ };
+
cpu@0 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x0>;
- enable-method = "spin-table";
- cpu-release-addr = <0x0 0x8000fff8>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu@1 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x1>;
- enable-method = "spin-table";
- cpu-release-addr = <0x0 0x8000fff8>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu@2 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x2>;
- enable-method = "spin-table";
- cpu-release-addr = <0x0 0x8000fff8>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu@3 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x3>;
- enable-method = "spin-table";
- cpu-release-addr = <0x0 0x8000fff8>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
};
@@ -157,3 +190,5 @@
/include/ "rtsm_ve-motherboard.dtsi"
};
};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
index b45e5f39f577..b683d4703582 100644
--- a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
@@ -182,6 +182,15 @@
interrupts = <14>;
clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
clock-names = "clcdclk", "apb_pclk";
+ mode = "XVGA";
+ use_dma = <0>;
+ framebuffer = <0x18000000 0x00180000>;
+ };
+
+ virtio_block@0130000 {
+ compatible = "virtio,mmio";
+ reg = <0x130000 0x200>;
+ interrupts = <42>;
};
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 8d9696adb440..8e323147c375 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -19,13 +18,17 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
CONFIG_SMP=y
+CONFIG_PREEMPT=y
CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_CMA=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -42,29 +45,42 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_BLK_DEV is not set
+CONFIG_DMA_CMA=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_MII=y
CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_FB=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
@@ -86,3 +102,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO=y
# CONFIG_FTRACE is not set
CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_DMA_CMA=y
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
new file mode 100644
index 000000000000..5562652c5316
--- /dev/null
+++ b/arch/arm64/crypto/Kconfig
@@ -0,0 +1,53 @@
+
+menuconfig ARM64_CRYPTO
+ bool "ARM64 Accelerated Cryptographic Algorithms"
+ depends on ARM64
+ help
+ Say Y here to choose from a selection of cryptographic algorithms
+ implemented using ARM64 specific CPU features or instructions.
+
+if ARM64_CRYPTO
+
+config CRYPTO_SHA1_ARM64_CE
+ tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_HASH
+
+config CRYPTO_SHA2_ARM64_CE
+ tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_HASH
+
+config CRYPTO_GHASH_ARM64_CE
+ tristate "GHASH (for GCM chaining mode) using ARMv8 Crypto Extensions"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_HASH
+
+config CRYPTO_AES_ARM64_CE
+ tristate "AES core cipher using ARMv8 Crypto Extensions"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+
+config CRYPTO_AES_ARM64_CE_CCM
+ tristate "AES in CCM mode using ARMv8 Crypto Extensions"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ select CRYPTO_AEAD
+
+config CRYPTO_AES_ARM64_CE_BLK
+ tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES
+ select CRYPTO_ABLK_HELPER
+
+config CRYPTO_AES_ARM64_NEON_BLK
+ tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES
+ select CRYPTO_ABLK_HELPER
+
+endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
new file mode 100644
index 000000000000..2070a56ecc46
--- /dev/null
+++ b/arch/arm64/crypto/Makefile
@@ -0,0 +1,38 @@
+#
+# linux/arch/arm64/crypto/Makefile
+#
+# Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
+sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
+
+obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
+sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
+
+obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
+ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
+CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
+aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_CE_BLK) += aes-ce-blk.o
+aes-ce-blk-y := aes-glue-ce.o aes-ce.o
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
+aes-neon-blk-y := aes-glue-neon.o aes-neon.o
+
+AFLAGS_aes-ce.o := -DINTERLEAVE=2 -DINTERLEAVE_INLINE
+AFLAGS_aes-neon.o := -DINTERLEAVE=4
+
+CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
+
+$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
+ $(call if_changed_dep,cc_o_c)
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
new file mode 100644
index 000000000000..432e4841cd81
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -0,0 +1,222 @@
+/*
+ * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+ .text
+ .arch armv8-a+crypto
+
+ /*
+ * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
+ * u32 *macp, u8 const rk[], u32 rounds);
+ */
+ENTRY(ce_aes_ccm_auth_data)
+ ldr w8, [x3] /* leftover from prev round? */
+ ld1 {v0.2d}, [x0] /* load mac */
+ cbz w8, 1f
+ sub w8, w8, #16
+ eor v1.16b, v1.16b, v1.16b
+0: ldrb w7, [x1], #1 /* get 1 byte of input */
+ subs w2, w2, #1
+ add w8, w8, #1
+ ins v1.b[0], w7
+ ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */
+ beq 8f /* out of input? */
+ cbnz w8, 0b
+ eor v0.16b, v0.16b, v1.16b
+1: ld1 {v3.2d}, [x4] /* load first round key */
+ prfm pldl1strm, [x1]
+ cmp w5, #12 /* which key size? */
+ add x6, x4, #16
+ sub w7, w5, #2 /* modified # of rounds */
+ bmi 2f
+ bne 5f
+ mov v5.16b, v3.16b
+ b 4f
+2: mov v4.16b, v3.16b
+ ld1 {v5.2d}, [x6], #16 /* load 2nd round key */
+3: aese v0.16b, v4.16b
+ aesmc v0.16b, v0.16b
+4: ld1 {v3.2d}, [x6], #16 /* load next round key */
+ aese v0.16b, v5.16b
+ aesmc v0.16b, v0.16b
+5: ld1 {v4.2d}, [x6], #16 /* load next round key */
+ subs w7, w7, #3
+ aese v0.16b, v3.16b
+ aesmc v0.16b, v0.16b
+ ld1 {v5.2d}, [x6], #16 /* load next round key */
+ bpl 3b
+ aese v0.16b, v4.16b
+ subs w2, w2, #16 /* last data? */
+ eor v0.16b, v0.16b, v5.16b /* final round */
+ bmi 6f
+ ld1 {v1.16b}, [x1], #16 /* load next input block */
+ eor v0.16b, v0.16b, v1.16b /* xor with mac */
+ bne 1b
+6: st1 {v0.2d}, [x0] /* store mac */
+ beq 10f
+ adds w2, w2, #16
+ beq 10f
+ mov w8, w2
+7: ldrb w7, [x1], #1
+ umov w6, v0.b[0]
+ eor w6, w6, w7
+ strb w6, [x0], #1
+ subs w2, w2, #1
+ beq 10f
+ ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
+ b 7b
+8: mov w7, w8
+ add w8, w8, #16
+9: ext v1.16b, v1.16b, v1.16b, #1
+ adds w7, w7, #1
+ bne 9b
+ eor v0.16b, v0.16b, v1.16b
+ st1 {v0.2d}, [x0]
+10: str w8, [x3]
+ ret
+ENDPROC(ce_aes_ccm_auth_data)
+
+ /*
+ * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
+ * u32 rounds);
+ */
+ENTRY(ce_aes_ccm_final)
+ ld1 {v3.2d}, [x2], #16 /* load first round key */
+ ld1 {v0.2d}, [x0] /* load mac */
+ cmp w3, #12 /* which key size? */
+ sub w3, w3, #2 /* modified # of rounds */
+ ld1 {v1.2d}, [x1] /* load 1st ctriv */
+ bmi 0f
+ bne 3f
+ mov v5.16b, v3.16b
+ b 2f
+0: mov v4.16b, v3.16b
+1: ld1 {v5.2d}, [x2], #16 /* load next round key */
+ aese v0.16b, v4.16b
+ aese v1.16b, v4.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+2: ld1 {v3.2d}, [x2], #16 /* load next round key */
+ aese v0.16b, v5.16b
+ aese v1.16b, v5.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+3: ld1 {v4.2d}, [x2], #16 /* load next round key */
+ subs w3, w3, #3
+ aese v0.16b, v3.16b
+ aese v1.16b, v3.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+ bpl 1b
+ aese v0.16b, v4.16b
+ aese v1.16b, v4.16b
+ /* final round key cancels out */
+ eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
+ st1 {v0.2d}, [x0] /* store result */
+ ret
+ENDPROC(ce_aes_ccm_final)
+
+ .macro aes_ccm_do_crypt,enc
+ ldr x8, [x6, #8] /* load lower ctr */
+ ld1 {v0.2d}, [x5] /* load mac */
+ rev x8, x8 /* keep swabbed ctr in reg */
+0: /* outer loop */
+ ld1 {v1.1d}, [x6] /* load upper ctr */
+ prfm pldl1strm, [x1]
+ add x8, x8, #1
+ rev x9, x8
+ cmp w4, #12 /* which key size? */
+ sub w7, w4, #2 /* get modified # of rounds */
+ ins v1.d[1], x9 /* no carry in lower ctr */
+ ld1 {v3.2d}, [x3] /* load first round key */
+ add x10, x3, #16
+ bmi 1f
+ bne 4f
+ mov v5.16b, v3.16b
+ b 3f
+1: mov v4.16b, v3.16b
+ ld1 {v5.2d}, [x10], #16 /* load 2nd round key */
+2: /* inner loop: 3 rounds, 2x interleaved */
+ aese v0.16b, v4.16b
+ aese v1.16b, v4.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+3: ld1 {v3.2d}, [x10], #16 /* load next round key */
+ aese v0.16b, v5.16b
+ aese v1.16b, v5.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+4: ld1 {v4.2d}, [x10], #16 /* load next round key */
+ subs w7, w7, #3
+ aese v0.16b, v3.16b
+ aese v1.16b, v3.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+ ld1 {v5.2d}, [x10], #16 /* load next round key */
+ bpl 2b
+ aese v0.16b, v4.16b
+ aese v1.16b, v4.16b
+ subs w2, w2, #16
+ bmi 6f /* partial block? */
+ ld1 {v2.16b}, [x1], #16 /* load next input block */
+ .if \enc == 1
+ eor v2.16b, v2.16b, v5.16b /* final round enc+mac */
+ eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */
+ .else
+ eor v2.16b, v2.16b, v1.16b /* xor with crypted ctr */
+ eor v1.16b, v2.16b, v5.16b /* final round enc */
+ .endif
+ eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
+ st1 {v1.16b}, [x0], #16 /* write output block */
+ bne 0b
+ rev x8, x8
+ st1 {v0.2d}, [x5] /* store mac */
+ str x8, [x6, #8] /* store lsb end of ctr (BE) */
+5: ret
+
+6: eor v0.16b, v0.16b, v5.16b /* final round mac */
+ eor v1.16b, v1.16b, v5.16b /* final round enc */
+ st1 {v0.2d}, [x5] /* store mac */
+ add w2, w2, #16 /* process partial tail block */
+7: ldrb w9, [x1], #1 /* get 1 byte of input */
+ umov w6, v1.b[0] /* get top crypted ctr byte */
+ umov w7, v0.b[0] /* get top mac byte */
+ .if \enc == 1
+ eor w7, w7, w9
+ eor w9, w9, w6
+ .else
+ eor w9, w9, w6
+ eor w7, w7, w9
+ .endif
+ strb w9, [x0], #1 /* store out byte */
+ strb w7, [x5], #1 /* store mac byte */
+ subs w2, w2, #1
+ beq 5b
+ ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */
+ ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */
+ b 7b
+ .endm
+
+ /*
+ * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
+ * u8 const rk[], u32 rounds, u8 mac[],
+ * u8 ctr[]);
+ * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
+ * u8 const rk[], u32 rounds, u8 mac[],
+ * u8 ctr[]);
+ */
+ENTRY(ce_aes_ccm_encrypt)
+ aes_ccm_do_crypt 1
+ENDPROC(ce_aes_ccm_encrypt)
+
+ENTRY(ce_aes_ccm_decrypt)
+ aes_ccm_do_crypt 0
+ENDPROC(ce_aes_ccm_decrypt)
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
new file mode 100644
index 000000000000..9e6cdde9b43d
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -0,0 +1,297 @@
+/*
+ * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+static int num_rounds(struct crypto_aes_ctx *ctx)
+{
+ /*
+ * # of rounds specified by AES:
+ * 128 bit key 10 rounds
+ * 192 bit key 12 rounds
+ * 256 bit key 14 rounds
+ * => n byte key => 6 + (n/4) rounds
+ */
+ return 6 + ctx->key_length / 4;
+}
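A quick sanity check of the mapping described in the comment above (illustrative only, not part of the patch):

#include <assert.h>

/* Illustrative only: 6 + key_length / 4 reproduces the standard AES round counts. */
static void check_num_rounds(void)
{
	assert(6 + 16 / 4 == 10);	/* AES-128 */
	assert(6 + 24 / 4 == 12);	/* AES-192 */
	assert(6 + 32 / 4 == 14);	/* AES-256 */
}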
+
+asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
+ u32 *macp, u32 const rk[], u32 rounds);
+
+asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
+ u32 const rk[], u32 rounds, u8 mac[],
+ u8 ctr[]);
+
+asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
+ u32 const rk[], u32 rounds, u8 mac[],
+ u8 ctr[]);
+
+asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
+ u32 rounds);
+
+static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = crypto_aes_expand_key(ctx, in_key, key_len);
+ if (!ret)
+ return 0;
+
+ tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+}
+
+static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ if ((authsize & 1) || authsize < 4)
+ return -EINVAL;
+ return 0;
+}
+
+static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ __be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
+ u32 l = req->iv[0] + 1;
+
+ /* verify that CCM dimension 'L' is set correctly in the IV */
+ if (l < 2 || l > 8)
+ return -EINVAL;
+
+ /* verify that msglen can in fact be represented in L bytes */
+ if (l < 4 && msglen >> (8 * l))
+ return -EOVERFLOW;
+
+ /*
+ * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
+ * uses a u32 type to represent msglen so the top 4 bytes are always 0.
+ */
+ n[0] = 0;
+ n[1] = cpu_to_be32(msglen);
+
+ memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);
+
+ /*
+ * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
+ * - bits 0..2 : max # of bytes required to represent msglen, minus 1
+ * (already set by caller)
+ * - bits 3..5 : size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
+ * - bit 6 : indicates presence of authenticate-only data
+ */
+ maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
+ if (req->assoclen)
+ maciv[0] |= 0x40;
+
+ memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
+ return 0;
+}
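For concreteness, a minimal sketch (not part of the patch; the helper name is made up) of how the flags byte described above is composed. With an 8-byte tag, L = 4 (so iv[0] == 3) and non-empty associated data it comes out as 0x5b:

#include <stdint.h>

/* Hypothetical helper, illustrative only: the CCM B0 flags byte as
 * ccm_init_mac() builds it (RFC 3610 / NIST SP 800-38C). */
static uint8_t ccm_b0_flags(unsigned int authsize, int have_aad, uint8_t l_minus_1)
{
	uint8_t flags = l_minus_1 & 0x7;	/* bits 0..2: L - 1, taken from iv[0] */

	flags |= (authsize - 2) << 2;		/* bits 3..5: (tag length - 2) / 2 */
	if (have_aad)
		flags |= 0x40;			/* bit 6: associated data present */
	return flags;				/* 0x03 | 0x18 | 0x40 == 0x5b */
}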
+
+static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+ struct __packed { __be16 l; __be32 h; u16 len; } ltag;
+ struct scatter_walk walk;
+ u32 len = req->assoclen;
+ u32 macp = 0;
+
+ /* prepend the AAD with a length tag */
+ if (len < 0xff00) {
+ ltag.l = cpu_to_be16(len);
+ ltag.len = 2;
+ } else {
+ ltag.l = cpu_to_be16(0xfffe);
+ put_unaligned_be32(len, &ltag.h);
+ ltag.len = 6;
+ }
+
+ ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
+ num_rounds(ctx));
+ scatterwalk_start(&walk, req->assoc);
+
+ do {
+ u32 n = scatterwalk_clamp(&walk, len);
+ u8 *p;
+
+ if (!n) {
+ scatterwalk_start(&walk, sg_next(walk.sg));
+ n = scatterwalk_clamp(&walk, len);
+ }
+ p = scatterwalk_map(&walk);
+ ce_aes_ccm_auth_data(mac, p, n, &macp, ctx->key_enc,
+ num_rounds(ctx));
+ len -= n;
+
+ scatterwalk_unmap(p);
+ scatterwalk_advance(&walk, n);
+ scatterwalk_done(&walk, 0, len);
+ } while (len);
+}
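The ltag prefix built above follows the associated-data length encoding of RFC 3610 / NIST SP 800-38C; a self-contained sketch of the same encoding (illustrative only; the function name is hypothetical):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper, illustrative only: encode the AAD length prefix. */
static size_t ccm_encode_aad_len(uint8_t out[6], uint32_t assoclen)
{
	if (assoclen < 0xff00) {		/* short form: 2-byte big-endian length */
		out[0] = assoclen >> 8;
		out[1] = assoclen;
		return 2;
	}
	out[0] = 0xff;				/* long form: 0xfffe marker ... */
	out[1] = 0xfe;
	out[2] = assoclen >> 24;		/* ... followed by a 4-byte BE length */
	out[3] = assoclen >> 16;
	out[4] = assoclen >> 8;
	out[5] = assoclen;
	return 6;
}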
+
+static int ccm_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+ struct blkcipher_desc desc = { .info = req->iv };
+ struct blkcipher_walk walk;
+ u8 __aligned(8) mac[AES_BLOCK_SIZE];
+ u8 buf[AES_BLOCK_SIZE];
+ u32 len = req->cryptlen;
+ int err;
+
+ err = ccm_init_mac(req, mac, len);
+ if (err)
+ return err;
+
+ kernel_neon_begin_partial(6);
+
+ if (req->assoclen)
+ ccm_calculate_auth_mac(req, mac);
+
+ /* preserve the original iv for the final round */
+ memcpy(buf, req->iv, AES_BLOCK_SIZE);
+
+ blkcipher_walk_init(&walk, req->dst, req->src, len);
+ err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
+ AES_BLOCK_SIZE);
+
+ while (walk.nbytes) {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+
+ if (walk.nbytes == len)
+ tail = 0;
+
+ ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes - tail, ctx->key_enc,
+ num_rounds(ctx), mac, walk.iv);
+
+ len -= walk.nbytes - tail;
+ err = blkcipher_walk_done(&desc, &walk, tail);
+ }
+ if (!err)
+ ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+
+ kernel_neon_end();
+
+ if (err)
+ return err;
+
+ /* copy authtag to end of dst */
+ scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
+ crypto_aead_authsize(aead), 1);
+
+ return 0;
+}
+
+static int ccm_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ struct blkcipher_desc desc = { .info = req->iv };
+ struct blkcipher_walk walk;
+ u8 __aligned(8) mac[AES_BLOCK_SIZE];
+ u8 buf[AES_BLOCK_SIZE];
+ u32 len = req->cryptlen - authsize;
+ int err;
+
+ err = ccm_init_mac(req, mac, len);
+ if (err)
+ return err;
+
+ kernel_neon_begin_partial(6);
+
+ if (req->assoclen)
+ ccm_calculate_auth_mac(req, mac);
+
+ /* preserve the original iv for the final round */
+ memcpy(buf, req->iv, AES_BLOCK_SIZE);
+
+ blkcipher_walk_init(&walk, req->dst, req->src, len);
+ err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
+ AES_BLOCK_SIZE);
+
+ while (walk.nbytes) {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+
+ if (walk.nbytes == len)
+ tail = 0;
+
+ ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes - tail, ctx->key_enc,
+ num_rounds(ctx), mac, walk.iv);
+
+ len -= walk.nbytes - tail;
+ err = blkcipher_walk_done(&desc, &walk, tail);
+ }
+ if (!err)
+ ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+
+ kernel_neon_end();
+
+ if (err)
+ return err;
+
+ /* compare calculated auth tag with the stored one */
+ scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
+ authsize, 0);
+
+ if (memcmp(mac, buf, authsize))
+ return -EBADMSG;
+ return 0;
+}
+
+static struct crypto_alg ccm_aes_alg = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm_setkey,
+ .setauthsize = ccm_setauthsize,
+ .encrypt = ccm_encrypt,
+ .decrypt = ccm_decrypt,
+ }
+};
+
+static int __init aes_mod_init(void)
+{
+ if (!(elf_hwcap & HWCAP_AES))
+ return -ENODEV;
+ return crypto_register_alg(&ccm_aes_alg);
+}
+
+static void __exit aes_mod_exit(void)
+{
+ crypto_unregister_alg(&ccm_aes_alg);
+}
+
+module_init(aes_mod_init);
+module_exit(aes_mod_exit);
+
+MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ccm(aes)");
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
new file mode 100644
index 000000000000..2075e1acae6b
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -0,0 +1,155 @@
+/*
+ * aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+struct aes_block {
+ u8 b[AES_BLOCK_SIZE];
+};
+
+static int num_rounds(struct crypto_aes_ctx *ctx)
+{
+ /*
+ * # of rounds specified by AES:
+ * 128 bit key 10 rounds
+ * 192 bit key 12 rounds
+ * 256 bit key 14 rounds
+ * => n byte key => 6 + (n/4) rounds
+ */
+ return 6 + ctx->key_length / 4;
+}
+
+static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
+{
+ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aes_block *out = (struct aes_block *)dst;
+ struct aes_block const *in = (struct aes_block *)src;
+ void *dummy0;
+ int dummy1;
+
+ kernel_neon_begin_partial(4);
+
+ __asm__(" ld1 {v0.16b}, %[in] ;"
+ " ld1 {v1.2d}, [%[key]], #16 ;"
+ " cmp %w[rounds], #10 ;"
+ " bmi 0f ;"
+ " bne 3f ;"
+ " mov v3.16b, v1.16b ;"
+ " b 2f ;"
+ "0: mov v2.16b, v1.16b ;"
+ " ld1 {v3.2d}, [%[key]], #16 ;"
+ "1: aese v0.16b, v2.16b ;"
+ " aesmc v0.16b, v0.16b ;"
+ "2: ld1 {v1.2d}, [%[key]], #16 ;"
+ " aese v0.16b, v3.16b ;"
+ " aesmc v0.16b, v0.16b ;"
+ "3: ld1 {v2.2d}, [%[key]], #16 ;"
+ " subs %w[rounds], %w[rounds], #3 ;"
+ " aese v0.16b, v1.16b ;"
+ " aesmc v0.16b, v0.16b ;"
+ " ld1 {v3.2d}, [%[key]], #16 ;"
+ " bpl 1b ;"
+ " aese v0.16b, v2.16b ;"
+ " eor v0.16b, v0.16b, v3.16b ;"
+ " st1 {v0.16b}, %[out] ;"
+
+ : [out] "=Q"(*out),
+ [key] "=r"(dummy0),
+ [rounds] "=r"(dummy1)
+ : [in] "Q"(*in),
+ "1"(ctx->key_enc),
+ "2"(num_rounds(ctx) - 2)
+ : "cc");
+
+ kernel_neon_end();
+}
+
+static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
+{
+ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aes_block *out = (struct aes_block *)dst;
+ struct aes_block const *in = (struct aes_block *)src;
+ void *dummy0;
+ int dummy1;
+
+ kernel_neon_begin_partial(4);
+
+ __asm__(" ld1 {v0.16b}, %[in] ;"
+ " ld1 {v1.2d}, [%[key]], #16 ;"
+ " cmp %w[rounds], #10 ;"
+ " bmi 0f ;"
+ " bne 3f ;"
+ " mov v3.16b, v1.16b ;"
+ " b 2f ;"
+ "0: mov v2.16b, v1.16b ;"
+ " ld1 {v3.2d}, [%[key]], #16 ;"
+ "1: aesd v0.16b, v2.16b ;"
+ " aesimc v0.16b, v0.16b ;"
+ "2: ld1 {v1.2d}, [%[key]], #16 ;"
+ " aesd v0.16b, v3.16b ;"
+ " aesimc v0.16b, v0.16b ;"
+ "3: ld1 {v2.2d}, [%[key]], #16 ;"
+ " subs %w[rounds], %w[rounds], #3 ;"
+ " aesd v0.16b, v1.16b ;"
+ " aesimc v0.16b, v0.16b ;"
+ " ld1 {v3.2d}, [%[key]], #16 ;"
+ " bpl 1b ;"
+ " aesd v0.16b, v2.16b ;"
+ " eor v0.16b, v0.16b, v3.16b ;"
+ " st1 {v0.16b}, %[out] ;"
+
+ : [out] "=Q"(*out),
+ [key] "=r"(dummy0),
+ [rounds] "=r"(dummy1)
+ : [in] "Q"(*in),
+ "1"(ctx->key_dec),
+ "2"(num_rounds(ctx) - 2)
+ : "cc");
+
+ kernel_neon_end();
+}
+
+static struct crypto_alg aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = crypto_aes_set_key,
+ .cia_encrypt = aes_cipher_encrypt,
+ .cia_decrypt = aes_cipher_decrypt
+ }
+};
+
+static int __init aes_mod_init(void)
+{
+ return crypto_register_alg(&aes_alg);
+}
+
+static void __exit aes_mod_exit(void)
+{
+ crypto_unregister_alg(&aes_alg);
+}
+
+module_cpu_feature_match(AES, aes_mod_init);
+module_exit(aes_mod_exit);
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
new file mode 100644
index 000000000000..685a18f731eb
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce.S
@@ -0,0 +1,133 @@
+/*
+ * linux/arch/arm64/crypto/aes-ce.S - AES cipher for ARMv8 with
+ * Crypto Extensions
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#define AES_ENTRY(func) ENTRY(ce_ ## func)
+#define AES_ENDPROC(func) ENDPROC(ce_ ## func)
+
+ .arch armv8-a+crypto
+
+ /* preload all round keys */
+ .macro load_round_keys, rounds, rk
+ cmp \rounds, #12
+ blo 2222f /* 128 bits */
+ beq 1111f /* 192 bits */
+ ld1 {v17.16b-v18.16b}, [\rk], #32
+1111: ld1 {v19.16b-v20.16b}, [\rk], #32
+2222: ld1 {v21.16b-v24.16b}, [\rk], #64
+ ld1 {v25.16b-v28.16b}, [\rk], #64
+ ld1 {v29.16b-v31.16b}, [\rk]
+ .endm
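The macro above always ends the key schedule in v31: AES-128 starts at v21, AES-192 at v19 and AES-256 at v17, so fifteen NEON registers always suffice. A trivial sketch of the count (illustrative only, not part of the patch):

/* Illustrative only: number of 16-byte round keys kept live in v17..v31. */
static int nr_round_keys(int rounds)
{
	return rounds + 1;	/* 11 for AES-128, 13 for AES-192, 15 for AES-256 */
}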
+
+ /* prepare for encryption with key in rk[] */
+ .macro enc_prepare, rounds, rk, ignore
+ load_round_keys \rounds, \rk
+ .endm
+
+ /* prepare for encryption (again) but with new key in rk[] */
+ .macro enc_switch_key, rounds, rk, ignore
+ load_round_keys \rounds, \rk
+ .endm
+
+ /* prepare for decryption with key in rk[] */
+ .macro dec_prepare, rounds, rk, ignore
+ load_round_keys \rounds, \rk
+ .endm
+
+ .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3
+ aes\de \i0\().16b, \k\().16b
+ .ifnb \i1
+ aes\de \i1\().16b, \k\().16b
+ .ifnb \i3
+ aes\de \i2\().16b, \k\().16b
+ aes\de \i3\().16b, \k\().16b
+ .endif
+ .endif
+ aes\mc \i0\().16b, \i0\().16b
+ .ifnb \i1
+ aes\mc \i1\().16b, \i1\().16b
+ .ifnb \i3
+ aes\mc \i2\().16b, \i2\().16b
+ aes\mc \i3\().16b, \i3\().16b
+ .endif
+ .endif
+ .endm
+
+ /* up to 4 interleaved encryption rounds with the same round key */
+ .macro round_Nx, enc, k, i0, i1, i2, i3
+ .ifc \enc, e
+ do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3
+ .else
+ do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3
+ .endif
+ .endm
+
+ /* up to 4 interleaved final rounds */
+ .macro fin_round_Nx, de, k, k2, i0, i1, i2, i3
+ aes\de \i0\().16b, \k\().16b
+ .ifnb \i1
+ aes\de \i1\().16b, \k\().16b
+ .ifnb \i3
+ aes\de \i2\().16b, \k\().16b
+ aes\de \i3\().16b, \k\().16b
+ .endif
+ .endif
+ eor \i0\().16b, \i0\().16b, \k2\().16b
+ .ifnb \i1
+ eor \i1\().16b, \i1\().16b, \k2\().16b
+ .ifnb \i3
+ eor \i2\().16b, \i2\().16b, \k2\().16b
+ eor \i3\().16b, \i3\().16b, \k2\().16b
+ .endif
+ .endif
+ .endm
+
+ /* up to 4 interleaved blocks */
+ .macro do_block_Nx, enc, rounds, i0, i1, i2, i3
+ cmp \rounds, #12
+ blo 2222f /* 128 bits */
+ beq 1111f /* 192 bits */
+ round_Nx \enc, v17, \i0, \i1, \i2, \i3
+ round_Nx \enc, v18, \i0, \i1, \i2, \i3
+1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3
+ round_Nx \enc, v20, \i0, \i1, \i2, \i3
+2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29
+ round_Nx \enc, \key, \i0, \i1, \i2, \i3
+ .endr
+ fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3
+ .endm
+
+ .macro encrypt_block, in, rounds, t0, t1, t2
+ do_block_Nx e, \rounds, \in
+ .endm
+
+ .macro encrypt_block2x, i0, i1, rounds, t0, t1, t2
+ do_block_Nx e, \rounds, \i0, \i1
+ .endm
+
+ .macro encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
+ do_block_Nx e, \rounds, \i0, \i1, \i2, \i3
+ .endm
+
+ .macro decrypt_block, in, rounds, t0, t1, t2
+ do_block_Nx d, \rounds, \in
+ .endm
+
+ .macro decrypt_block2x, i0, i1, rounds, t0, t1, t2
+ do_block_Nx d, \rounds, \i0, \i1
+ .endm
+
+ .macro decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
+ do_block_Nx d, \rounds, \i0, \i1, \i2, \i3
+ .endm
+
+#include "aes-modes.S"
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
new file mode 100644
index 000000000000..60f2f4c12256
--- /dev/null
+++ b/arch/arm64/crypto/aes-glue.c
@@ -0,0 +1,446 @@
+/*
+ * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/hwcap.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+
+#ifdef USE_V8_CRYPTO_EXTENSIONS
+#define MODE "ce"
+#define PRIO 300
+#define aes_ecb_encrypt ce_aes_ecb_encrypt
+#define aes_ecb_decrypt ce_aes_ecb_decrypt
+#define aes_cbc_encrypt ce_aes_cbc_encrypt
+#define aes_cbc_decrypt ce_aes_cbc_decrypt
+#define aes_ctr_encrypt ce_aes_ctr_encrypt
+#define aes_xts_encrypt ce_aes_xts_encrypt
+#define aes_xts_decrypt ce_aes_xts_decrypt
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
+#else
+#define MODE "neon"
+#define PRIO 200
+#define aes_ecb_encrypt neon_aes_ecb_encrypt
+#define aes_ecb_decrypt neon_aes_ecb_decrypt
+#define aes_cbc_encrypt neon_aes_cbc_encrypt
+#define aes_cbc_decrypt neon_aes_cbc_decrypt
+#define aes_ctr_encrypt neon_aes_ctr_encrypt
+#define aes_xts_encrypt neon_aes_xts_encrypt
+#define aes_xts_decrypt neon_aes_xts_decrypt
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
+MODULE_ALIAS("ecb(aes)");
+MODULE_ALIAS("cbc(aes)");
+MODULE_ALIAS("ctr(aes)");
+MODULE_ALIAS("xts(aes)");
+#endif
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+/* defined in aes-modes.S */
+asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, int first);
+asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, int first);
+
+asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, u8 iv[], int first);
+asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, u8 iv[], int first);
+
+asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, u8 ctr[], int first);
+
+asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
+ int rounds, int blocks, u8 const rk2[], u8 iv[],
+ int first);
+asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
+ int rounds, int blocks, u8 const rk2[], u8 iv[],
+ int first);
+
+struct crypto_aes_xts_ctx {
+ struct crypto_aes_ctx key1;
+ struct crypto_aes_ctx __aligned(8) key2;
+};
+
+static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_aes_expand_key(&ctx->key1, in_key, key_len / 2);
+ if (!ret)
+ ret = crypto_aes_expand_key(&ctx->key2, &in_key[key_len / 2],
+ key_len / 2);
+ if (!ret)
+ return 0;
+
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+}
+
+static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_enc, rounds, blocks, first);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_dec, rounds, blocks, first);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
+ first);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
+ first);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key_length / 4;
+ struct blkcipher_walk walk;
+ int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+
+ first = 1;
+ kernel_neon_begin();
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
+ first);
+ first = 0;
+ nbytes -= blocks * AES_BLOCK_SIZE;
+ if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
+ break;
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (nbytes) {
+ u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 __aligned(8) tail[AES_BLOCK_SIZE];
+
+ /*
+ * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
+ * to tell aes_ctr_encrypt() to only read half a block.
+ */
+ blocks = (nbytes <= 8) ? -1 : 1;
+
+ aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
+ blocks, walk.iv, first);
+ memcpy(tdst, tail, nbytes);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_neon_end();
+
+ return err;
+}
+
+static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key1.key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key1.key_enc, rounds, blocks,
+ (u8 *)ctx->key2.key_enc, walk.iv, first);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_neon_end();
+
+ return err;
+}
+
+static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key1.key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key1.key_dec, rounds, blocks,
+ (u8 *)ctx->key2.key_enc, walk.iv, first);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_neon_end();
+
+ return err;
+}
+
+static struct crypto_alg aes_algs[] = { {
+ .cra_name = "__ecb-aes-" MODE,
+ .cra_driver_name = "__driver-ecb-aes-" MODE,
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = crypto_aes_set_key,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ },
+}, {
+ .cra_name = "__cbc-aes-" MODE,
+ .cra_driver_name = "__driver-cbc-aes-" MODE,
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = crypto_aes_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ },
+}, {
+ .cra_name = "__ctr-aes-" MODE,
+ .cra_driver_name = "__driver-ctr-aes-" MODE,
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = crypto_aes_set_key,
+ .encrypt = ctr_encrypt,
+ .decrypt = ctr_encrypt,
+ },
+}, {
+ .cra_name = "__xts-aes-" MODE,
+ .cra_driver_name = "__driver-xts-aes-" MODE,
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_set_key,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
+ },
+}, {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+} };
+
+static int __init aes_init(void)
+{
+ return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
+static void __exit aes_exit(void)
+{
+ crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
+#ifdef USE_V8_CRYPTO_EXTENSIONS
+module_cpu_feature_match(AES, aes_init);
+#else
+module_init(aes_init);
+#endif
+module_exit(aes_exit);
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
new file mode 100644
index 000000000000..f6e372c528eb
--- /dev/null
+++ b/arch/arm64/crypto/aes-modes.S
@@ -0,0 +1,532 @@
+/*
+ * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* included by aes-ce.S and aes-neon.S */
+
+ .text
+ .align 4
+
+/*
+ * There are several ways to instantiate this code:
+ * - no interleave, all inline
+ * - 2-way interleave, 2x calls out of line (-DINTERLEAVE=2)
+ * - 2-way interleave, all inline (-DINTERLEAVE=2 -DINTERLEAVE_INLINE)
+ * - 4-way interleave, 4x calls out of line (-DINTERLEAVE=4)
+ * - 4-way interleave, all inline (-DINTERLEAVE=4 -DINTERLEAVE_INLINE)
+ *
+ * Macros imported by this code:
+ * - enc_prepare - setup NEON registers for encryption
+ * - dec_prepare - setup NEON registers for decryption
+ * - enc_switch_key - change to new key after having prepared for encryption
+ * - encrypt_block - encrypt a single block
+ * - decrypt_block - decrypt a single block
+ * - encrypt_block2x - encrypt 2 blocks in parallel (if INTERLEAVE == 2)
+ * - decrypt_block2x - decrypt 2 blocks in parallel (if INTERLEAVE == 2)
+ * - encrypt_block4x - encrypt 4 blocks in parallel (if INTERLEAVE == 4)
+ * - decrypt_block4x - decrypt 4 blocks in parallel (if INTERLEAVE == 4)
+ */
+
+#if defined(INTERLEAVE) && !defined(INTERLEAVE_INLINE)
+#define FRAME_PUSH stp x29, x30, [sp,#-16]! ; mov x29, sp
+#define FRAME_POP ldp x29, x30, [sp],#16
+
+#if INTERLEAVE == 2
+
+aes_encrypt_block2x:
+ encrypt_block2x v0, v1, w3, x2, x6, w7
+ ret
+ENDPROC(aes_encrypt_block2x)
+
+aes_decrypt_block2x:
+ decrypt_block2x v0, v1, w3, x2, x6, w7
+ ret
+ENDPROC(aes_decrypt_block2x)
+
+#elif INTERLEAVE == 4
+
+aes_encrypt_block4x:
+ encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
+ ret
+ENDPROC(aes_encrypt_block4x)
+
+aes_decrypt_block4x:
+ decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
+ ret
+ENDPROC(aes_decrypt_block4x)
+
+#else
+#error INTERLEAVE should equal 2 or 4
+#endif
+
+ .macro do_encrypt_block2x
+ bl aes_encrypt_block2x
+ .endm
+
+ .macro do_decrypt_block2x
+ bl aes_decrypt_block2x
+ .endm
+
+ .macro do_encrypt_block4x
+ bl aes_encrypt_block4x
+ .endm
+
+ .macro do_decrypt_block4x
+ bl aes_decrypt_block4x
+ .endm
+
+#else
+#define FRAME_PUSH
+#define FRAME_POP
+
+ .macro do_encrypt_block2x
+ encrypt_block2x v0, v1, w3, x2, x6, w7
+ .endm
+
+ .macro do_decrypt_block2x
+ decrypt_block2x v0, v1, w3, x2, x6, w7
+ .endm
+
+ .macro do_encrypt_block4x
+ encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
+ .endm
+
+ .macro do_decrypt_block4x
+ decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
+ .endm
+
+#endif
+
+ /*
+ * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, int first)
+ * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, int first)
+ */
+
+AES_ENTRY(aes_ecb_encrypt)
+ FRAME_PUSH
+ cbz w5, .LecbencloopNx
+
+ enc_prepare w3, x2, x5
+
+.LecbencloopNx:
+#if INTERLEAVE >= 2
+ subs w4, w4, #INTERLEAVE
+ bmi .Lecbenc1x
+#if INTERLEAVE == 2
+ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
+ do_encrypt_block2x
+ st1 {v0.16b-v1.16b}, [x0], #32
+#else
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
+ do_encrypt_block4x
+ st1 {v0.16b-v3.16b}, [x0], #64
+#endif
+ b .LecbencloopNx
+.Lecbenc1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lecbencout
+#endif
+.Lecbencloop:
+ ld1 {v0.16b}, [x1], #16 /* get next pt block */
+ encrypt_block v0, w3, x2, x5, w6
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lecbencloop
+.Lecbencout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_ecb_encrypt)
+
+
+AES_ENTRY(aes_ecb_decrypt)
+ FRAME_PUSH
+ cbz w5, .LecbdecloopNx
+
+ dec_prepare w3, x2, x5
+
+.LecbdecloopNx:
+#if INTERLEAVE >= 2
+ subs w4, w4, #INTERLEAVE
+ bmi .Lecbdec1x
+#if INTERLEAVE == 2
+ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
+ do_decrypt_block2x
+ st1 {v0.16b-v1.16b}, [x0], #32
+#else
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
+ do_decrypt_block4x
+ st1 {v0.16b-v3.16b}, [x0], #64
+#endif
+ b .LecbdecloopNx
+.Lecbdec1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lecbdecout
+#endif
+.Lecbdecloop:
+ ld1 {v0.16b}, [x1], #16 /* get next ct block */
+ decrypt_block v0, w3, x2, x5, w6
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lecbdecloop
+.Lecbdecout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_ecb_decrypt)
+
+
+ /*
+ * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, u8 iv[], int first)
+ * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, u8 iv[], int first)
+ */
+
+AES_ENTRY(aes_cbc_encrypt)
+ cbz w6, .Lcbcencloop
+
+ ld1 {v0.16b}, [x5] /* get iv */
+ enc_prepare w3, x2, x5
+
+.Lcbcencloop:
+ ld1 {v1.16b}, [x1], #16 /* get next pt block */
+ eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
+ encrypt_block v0, w3, x2, x5, w6
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lcbcencloop
+ ret
+AES_ENDPROC(aes_cbc_encrypt)
+
+
+AES_ENTRY(aes_cbc_decrypt)
+ FRAME_PUSH
+ cbz w6, .LcbcdecloopNx
+
+ ld1 {v7.16b}, [x5] /* get iv */
+ dec_prepare w3, x2, x5
+
+.LcbcdecloopNx:
+#if INTERLEAVE >= 2
+ subs w4, w4, #INTERLEAVE
+ bmi .Lcbcdec1x
+#if INTERLEAVE == 2
+ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
+ mov v2.16b, v0.16b
+ mov v3.16b, v1.16b
+ do_decrypt_block2x
+ eor v0.16b, v0.16b, v7.16b
+ eor v1.16b, v1.16b, v2.16b
+ mov v7.16b, v3.16b
+ st1 {v0.16b-v1.16b}, [x0], #32
+#else
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
+ mov v4.16b, v0.16b
+ mov v5.16b, v1.16b
+ mov v6.16b, v2.16b
+ do_decrypt_block4x
+ sub x1, x1, #16
+ eor v0.16b, v0.16b, v7.16b
+ eor v1.16b, v1.16b, v4.16b
+ ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */
+ eor v2.16b, v2.16b, v5.16b
+ eor v3.16b, v3.16b, v6.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+#endif
+ b .LcbcdecloopNx
+.Lcbcdec1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lcbcdecout
+#endif
+.Lcbcdecloop:
+ ld1 {v1.16b}, [x1], #16 /* get next ct block */
+ mov v0.16b, v1.16b /* ...and copy to v0 */
+ decrypt_block v0, w3, x2, x5, w6
+ eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
+ mov v7.16b, v1.16b /* ct is next iv */
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lcbcdecloop
+.Lcbcdecout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_cbc_decrypt)
+
+
+ /*
+ * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, u8 ctr[], int first)
+ */
+
+AES_ENTRY(aes_ctr_encrypt)
+ FRAME_PUSH
+ cbnz w6, .Lctrfirst /* 1st time around? */
+ umov x5, v4.d[1] /* keep swabbed ctr in reg */
+ rev x5, x5
+#if INTERLEAVE >= 2
+ cmn w5, w4 /* 32 bit overflow? */
+ bcs .Lctrinc
+ add x5, x5, #1 /* increment BE ctr */
+ b .LctrincNx
+#else
+ b .Lctrinc
+#endif
+.Lctrfirst:
+ enc_prepare w3, x2, x6
+ ld1 {v4.16b}, [x5]
+ umov x5, v4.d[1] /* keep swabbed ctr in reg */
+ rev x5, x5
+#if INTERLEAVE >= 2
+ cmn w5, w4 /* 32 bit overflow? */
+ bcs .Lctrloop
+.LctrloopNx:
+ subs w4, w4, #INTERLEAVE
+ bmi .Lctr1x
+#if INTERLEAVE == 2
+ mov v0.8b, v4.8b
+ mov v1.8b, v4.8b
+ rev x7, x5
+ add x5, x5, #1
+ ins v0.d[1], x7
+ rev x7, x5
+ add x5, x5, #1
+ ins v1.d[1], x7
+ ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
+ do_encrypt_block2x
+ eor v0.16b, v0.16b, v2.16b
+ eor v1.16b, v1.16b, v3.16b
+ st1 {v0.16b-v1.16b}, [x0], #32
+#else
+ ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
+ dup v7.4s, w5
+ mov v0.16b, v4.16b
+ add v7.4s, v7.4s, v8.4s
+ mov v1.16b, v4.16b
+ rev32 v8.16b, v7.16b
+ mov v2.16b, v4.16b
+ mov v3.16b, v4.16b
+ mov v1.s[3], v8.s[0]
+ mov v2.s[3], v8.s[1]
+ mov v3.s[3], v8.s[2]
+ ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
+ do_encrypt_block4x
+ eor v0.16b, v5.16b, v0.16b
+ ld1 {v5.16b}, [x1], #16 /* get 1 input block */
+ eor v1.16b, v6.16b, v1.16b
+ eor v2.16b, v7.16b, v2.16b
+ eor v3.16b, v5.16b, v3.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+ add x5, x5, #INTERLEAVE
+#endif
+ cbz w4, .LctroutNx
+.LctrincNx:
+ rev x7, x5
+ ins v4.d[1], x7
+ b .LctrloopNx
+.LctroutNx:
+ sub x5, x5, #1
+ rev x7, x5
+ ins v4.d[1], x7
+ b .Lctrout
+.Lctr1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lctrout
+#endif
+.Lctrloop:
+ mov v0.16b, v4.16b
+ encrypt_block v0, w3, x2, x6, w7
+ subs w4, w4, #1
+ bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
+ ld1 {v3.16b}, [x1], #16
+ eor v3.16b, v0.16b, v3.16b
+ st1 {v3.16b}, [x0], #16
+ beq .Lctrout
+.Lctrinc:
+ adds x5, x5, #1 /* increment BE ctr */
+ rev x7, x5
+ ins v4.d[1], x7
+ bcc .Lctrloop /* no overflow? */
+ umov x7, v4.d[0] /* load upper word of ctr */
+ rev x7, x7 /* ... to handle the carry */
+ add x7, x7, #1
+ rev x7, x7
+ ins v4.d[0], x7
+ b .Lctrloop
+.Lctrhalfblock:
+ ld1 {v3.8b}, [x1]
+ eor v3.8b, v0.8b, v3.8b
+ st1 {v3.8b}, [x0]
+.Lctrout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_ctr_encrypt)
+ .ltorg
+
+
+ /*
+ * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
+ * int blocks, u8 const rk2[], u8 iv[], int first)
+ * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
+ * int blocks, u8 const rk2[], u8 iv[], int first)
+ */
+
+ .macro next_tweak, out, in, const, tmp
+ sshr \tmp\().2d, \in\().2d, #63
+ and \tmp\().16b, \tmp\().16b, \const\().16b
+ add \out\().2d, \in\().2d, \in\().2d
+ ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
+ eor \out\().16b, \out\().16b, \tmp\().16b
+ .endm
+
+.Lxts_mul_x:
+ .word 1, 0, 0x87, 0
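The next_tweak macro together with the .Lxts_mul_x constant multiplies the 128-bit XTS tweak by x in GF(2^128) using the reduction polynomial 0x87. A scalar C sketch of the same step (illustrative only, not part of the patch; assumes a little-endian host, as on arm64):

#include <stdint.h>
#include <string.h>

/* Illustrative only: T <- T * x in GF(2^128), mirroring next_tweak above. */
static void xts_mul_x(uint8_t t[16])
{
	uint64_t lo, hi, carry;

	memcpy(&lo, t, 8);			/* two little-endian 64-bit halves */
	memcpy(&hi, t + 8, 8);
	carry = (hi >> 63) ? 0x87 : 0;		/* reduce if bit 127 was set */
	hi = (hi << 1) | (lo >> 63);		/* shift left across the halves */
	lo = (lo << 1) ^ carry;
	memcpy(t, &lo, 8);
	memcpy(t + 8, &hi, 8);
}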
+
+AES_ENTRY(aes_xts_encrypt)
+ FRAME_PUSH
+ cbz w7, .LxtsencloopNx
+
+ ld1 {v4.16b}, [x6]
+ enc_prepare w3, x5, x6
+ encrypt_block v4, w3, x5, x6, w7 /* first tweak */
+ enc_switch_key w3, x2, x6
+ ldr q7, .Lxts_mul_x
+ b .LxtsencNx
+
+.LxtsencloopNx:
+ ldr q7, .Lxts_mul_x
+ next_tweak v4, v4, v7, v8
+.LxtsencNx:
+#if INTERLEAVE >= 2
+ subs w4, w4, #INTERLEAVE
+ bmi .Lxtsenc1x
+#if INTERLEAVE == 2
+ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
+ next_tweak v5, v4, v7, v8
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ do_encrypt_block2x
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ st1 {v0.16b-v1.16b}, [x0], #32
+ cbz w4, .LxtsencoutNx
+ next_tweak v4, v5, v7, v8
+ b .LxtsencNx
+.LxtsencoutNx:
+ mov v4.16b, v5.16b
+ b .Lxtsencout
+#else
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
+ next_tweak v5, v4, v7, v8
+ eor v0.16b, v0.16b, v4.16b
+ next_tweak v6, v5, v7, v8
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ next_tweak v7, v6, v7, v8
+ eor v3.16b, v3.16b, v7.16b
+ do_encrypt_block4x
+ eor v3.16b, v3.16b, v7.16b
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+ mov v4.16b, v7.16b
+ cbz w4, .Lxtsencout
+ b .LxtsencloopNx
+#endif
+.Lxtsenc1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lxtsencout
+#endif
+.Lxtsencloop:
+ ld1 {v1.16b}, [x1], #16
+ eor v0.16b, v1.16b, v4.16b
+ encrypt_block v0, w3, x2, x6, w7
+ eor v0.16b, v0.16b, v4.16b
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ beq .Lxtsencout
+ next_tweak v4, v4, v7, v8
+ b .Lxtsencloop
+.Lxtsencout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_xts_encrypt)
+
+
+AES_ENTRY(aes_xts_decrypt)
+ FRAME_PUSH
+ cbz w7, .LxtsdecloopNx
+
+ ld1 {v4.16b}, [x6]
+ enc_prepare w3, x5, x6
+ encrypt_block v4, w3, x5, x6, w7 /* first tweak */
+ dec_prepare w3, x2, x6
+ ldr q7, .Lxts_mul_x
+ b .LxtsdecNx
+
+.LxtsdecloopNx:
+ ldr q7, .Lxts_mul_x
+ next_tweak v4, v4, v7, v8
+.LxtsdecNx:
+#if INTERLEAVE >= 2
+ subs w4, w4, #INTERLEAVE
+ bmi .Lxtsdec1x
+#if INTERLEAVE == 2
+ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
+ next_tweak v5, v4, v7, v8
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ do_decrypt_block2x
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ st1 {v0.16b-v1.16b}, [x0], #32
+ cbz w4, .LxtsdecoutNx
+ next_tweak v4, v5, v7, v8
+ b .LxtsdecNx
+.LxtsdecoutNx:
+ mov v4.16b, v5.16b
+ b .Lxtsdecout
+#else
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
+ next_tweak v5, v4, v7, v8
+ eor v0.16b, v0.16b, v4.16b
+ next_tweak v6, v5, v7, v8
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ next_tweak v7, v6, v7, v8
+ eor v3.16b, v3.16b, v7.16b
+ do_decrypt_block4x
+ eor v3.16b, v3.16b, v7.16b
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+ mov v4.16b, v7.16b
+ cbz w4, .Lxtsdecout
+ b .LxtsdecloopNx
+#endif
+.Lxtsdec1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lxtsdecout
+#endif
+.Lxtsdecloop:
+ ld1 {v1.16b}, [x1], #16
+ eor v0.16b, v1.16b, v4.16b
+ decrypt_block v0, w3, x2, x6, w7
+ eor v0.16b, v0.16b, v4.16b
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ beq .Lxtsdecout
+ next_tweak v4, v4, v7, v8
+ b .Lxtsdecloop
+.Lxtsdecout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_xts_decrypt)
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
new file mode 100644
index 000000000000..b93170e1cc93
--- /dev/null
+++ b/arch/arm64/crypto/aes-neon.S
@@ -0,0 +1,382 @@
+/*
+ * linux/arch/arm64/crypto/aes-neon.S - AES cipher for ARMv8 NEON
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#define AES_ENTRY(func) ENTRY(neon_ ## func)
+#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
+
+ /* multiply by polynomial 'x' in GF(2^8) */
+ .macro mul_by_x, out, in, temp, const
+ sshr \temp, \in, #7
+ add \out, \in, \in
+ and \temp, \temp, \const
+ eor \out, \out, \temp
+ .endm
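The same GF(2^8) doubling in scalar C, for reference (illustrative only, not part of the patch); v14 holds the 0x1b reduction constant used above:

#include <stdint.h>

/* Illustrative only: multiply one byte by x modulo the AES polynomial. */
static uint8_t xtime(uint8_t b)
{
	return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0));
}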
+
+ /* preload the entire Sbox */
+ .macro prepare, sbox, shiftrows, temp
+ adr \temp, \sbox
+ movi v12.16b, #0x40
+ ldr q13, \shiftrows
+ movi v14.16b, #0x1b
+ ld1 {v16.16b-v19.16b}, [\temp], #64
+ ld1 {v20.16b-v23.16b}, [\temp], #64
+ ld1 {v24.16b-v27.16b}, [\temp], #64
+ ld1 {v28.16b-v31.16b}, [\temp]
+ .endm
+
+ /* do preload for encryption */
+ .macro enc_prepare, ignore0, ignore1, temp
+ prepare .LForward_Sbox, .LForward_ShiftRows, \temp
+ .endm
+
+ .macro enc_switch_key, ignore0, ignore1, temp
+ /* do nothing */
+ .endm
+
+ /* do preload for decryption */
+ .macro dec_prepare, ignore0, ignore1, temp
+ prepare .LReverse_Sbox, .LReverse_ShiftRows, \temp
+ .endm
+
+ /* apply SubBytes transformation using the preloaded Sbox */
+ .macro sub_bytes, in
+ sub v9.16b, \in\().16b, v12.16b
+ tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b
+ sub v10.16b, v9.16b, v12.16b
+ tbx \in\().16b, {v20.16b-v23.16b}, v9.16b
+ sub v11.16b, v10.16b, v12.16b
+ tbx \in\().16b, {v24.16b-v27.16b}, v10.16b
+ tbx \in\().16b, {v28.16b-v31.16b}, v11.16b
+ .endm
+
+ /* apply MixColumns transformation */
+ .macro mix_columns, in
+ mul_by_x v10.16b, \in\().16b, v9.16b, v14.16b
+ rev32 v8.8h, \in\().8h
+ eor \in\().16b, v10.16b, \in\().16b
+ shl v9.4s, v8.4s, #24
+ shl v11.4s, \in\().4s, #24
+ sri v9.4s, v8.4s, #8
+ sri v11.4s, \in\().4s, #8
+ eor v9.16b, v9.16b, v8.16b
+ eor v10.16b, v10.16b, v9.16b
+ eor \in\().16b, v10.16b, v11.16b
+ .endm
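A per-column reference for the vectorised MixColumns above (a sketch, not part of the patch):

#include <stdint.h>

static uint8_t xtime(uint8_t b)		/* GF(2^8) doubling, as sketched above */
{
	return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0));
}

/* Illustrative only: MixColumns applied to a single 4-byte column. */
static void mix_column(uint8_t c[4])
{
	uint8_t a0 = c[0], a1 = c[1], a2 = c[2], a3 = c[3];
	uint8_t t = a0 ^ a1 ^ a2 ^ a3;

	c[0] ^= t ^ xtime(a0 ^ a1);
	c[1] ^= t ^ xtime(a1 ^ a2);
	c[2] ^= t ^ xtime(a2 ^ a3);
	c[3] ^= t ^ xtime(a3 ^ a0);
}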
+
+ /* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */
+ .macro inv_mix_columns, in
+ mul_by_x v11.16b, \in\().16b, v10.16b, v14.16b
+ mul_by_x v11.16b, v11.16b, v10.16b, v14.16b
+ eor \in\().16b, \in\().16b, v11.16b
+ rev32 v11.8h, v11.8h
+ eor \in\().16b, \in\().16b, v11.16b
+ mix_columns \in
+ .endm
+
+ .macro do_block, enc, in, rounds, rk, rkp, i
+ ld1 {v15.16b}, [\rk]
+ add \rkp, \rk, #16
+ mov \i, \rounds
+1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
+ tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
+ sub_bytes \in
+ ld1 {v15.16b}, [\rkp], #16
+ subs \i, \i, #1
+ beq 2222f
+ .if \enc == 1
+ mix_columns \in
+ .else
+ inv_mix_columns \in
+ .endif
+ b 1111b
+2222: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
+ .endm
+
+ .macro encrypt_block, in, rounds, rk, rkp, i
+ do_block 1, \in, \rounds, \rk, \rkp, \i
+ .endm
+
+ .macro decrypt_block, in, rounds, rk, rkp, i
+ do_block 0, \in, \rounds, \rk, \rkp, \i
+ .endm
+
+ /*
+ * Interleaved versions: functionally equivalent to the
+ * ones above, but applied to 2 or 4 AES states in parallel.
+ */
+
+ .macro sub_bytes_2x, in0, in1
+ sub v8.16b, \in0\().16b, v12.16b
+ sub v9.16b, \in1\().16b, v12.16b
+ tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
+ tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
+ sub v10.16b, v8.16b, v12.16b
+ sub v11.16b, v9.16b, v12.16b
+ tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b
+ tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b
+ sub v8.16b, v10.16b, v12.16b
+ sub v9.16b, v11.16b, v12.16b
+ tbx \in0\().16b, {v24.16b-v27.16b}, v10.16b
+ tbx \in1\().16b, {v24.16b-v27.16b}, v11.16b
+ tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b
+ tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b
+ .endm
+
+ .macro sub_bytes_4x, in0, in1, in2, in3
+ sub v8.16b, \in0\().16b, v12.16b
+ tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
+ sub v9.16b, \in1\().16b, v12.16b
+ tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
+ sub v10.16b, \in2\().16b, v12.16b
+ tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b
+ sub v11.16b, \in3\().16b, v12.16b
+ tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b
+ tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b
+ tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b
+ sub v8.16b, v8.16b, v12.16b
+ tbx \in2\().16b, {v20.16b-v23.16b}, v10.16b
+ sub v9.16b, v9.16b, v12.16b
+ tbx \in3\().16b, {v20.16b-v23.16b}, v11.16b
+ sub v10.16b, v10.16b, v12.16b
+ tbx \in0\().16b, {v24.16b-v27.16b}, v8.16b
+ sub v11.16b, v11.16b, v12.16b
+ tbx \in1\().16b, {v24.16b-v27.16b}, v9.16b
+ sub v8.16b, v8.16b, v12.16b
+ tbx \in2\().16b, {v24.16b-v27.16b}, v10.16b
+ sub v9.16b, v9.16b, v12.16b
+ tbx \in3\().16b, {v24.16b-v27.16b}, v11.16b
+ sub v10.16b, v10.16b, v12.16b
+ tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b
+ sub v11.16b, v11.16b, v12.16b
+ tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b
+ tbx \in2\().16b, {v28.16b-v31.16b}, v10.16b
+ tbx \in3\().16b, {v28.16b-v31.16b}, v11.16b
+ .endm
+
+ .macro mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const
+ sshr \tmp0\().16b, \in0\().16b, #7
+ add \out0\().16b, \in0\().16b, \in0\().16b
+ sshr \tmp1\().16b, \in1\().16b, #7
+ and \tmp0\().16b, \tmp0\().16b, \const\().16b
+ add \out1\().16b, \in1\().16b, \in1\().16b
+ and \tmp1\().16b, \tmp1\().16b, \const\().16b
+ eor \out0\().16b, \out0\().16b, \tmp0\().16b
+ eor \out1\().16b, \out1\().16b, \tmp1\().16b
+ .endm
+
+ .macro mix_columns_2x, in0, in1
+ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
+ rev32 v10.8h, \in0\().8h
+ rev32 v11.8h, \in1\().8h
+ eor \in0\().16b, v8.16b, \in0\().16b
+ eor \in1\().16b, v9.16b, \in1\().16b
+ shl v12.4s, v10.4s, #24
+ shl v13.4s, v11.4s, #24
+ eor v8.16b, v8.16b, v10.16b
+ sri v12.4s, v10.4s, #8
+ shl v10.4s, \in0\().4s, #24
+ eor v9.16b, v9.16b, v11.16b
+ sri v13.4s, v11.4s, #8
+ shl v11.4s, \in1\().4s, #24
+ sri v10.4s, \in0\().4s, #8
+ eor \in0\().16b, v8.16b, v12.16b
+ sri v11.4s, \in1\().4s, #8
+ eor \in1\().16b, v9.16b, v13.16b
+ eor \in0\().16b, v10.16b, \in0\().16b
+ eor \in1\().16b, v11.16b, \in1\().16b
+ .endm
+
+ .macro inv_mix_cols_2x, in0, in1
+ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
+ mul_by_x_2x v8, v9, v8, v9, v10, v11, v14
+ eor \in0\().16b, \in0\().16b, v8.16b
+ eor \in1\().16b, \in1\().16b, v9.16b
+ rev32 v8.8h, v8.8h
+ rev32 v9.8h, v9.8h
+ eor \in0\().16b, \in0\().16b, v8.16b
+ eor \in1\().16b, \in1\().16b, v9.16b
+ mix_columns_2x \in0, \in1
+ .endm
+
+ .macro inv_mix_cols_4x, in0, in1, in2, in3
+ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
+ mul_by_x_2x v10, v11, \in2, \in3, v12, v13, v14
+ mul_by_x_2x v8, v9, v8, v9, v12, v13, v14
+ mul_by_x_2x v10, v11, v10, v11, v12, v13, v14
+ eor \in0\().16b, \in0\().16b, v8.16b
+ eor \in1\().16b, \in1\().16b, v9.16b
+ eor \in2\().16b, \in2\().16b, v10.16b
+ eor \in3\().16b, \in3\().16b, v11.16b
+ rev32 v8.8h, v8.8h
+ rev32 v9.8h, v9.8h
+ rev32 v10.8h, v10.8h
+ rev32 v11.8h, v11.8h
+ eor \in0\().16b, \in0\().16b, v8.16b
+ eor \in1\().16b, \in1\().16b, v9.16b
+ eor \in2\().16b, \in2\().16b, v10.16b
+ eor \in3\().16b, \in3\().16b, v11.16b
+ mix_columns_2x \in0, \in1
+ mix_columns_2x \in2, \in3
+ .endm
+
+	.macro		do_block_2x, enc, in0, in1, rounds, rk, rkp, i
+ ld1 {v15.16b}, [\rk]
+ add \rkp, \rk, #16
+ mov \i, \rounds
+1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
+ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
+ sub_bytes_2x \in0, \in1
+ tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
+ tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
+ ld1 {v15.16b}, [\rkp], #16
+ subs \i, \i, #1
+ beq 2222f
+ .if \enc == 1
+ mix_columns_2x \in0, \in1
+ ldr q13, .LForward_ShiftRows
+ .else
+ inv_mix_cols_2x \in0, \in1
+ ldr q13, .LReverse_ShiftRows
+ .endif
+ movi v12.16b, #0x40
+ b 1111b
+2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
+ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
+ .endm
+
+ .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
+ ld1 {v15.16b}, [\rk]
+ add \rkp, \rk, #16
+ mov \i, \rounds
+1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
+ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
+ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */
+ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
+ sub_bytes_4x \in0, \in1, \in2, \in3
+ tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
+ tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
+ tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */
+ tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */
+ ld1 {v15.16b}, [\rkp], #16
+ subs \i, \i, #1
+ beq 2222f
+ .if \enc == 1
+ mix_columns_2x \in0, \in1
+ mix_columns_2x \in2, \in3
+ ldr q13, .LForward_ShiftRows
+ .else
+ inv_mix_cols_4x \in0, \in1, \in2, \in3
+ ldr q13, .LReverse_ShiftRows
+ .endif
+ movi v12.16b, #0x40
+ b 1111b
+2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
+ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
+ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */
+ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
+ .endm
+
+ .macro encrypt_block2x, in0, in1, rounds, rk, rkp, i
+ do_block_2x 1, \in0, \in1, \rounds, \rk, \rkp, \i
+ .endm
+
+ .macro decrypt_block2x, in0, in1, rounds, rk, rkp, i
+ do_block_2x 0, \in0, \in1, \rounds, \rk, \rkp, \i
+ .endm
+
+ .macro encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
+ do_block_4x 1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
+ .endm
+
+ .macro decrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
+ do_block_4x 0, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
+ .endm
+
+#include "aes-modes.S"
+
+ .text
+ .align 4
+.LForward_ShiftRows:
+ .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
+ .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb
+
+.LReverse_ShiftRows:
+ .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
+ .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
+
+.LForward_Sbox:
+ .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+ .byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+ .byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+ .byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+ .byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+ .byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+ .byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+ .byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+ .byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+ .byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+ .byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+ .byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+ .byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+ .byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+ .byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+ .byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+ .byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+ .byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+ .byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+ .byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+ .byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+ .byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+ .byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+ .byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+ .byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+ .byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+ .byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+ .byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+ .byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+ .byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+ .byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+ .byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+
+.LReverse_Sbox:
+ .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+ .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+ .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+ .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+ .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+ .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+ .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+ .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+ .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+ .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+ .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+ .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+ .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+ .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+ .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+ .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+ .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+ .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+ .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+ .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+ .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+ .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+ .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+ .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+ .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+ .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+ .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+ .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+ .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+ .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+ .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+ .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
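Editor's note: the mul_by_x macro in this file is the NEON equivalent of the scalar AES "xtime" operation: doubling a byte in GF(2^8) and conditionally folding in 0x1b. A hedged C sketch of the per-byte operation the vector code applies to all 16 lanes at once (illustrative only):

#include <stdint.h>

/* Multiply one GF(2^8) element by the polynomial x, reducing with 0x1b. */
static uint8_t xtime(uint8_t b)
{
	/* Arithmetic shift of the sign bit yields 0x00 or 0xff, like sshr #7. */
	uint8_t mask = (uint8_t)-(b >> 7);

	return (uint8_t)((b << 1) ^ (mask & 0x1b));
}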
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
new file mode 100644
index 000000000000..b9e6eaf41c9b
--- /dev/null
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -0,0 +1,95 @@
+/*
+ * Accelerated GHASH implementation with ARMv8 PMULL instructions.
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Based on arch/x86/crypto/ghash-clmulni-intel_asm.S
+ *
+ * Copyright (c) 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ * Vinodh Gopal
+ * Erdinc Ozturk
+ * Deniz Karakoyunlu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ DATA .req v0
+ SHASH .req v1
+ IN1 .req v2
+ T1 .req v2
+ T2 .req v3
+ T3 .req v4
+ VZR .req v5
+
+ .text
+ .arch armv8-a+crypto
+
+ /*
+ * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
+ * struct ghash_key const *k, const char *head)
+ */
+ENTRY(pmull_ghash_update)
+ ld1 {DATA.16b}, [x1]
+ ld1 {SHASH.16b}, [x3]
+ eor VZR.16b, VZR.16b, VZR.16b
+
+ /* do the head block first, if supplied */
+ cbz x4, 0f
+ ld1 {IN1.2d}, [x4]
+ b 1f
+
+0: ld1 {IN1.2d}, [x2], #16
+ sub w0, w0, #1
+1: ext IN1.16b, IN1.16b, IN1.16b, #8
+CPU_LE( rev64 IN1.16b, IN1.16b )
+ eor DATA.16b, DATA.16b, IN1.16b
+
+ /* multiply DATA by SHASH in GF(2^128) */
+ ext T2.16b, DATA.16b, DATA.16b, #8
+ ext T3.16b, SHASH.16b, SHASH.16b, #8
+ eor T2.16b, T2.16b, DATA.16b
+ eor T3.16b, T3.16b, SHASH.16b
+
+ pmull2 T1.1q, SHASH.2d, DATA.2d // a1 * b1
+ pmull DATA.1q, SHASH.1d, DATA.1d // a0 * b0
+ pmull T2.1q, T2.1d, T3.1d // (a1 + a0)(b1 + b0)
+ eor T2.16b, T2.16b, T1.16b // (a0 * b1) + (a1 * b0)
+ eor T2.16b, T2.16b, DATA.16b
+
+ ext T3.16b, VZR.16b, T2.16b, #8
+ ext T2.16b, T2.16b, VZR.16b, #8
+ eor DATA.16b, DATA.16b, T3.16b
+ eor T1.16b, T1.16b, T2.16b // <T1:DATA> is result of
+ // carry-less multiplication
+
+ /* first phase of the reduction */
+ shl T3.2d, DATA.2d, #1
+ eor T3.16b, T3.16b, DATA.16b
+ shl T3.2d, T3.2d, #5
+ eor T3.16b, T3.16b, DATA.16b
+ shl T3.2d, T3.2d, #57
+ ext T2.16b, VZR.16b, T3.16b, #8
+ ext T3.16b, T3.16b, VZR.16b, #8
+ eor DATA.16b, DATA.16b, T2.16b
+ eor T1.16b, T1.16b, T3.16b
+
+ /* second phase of the reduction */
+ ushr T2.2d, DATA.2d, #5
+ eor T2.16b, T2.16b, DATA.16b
+ ushr T2.2d, T2.2d, #1
+ eor T2.16b, T2.16b, DATA.16b
+ ushr T2.2d, T2.2d, #1
+ eor T1.16b, T1.16b, T2.16b
+ eor DATA.16b, DATA.16b, T1.16b
+
+ cbnz w0, 0b
+
+ st1 {DATA.16b}, [x1]
+ ret
+ENDPROC(pmull_ghash_update)
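Editor's note: the PMULL sequence above computes a carry-less 128x128-bit product via a Karatsuba-style split and then folds the 256-bit result back modulo the GHASH polynomial in two reduction phases. As a point of comparison only (a reference model, not kernel code), the same GF(2^128) multiplication can be written bitwise as in NIST SP 800-38D:

#include <stdint.h>
#include <string.h>

/* Reference GF(2^128) multiply as used by GHASH (NIST SP 800-38D, sect. 6.3). */
static void gf128_mul(uint8_t z[16], const uint8_t x[16], const uint8_t y[16])
{
	uint8_t v[16], acc[16] = { 0 };

	memcpy(v, y, 16);
	for (int i = 0; i < 128; i++) {
		/* bit i of x, most significant bit of each byte first */
		if ((x[i / 8] >> (7 - (i % 8))) & 1)
			for (int j = 0; j < 16; j++)
				acc[j] ^= v[j];

		/* v = v * x: shift right, reduce with R = 0xe1 || 0^120 */
		int lsb = v[15] & 1;

		for (int j = 15; j > 0; j--)
			v[j] = (uint8_t)((v[j] >> 1) | (v[j - 1] << 7));
		v[0] >>= 1;
		if (lsb)
			v[0] ^= 0xe1;
	}
	memcpy(z, acc, 16);
}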
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
new file mode 100644
index 000000000000..b92baf3f68c7
--- /dev/null
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -0,0 +1,155 @@
+/*
+ * Accelerated GHASH implementation with ARMv8 PMULL instructions.
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+struct ghash_key {
+ u64 a;
+ u64 b;
+};
+
+struct ghash_desc_ctx {
+ u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
+ u8 buf[GHASH_BLOCK_SIZE];
+ u32 count;
+};
+
+asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
+ struct ghash_key const *k, const char *head);
+
+static int ghash_init(struct shash_desc *desc)
+{
+ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ *ctx = (struct ghash_desc_ctx){};
+ return 0;
+}
+
+static int ghash_update(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
+{
+ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+
+ ctx->count += len;
+
+ if ((partial + len) >= GHASH_BLOCK_SIZE) {
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ int blocks;
+
+ if (partial) {
+ int p = GHASH_BLOCK_SIZE - partial;
+
+ memcpy(ctx->buf + partial, src, p);
+ src += p;
+ len -= p;
+ }
+
+ blocks = len / GHASH_BLOCK_SIZE;
+ len %= GHASH_BLOCK_SIZE;
+
+ kernel_neon_begin_partial(6);
+ pmull_ghash_update(blocks, ctx->digest, src, key,
+ partial ? ctx->buf : NULL);
+ kernel_neon_end();
+ src += blocks * GHASH_BLOCK_SIZE;
+ }
+ if (len)
+ memcpy(ctx->buf + partial, src, len);
+ return 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+
+ if (partial) {
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+
+ memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
+
+ kernel_neon_begin_partial(6);
+ pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
+ kernel_neon_end();
+ }
+ put_unaligned_be64(ctx->digest[1], dst);
+ put_unaligned_be64(ctx->digest[0], dst + 8);
+
+ *ctx = (struct ghash_desc_ctx){};
+ return 0;
+}
+
+static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *inkey, unsigned int keylen)
+{
+ struct ghash_key *key = crypto_shash_ctx(tfm);
+ u64 a, b;
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* perform multiplication by 'x' in GF(2^128) */
+ b = get_unaligned_be64(inkey);
+ a = get_unaligned_be64(inkey + 8);
+
+ key->a = (a << 1) | (b >> 63);
+ key->b = (b << 1) | (a >> 63);
+
+ if (b >> 63)
+ key->b ^= 0xc200000000000000UL;
+
+ return 0;
+}
+
+static struct shash_alg ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update,
+ .final = ghash_final,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-ce",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ghash_key),
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int __init ghash_ce_mod_init(void)
+{
+ return crypto_register_shash(&ghash_alg);
+}
+
+static void __exit ghash_ce_mod_exit(void)
+{
+ crypto_unregister_shash(&ghash_alg);
+}
+
+module_cpu_feature_match(PMULL, ghash_ce_mod_init);
+module_exit(ghash_ce_mod_exit);
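Editor's note: one possible way for other kernel code to exercise the resulting "ghash" shash is through the generic crypto API. The sketch below is illustrative only (error handling trimmed, names made up) and assumes a 16-byte key and a message buffer are already available:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative only: compute one GHASH digest over a buffer. */
static int ghash_demo(const u8 key[16], const u8 *msg, unsigned int len,
		      u8 out[16])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 16);
	if (err)
		goto out_free_tfm;

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;

	err = crypto_shash_digest(desc, msg, len, out);
	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}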
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
new file mode 100644
index 000000000000..09d57d98609c
--- /dev/null
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -0,0 +1,153 @@
+/*
+ * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+ .arch armv8-a+crypto
+
+ k0 .req v0
+ k1 .req v1
+ k2 .req v2
+ k3 .req v3
+
+ t0 .req v4
+ t1 .req v5
+
+ dga .req q6
+ dgav .req v6
+ dgb .req s7
+ dgbv .req v7
+
+ dg0q .req q12
+ dg0s .req s12
+ dg0v .req v12
+ dg1s .req s13
+ dg1v .req v13
+ dg2s .req s14
+
+ .macro add_only, op, ev, rc, s0, dg1
+ .ifc \ev, ev
+ add t1.4s, v\s0\().4s, \rc\().4s
+ sha1h dg2s, dg0s
+ .ifnb \dg1
+ sha1\op dg0q, \dg1, t0.4s
+ .else
+ sha1\op dg0q, dg1s, t0.4s
+ .endif
+ .else
+ .ifnb \s0
+ add t0.4s, v\s0\().4s, \rc\().4s
+ .endif
+ sha1h dg1s, dg0s
+ sha1\op dg0q, dg2s, t1.4s
+ .endif
+ .endm
+
+ .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1
+ sha1su0 v\s0\().4s, v\s1\().4s, v\s2\().4s
+ add_only \op, \ev, \rc, \s1, \dg1
+ sha1su1 v\s0\().4s, v\s3\().4s
+ .endm
+
+ /*
+ * The SHA1 round constants
+ */
+ .align 4
+.Lsha1_rcon:
+ .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
+
+ /*
+ * void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
+ * u8 *head, long bytes)
+ */
+ENTRY(sha1_ce_transform)
+ /* load round constants */
+ adr x6, .Lsha1_rcon
+ ld1r {k0.4s}, [x6], #4
+ ld1r {k1.4s}, [x6], #4
+ ld1r {k2.4s}, [x6], #4
+ ld1r {k3.4s}, [x6]
+
+ /* load state */
+ ldr dga, [x2]
+ ldr dgb, [x2, #16]
+
+ /* load partial state (if supplied) */
+ cbz x3, 0f
+ ld1 {v8.4s-v11.4s}, [x3]
+ b 1f
+
+ /* load input */
+0: ld1 {v8.4s-v11.4s}, [x1], #64
+ sub w0, w0, #1
+
+1:
+CPU_LE( rev32 v8.16b, v8.16b )
+CPU_LE( rev32 v9.16b, v9.16b )
+CPU_LE( rev32 v10.16b, v10.16b )
+CPU_LE( rev32 v11.16b, v11.16b )
+
+2: add t0.4s, v8.4s, k0.4s
+ mov dg0v.16b, dgav.16b
+
+ add_update c, ev, k0, 8, 9, 10, 11, dgb
+ add_update c, od, k0, 9, 10, 11, 8
+ add_update c, ev, k0, 10, 11, 8, 9
+ add_update c, od, k0, 11, 8, 9, 10
+ add_update c, ev, k1, 8, 9, 10, 11
+
+ add_update p, od, k1, 9, 10, 11, 8
+ add_update p, ev, k1, 10, 11, 8, 9
+ add_update p, od, k1, 11, 8, 9, 10
+ add_update p, ev, k1, 8, 9, 10, 11
+ add_update p, od, k2, 9, 10, 11, 8
+
+ add_update m, ev, k2, 10, 11, 8, 9
+ add_update m, od, k2, 11, 8, 9, 10
+ add_update m, ev, k2, 8, 9, 10, 11
+ add_update m, od, k2, 9, 10, 11, 8
+ add_update m, ev, k3, 10, 11, 8, 9
+
+ add_update p, od, k3, 11, 8, 9, 10
+ add_only p, ev, k3, 9
+ add_only p, od, k3, 10
+ add_only p, ev, k3, 11
+ add_only p, od
+
+ /* update state */
+ add dgbv.2s, dgbv.2s, dg1v.2s
+ add dgav.4s, dgav.4s, dg0v.4s
+
+ cbnz w0, 0b
+
+ /*
+ * Final block: add padding and total bit count.
+ * Skip if we have no total byte count in x4. In that case, the input
+ * size was not a round multiple of the block size, and the padding is
+ * handled by the C code.
+ */
+ cbz x4, 3f
+ movi v9.2d, #0
+ mov x8, #0x80000000
+ movi v10.2d, #0
+ ror x7, x4, #29 // ror(lsl(x4, 3), 32)
+ fmov d8, x8
+ mov x4, #0
+ mov v11.d[0], xzr
+ mov v11.d[1], x7
+ b 2b
+
+ /* store new state */
+3: str dga, [x2]
+ str dgb, [x2, #16]
+ ret
+ENDPROC(sha1_ce_transform)
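Editor's note: the final-block shortcut above (taken only when x4 carries the total byte count) synthesizes the standard SHA-1 padding in registers: a 0x80 byte, zeroes, and the 64-bit big-endian bit count. A small C model of the block it constructs, assuming the message length was already a multiple of 64 bytes (illustrative, not the driver's code path):

#include <stdint.h>
#include <string.h>

/*
 * Build the single extra block that terminates a message whose length is
 * already a multiple of the 64-byte SHA-1 block size.
 */
static void sha1_pad_block(uint8_t block[64], uint64_t total_bytes)
{
	uint64_t bits = total_bytes << 3;

	memset(block, 0, 64);
	block[0] = 0x80;			/* mandatory '1' padding bit */
	for (int i = 0; i < 8; i++)		/* big-endian bit count */
		block[56 + i] = (uint8_t)(bits >> (56 - 8 * i));
}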
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
new file mode 100644
index 000000000000..6fe83f37a750
--- /dev/null
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -0,0 +1,174 @@
+/*
+ * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
+ u8 *head, long bytes);
+
+static int sha1_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ *sctx = (struct sha1_state){
+ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+ };
+ return 0;
+}
+
+static int sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if ((partial + len) >= SHA1_BLOCK_SIZE) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA1_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buffer + partial, data, p);
+ data += p;
+ len -= p;
+ }
+
+ blocks = len / SHA1_BLOCK_SIZE;
+ len %= SHA1_BLOCK_SIZE;
+
+ kernel_neon_begin_partial(16);
+ sha1_ce_transform(blocks, data, sctx->state,
+ partial ? sctx->buffer : NULL, 0);
+ kernel_neon_end();
+
+ data += blocks * SHA1_BLOCK_SIZE;
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buffer + partial, data, len);
+ return 0;
+}
+
+static int sha1_final(struct shash_desc *desc, u8 *out)
+{
+ static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
+
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be64 bits = cpu_to_be64(sctx->count << 3);
+ __be32 *dst = (__be32 *)out;
+ int i;
+
+ u32 padlen = SHA1_BLOCK_SIZE
+ - ((sctx->count + sizeof(bits)) % SHA1_BLOCK_SIZE);
+
+ sha1_update(desc, padding, padlen);
+ sha1_update(desc, (const u8 *)&bits, sizeof(bits));
+
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha1_state){};
+ return 0;
+}
+
+static int sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ int blocks;
+ int i;
+
+ if (sctx->count || !len || (len % SHA1_BLOCK_SIZE)) {
+ sha1_update(desc, data, len);
+ return sha1_final(desc, out);
+ }
+
+ /*
+ * Use a fast path if the input is a multiple of 64 bytes. In
+ * this case, there is no need to copy data around, and we can
+ * perform the entire digest calculation in a single invocation
+	 * of sha1_ce_transform().
+ */
+ blocks = len / SHA1_BLOCK_SIZE;
+
+ kernel_neon_begin_partial(16);
+ sha1_ce_transform(blocks, data, sctx->state, NULL, len);
+ kernel_neon_end();
+
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha1_state){};
+ return 0;
+}
+
+static int sha1_export(struct shash_desc *desc, void *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ struct sha1_state *dst = out;
+
+ *dst = *sctx;
+ return 0;
+}
+
+static int sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ struct sha1_state const *src = in;
+
+ *sctx = *src;
+ return 0;
+}
+
+static struct shash_alg alg = {
+ .init = sha1_init,
+ .update = sha1_update,
+ .final = sha1_final,
+ .finup = sha1_finup,
+ .export = sha1_export,
+ .import = sha1_import,
+ .descsize = sizeof(struct sha1_state),
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-ce",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sha1_ce_mod_init(void)
+{
+ return crypto_register_shash(&alg);
+}
+
+static void __exit sha1_ce_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_cpu_feature_match(SHA1, sha1_ce_mod_init);
+module_exit(sha1_ce_mod_fini);
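Editor's note: the update/final pattern above is the usual block-buffering scheme: bytes that do not fill a 64-byte block are parked in sctx->buffer and handed to the next transform call as the optional head argument. A simplified, self-contained C sketch of the same bookkeeping (names are illustrative; the real driver passes the buffered head and the new blocks to a single transform call):

#include <stdint.h>
#include <string.h>

#define BLK 64

struct buf_ctx {
	uint8_t buf[BLK];
	uint64_t count;		/* total bytes seen so far */
};

/* process() stands in for the CE transform; it consumes whole blocks only. */
static void buffered_update(struct buf_ctx *c, const uint8_t *data, size_t len,
			    void (*process)(const uint8_t *blk, size_t nblocks))
{
	size_t partial = c->count % BLK;

	c->count += len;
	if (partial + len >= BLK) {
		if (partial) {
			size_t p = BLK - partial;

			memcpy(c->buf + partial, data, p);
			process(c->buf, 1);
			data += p;
			len -= p;
			partial = 0;
		}
		process(data, len / BLK);
		data += (len / BLK) * BLK;
		len %= BLK;
	}
	if (len)
		memcpy(c->buf + partial, data, len);
}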
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
new file mode 100644
index 000000000000..7f29fc031ea8
--- /dev/null
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -0,0 +1,156 @@
+/*
+ * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+ .arch armv8-a+crypto
+
+ dga .req q20
+ dgav .req v20
+ dgb .req q21
+ dgbv .req v21
+
+ t0 .req v22
+ t1 .req v23
+
+ dg0q .req q24
+ dg0v .req v24
+ dg1q .req q25
+ dg1v .req v25
+ dg2q .req q26
+ dg2v .req v26
+
+ .macro add_only, ev, rc, s0
+ mov dg2v.16b, dg0v.16b
+ .ifeq \ev
+ add t1.4s, v\s0\().4s, \rc\().4s
+ sha256h dg0q, dg1q, t0.4s
+ sha256h2 dg1q, dg2q, t0.4s
+ .else
+ .ifnb \s0
+ add t0.4s, v\s0\().4s, \rc\().4s
+ .endif
+ sha256h dg0q, dg1q, t1.4s
+ sha256h2 dg1q, dg2q, t1.4s
+ .endif
+ .endm
+
+ .macro add_update, ev, rc, s0, s1, s2, s3
+ sha256su0 v\s0\().4s, v\s1\().4s
+ add_only \ev, \rc, \s1
+ sha256su1 v\s0\().4s, v\s2\().4s, v\s3\().4s
+ .endm
+
+ /*
+ * The SHA-256 round constants
+ */
+ .align 4
+.Lsha2_rcon:
+ .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
+ .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
+ .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
+ .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
+ .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
+ .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
+ .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
+ .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
+ .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
+ .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
+ .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
+ .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
+ .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
+ .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
+ .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
+ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+
+ /*
+ * void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
+ * u8 *head, long bytes)
+ */
+ENTRY(sha2_ce_transform)
+ /* load round constants */
+ adr x8, .Lsha2_rcon
+ ld1 { v0.4s- v3.4s}, [x8], #64
+ ld1 { v4.4s- v7.4s}, [x8], #64
+ ld1 { v8.4s-v11.4s}, [x8], #64
+ ld1 {v12.4s-v15.4s}, [x8]
+
+ /* load state */
+ ldp dga, dgb, [x2]
+
+ /* load partial input (if supplied) */
+ cbz x3, 0f
+ ld1 {v16.4s-v19.4s}, [x3]
+ b 1f
+
+ /* load input */
+0: ld1 {v16.4s-v19.4s}, [x1], #64
+ sub w0, w0, #1
+
+1:
+CPU_LE( rev32 v16.16b, v16.16b )
+CPU_LE( rev32 v17.16b, v17.16b )
+CPU_LE( rev32 v18.16b, v18.16b )
+CPU_LE( rev32 v19.16b, v19.16b )
+
+2: add t0.4s, v16.4s, v0.4s
+ mov dg0v.16b, dgav.16b
+ mov dg1v.16b, dgbv.16b
+
+ add_update 0, v1, 16, 17, 18, 19
+ add_update 1, v2, 17, 18, 19, 16
+ add_update 0, v3, 18, 19, 16, 17
+ add_update 1, v4, 19, 16, 17, 18
+
+ add_update 0, v5, 16, 17, 18, 19
+ add_update 1, v6, 17, 18, 19, 16
+ add_update 0, v7, 18, 19, 16, 17
+ add_update 1, v8, 19, 16, 17, 18
+
+ add_update 0, v9, 16, 17, 18, 19
+ add_update 1, v10, 17, 18, 19, 16
+ add_update 0, v11, 18, 19, 16, 17
+ add_update 1, v12, 19, 16, 17, 18
+
+ add_only 0, v13, 17
+ add_only 1, v14, 18
+ add_only 0, v15, 19
+ add_only 1
+
+ /* update state */
+ add dgav.4s, dgav.4s, dg0v.4s
+ add dgbv.4s, dgbv.4s, dg1v.4s
+
+ /* handled all input blocks? */
+ cbnz w0, 0b
+
+ /*
+ * Final block: add padding and total bit count.
+ * Skip if we have no total byte count in x4. In that case, the input
+ * size was not a round multiple of the block size, and the padding is
+ * handled by the C code.
+ */
+ cbz x4, 3f
+ movi v17.2d, #0
+ mov x8, #0x80000000
+ movi v18.2d, #0
+ ror x7, x4, #29 // ror(lsl(x4, 3), 32)
+ fmov d16, x8
+ mov x4, #0
+ mov v19.d[0], xzr
+ mov v19.d[1], x7
+ b 2b
+
+ /* store new state */
+3: stp dga, dgb, [x2]
+ ret
+ENDPROC(sha2_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
new file mode 100644
index 000000000000..c294e67d3925
--- /dev/null
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -0,0 +1,255 @@
+/*
+ * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state,
+ u8 *head, long bytes);
+
+static int sha224_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ *sctx = (struct sha256_state){
+ .state = {
+ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+ }
+ };
+ return 0;
+}
+
+static int sha256_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ *sctx = (struct sha256_state){
+ .state = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+ }
+ };
+ return 0;
+}
+
+static int sha2_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if ((partial + len) >= SHA256_BLOCK_SIZE) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA256_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buf + partial, data, p);
+ data += p;
+ len -= p;
+ }
+
+ blocks = len / SHA256_BLOCK_SIZE;
+ len %= SHA256_BLOCK_SIZE;
+
+ kernel_neon_begin_partial(28);
+ sha2_ce_transform(blocks, data, sctx->state,
+ partial ? sctx->buf : NULL, 0);
+ kernel_neon_end();
+
+ data += blocks * SHA256_BLOCK_SIZE;
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buf + partial, data, len);
+ return 0;
+}
+
+static void sha2_final(struct shash_desc *desc)
+{
+ static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
+
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be64 bits = cpu_to_be64(sctx->count << 3);
+ u32 padlen = SHA256_BLOCK_SIZE
+ - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE);
+
+ sha2_update(desc, padding, padlen);
+ sha2_update(desc, (const u8 *)&bits, sizeof(bits));
+}
+
+static int sha224_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ int i;
+
+ sha2_final(desc);
+
+ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
+
+static int sha256_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ int i;
+
+ sha2_final(desc);
+
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
+
+static void sha2_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ int blocks;
+
+ if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) {
+ sha2_update(desc, data, len);
+ sha2_final(desc);
+ return;
+ }
+
+ /*
+ * Use a fast path if the input is a multiple of 64 bytes. In
+ * this case, there is no need to copy data around, and we can
+ * perform the entire digest calculation in a single invocation
+	 * of sha2_ce_transform().
+ */
+ blocks = len / SHA256_BLOCK_SIZE;
+
+ kernel_neon_begin_partial(28);
+ sha2_ce_transform(blocks, data, sctx->state, NULL, len);
+ kernel_neon_end();
+ data += blocks * SHA256_BLOCK_SIZE;
+}
+
+static int sha224_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ int i;
+
+ sha2_finup(desc, data, len);
+
+ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
+
+static int sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ int i;
+
+ sha2_finup(desc, data, len);
+
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
+
+static int sha2_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state *dst = out;
+
+ *dst = *sctx;
+ return 0;
+}
+
+static int sha2_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state const *src = in;
+
+ *sctx = *src;
+ return 0;
+}
+
+static struct shash_alg algs[] = { {
+ .init = sha224_init,
+ .update = sha2_update,
+ .final = sha224_final,
+ .finup = sha224_finup,
+ .export = sha2_export,
+ .import = sha2_import,
+ .descsize = sizeof(struct sha256_state),
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-ce",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .init = sha256_init,
+ .update = sha2_update,
+ .final = sha256_final,
+ .finup = sha256_finup,
+ .export = sha2_export,
+ .import = sha2_import,
+ .descsize = sizeof(struct sha256_state),
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-ce",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static int __init sha2_ce_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha2_ce_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_cpu_feature_match(SHA2, sha2_ce_mod_init);
+module_exit(sha2_ce_mod_fini);
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 79a642d199f2..cfe9860b2076 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -10,6 +10,7 @@ generic-y += delay.h
generic-y += div64.h
generic-y += dma.h
generic-y += emergency-restart.h
+generic-y += early_ioremap.h
generic-y += errno.h
generic-y += ftrace.h
generic-y += hw_irq.h
@@ -26,16 +27,17 @@ generic-y += mman.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += pci.h
-generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += resource.h
+generic-y += rwsem.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
+generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index bf6ab242f047..be56d33c5dbf 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -26,7 +26,13 @@
#include <clocksource/arm_arch_timer.h>
-static inline void arch_timer_reg_write(int access, int reg, u32 val)
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code.
+ */
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
@@ -36,8 +42,6 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val)
case ARCH_TIMER_REG_TVAL:
asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
break;
- default:
- BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
@@ -47,17 +51,14 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val)
case ARCH_TIMER_REG_TVAL:
asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
break;
- default:
- BUILD_BUG();
}
- } else {
- BUILD_BUG();
}
isb();
}
-static inline u32 arch_timer_reg_read(int access, int reg)
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
u32 val;
@@ -69,8 +70,6 @@ static inline u32 arch_timer_reg_read(int access, int reg)
case ARCH_TIMER_REG_TVAL:
asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
break;
- default:
- BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
@@ -80,11 +79,7 @@ static inline u32 arch_timer_reg_read(int access, int reg)
case ARCH_TIMER_REG_TVAL:
asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
break;
- default:
- BUILD_BUG();
}
- } else {
- BUILD_BUG();
}
return val;
@@ -97,27 +92,47 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline void __cpuinit arch_counter_set_user_access(void)
+static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
-
- /* Disable user access to the timers and the physical counter. */
asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- cntkctl &= ~((3 << 8) | (1 << 0));
+ return cntkctl;
+}
- /* Enable user access to the virtual counter and frequency. */
- cntkctl |= (1 << 1);
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
-static inline u64 arch_counter_get_cntpct(void)
+static inline void __cpuinit arch_counter_set_user_access(void)
{
- u64 cval;
+ u32 cntkctl = arch_timer_get_cntkctl();
- isb();
- asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
+ /* Disable user access to the timers and the physical counter */
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
- return cval;
+ /* Enable user access to the virtual counter */
+ cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+ arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+ cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+ /* Set the divider and enable virtual event stream */
+ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+ | ARCH_TIMER_VIRT_EVT_EN;
+ arch_timer_set_cntkctl(cntkctl);
+ elf_hwcap |= HWCAP_EVTSTRM;
+#ifdef CONFIG_COMPAT
+ compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
+#endif
}
static inline u64 arch_counter_get_cntvct(void)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5aceb83b3f5c..fd3e3924041b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -115,3 +115,34 @@ lr .req x30 // link register
.align 7
b \label
.endm
+
+/*
+ * Select code when configured for BE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_BE(code...) code
+#else
+#define CPU_BE(code...)
+#endif
+
+/*
+ * Select code when configured for LE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_LE(code...)
+#else
+#define CPU_LE(code...) code
+#endif
+
+/*
+ * Define a macro that constructs a 64-bit value by concatenating two
+ * 32-bit registers. Note that on big endian systems the order of the
+ * registers is swapped.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ .macro regs_to_64, rd, lbits, hbits
+#else
+ .macro regs_to_64, rd, hbits, lbits
+#endif
+ orr \rd, \lbits, \hbits, lsl #32
+ .endm
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 836364468571..a049bf7f5150 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_add_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_sub_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
unsigned long tmp;
int oldval;
+ smp_mb();
+
asm volatile("// atomic_cmpxchg\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" cmp %w1, %w3\n"
" b.ne 2f\n"
-" stlxr %w0, %w4, %2\n"
+" stxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
+ smp_mb();
return oldval;
}
@@ -173,7 +176,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
*/
#define ATOMIC64_INIT(i) { (i) }
-#define atomic64_read(v) (*(volatile long long *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
static inline void atomic64_add(u64 i, atomic64_t *v)
@@ -187,8 +190,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -197,14 +199,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_add_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" add %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -219,8 +222,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -229,14 +231,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_sub_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" sub %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -245,17 +248,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
long oldval;
unsigned long res;
+ smp_mb();
+
asm volatile("// atomic64_cmpxchg\n"
-"1: ldaxr %1, %2\n"
+"1: ldxr %1, %2\n"
" cmp %1, %3\n"
" b.ne 2f\n"
-" stlxr %w0, %4, %2\n"
+" stxr %w0, %4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
+ smp_mb();
return oldval;
}
@@ -267,11 +273,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
+" dmb ish\n"
"2:"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
:
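Editor's note: the atomic.h rework drops acquire semantics from the load-exclusive and instead brackets the value-returning operations with an explicit smp_mb(), keeping them fully ordered as the generic atomic API requires. A hedged sketch of a common caller pattern that relies on that ordering (illustrative helper, not from the patch):

#include <linux/atomic.h>

/* Increment a counter but never let it exceed 'max'; returns true on success. */
static bool atomic_inc_below(atomic_t *v, int max)
{
	int cur = atomic_read(v);

	for (;;) {
		int old;

		if (cur >= max)
			return false;
		/* Relies on atomic_cmpxchg() acting as a full barrier on success. */
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			return true;
		cur = old;
	}
}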
diff --git a/arch/arm64/include/asm/bL_switcher.h b/arch/arm64/include/asm/bL_switcher.h
new file mode 100644
index 000000000000..2bee500b7f54
--- /dev/null
+++ b/arch/arm64/include/asm/bL_switcher.h
@@ -0,0 +1,54 @@
+/*
+ * Based on the stubs for the ARM implementation, which is:
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+static inline int bL_switch_request(unsigned int cpu,
+ unsigned int new_cluster_id)
+{
+ return -ENOTSUPP;
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+
+#endif
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index d4a63338a53c..709f1f6d6bbd 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,20 +25,71 @@
#define wfi() asm volatile("wfi" : : : "memory")
#define isb() asm volatile("isb" : : : "memory")
-#define dsb() asm volatile("dsb sy" : : : "memory")
+#define dmb(opt) asm volatile("dmb " #opt : : : "memory")
+#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
-#define mb() dsb()
-#define rmb() asm volatile("dsb ld" : : : "memory")
-#define wmb() asm volatile("dsb st" : : : "memory")
+#define mb() dsb(sy)
+#define rmb() dsb(ld)
+#define wmb() dsb(st)
#ifndef CONFIG_SMP
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#else
-#define smp_mb() asm volatile("dmb ish" : : : "memory")
-#define smp_rmb() asm volatile("dmb ishld" : : : "memory")
-#define smp_wmb() asm volatile("dmb ishst" : : : "memory")
+
+#define smp_mb() dmb(ish)
+#define smp_rmb() dmb(ishld)
+#define smp_wmb() dmb(ishst)
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 4: \
+ asm volatile ("stlr %w1, %0" \
+ : "=Q" (*p) : "r" (v) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("stlr %1, %0" \
+ : "=Q" (*p) : "r" (v) : "memory"); \
+ break; \
+ } \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 4: \
+ asm volatile ("ldar %w0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("ldar %0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ break; \
+ } \
+ ___p1; \
+})
+
#endif
#define read_barrier_depends() do { } while(0)
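Editor's note: the new smp_store_release()/smp_load_acquire() helpers map directly onto AArch64 STLR/LDAR in the SMP case. A minimal message-passing sketch of the pattern they are designed for (illustrative, not from the patch):

#include <linux/compiler.h>
#include <asm/barrier.h>

static int payload;
static int ready;

/* Producer: publish the payload, then set the flag with release semantics. */
static void producer(void)
{
	payload = 42;
	smp_store_release(&ready, 1);	/* compiles to stlr on arm64 SMP */
}

/* Consumer: an acquire load of the flag orders the later payload read. */
static int consumer(void)
{
	if (smp_load_acquire(&ready))	/* compiles to ldar on arm64 SMP */
		return payload;
	return -1;
}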
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 3300cbd18a89..689b6379188c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -85,6 +85,13 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
}
/*
+ * Cache maintenance functions used by the DMA API. Not to be used directly.
+ */
+extern void __dma_map_area(const void *, size_t, int);
+extern void __dma_unmap_area(const void *, size_t, int);
+extern void __dma_flush_range(const void *, const void *);
+
+/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
* space" model to handle this.
@@ -116,6 +123,7 @@ extern void flush_dcache_page(struct page *);
static inline void __flush_icache_all(void)
{
asm("ic ialluis");
+ dsb(ish);
}
#define flush_dcache_mmap_lock(mapping) \
@@ -123,9 +131,6 @@ static inline void __flush_icache_all(void)
#define flush_dcache_mmap_unlock(mapping) \
spin_unlock_irq(&(mapping)->tree_lock)
-#define flush_icache_user_range(vma,page,addr,len) \
- flush_dcache_page(page)
-
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
@@ -133,23 +138,18 @@ static inline void __flush_icache_all(void)
#define flush_icache_page(vma,page) do { } while (0)
/*
- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
- * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
- * caches, since the direct-mappings of these pages may contain cached
- * data, we need to do a full cache flush to ensure that writebacks
- * don't corrupt data placed into these pages via the new mappings.
+ * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
*/
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
- /*
- * set_pte_at() called from vmap_pte_range() does not
- * have a DSB after cleaning the cache line.
- */
- dsb();
}
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
#endif
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 8a8ce0e73a38..a84d4c8acbbe 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,49 +29,55 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) {
case 1:
asm volatile("// __xchg1\n"
- "1: ldaxrb %w0, %2\n"
+ "1: ldxrb %w0, %2\n"
" stlxrb %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 2:
asm volatile("// __xchg2\n"
- "1: ldaxrh %w0, %2\n"
+ "1: ldxrh %w0, %2\n"
" stlxrh %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 4:
asm volatile("// __xchg4\n"
- "1: ldaxr %w0, %2\n"
+ "1: ldxr %w0, %2\n"
" stlxr %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 8:
asm volatile("// __xchg8\n"
- "1: ldaxr %0, %2\n"
+ "1: ldxr %0, %2\n"
" stlxr %w1, %3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
default:
BUILD_BUG();
}
+ smp_mb();
return ret;
}
#define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+ __ret; \
+})
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
@@ -158,19 +164,27 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
return ret;
}
-#define cmpxchg(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
-
-#define cmpxchg_local(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
+ sizeof(*(ptr))); \
+ __ret; \
+})
+
+#define cmpxchg_local(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ __ret; \
+})
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+#define cmpxchg64_relaxed(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+
#endif /* __ASM_CMPXCHG_H */
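Editor's note: with xchg()/cmpxchg() now wrapped in statement expressions, they can be applied to any 1/2/4/8-byte object and still yield a correctly typed old value. A small hedged example of the classic xchg() hand-off idiom (names are illustrative):

#include <linux/atomic.h>

struct work_item {
	int data;
};

static struct work_item *pending;

/* Atomically take ownership of whatever item is currently pending, if any. */
static struct work_item *grab_pending(void)
{
	/* xchg() acts as a full barrier around the pointer swap. */
	return xchg(&pending, NULL);
}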
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 899af807ef0f..56de5aadede2 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -26,15 +26,19 @@
#include <linux/ptrace.h>
#define COMPAT_USER_HZ 100
+#ifdef __AARCH64EB__
+#define COMPAT_UTS_MACHINE "armv8b\0\0"
+#else
#define COMPAT_UTS_MACHINE "armv8l\0\0"
+#endif
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
-typedef u32 __compat_uid_t;
-typedef u32 __compat_gid_t;
+typedef u16 __compat_uid_t;
+typedef u16 __compat_gid_t;
typedef u16 __compat_uid16_t;
typedef u16 __compat_gid16_t;
typedef u32 __compat_uid32_t;
@@ -73,13 +77,23 @@ struct compat_timeval {
};
struct compat_stat {
+#ifdef __AARCH64EB__
+ short st_dev;
+ short __pad1;
+#else
compat_dev_t st_dev;
+#endif
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_ushort_t st_nlink;
__compat_uid16_t st_uid;
__compat_gid16_t st_gid;
+#ifdef __AARCH64EB__
+ short st_rdev;
+ short __pad2;
+#else
compat_dev_t st_rdev;
+#endif
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
@@ -214,7 +228,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
-#define compat_user_stack_pointer() (current_pt_regs()->compat_sp)
+#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
static inline void __user *arch_compat_alloc_user_space(long len)
{
@@ -291,11 +305,6 @@ static inline int is_compat_thread(struct thread_info *thread)
#else /* !CONFIG_COMPAT */
-static inline int is_compat_task(void)
-{
- return 0;
-}
-
static inline int is_compat_thread(struct thread_info *thread)
{
return 0;
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
new file mode 100644
index 000000000000..47dfa31ad71a
--- /dev/null
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPU_OPS_H
+#define __ASM_CPU_OPS_H
+
+#include <linux/init.h>
+#include <linux/threads.h>
+
+struct device_node;
+
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name: Name of the enable method, as it appears in a devicetree cpu
+ * node's enable-method property.
+ * @cpu_init: Reads any data necessary for a specific enable-method from the
+ * devicetree, for a given cpu node and proposed logical id.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
+ * devicetree, for a given cpu node and proposed logical id.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
+ * mechanism for doing so, tests whether it is possible to boot
+ * the given CPU.
+ * @cpu_boot: Boots a cpu into the kernel.
+ * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
+ * synchronisation. Called from the cpu being booted.
+ * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
+ * reason, which will cause the hot unplug to be aborted. Called
+ * from the cpu to be killed.
+ * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
+ * cpu being killed.
+ * @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
+ * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
+ * to wrong parameters or error conditions. Called from the
+ * CPU being suspended. Must be called with IRQs disabled.
+ */
+struct cpu_operations {
+ const char *name;
+ int (*cpu_init)(struct device_node *, unsigned int);
+ int (*cpu_init_idle)(struct device_node *, unsigned int);
+ int (*cpu_prepare)(unsigned int);
+ int (*cpu_boot)(unsigned int);
+ void (*cpu_postboot)(void);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_disable)(unsigned int cpu);
+ void (*cpu_die)(unsigned int cpu);
+ int (*cpu_kill)(unsigned int cpu);
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+ int (*cpu_suspend)(unsigned long);
+#endif
+};
+
+extern const struct cpu_operations *cpu_ops[NR_CPUS];
+extern int __init cpu_read_ops(struct device_node *dn, int cpu);
+extern void __init cpu_read_bootcpu_ops(void);
+
+#endif /* ifndef __ASM_CPU_OPS_H */
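Editor's note: to illustrate how these callbacks fit together, a hypothetical enable-method backend might look roughly like the sketch below (all names invented for illustration; only the mandatory hooks are shown):

static int example_cpu_init(struct device_node *dn, unsigned int cpu)
{
	/* parse any per-cpu properties required by this enable method */
	return 0;
}

static int example_cpu_prepare(unsigned int cpu)
{
	/* check that the given CPU can actually be booted */
	return 0;
}

static int example_cpu_boot(unsigned int cpu)
{
	/* kick the secondary CPU into the kernel entry point */
	return 0;
}

static const struct cpu_operations example_cpu_ops = {
	.name		= "example-method",
	.cpu_init	= example_cpu_init,
	.cpu_prepare	= example_cpu_prepare,
	.cpu_boot	= example_cpu_boot,
};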
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
new file mode 100644
index 000000000000..cd4ac0516488
--- /dev/null
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <asm/hwcap.h>
+
+/*
+ * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
+ * in the kernel and for user space to keep track of which optional features
+ * are supported by the current system. So let's map feature 'x' to HWCAP_x.
+ * Note that HWCAP_x constants are bit fields so we need to take the log.
+ */
+
+#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
+#define cpu_feature(x) ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ return elf_hwcap & (1UL << num);
+}
+
+#endif
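Editor's note: a minimal usage sketch of the two helpers above (assuming a HWCAP_AES bit is defined in <asm/hwcap.h>):

static inline bool example_has_aes(void)
{
	/* cpu_feature(AES) turns the HWCAP_AES mask into a bit number */
	return cpu_have_feature(cpu_feature(AES));
}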
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
new file mode 100644
index 000000000000..b52a9932e2b1
--- /dev/null
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_CPUIDLE_H
+#define __ASM_CPUIDLE_H
+
+#ifdef CONFIG_CPU_IDLE
+extern int cpu_init_idle(unsigned int cpu);
+#else
+static inline int cpu_init_idle(unsigned int cpu)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index cf2749488cd4..539bea61f129 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -16,31 +16,54 @@
#ifndef __ASM_CPUTYPE_H
#define __ASM_CPUTYPE_H
-#define ID_MIDR_EL1 "midr_el1"
-#define ID_MPIDR_EL1 "mpidr_el1"
-#define ID_CTR_EL0 "ctr_el0"
-
-#define ID_AA64PFR0_EL1 "id_aa64pfr0_el1"
-#define ID_AA64DFR0_EL1 "id_aa64dfr0_el1"
-#define ID_AA64AFR0_EL1 "id_aa64afr0_el1"
-#define ID_AA64ISAR0_EL1 "id_aa64isar0_el1"
-#define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1"
-
#define INVALID_HWID ULONG_MAX
#define MPIDR_HWID_BITMASK 0xff00ffffff
+#define MPIDR_LEVEL_BITS_SHIFT 3
+#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_LEVEL_SHIFT(level) \
+ (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+ ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
+
#define read_cpuid(reg) ({ \
u64 __val; \
- asm("mrs %0, " reg : "=r" (__val)); \
+ asm("mrs %0, " #reg : "=r" (__val)); \
__val; \
})
+#define MIDR_REVISION_MASK 0xf
+#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK)
+#define MIDR_PARTNUM_SHIFT 4
+#define MIDR_PARTNUM_MASK (0xfff << MIDR_PARTNUM_SHIFT)
+#define MIDR_PARTNUM(midr) \
+ (((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT)
+#define MIDR_ARCHITECTURE_SHIFT 16
+#define MIDR_ARCHITECTURE_MASK (0xf << MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_ARCHITECTURE(midr) \
+ (((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_VARIANT_SHIFT 20
+#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT)
+#define MIDR_VARIANT(midr) \
+ (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
+#define MIDR_IMPLEMENTOR_SHIFT 24
+#define MIDR_IMPLEMENTOR_MASK (0xff << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR(midr) \
+ (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+
#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_APM 0x50
-#define ARM_CPU_PART_AEM_V8 0xD0F0
-#define ARM_CPU_PART_FOUNDATION 0xD000
-#define ARM_CPU_PART_CORTEX_A57 0xD070
+#define ARM_CPU_PART_AEM_V8 0xD0F
+#define ARM_CPU_PART_FOUNDATION 0xD00
+#define ARM_CPU_PART_CORTEX_A57 0xD07
+#define ARM_CPU_PART_CORTEX_A53 0xD03
+
+#define APM_CPU_PART_POTENZA 0x000
#ifndef __ASSEMBLY__
@@ -51,29 +74,31 @@
*/
static inline u32 __attribute_const__ read_cpuid_id(void)
{
- return read_cpuid(ID_MIDR_EL1);
+ return read_cpuid(MIDR_EL1);
}
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
- return read_cpuid(ID_MPIDR_EL1);
+ return read_cpuid(MPIDR_EL1);
}
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
{
- return (read_cpuid_id() & 0xFF000000) >> 24;
+ return MIDR_IMPLEMENTOR(read_cpuid_id());
}
static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
{
- return (read_cpuid_id() & 0xFFF0);
+ return MIDR_PARTNUM(read_cpuid_id());
}
static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
- return read_cpuid(ID_CTR_EL0);
+ return read_cpuid(CTR_EL0);
}
+void cpuinfo_store_cpu(void);
+
#endif /* __ASSEMBLY__ */
#endif
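Editor's note: a short sketch showing how the new MIDR field accessors and MPIDR_AFFINITY_LEVEL() are intended to be used (illustrative only, not part of this patch):

static void example_decode_cpuid(void)
{
	u32 midr  = read_cpuid_id();
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;

	pr_info("implementer %#x part %#x variant %u rev %u\n",
		MIDR_IMPLEMENTOR(midr), MIDR_PARTNUM(midr),
		MIDR_VARIANT(midr), MIDR_REVISION(midr));
	pr_info("affinity %llu.%llu.%llu\n",
		MPIDR_AFFINITY_LEVEL(mpidr, 2),
		MPIDR_AFFINITY_LEVEL(mpidr, 1),
		MPIDR_AFFINITY_LEVEL(mpidr, 0));
}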
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7eaa0b302493..aab72ce22348 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -18,6 +18,15 @@
#ifdef __KERNEL__
+/* Low-level stepping controls. */
+#define DBG_MDSCR_SS (1 << 0)
+#define DBG_SPSR_SS (1 << 21)
+
+/* MDSCR_EL1 enabling bits */
+#define DBG_MDSCR_KDE (1 << 13)
+#define DBG_MDSCR_MDE (1 << 15)
+#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
+
#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
/* AArch64 */
@@ -26,10 +35,52 @@
#define DBG_ESR_EVT_HWWP 0x2
#define DBG_ESR_EVT_BRK 0x6
-enum debug_el {
- DBG_ACTIVE_EL0 = 0,
- DBG_ACTIVE_EL1,
-};
+/*
+ * Break point instruction encoding
+ */
+#define BREAK_INSTR_SIZE 4
+
+/*
+ * ESR values expected for dynamic and compile time BRK instruction
+ */
+#define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xfffff))
+
+/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgdb are 0x400 - 0x7ff
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ */
+#define KGDB_DYN_DGB_BRK_IMM 0x400
+#define KDBG_COMPILED_DBG_BRK_IMM 0x401
+
+/*
+ * BRK instruction encoding
+ * The #imm16 value should be placed at bits[20:5] within the BRK instruction
+ */
+#define AARCH64_BREAK_MON 0xd4200000
+
+/*
+ * Extract byte from BRK instruction
+ */
+#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
+ ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
+
+/*
+ * Extract byte from BRK #imm16
+ */
+#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
+ (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
+
+#define KGDB_DYN_DGB_BRK_BYTE(x) \
+ (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
+
+#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0)
+#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1)
+#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2)
+#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3)
+
+#define CACHE_FLUSH_IS_SAFE 1
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
@@ -43,27 +94,36 @@ enum debug_el {
#ifndef __ASSEMBLY__
struct task_struct;
-#define local_dbg_save(flags) \
- do { \
- typecheck(unsigned long, flags); \
- asm volatile( \
- "mrs %0, daif // local_dbg_save\n" \
- "msr daifset, #8" \
- : "=r" (flags) : : "memory"); \
- } while (0)
-
-#define local_dbg_restore(flags) \
- do { \
- typecheck(unsigned long, flags); \
- asm volatile( \
- "msr daif, %0 // local_dbg_restore\n" \
- : : "r" (flags) : "memory"); \
- } while (0)
-
#define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
+#define DBG_HOOK_HANDLED 0
+#define DBG_HOOK_ERROR 1
+
+struct step_hook {
+ struct list_head node;
+ int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_step_hook(struct step_hook *hook);
+void unregister_step_hook(struct step_hook *hook);
+
+struct break_hook {
+ struct list_head node;
+ u32 esr_val;
+ u32 esr_mask;
+ int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_break_hook(struct break_hook *hook);
+void unregister_break_hook(struct break_hook *hook);
+
u8 debug_monitors_arch(void);
+enum debug_el {
+ DBG_ACTIVE_EL0 = 0,
+ DBG_ACTIVE_EL1,
+};
+
void enable_debug_monitors(enum debug_el el);
void disable_debug_monitors(enum debug_el el);
@@ -83,6 +143,15 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
}
#endif
+#ifdef CONFIG_COMPAT
+int aarch32_break_handler(struct pt_regs *regs);
+#else
+static inline int aarch32_break_handler(struct pt_regs *regs)
+{
+ return -EFAULT;
+}
+#endif
+
#endif /* __ASSEMBLY */
#endif /* __KERNEL__ */
#endif /* __ASM_DEBUG_MONITORS_H */
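Editor's note: a sketch of how a debugger backend might use the new break_hook interface (the #imm16 value 0x402 is made up for illustration):

static int example_brk_fn(struct pt_regs *regs, unsigned int esr)
{
	pr_info("example BRK trapped at %llx\n", regs->pc);
	return DBG_HOOK_HANDLED;
}

static struct break_hook example_brk_hook = {
	.esr_val	= DBG_ESR_VAL_BRK(0x402),
	.esr_mask	= 0xffffffff,
	.fn		= example_brk_fn,
};

/* register_break_hook(&example_brk_hook) from an init path. */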
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 0d8453c755a8..cf98b362094b 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -18,6 +18,9 @@
struct dev_archdata {
struct dma_map_ops *dma_ops;
+#ifdef CONFIG_IOMMU_API
+ void *iommu; /* private IOMMU data */
+#endif
};
struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
new file mode 100644
index 000000000000..14c4c0ca7f2a
--- /dev/null
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_DMA_CONTIGUOUS_H
+#define _ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_DMA_CMA
+
+#include <linux/types.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 994776894198..fa6a0c5a8de3 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -23,9 +23,10 @@
#include <asm-generic/dma-coherent.h>
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops coherent_swiotlb_dma_ops;
+extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
@@ -35,6 +36,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return dev->archdata.dma_ops;
}
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+ dev->archdata.dma_ops = ops;
+}
+
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
@@ -81,8 +87,12 @@ static inline void dma_mark_clean(void *addr, size_t size)
{
}
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+#define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *vaddr;
@@ -90,13 +100,14 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
return vaddr;
- vaddr = ops->alloc(dev, size, dma_handle, flags, NULL);
+ vaddr = ops->alloc(dev, size, dma_handle, flags, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
return vaddr;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dev_addr)
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dev_addr,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -104,7 +115,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
return;
debug_dma_free_coherent(dev, size, vaddr, dev_addr);
- ops->free(dev, size, vaddr, dev_addr, NULL);
+ ops->free(dev, size, vaddr, dev_addr, attrs);
}
/*
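Editor's note: with the wrappers above, dma_alloc_coherent()/dma_free_coherent() simply forward to the attrs variants with NULL attributes, so an ordinary driver allocation path is unchanged. A minimal sketch (hypothetical helper):

static void *example_alloc_dma_buf(struct device *dev, size_t size,
				   dma_addr_t *dma)
{
	/* expands to dma_alloc_attrs(dev, size, dma, GFP_KERNEL, NULL) */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}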
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
new file mode 100644
index 000000000000..5a46c4e7f539
--- /dev/null
+++ b/arch/arm64/include/asm/efi.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_EFI_H
+#define _ASM_EFI_H
+
+#include <asm/io.h>
+
+#ifdef CONFIG_EFI
+extern void efi_init(void);
+extern void efi_idmap_init(void);
+#else
+#define efi_init()
+#define efi_idmap_init()
+#endif
+
+#endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index fe32c0e4ac01..01d3aab64b79 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -33,8 +33,6 @@ typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fpsimd_state elf_fpregset_t;
-#define EM_AARCH64 183
-
/*
* AArch64 static relocation types.
*/
@@ -92,11 +90,24 @@ typedef struct user_fpsimd_state elf_fpregset_t;
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
+#ifdef __AARCH64EB__
+#define ELF_DATA ELFDATA2MSB
+#else
#define ELF_DATA ELFDATA2LSB
+#endif
#define ELF_ARCH EM_AARCH64
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
#define ELF_PLATFORM_SIZE 16
+#ifdef __AARCH64EB__
+#define ELF_PLATFORM ("aarch64_be")
+#else
#define ELF_PLATFORM ("aarch64")
+#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -151,8 +162,12 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#ifdef CONFIG_COMPAT
-#define EM_ARM 40
+
+#ifdef __AARCH64EB__
+#define COMPAT_ELF_PLATFORM ("v8b")
+#else
#define COMPAT_ELF_PLATFORM ("v8l")
+#endif
#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3))
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 78834123a32e..c4a7f940b387 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -42,7 +42,7 @@
#define ESR_EL1_EC_SP_ALIGN (0x26)
#define ESR_EL1_EC_FP_EXC32 (0x28)
#define ESR_EL1_EC_FP_EXC64 (0x2C)
-#define ESR_EL1_EC_SERRROR (0x2F)
+#define ESR_EL1_EC_SERROR (0x2F)
#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
new file mode 100644
index 000000000000..5f7bfe6df723
--- /dev/null
+++ b/arch/arm64/include/asm/fixmap.h
@@ -0,0 +1,67 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ * Copyright (C) 2013 Mark Salter <msalter@redhat.com>
+ *
+ * Adapted from arch/x86_64 version.
+ *
+ */
+
+#ifndef _ASM_ARM64_FIXMAP_H
+#define _ASM_ARM64_FIXMAP_H
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/page.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * page-sized. Use set_fixmap(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ */
+enum fixed_addresses {
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_permanent_fixed_addresses,
+
+ /*
+ * Temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define NR_FIX_BTMAPS 4
+#else
+#define NR_FIX_BTMAPS 64
+#endif
+#define FIX_BTMAPS_SLOTS 7
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
+
+extern void __early_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags);
+
+#define __set_fixmap __early_set_fixmap
+
+#include <asm-generic/fixmap.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_ARM64_FIXMAP_H */
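Editor's note: a sketch of how an early-boot user would map a device page through one of the permanent slots above, before ioremap() is available (mirrors typical earlycon usage; not part of this patch):

static void __iomem *example_map_earlycon(phys_addr_t phys)
{
	__set_fixmap(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK, FIXMAP_PAGE_IO);
	/* fix_to_virt() comes from asm-generic/fixmap.h, included above */
	return (void __iomem *)(fix_to_virt(FIX_EARLYCON_MEM_BASE) +
				(phys & ~PAGE_MASK));
}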
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
new file mode 100644
index 000000000000..c5534facf941
--- /dev/null
+++ b/arch/arm64/include/asm/ftrace.h
@@ -0,0 +1,59 @@
+/*
+ * arch/arm64/include/asm/ftrace.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_FTRACE_H
+#define __ASM_FTRACE_H
+
+#include <asm/insn.h>
+
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
+
+#ifndef __ASSEMBLY__
+#include <linux/compat.h>
+
+extern void _mcount(unsigned long);
+extern void *return_address(unsigned int);
+
+struct dyn_arch_ftrace {
+ /* No extra data needed for arm64 */
+};
+
+extern unsigned long ftrace_graph_call;
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /*
+ * addr is the address of the mcount call instruction.
+ * recordmcount does the necessary offset calculation.
+ */
+ return addr;
+}
+
+#define ftrace_return_address(n) return_address(n)
+
+/*
+ * Because AArch32 mode does not share the same syscall table with AArch64,
+ * tracing compat syscalls may result in reporting bogus syscalls or even
+ * a hang, so just do not trace them.
+ * See kernel/trace/trace_syscalls.c
+ *
+ * x86 code says:
+ * If the user really wants these, then they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+ return is_compat_task();
+}
+#endif /* ifndef __ASSEMBLY__ */
+
+#endif /* __ASM_FTRACE_H */
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c582fa316366..5f750dc96e0f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -24,12 +24,14 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
asm volatile( \
-"1: ldaxr %w1, %2\n" \
+"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w3, %w0, %2\n" \
" cbnz %w3, 1b\n" \
+" dmb ish\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
+" .align 2\n" \
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection\n" \
@@ -39,7 +41,7 @@
" .popsection\n" \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "memory")
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -110,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
" cbnz %w3, 3f\n"
"2: stlxr %w3, %w5, %2\n"
" cbnz %w3, 1b\n"
+" dmb ish\n"
"3:\n"
" .pushsection .fixup,\"ax\"\n"
"4: mov %w0, %w6\n"
@@ -126,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .popsection\n"
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
- : "cc", "memory");
+ : "memory");
*uval = val;
return ret;
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 990c051e7829..ae4801d77514 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 4
+#define NR_IPI 5
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
new file mode 100644
index 000000000000..5b7ca8ace95f
--- /dev/null
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -0,0 +1,117 @@
+/*
+ * arch/arm64/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2013 Linaro Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_HUGETLB_H
+#define __ASM_HUGETLB_H
+
+#include <asm-generic/hugetlb.h>
+#include <asm/page.h>
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index d064047612b1..52b484b6aa1a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
*/
#define ARM_MAX_BRP 16
#define ARM_MAX_WRP 16
-#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP)
/* Virtual debug register bases. */
#define AARCH64_DBG_REG_BVR 0
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 6d4482fa35bc..024c46183c3c 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,13 @@
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_EVTSTRM (1 << 21)
+
+#define COMPAT_HWCAP2_AES (1 << 0)
+#define COMPAT_HWCAP2_PMULL (1 << 1)
+#define COMPAT_HWCAP2_SHA1 (1 << 2)
+#define COMPAT_HWCAP2_SHA2 (1 << 3)
+#define COMPAT_HWCAP2_CRC32 (1 << 4)
#ifndef __ASSEMBLY__
/*
@@ -37,12 +44,13 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-#define COMPAT_ELF_HWCAP (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
- COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
-extern unsigned int elf_hwcap;
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
+#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2)
+extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
+#endif
+
+extern unsigned long elf_hwcap;
#endif
#endif
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
new file mode 100644
index 000000000000..62e7b8bcd2dc
--- /dev/null
+++ b/arch/arm64/include/asm/insn.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_INSN_H
+#define __ASM_INSN_H
+
+#include <linux/types.h>
+
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE 4
+
+#ifndef __ASSEMBLY__
+
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section C3.1 "A64 instruction index by encoding":
+ * AArch64 main encoding table
+ * Bit position
+ * 28 27 26 25 Encoding Group
+ * 0 0 - - Unallocated
+ * 1 0 0 - Data processing, immediate
+ * 1 0 1 - Branch, exception generation and system instructions
+ * - 1 - 0 Loads and stores
+ * - 1 0 1 Data processing - register
+ * 0 1 1 1 Data processing - SIMD and floating point
+ * 1 1 1 1 Data processing - SIMD and floating point
+ * "-" means "don't care"
+ */
+enum aarch64_insn_encoding_class {
+ AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */
+ AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */
+ AARCH64_INSN_CLS_DP_REG, /* Data processing - register */
+ AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */
+ AARCH64_INSN_CLS_LDST, /* Loads and stores */
+ AARCH64_INSN_CLS_BR_SYS, /* Branch, exception generation and
+ * system instructions */
+};
+
+enum aarch64_insn_hint_op {
+ AARCH64_INSN_HINT_NOP = 0x0 << 5,
+ AARCH64_INSN_HINT_YIELD = 0x1 << 5,
+ AARCH64_INSN_HINT_WFE = 0x2 << 5,
+ AARCH64_INSN_HINT_WFI = 0x3 << 5,
+ AARCH64_INSN_HINT_SEV = 0x4 << 5,
+ AARCH64_INSN_HINT_SEVL = 0x5 << 5,
+};
+
+enum aarch64_insn_imm_type {
+ AARCH64_INSN_IMM_ADR,
+ AARCH64_INSN_IMM_26,
+ AARCH64_INSN_IMM_19,
+ AARCH64_INSN_IMM_16,
+ AARCH64_INSN_IMM_14,
+ AARCH64_INSN_IMM_12,
+ AARCH64_INSN_IMM_9,
+ AARCH64_INSN_IMM_MAX
+};
+
+enum aarch64_insn_branch_type {
+ AARCH64_INSN_BRANCH_NOLINK,
+ AARCH64_INSN_BRANCH_LINK,
+};
+
+#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
+static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
+{ return (code & (mask)) == (val); } \
+static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
+{ return (val); }
+
+__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000)
+__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
+__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
+__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
+__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
+__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
+
+#undef __AARCH64_INSN_FUNCS
+
+bool aarch64_insn_is_nop(u32 insn);
+
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
+enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm);
+u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
+u32 aarch64_insn_gen_nop(void);
+
+bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
+
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_INSN_H */
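Editor's note: a sketch tying the pieces together — read an instruction, classify it with the generated aarch64_insn_is_*() helpers, and patch it with a NOP (illustrative only):

static int example_nop_out_branch(void *addr)
{
	u32 insn;
	int ret;

	ret = aarch64_insn_read(addr, &insn);
	if (ret)
		return ret;

	/* only touch direct branches (B / BL) */
	if (!aarch64_insn_is_b(insn) && !aarch64_insn_is_bl(insn))
		return -EINVAL;

	return aarch64_insn_patch_text_nosync(addr, aarch64_insn_gen_nop());
}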
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 2e12258aa7e4..732e3d51c2cb 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -26,6 +26,7 @@
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/pgtable.h>
+#include <asm/early_ioremap.h>
/*
* Generic IO read/write. These perform native-endian accesses.
@@ -118,7 +119,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* I/O port access primitives.
*/
#define IO_SPACE_LIMIT 0xffff
-#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M))
+#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
static inline u8 inb(unsigned long addr)
{
@@ -224,19 +225,13 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
*/
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
extern void __iounmap(volatile void __iomem *addr);
-
-#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
+extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define iounmap __iounmap
-#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
-#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
-
#define ARCH_HAS_IOREMAP_WC
#include <asm-generic/iomap.h>
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 0332fc077f6e..e1f7ecdde11f 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,6 +4,7 @@
#include <asm-generic/irq.h>
extern void (*handle_arch_irq)(struct pt_regs *);
+extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa11943b8502..0ed52c691868 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -87,5 +87,28 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
return flags & PSR_I_BIT;
}
+/*
+ * save and restore debug state
+ */
+#define local_dbg_save(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ asm volatile( \
+ "mrs %0, daif // local_dbg_save\n" \
+ "msr daifset, #8" \
+ : "=r" (flags) : : "memory"); \
+ } while (0)
+
+#define local_dbg_restore(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ asm volatile( \
+ "msr daif, %0 // local_dbg_restore\n" \
+ : : "r" (flags) : "memory"); \
+ } while (0)
+
+#define local_dbg_enable() asm("msr daifclr, #8" : : : "memory")
+#define local_dbg_disable() asm("msr daifset, #8" : : : "memory")
+
#endif
#endif
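Editor's note: a small usage sketch of the debug-state helpers above, analogous to local_irq_save()/local_irq_restore() (hypothetical caller):

static void example_with_debug_masked(void (*fn)(void))
{
	unsigned long flags;

	local_dbg_save(flags);		/* mask debug exceptions, keep old DAIF */
	fn();
	local_dbg_restore(flags);	/* put the saved debug state back */
}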
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
new file mode 100644
index 000000000000..076a1c714049
--- /dev/null
+++ b/arch/arm64/include/asm/jump_label.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/include/asm/jump_label.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+#include <linux/types.h>
+#include <asm/insn.h>
+
+#ifdef __KERNEL__
+
+#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
+
+static __always_inline bool arch_static_branch(struct static_key *key)
+{
+ asm goto("1: nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 3\n\t"
+ ".quad 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i"(key) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#endif /* __KERNEL__ */
+
+typedef u64 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASM_JUMP_LABEL_H */
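Editor's note: for reference, generic code drives arch_static_branch() roughly as in the sketch below (assuming the static_key API of this kernel generation: STATIC_KEY_INIT_FALSE, static_key_false(), static_key_slow_inc()):

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

static void example_fast_path(void)
{
	/* compiles to the NOP emitted by arch_static_branch() above */
	if (static_key_false(&example_key))
		pr_info("slow path enabled\n");
}

/* static_key_slow_inc(&example_key) patches the NOP into a branch. */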
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
new file mode 100644
index 000000000000..3c8aafc1082f
--- /dev/null
+++ b/arch/arm64/include/asm/kgdb.h
@@ -0,0 +1,84 @@
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/include/kgdb.h
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KGDB_H
+#define __ARM_KGDB_H
+
+#include <linux/ptrace.h>
+#include <asm/debug-monitors.h>
+
+#ifndef __ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
+}
+
+extern void kgdb_handle_bus_error(void);
+extern int kgdb_fault_expected;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * gdb expects the following register layout.
+ *
+ * General purpose regs:
+ * r0-r30: 64 bit
+ * sp,pc : 64 bit
+ * pstate : 64 bit
+ * Total: 34
+ * FPU regs:
+ * f0-f31: 128 bit
+ * Total: 32
+ * Extra regs
+ * fpsr & fpcr: 32 bit
+ * Total: 2
+ *
+ */
+
+#define _GP_REGS 34
+#define _FP_REGS 32
+#define _EXTRA_REGS 2
+/*
+ * Size of the general purpose register dump in bytes. Each of the
+ * _GP_REGS entries, including pstate, is transferred as 8 bytes.
+ */
+#define GP_REG_BYTES (_GP_REGS * 8)
+#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
+
+/*
+ * Size of the I/O buffer for gdb packets, chosen to be large enough
+ * to hold the contents of all registers.
+ */
+
+#define BUFMAX 2048
+
+/*
+ * Number of bytes required for gdb_regs buffer.
+ * _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each
+ * GDB fails to connect with the error "'g' packet reply is too long"
+ * if the buffer is sized beyond this.
+ */
+
+#define NUMREGBYTES ((_GP_REGS * 8) + (_FP_REGS * 16) + \
+ (_EXTRA_REGS * 4))
+
+#endif /* __ASM_KGDB_H */
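Editor's note: for reference, the layout above works out to (34 * 8) + (32 * 16) + (2 * 4) = 272 + 512 + 8 = 792 bytes for NUMREGBYTES, which fits comfortably within the 2048-byte BUFMAX packet buffer.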
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
new file mode 100644
index 000000000000..7fd3e27e3ccc
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_ARM_H__
+#define __ARM64_KVM_ARM_H__
+
+#include <asm/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_ID (UL(1) << 33)
+#define HCR_CD (UL(1) << 32)
+#define HCR_RW_SHIFT 31
+#define HCR_RW (UL(1) << HCR_RW_SHIFT)
+#define HCR_TRVM (UL(1) << 30)
+#define HCR_HCD (UL(1) << 29)
+#define HCR_TDZ (UL(1) << 28)
+#define HCR_TGE (UL(1) << 27)
+#define HCR_TVM (UL(1) << 26)
+#define HCR_TTLB (UL(1) << 25)
+#define HCR_TPU (UL(1) << 24)
+#define HCR_TPC (UL(1) << 23)
+#define HCR_TSW (UL(1) << 22)
+#define HCR_TAC (UL(1) << 21)
+#define HCR_TIDCP (UL(1) << 20)
+#define HCR_TSC (UL(1) << 19)
+#define HCR_TID3 (UL(1) << 18)
+#define HCR_TID2 (UL(1) << 17)
+#define HCR_TID1 (UL(1) << 16)
+#define HCR_TID0 (UL(1) << 15)
+#define HCR_TWE (UL(1) << 14)
+#define HCR_TWI (UL(1) << 13)
+#define HCR_DC (UL(1) << 12)
+#define HCR_BSU (3 << 10)
+#define HCR_BSU_IS (UL(1) << 10)
+#define HCR_FB (UL(1) << 9)
+#define HCR_VA (UL(1) << 8)
+#define HCR_VI (UL(1) << 7)
+#define HCR_VF (UL(1) << 6)
+#define HCR_AMO (UL(1) << 5)
+#define HCR_IMO (UL(1) << 4)
+#define HCR_FMO (UL(1) << 3)
+#define HCR_PTW (UL(1) << 2)
+#define HCR_SWIO (UL(1) << 1)
+#define HCR_VM (UL(1) << 0)
+
+/*
+ * The bits we set in HCR:
+ * RW: 64bit by default, can be overridden for 32bit VMs
+ * TAC: Trap ACTLR
+ * TSC: Trap SMC
+ * TVM: Trap VM ops (until M+C set in SCTLR_EL1)
+ * TSW: Trap cache operations by set/way
+ * TWE: Trap WFE
+ * TWI: Trap WFI
+ * TIDCP: Trap L2CTLR/L2ECTLR
+ * BSU_IS: Upgrade barriers to the inner shareable domain
+ * FB: Force broadcast of all maintenance operations
+ * AMO: Override CPSR.A and enable signaling with VA
+ * IMO: Override CPSR.I and enable signaling with VI
+ * FMO: Override CPSR.F and enable signaling with VF
+ * SWIO: Turn set/way invalidates into set/way clean+invalidate
+ */
+#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
+ HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
+ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
+#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
+
+
+/* Hyp System Control Register (SCTLR_EL2) bits */
+#define SCTLR_EL2_EE (1 << 25)
+#define SCTLR_EL2_WXN (1 << 19)
+#define SCTLR_EL2_I (1 << 12)
+#define SCTLR_EL2_SA (1 << 3)
+#define SCTLR_EL2_C (1 << 2)
+#define SCTLR_EL2_A (1 << 1)
+#define SCTLR_EL2_M 1
+#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \
+ SCTLR_EL2_SA | SCTLR_EL2_I)
+
+/* TCR_EL2 Registers bits */
+#define TCR_EL2_TBI (1 << 20)
+#define TCR_EL2_PS (7 << 16)
+#define TCR_EL2_PS_40B (2 << 16)
+#define TCR_EL2_TG0 (1 << 14)
+#define TCR_EL2_SH0 (3 << 12)
+#define TCR_EL2_ORGN0 (3 << 10)
+#define TCR_EL2_IRGN0 (3 << 8)
+#define TCR_EL2_T0SZ 0x3f
+#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
+ TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
+
+#define TCR_EL2_FLAGS (TCR_EL2_PS_40B)
+
+/* VTCR_EL2 Registers bits */
+#define VTCR_EL2_PS_MASK (7 << 16)
+#define VTCR_EL2_TG0_MASK (1 << 14)
+#define VTCR_EL2_TG0_4K (0 << 14)
+#define VTCR_EL2_TG0_64K (1 << 14)
+#define VTCR_EL2_SH0_MASK (3 << 12)
+#define VTCR_EL2_SH0_INNER (3 << 12)
+#define VTCR_EL2_ORGN0_MASK (3 << 10)
+#define VTCR_EL2_ORGN0_WBWA (1 << 10)
+#define VTCR_EL2_IRGN0_MASK (3 << 8)
+#define VTCR_EL2_IRGN0_WBWA (1 << 8)
+#define VTCR_EL2_SL0_MASK (3 << 6)
+#define VTCR_EL2_SL0_LVL1 (1 << 6)
+#define VTCR_EL2_T0SZ_MASK 0x3f
+#define VTCR_EL2_T0SZ_40B 24
+
+/*
+ * We configure the Stage-2 page tables to always restrict the IPA space to be
+ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
+ * not known to exist and will break with this configuration.
+ *
+ * Note that when using 4K pages, we concatenate two first level page tables
+ * together.
+ *
+ * The magic numbers used for VTTBR_X in this patch can be found in Tables
+ * D4-23 and D4-25 in ARM DDI 0487A.b.
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+/*
+ * Stage2 translation configuration:
+ * 40bits output (PS = 2)
+ * 40bits input (T0SZ = 24)
+ * 64kB pages (TG0 = 1)
+ * 2 level page tables (SL = 1)
+ */
+#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
+ VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
+ VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
+#else
+/*
+ * Stage2 translation configuration:
+ * 40bits output (PS = 2)
+ * 40bits input (T0SZ = 24)
+ * 4kB pages (TG0 = 0)
+ * 3 level page tables (SL = 1)
+ */
+#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
+ VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
+ VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
+#endif
+
+#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT (48LLU)
+#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
+
+/* Hyp System Trap Register */
+#define HSTR_EL2_TTEE (1 << 16)
+#define HSTR_EL2_T(x) (1 << x)
+
+/* Hyp Coprocessor Trap Register */
+#define CPTR_EL2_TCPAC (1 << 31)
+#define CPTR_EL2_TTA (1 << 20)
+#define CPTR_EL2_TFP (1 << 10)
+
+/* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TDRA (1 << 11)
+#define MDCR_EL2_TDOSA (1 << 10)
+#define MDCR_EL2_TDA (1 << 9)
+#define MDCR_EL2_TDE (1 << 8)
+#define MDCR_EL2_HPME (1 << 7)
+#define MDCR_EL2_TPM (1 << 6)
+#define MDCR_EL2_TPMCR (1 << 5)
+#define MDCR_EL2_HPMN_MASK (0x1F)
+
+/* Exception Syndrome Register (ESR) bits */
+#define ESR_EL2_EC_SHIFT (26)
+#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
+#define ESR_EL2_IL (1U << 25)
+#define ESR_EL2_ISS (ESR_EL2_IL - 1)
+#define ESR_EL2_ISV_SHIFT (24)
+#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
+#define ESR_EL2_SAS_SHIFT (22)
+#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
+#define ESR_EL2_SSE (1 << 21)
+#define ESR_EL2_SRT_SHIFT (16)
+#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
+#define ESR_EL2_SF (1 << 15)
+#define ESR_EL2_AR (1 << 14)
+#define ESR_EL2_EA (1 << 9)
+#define ESR_EL2_CM (1 << 8)
+#define ESR_EL2_S1PTW (1 << 7)
+#define ESR_EL2_WNR (1 << 6)
+#define ESR_EL2_FSC (0x3f)
+#define ESR_EL2_FSC_TYPE (0x3c)
+
+#define ESR_EL2_CV_SHIFT (24)
+#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
+#define ESR_EL2_COND_SHIFT (20)
+#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
+
+
+#define FSC_FAULT (0x04)
+#define FSC_PERM (0x0c)
+
+/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+#define HPFAR_MASK (~0xFUL)
+
+#define ESR_EL2_EC_UNKNOWN (0x00)
+#define ESR_EL2_EC_WFI (0x01)
+#define ESR_EL2_EC_CP15_32 (0x03)
+#define ESR_EL2_EC_CP15_64 (0x04)
+#define ESR_EL2_EC_CP14_MR (0x05)
+#define ESR_EL2_EC_CP14_LS (0x06)
+#define ESR_EL2_EC_FP_ASIMD (0x07)
+#define ESR_EL2_EC_CP10_ID (0x08)
+#define ESR_EL2_EC_CP14_64 (0x0C)
+#define ESR_EL2_EC_ILL_ISS (0x0E)
+#define ESR_EL2_EC_SVC32 (0x11)
+#define ESR_EL2_EC_HVC32 (0x12)
+#define ESR_EL2_EC_SMC32 (0x13)
+#define ESR_EL2_EC_SVC64 (0x15)
+#define ESR_EL2_EC_HVC64 (0x16)
+#define ESR_EL2_EC_SMC64 (0x17)
+#define ESR_EL2_EC_SYS64 (0x18)
+#define ESR_EL2_EC_IABT (0x20)
+#define ESR_EL2_EC_IABT_HYP (0x21)
+#define ESR_EL2_EC_PC_ALIGN (0x22)
+#define ESR_EL2_EC_DABT (0x24)
+#define ESR_EL2_EC_DABT_HYP (0x25)
+#define ESR_EL2_EC_SP_ALIGN (0x26)
+#define ESR_EL2_EC_FP_EXC32 (0x28)
+#define ESR_EL2_EC_FP_EXC64 (0x2C)
+#define ESR_EL2_EC_SERROR (0x2F)
+#define ESR_EL2_EC_BREAKPT (0x30)
+#define ESR_EL2_EC_BREAKPT_HYP (0x31)
+#define ESR_EL2_EC_SOFTSTP (0x32)
+#define ESR_EL2_EC_SOFTSTP_HYP (0x33)
+#define ESR_EL2_EC_WATCHPT (0x34)
+#define ESR_EL2_EC_WATCHPT_HYP (0x35)
+#define ESR_EL2_EC_BKPT32 (0x38)
+#define ESR_EL2_EC_VECTOR32 (0x3A)
+#define ESR_EL2_EC_BRK64 (0x3C)
+
+#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
+
+#define ESR_EL2_EC_WFI_ISS_WFE (1 << 0)
+
+#endif /* __ARM64_KVM_ARM_H__ */
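Editor's note: for reference, with T0SZ fixed at 24 the definitions above give VTTBR_X = 38 - 24 = 14 for 64KB pages and VTTBR_X = 37 - 24 = 13 for 4KB pages, i.e. the number of low-order address bits that must be zero in the (possibly concatenated) first-level table address installed in VTTBR_EL2.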
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
new file mode 100644
index 000000000000..483842180f8f
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_ASM_H__
+#define __ARM_KVM_ASM_H__
+
+#include <asm/virt.h>
+
+/*
+ * 0 is reserved as an invalid value.
+ * Order *must* be kept in sync with the hyp switch code.
+ */
+#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */
+#define CSSELR_EL1 2 /* Cache Size Selection Register */
+#define SCTLR_EL1 3 /* System Control Register */
+#define ACTLR_EL1 4 /* Auxiliary Control Register */
+#define CPACR_EL1 5 /* Coprocessor Access Control */
+#define TTBR0_EL1 6 /* Translation Table Base Register 0 */
+#define TTBR1_EL1 7 /* Translation Table Base Register 1 */
+#define TCR_EL1 8 /* Translation Control Register */
+#define ESR_EL1 9 /* Exception Syndrome Register */
+#define AFSR0_EL1 10 /* Auxiliary Fault Status Register 0 */
+#define AFSR1_EL1 11 /* Auxiliary Fault Status Register 1 */
+#define FAR_EL1 12 /* Fault Address Register */
+#define MAIR_EL1 13 /* Memory Attribute Indirection Register */
+#define VBAR_EL1 14 /* Vector Base Address Register */
+#define CONTEXTIDR_EL1 15 /* Context ID Register */
+#define TPIDR_EL0 16 /* Thread ID, User R/W */
+#define TPIDRRO_EL0 17 /* Thread ID, User R/O */
+#define TPIDR_EL1 18 /* Thread ID, Privileged */
+#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
+#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
+#define PAR_EL1 21 /* Physical Address Register */
+#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
+#define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */
+#define DBGBCR15_EL1 38
+#define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */
+#define DBGBVR15_EL1 54
+#define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */
+#define DBGWCR15_EL1 70
+#define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */
+#define DBGWVR15_EL1 86
+#define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */
+
+/* 32bit specific registers. Keep them at the end of the range */
+#define DACR32_EL2 88 /* Domain Access Control Register */
+#define IFSR32_EL2 89 /* Instruction Fault Status Register */
+#define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 91 /* Debug Vector Catch Register */
+#define TEECR32_EL1 92 /* ThumbEE Configuration Register */
+#define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */
+#define NR_SYS_REGS 94
+
+/* 32bit mapping */
+#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
+#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
+#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
+#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
+#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
+#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
+#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
+#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
+#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
+#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
+#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
+#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
+#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
+#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
+#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
+#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
+#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
+#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
+#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
+#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
+#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
+#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
+#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
+#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
+#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
+#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
+#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+
+#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
+#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
+#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
+#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
+#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
+#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
+#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+
+#define NR_COPRO_REGS (NR_SYS_REGS * 2)
+
+#define ARM_EXCEPTION_IRQ 0
+#define ARM_EXCEPTION_TRAP 1
+
+#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
+#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+
+#ifndef __ASSEMBLY__
+struct kvm;
+struct kvm_vcpu;
+
+extern char __kvm_hyp_init[];
+extern char __kvm_hyp_init_end[];
+
+extern char __kvm_hyp_vector[];
+
+#define __kvm_hyp_code_start __hyp_text_start
+#define __kvm_hyp_code_end __hyp_text_end
+
+extern void __kvm_flush_vm_context(void);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+
+extern char __save_vgic_v2_state[];
+extern char __restore_vgic_v2_state[];
+extern char __save_vgic_v3_state[];
+extern char __restore_vgic_v3_state[];
+
+#endif
+
+#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
new file mode 100644
index 000000000000..0b52377a6c11
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/asm/kvm_coproc.h
+ * Copyright (C) 2012 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_COPROC_H__
+#define __ARM64_KVM_COPROC_H__
+
+#include <linux/kvm_host.h>
+
+void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
+
+struct kvm_sys_reg_table {
+ const struct sys_reg_desc *table;
+ size_t num;
+};
+
+struct kvm_sys_reg_target_table {
+ struct kvm_sys_reg_table table64;
+ struct kvm_sys_reg_table table32;
+};
+
+void kvm_register_target_sys_reg_table(unsigned int target,
+ struct kvm_sys_reg_target_table *table);
+
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+#define kvm_coproc_table_init kvm_sys_reg_table_init
+void kvm_sys_reg_table_init(void);
+
+struct kvm_one_reg;
+int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM64_KVM_COPROC_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..5674a55b5518
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/kvm_emulate.h
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_EMULATE_H__
+#define __ARM64_KVM_EMULATE_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmio.h>
+#include <asm/ptrace.h>
+
+unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
+
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
+
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
+static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
+}
+
+static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
+}
+
+static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
+}
+
+static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+{
+ return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
+}
+
+static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+ if (vcpu_mode_is_32bit(vcpu))
+ return kvm_condition_valid32(vcpu);
+
+ return true;
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+ if (vcpu_mode_is_32bit(vcpu))
+ kvm_skip_instr32(vcpu, is_wide_instr);
+ else
+ *vcpu_pc(vcpu) += 4;
+}
+
+static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+{
+ *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+}
+
+static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+{
+ if (vcpu_mode_is_32bit(vcpu))
+ return vcpu_reg32(vcpu, reg_num);
+
+ return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+}
+
+/* Get vcpu SPSR for current mode */
+static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
+{
+ if (vcpu_mode_is_32bit(vcpu))
+ return vcpu_spsr32(vcpu);
+
+ return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+}
+
+static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
+{
+ u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+
+ if (vcpu_mode_is_32bit(vcpu))
+ return mode > COMPAT_PSR_MODE_USR;
+
+ return mode != PSR_MODE_EL0t;
+}
+
+static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.esr_el2;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.far_el2;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+{
+ return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
+}
+
+static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
+}
+
+static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+{
+ return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
+}
+
+static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+{
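+	/* ESR_EL2.SAS encodes log2 of the access size, so this yields 1, 2, 4 or 8 bytes */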
+ return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
+}
+
+static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+}
+
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+ return vcpu_sys_reg(vcpu, MPIDR_EL1);
+}
+
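+/* SCTLR_EL1.EE (bit 25) selects the EL1 data endianness; the helpers below set/test it for 64-bit guests */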
+static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_mode_is_32bit(vcpu))
+ *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+ else
+ vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
+}
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_mode_is_32bit(vcpu))
+ return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+
+ return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
+}
+
+static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+ unsigned long data,
+ unsigned int len)
+{
+ if (kvm_vcpu_is_be(vcpu)) {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return be16_to_cpu(data & 0xffff);
+ case 4:
+ return be32_to_cpu(data & 0xffffffff);
+ default:
+ return be64_to_cpu(data);
+ }
+ } else {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return le16_to_cpu(data & 0xffff);
+ case 4:
+ return le32_to_cpu(data & 0xffffffff);
+ default:
+ return le64_to_cpu(data);
+ }
+ }
+
+ return data; /* Leave LE untouched */
+}
+
+static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+ unsigned long data,
+ unsigned int len)
+{
+ if (kvm_vcpu_is_be(vcpu)) {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return cpu_to_be16(data & 0xffff);
+ case 4:
+ return cpu_to_be32(data & 0xffffffff);
+ default:
+ return cpu_to_be64(data);
+ }
+ } else {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return cpu_to_le16(data & 0xffff);
+ case 4:
+ return cpu_to_le32(data & 0xffffffff);
+ default:
+ return cpu_to_le64(data);
+ }
+ }
+
+ return data; /* Leave LE untouched */
+}
+
+#endif /* __ARM64_KVM_EMULATE_H__ */
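
The two data-conversion helpers above differ only in direction: vcpu_data_guest_to_host() turns a value read in the guest's byte order into host order for the MMIO emulation code, and vcpu_data_host_to_guest() does the reverse before a result is written back. Below is a minimal standalone sketch of the same width-based swapping; it assumes a little-endian host and uses compiler byte-swap builtins in place of the kernel's be*_to_cpu() helpers, so it is illustrative only, not the patch code.

/* Illustrative only: the width-based byte swapping performed by
 * vcpu_data_guest_to_host() above, assuming a little-endian host.
 * __builtin_bswapN() stands in for the kernel's beN_to_cpu() helpers. */
#include <stdio.h>

static unsigned long guest_to_host(unsigned long data, unsigned int len,
				   int guest_is_be)
{
	if (!guest_is_be)
		return data;		/* LE guest on LE host: nothing to do */

	switch (len) {
	case 1:
		return data & 0xff;
	case 2:
		return __builtin_bswap16(data & 0xffff);
	case 4:
		return __builtin_bswap32(data & 0xffffffff);
	default:
		return __builtin_bswap64(data);
	}
}

int main(void)
{
	/* A big-endian guest stored 0x1234 as a 16-bit MMIO value;
	 * the little-endian host reads the raw bytes as 0x3412. */
	printf("host value: %#lx\n", guest_to_host(0x3412, 2, 1)); /* 0x1234 */
	return 0;
}
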
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
new file mode 100644
index 000000000000..bcde41905746
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/asm/kvm_host.h:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_HOST_H__
+#define __ARM64_KVM_HOST_H__
+
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+
+#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
+#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
+#else
+#define KVM_MAX_VCPUS 0
+#endif
+
+#define KVM_USER_MEM_SLOTS 32
+#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+#include <kvm/arm_vgic.h>
+#include <kvm/arm_arch_timer.h>
+
+#define KVM_VCPU_MAX_FEATURES 3
+
+int __attribute_const__ kvm_target_cpu(void);
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+int kvm_arch_dev_ioctl_check_extension(long ext);
+
+struct kvm_arch {
+ /* The VMID generation used for the virt. memory system */
+ u64 vmid_gen;
+ u32 vmid;
+
+ /* 1-level 2nd stage table and lock */
+ spinlock_t pgd_lock;
+ pgd_t *pgd;
+
+ /* VTTBR value associated with above pgd and vmid */
+ u64 vttbr;
+
+ /* Interrupt controller */
+ struct vgic_dist vgic;
+
+ /* Timer */
+ struct arch_timer_kvm timer;
+};
+
+#define KVM_NR_MEM_OBJS 40
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+ int nobjs;
+ void *objects[KVM_NR_MEM_OBJS];
+};
+
+struct kvm_vcpu_fault_info {
+ u32 esr_el2; /* Hyp Syndrome Register */
+ u64 far_el2; /* Hyp Fault Address Register */
+ u64 hpfar_el2; /* Hyp IPA Fault Address Register */
+};
+
+struct kvm_cpu_context {
+ struct kvm_regs gp_regs;
+ union {
+ u64 sys_regs[NR_SYS_REGS];
+ u32 copro[NR_COPRO_REGS];
+ };
+};
+
+typedef struct kvm_cpu_context kvm_cpu_context_t;
+
+struct kvm_vcpu_arch {
+ struct kvm_cpu_context ctxt;
+
+ /* HYP configuration */
+ u64 hcr_el2;
+
+ /* Exception Information */
+ struct kvm_vcpu_fault_info fault;
+
+ /* Debug state */
+ u64 debug_flags;
+
+ /* Pointer to host CPU context */
+ kvm_cpu_context_t *host_cpu_context;
+
+ /* VGIC state */
+ struct vgic_cpu vgic_cpu;
+ struct arch_timer_cpu timer_cpu;
+
+ /*
+ * Anything that is not used directly from assembly code goes
+ * here.
+ */
+ /* dcache set/way operation pending */
+ int last_pcpu;
+ cpumask_t require_dcache_flush;
+
+ /* Don't run the guest */
+ bool pause;
+
+ /* IO related fields */
+ struct kvm_decode mmio_decode;
+
+ /* Interrupt related fields */
+ u64 irq_lines; /* IRQ and FIQ levels */
+
+ /* Cache some mmu pages needed inside spinlock regions */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
+ /* Target CPU and feature flags */
+ int target;
+ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
+
+ /* Detect first run of a vcpu */
+ bool has_run_once;
+};
+
+#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
+#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
+/*
+ * CP14 and CP15 live in the same array, as they are backed by the
+ * same system registers.
+ */
+#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
+#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r))
+#define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r) + 1)
+#else
+#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r) + 1)
+#define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r))
+#endif
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+ u32 halt_wakeup;
+};
+
+int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init);
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_unmap_hva_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+/* We do not have shadow page tables, hence the empty hooks */
+static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return 0;
+}
+
+static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+
+u64 kvm_call_hyp(void *hypfn, ...);
+
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index);
+
+int kvm_perf_init(void);
+int kvm_perf_teardown(void);
+
+static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
+ phys_addr_t pgd_ptr,
+ unsigned long hyp_stack_ptr,
+ unsigned long vector_ptr)
+{
+ /*
+ * Call initialization code, and switch to the full blown
+ * HYP code.
+ */
+ kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
+ hyp_stack_ptr, vector_ptr);
+}
+
+struct vgic_sr_vectors {
+ void *save_vgic;
+ void *restore_vgic;
+};
+
+static inline void vgic_arch_setup(const struct vgic_params *vgic)
+{
+ extern struct vgic_sr_vectors __vgic_sr_vectors;
+
+ switch (vgic->type) {
+ case VGIC_V2:
+ __vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
+ __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
+ break;
+
+#ifdef CONFIG_ARM_GIC_V3
+ case VGIC_V3:
+ __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
+ __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
+ break;
+#endif
+
+ default:
+ BUG();
+ }
+}
+
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
+#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
new file mode 100644
index 000000000000..fc2f689c0694
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_MMIO_H__
+#define __ARM64_KVM_MMIO_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+/*
+ * This is annoying. The mmio code requires this, even if we don't
+ * need any decoding. To be fixed.
+ */
+struct kvm_decode {
+ unsigned long rt;
+ bool sign_extend;
+};
+
+/*
+ * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
+ * which is an anonymous type. Use our own type instead.
+ */
+struct kvm_exit_mmio {
+ phys_addr_t phys_addr;
+ u8 data[8];
+ u32 len;
+ bool is_write;
+};
+
+static inline void kvm_prepare_mmio(struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ run->mmio.phys_addr = mmio->phys_addr;
+ run->mmio.len = mmio->len;
+ run->mmio.is_write = mmio->is_write;
+ memcpy(run->mmio.data, mmio->data, mmio->len);
+ run->exit_reason = KVM_EXIT_MMIO;
+}
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ phys_addr_t fault_ipa);
+
+#endif /* __ARM64_KVM_MMIO_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..a030d163840b
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_MMU_H__
+#define __ARM64_KVM_MMU_H__
+
+#include <asm/page.h>
+#include <asm/memory.h>
+
+/*
+ * As we only have the TTBR0_EL2 register, we cannot express
+ * "negative" addresses. This makes it impossible to directly share
+ * mappings with the kernel.
+ *
+ * Instead, give the HYP mode its own VA region at a fixed offset from
+ * the kernel by just masking the top bits (which are all ones for a
+ * kernel address).
+ */
+#define HYP_PAGE_OFFSET_SHIFT VA_BITS
+#define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
+#define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
+
+/*
+ * Our virtual mapping for the idmap-ed MMU-enable code. Must be
+ * shared across all the page-tables. Conveniently, we use the last
+ * possible page, where no kernel mapping will ever exist.
+ */
+#define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
+
+#ifdef __ASSEMBLY__
+
+/*
+ * Convert a kernel VA into a HYP VA.
+ * reg: VA to be converted.
+ */
+.macro kern_hyp_va reg
+ and \reg, \reg, #HYP_PAGE_OFFSET_MASK
+.endm
+
+#else
+
+#include <asm/cachetype.h>
+#include <asm/cacheflush.h>
+
+#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
+
+/*
+ * We currently only support a 40bit IPA.
+ */
+#define KVM_PHYS_SHIFT (40)
+#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
+
+/* Make sure we get the right size, and thus the right alignment */
+#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
+#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+
+int create_hyp_mappings(void *from, void *to);
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+void free_boot_hyp_pgd(void);
+void free_hyp_pgds(void);
+
+int kvm_alloc_stage2_pgd(struct kvm *kvm);
+void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+ phys_addr_t pa, unsigned long size);
+
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+
+phys_addr_t kvm_mmu_get_httbr(void);
+phys_addr_t kvm_mmu_get_boot_httbr(void);
+phys_addr_t kvm_get_idmap_vector(void);
+int kvm_mmu_init(void);
+void kvm_clear_hyp_idmap(void);
+
+#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
+#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
+
+static inline void kvm_clean_pgd(pgd_t *pgd) {}
+static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
+static inline void kvm_clean_pte(pte_t *pte) {}
+static inline void kvm_clean_pte_entry(pte_t *pte) {}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+ pte_val(*pte) |= PTE_S2_RDWR;
+}
+
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+ pmd_val(*pmd) |= PMD_S2_RDWR;
+}
+
+#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end)
+#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
+#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
+
+static inline bool kvm_page_empty(void *ptr)
+{
+ struct page *ptr_page = virt_to_page(ptr);
+ return page_count(ptr_page) == 1;
+}
+
+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
+#ifndef CONFIG_ARM64_64K_PAGES
+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#else
+#define kvm_pmd_table_empty(pmdp) (0)
+#endif
+#define kvm_pud_table_empty(pudp) (0)
+
+
+struct kvm;
+
+#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+{
+ return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+ unsigned long size)
+{
+ if (!vcpu_has_cache_enabled(vcpu))
+ kvm_flush_dcache_to_poc((void *)hva, size);
+
+ if (!icache_is_aliasing()) { /* PIPT */
+ flush_icache_range(hva, hva + size);
+ } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
+ /* any kind of VIPT cache */
+ __flush_icache_all();
+ }
+}
+
+#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
+
+void stage2_flush_vm(struct kvm *kvm);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ARM64_KVM_MMU_H__ */
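
To see why the single AND in kern_hyp_va matches the arithmetic form of KERN_TO_HYP(), it helps to plug in concrete numbers. The standalone check below assumes VA_BITS = 39 (the 4KB-page configuration from memory.h later in this patch) and simply reproduces the macros in userspace; it is illustrative only, not kernel code.

/* Standalone check, assuming VA_BITS = 39: masking a kernel linear-map VA
 * with HYP_PAGE_OFFSET_MASK and the KERN_TO_HYP() offset form produce the
 * same HYP VA, which is what lets kern_hyp_va be a single AND. */
#include <stdio.h>

#define VA_BITS			39
#define PAGE_OFFSET		(~0UL << (VA_BITS - 1))
#define HYP_PAGE_OFFSET_MASK	((1UL << VA_BITS) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
#define KERN_TO_HYP(kva)	((kva) - PAGE_OFFSET + HYP_PAGE_OFFSET)

int main(void)
{
	unsigned long kva = PAGE_OFFSET + 0x1000;	/* some linear-map VA */

	printf("mask form:   %#lx\n", kva & HYP_PAGE_OFFSET_MASK);
	printf("offset form: %#lx\n", KERN_TO_HYP(kva));	/* same value */
	return 0;
}
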
diff --git a/arch/arm64/include/asm/sigcontext.h b/arch/arm64/include/asm/kvm_psci.h
index dca1094acc74..bc39e557c56c 100644
--- a/arch/arm64/include/asm/sigcontext.h
+++ b/arch/arm64/include/asm/kvm_psci.h
@@ -1,5 +1,6 @@
/*
- * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -13,19 +14,14 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ASM_SIGCONTEXT_H
-#define __ASM_SIGCONTEXT_H
-#include <uapi/asm/sigcontext.h>
+#ifndef __ARM64_KVM_PSCI_H__
+#define __ARM64_KVM_PSCI_H__
-/*
- * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
- * user space as it will change with the addition of new context. User space
- * should check the magic/size information.
- */
-struct aux_context {
- struct fpsimd_context fpsimd;
- /* additional context to be added before "end" */
- struct _aarch64_ctx end;
-};
-#endif
+#define KVM_ARM_PSCI_0_1 1
+#define KVM_ARM_PSCI_0_2 2
+
+int kvm_psci_version(struct kvm_vcpu *vcpu);
+int kvm_psci_call(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM64_KVM_PSCI_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 381f556b664e..71cd41644279 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -33,24 +33,31 @@
#define UL(x) _AC(x, UL)
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image.
+ * PAGE_OFFSET - the virtual address of the start of the kernel image
+ * (the top half of the VA space, i.e. with bit (VA_BITS - 1) set).
* VA_BITS - the maximum number of bits for virtual addresses.
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 128MB of the kernel text.
*/
-#define PAGE_OFFSET UL(0xffffffc000000000)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define VA_BITS (42)
+#else
+#define VA_BITS (39)
+#endif
+#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
-#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
-#define VA_BITS (39)
+#define FIXADDR_TOP (MODULES_VADDR - SZ_2M - PAGE_SIZE)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 UL(0x100000000)
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
#endif /* CONFIG_COMPAT */
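
As a quick sanity check on the new PAGE_OFFSET formula, evaluating ~0UL << (VA_BITS - 1) for both page-size configurations reproduces the value that used to be hard-coded. The snippet below is a standalone illustration, not part of the patch.

/* Illustrative only: the PAGE_OFFSET formula for both VA_BITS choices. */
#include <stdio.h>

int main(void)
{
	unsigned long va39 = ~0UL << (39 - 1);	/* 4KB pages  */
	unsigned long va42 = ~0UL << (42 - 1);	/* 64KB pages */

	printf("VA_BITS=39: %#lx\n", va39);	/* 0xffffffc000000000, the old constant */
	printf("VA_BITS=42: %#lx\n", va42);	/* 0xfffffe0000000000 */
	return 0;
}
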
@@ -90,6 +97,12 @@
#define MT_NORMAL_NC 3
#define MT_NORMAL 4
+/*
+ * Memory types for Stage-2 translation
+ */
+#define MT_S2_NORMAL 0xf
+#define MT_S2_DEVICE_nGnRE 0x1
+
#ifndef __ASSEMBLY__
extern phys_addr_t memstart_addr;
@@ -127,6 +140,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys(x))
/*
* virt_to_page(k) convert a _valid_ virtual address to struct page *
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 2494fc01896a..c2f006c48bdb 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -22,10 +22,16 @@ typedef struct {
void *vdso;
} mm_context_t;
+#define INIT_MM_CONTEXT(name) \
+ .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+
#define ASID(mm) ((mm)->context.id & 0xffff)
extern void paging_init(void);
extern void setup_mm_for_reboot(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
+extern void init_mem_pgprot(void);
+/* create an identity mapping for memory (or io if map_io is true) */
+extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index e2bc385adb6b..a9eee33dfa62 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,12 +151,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
unsigned int cpu = smp_processor_id();
-#ifdef CONFIG_SMP
- /* check for possible thread migration */
- if (!cpumask_empty(mm_cpumask(next)) &&
- !cpumask_test_cpu(cpu, mm_cpumask(next)))
- __flush_icache_all();
-#endif
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
check_and_switch_context(next, tsk);
}
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 46bf66628b6a..e84ca637af6b 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -31,6 +31,15 @@
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1
+/*
+ * The idmap and swapper page tables need some space reserved in the kernel
+ * image. Both require a pgd and a next level table to (section) map the
+ * kernel. The swapper also maps the FDT (see __create_page_tables for
+ * more information).
+ */
+#define SWAPPER_DIR_SIZE (2 * PAGE_SIZE)
+#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
+
#ifndef __ASSEMBLY__
#ifdef CONFIG_ARM64_64K_PAGES
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
new file mode 100644
index 000000000000..5279e5733386
--- /dev/null
+++ b/arch/arm64/include/asm/percpu.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_PERCPU_H
+#define __ASM_PERCPU_H
+
+#ifdef CONFIG_SMP
+
+static inline void set_my_cpu_offset(unsigned long off)
+{
+ asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+ unsigned long off;
+
+ /*
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+ asm("mrs %0, tpidr_el1" : "=r" (off) :
+ "Q" (*(const unsigned long *)current_stack_pointer));
+
+ return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+
+#else /* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x) do { } while (0)
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_PERCPU_H */
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
index 0a8ed3f94e93..2593b490c56a 100644
--- a/arch/arm64/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-2level-hwdef.h
@@ -21,10 +21,10 @@
* 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
* used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
* entry representing 512MB. The user and kernel address spaces are limited to
- * 512GB and therefore we only use 1024 entries in the PGD.
+ * 4TB in the 64KB page configuration.
*/
#define PTRS_PER_PTE 8192
-#define PTRS_PER_PGD 1024
+#define PTRS_PER_PGD 8192
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
diff --git a/arch/arm64/include/asm/pgtable-2level-types.h b/arch/arm64/include/asm/pgtable-2level-types.h
index 3c3ca7d361e4..5f101e63dfc1 100644
--- a/arch/arm64/include/asm/pgtable-2level-types.h
+++ b/arch/arm64/include/asm/pgtable-2level-types.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PGTABLE_2LEVEL_TYPES_H
#define __ASM_PGTABLE_2LEVEL_TYPES_H
+#include <asm/types.h>
+
typedef u64 pteval_t;
typedef u64 pgdval_t;
typedef pgdval_t pmdval_t;
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 75fd13d289b9..f7af66b54cb2 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -25,16 +25,27 @@
/*
* Hardware page table definitions.
*
+ * Level 1 descriptor (PUD).
+ */
+
+#define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1)
+
+/*
* Level 2 descriptor (PMD).
*/
#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
/*
* Section
*/
+#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
+#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
@@ -53,6 +64,7 @@
#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
#define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
@@ -68,9 +80,29 @@
#define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2)
/*
- * 40-bit physical address supported.
+ * 2nd stage PTE definitions
+ */
+#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
+#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+
+#define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+
+/*
+ * Memory Attribute override for Stage-2 (MemAttr[3:0])
+ */
+#define PTE_S2_MEMATTR(t) (_AT(pteval_t, (t)) << 2)
+#define PTE_S2_MEMATTR_MASK (_AT(pteval_t, 0xf) << 2)
+
+/*
+ * EL2/HYP PTE/PMD definitions
+ */
+#define PMD_HYP PMD_SECT_USER
+#define PTE_HYP PTE_USER
+
+/*
+ * Highest possible physical address supported.
*/
-#define PHYS_MASK_SHIFT (40)
+#define PHYS_MASK_SHIFT (48)
#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
/*
@@ -90,7 +122,7 @@
#define TCR_SHARED ((UL(3) << 12) | (UL(3) << 28))
#define TCR_TG0_64K (UL(1) << 14)
#define TCR_TG1_64K (UL(1) << 30)
-#define TCR_IPS_40BIT (UL(2) << 32)
#define TCR_ASID16 (UL(1) << 36)
+#define TCR_TBI0 (UL(1) << 37)
#endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e333a243bfcc..d86b4d4df1f0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,15 +25,16 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */
#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+#define PTE_WRITE (_AT(pteval_t, 1) << 57)
+#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*/
-#define VMALLOC_START UL(0xffffff8000000000)
+#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS)
#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
@@ -51,60 +52,59 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#endif
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
-/*
- * The pgprot_* and protection_map entries will be fixed up at runtime to
- * include the cachable and bufferable bits based on memory policy, as well as
- * any architecture dependent bits like global/ASID and SMP shared mapping
- * bits.
- */
-#define _PAGE_DEFAULT PTE_TYPE_PAGE | PTE_AF
-
-extern pgprot_t pgprot_default;
-
-#define __pgprot_modify(prot,mask,bits) \
- __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+#ifdef CONFIG_SMP
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#else
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF)
+#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
+#endif
-#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b)
-
-#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
-#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
-
-#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
-#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-
-#endif /* __ASSEMBLY__ */
-
-#define __P000 __PAGE_NONE
-#define __P001 __PAGE_READONLY
-#define __P010 __PAGE_COPY
-#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY_EXEC
-#define __P101 __PAGE_READONLY_EXEC
-#define __P110 __PAGE_COPY_EXEC
-#define __P111 __PAGE_COPY_EXEC
-
-#define __S000 __PAGE_NONE
-#define __S001 __PAGE_READONLY
-#define __S010 __PAGE_SHARED
-#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY_EXEC
-#define __S101 __PAGE_READONLY_EXEC
-#define __S110 __PAGE_SHARED_EXEC
-#define __S111 __PAGE_SHARED_EXEC
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+
+#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+
+#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+
+#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+
+#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP)
+#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+
+#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
+
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
-#ifndef __ASSEMBLY__
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -119,7 +119,7 @@ extern struct page *empty_zero_page;
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
+#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
@@ -129,30 +129,77 @@ extern struct page *empty_zero_page;
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
-#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & PTE_AF)
-#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
+#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+#define pte_valid_not_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+
+static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) &= ~pgprot_val(prot);
+ return pte;
+}
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) |= pgprot_val(prot);
+ return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(PTE_WRITE));
+}
-PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite, &= ~PTE_RDONLY);
-PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
-PTE_BIT_FUNC(mkold, &= ~PTE_AF);
-PTE_BIT_FUNC(mkyoung, |= PTE_AF);
-PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(PTE_DIRTY));
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(PTE_AF));
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(PTE_AF));
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
+}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
*ptep = pte;
+
+ /*
+ * Only if the new pte is valid and kernel, otherwise TLB maintenance
+ * or update_mmu_cache() have the necessary barriers.
+ */
+ if (pte_valid_not_user(pte)) {
+ dsb(ishst);
+ isb();
+ }
}
extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
@@ -161,10 +208,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
if (pte_valid_user(pte)) {
- if (pte_exec(pte))
+ if (!pte_special(pte) && pte_exec(pte))
__sync_icache_dcache(pte, addr);
- if (!pte_dirty(pte))
- pte = pte_wrprotect(pte);
+ if (pte_dirty(pte) && pte_write(pte))
+ pte_val(pte) &= ~PTE_RDONLY;
+ else
+ pte_val(pte) |= PTE_RDONLY;
}
set_pte(ptep, pte);
@@ -173,20 +222,78 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
/*
* Huge pte definitions.
*/
-#define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
-#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
+#define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+/*
+ * Hugetlb definitions.
+ */
+#define HUGE_MAX_HSTATE 2
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define __HAVE_ARCH_PTE_SPECIAL
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+/*
+ * THP definitions.
+ */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+#define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd))
+#endif
+
+#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
+#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+
+#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+
+static inline int has_transparent_hugepage(void)
+{
+ return 1;
+}
+
+#define __pgprot_modify(prot,mask,bits) \
+ __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_dmacoherent(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -197,10 +304,17 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_TABLE)
+#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_SECT)
+
+
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
- dsb();
+ dsb(ishst);
+ isb();
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -230,7 +344,8 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
static inline void set_pud(pud_t *pudp, pud_t pud)
{
*pudp = pud;
- dsb();
+ dsb(ishst);
+ isb();
}
static inline void pud_clear(pud_t *pudp)
@@ -263,36 +378,40 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#endif
/* Find an entry in the third-level page table.. */
-#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_VALID;
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
+}
+
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
-
/*
* Encode and decode a swap entry:
* bits 0-1: present (must be zero)
* bit 2: PTE_FILE
* bits 3-8: swap type
- * bits 9-63: swap offset
+ * bits 9-57: swap offset
*/
#define __SWP_TYPE_SHIFT 3
#define __SWP_TYPE_BITS 6
+#define __SWP_OFFSET_BITS 49
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
-#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
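
A worked round trip makes the widened swap-entry layout easier to follow: the type lands in bits 3-8 and the offset in bits 9-57, so encoding and decoding are just the shifts and masks defined above. The standalone sketch below mirrors those macro values rather than including kernel headers and is illustrative only.

/* Illustrative round trip of the swap-entry encoding above. */
#include <stdio.h>

#define TYPE_SHIFT	3
#define TYPE_BITS	6
#define OFFSET_BITS	49
#define TYPE_MASK	((1UL << TYPE_BITS) - 1)
#define OFFSET_SHIFT	(TYPE_BITS + TYPE_SHIFT)
#define OFFSET_MASK	((1UL << OFFSET_BITS) - 1)

int main(void)
{
	unsigned long type = 3, offset = 0x1234;
	unsigned long val = (type << TYPE_SHIFT) | (offset << OFFSET_SHIFT);

	printf("entry  = %#lx\n", val);					/* 0x246818 */
	printf("type   = %lu\n", (val >> TYPE_SHIFT) & TYPE_MASK);	/* 3 */
	printf("offset = %#lx\n", (val >> OFFSET_SHIFT) & OFFSET_MASK);	/* 0x1234 */
	return 0;
}
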
@@ -300,7 +419,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Ensure that there are not more swap files than can be encoded in the kernel
- * the PTEs.
+ * PTEs.
*/
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
@@ -308,13 +427,13 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
* Encode and decode a file entry:
* bits 0-1: present (must be zero)
* bit 2: PTE_FILE
- * bits 3-63: file offset / PAGE_SIZE
+ * bits 3-57: file offset / PAGE_SIZE
*/
#define pte_file(pte) (pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 3)
#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
-#define PTE_FILE_MAX_BITS 61
+#define PTE_FILE_MAX_BITS 55
extern int kern_addr_valid(unsigned long addr);
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 7cdf466fd0c5..0d3b0b17e927 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -26,11 +26,16 @@
#include <asm/page.h>
struct mm_struct;
+struct cpu_suspend_ctx;
extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
+extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
+void cpu_soft_restart(phys_addr_t cpu_reset,
+ unsigned long addr) __attribute__((noreturn));
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ab239b2c456f..001b764fffa1 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -107,6 +107,11 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = COMPAT_PSR_MODE_USR;
if (pc & 1)
regs->pstate |= COMPAT_PSR_T_BIT;
+
+#ifdef __AARCH64EB__
+ regs->pstate |= COMPAT_PSR_E_BIT;
+#endif
+
regs->compat_sp = sp;
}
#endif
@@ -131,8 +136,8 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev,
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
-#define KSTK_EIP(tsk) task_pt_regs(tsk)->pc
-#define KSTK_ESP(tsk) task_pt_regs(tsk)->sp
+#define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk))
/*
* Prefetching support
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index 0604237ecd99..e5312ea0ec1a 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,25 +14,6 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
-
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
-
-struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
- int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
- int (*migrate)(unsigned long cpuid);
-};
-
-extern struct psci_operations psci_ops;
-
int psci_init(void);
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 41a71ee4c3df..5db016e6d065 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -42,6 +42,7 @@
#define COMPAT_PSR_MODE_UND 0x0000001b
#define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020
+#define COMPAT_PSR_E_BIT 0x00000200
#define COMPAT_PSR_F_BIT 0x00000040
#define COMPAT_PSR_I_BIT 0x00000080
#define COMPAT_PSR_A_BIT 0x00000100
@@ -67,6 +68,7 @@
/* Architecturally defined mapping between AArch32 and AArch64 registers */
#define compat_usr(x) regs[(x)]
+#define compat_fp regs[11]
#define compat_sp regs[13]
#define compat_lr regs[14]
#define compat_sp_hyp regs[15]
@@ -131,7 +133,12 @@ struct pt_regs {
(!((regs)->pstate & PSR_F_BIT))
#define user_stack_pointer(regs) \
- ((regs)->sp)
+ (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->regs[0];
+}
/*
* Are the current registers suitable for user mode? (used to maintain
@@ -163,7 +170,7 @@ static inline int valid_user_regs(struct user_pt_regs *regs)
return 0;
}
-#define instruction_pointer(regs) (regs)->pc
+#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
@@ -171,7 +178,5 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc(regs) instruction_pointer(regs)
#endif
-extern int aarch32_break_trap(struct pt_regs *regs);
-
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 4b8023c5d146..a498f2cd2c2a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,21 +60,14 @@ struct secondary_data {
void *stack;
};
extern struct secondary_data secondary_data;
-extern void secondary_holding_pen(void);
-extern volatile unsigned long secondary_holding_pen_release;
+extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-struct device_node;
+extern int __cpu_disable(void);
-struct smp_enable_ops {
- const char *name;
- int (*init_cpu)(struct device_node *, int);
- int (*prepare_cpu)(int);
-};
-
-extern const struct smp_enable_ops smp_spin_table_ops;
-extern const struct smp_enable_ops smp_psci_ops;
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index ed43a0d2b1b2..59e282311b58 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -21,6 +21,19 @@
#include <asm/types.h>
+struct mpidr_hash {
+ u64 mask;
+ u32 shift_aff[4];
+ u32 bits;
+};
+
+extern struct mpidr_hash mpidr_hash;
+
+static inline u32 mpidr_hash_size(void)
+{
+ return 1 << mpidr_hash.bits;
+}
+
/*
* Logical CPU mapping.
*/
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 1be62bcb9d47..74a9d301819f 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -17,7 +17,7 @@
#define __ASM_SPARSEMEM_H
#ifdef CONFIG_SPARSEMEM
-#define MAX_PHYSMEM_BITS 40
+#define MAX_PHYSMEM_BITS 48
#define SECTION_SIZE_BITS 30
#endif
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 7065e920149d..c45b7b1b7197 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -22,17 +22,10 @@
/*
* Spinlock implementation.
*
- * The old value is read exclusively and the new one, if unlocked, is written
- * exclusively. In case of failure, the loop is restarted.
- *
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
- *
- * Unlocked value: 0
- * Locked value: 1
*/
-#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
@@ -41,31 +34,51 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp;
+ arch_spinlock_t lockval, newval;
asm volatile(
- " sevl\n"
- "1: wfe\n"
- "2: ldaxr %w0, %1\n"
- " cbnz %w0, 1b\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 2b\n"
- : "=&r" (tmp), "+Q" (lock->lock)
- : "r" (1)
- : "cc", "memory");
+ /* Atomically increment the next ticket. */
+" prfm pstl1strm, %3\n"
+"1: ldaxr %w0, %3\n"
+" add %w1, %w0, %w5\n"
+" stxr %w2, %w1, %3\n"
+" cbnz %w2, 1b\n"
+ /* Did we get the lock? */
+" eor %w1, %w0, %w0, ror #16\n"
+" cbz %w1, 3f\n"
+ /*
+ * No: spin on the owner. Send a local event to avoid missing an
+ * unlock before the exclusive load.
+ */
+" sevl\n"
+"2: wfe\n"
+" ldaxrh %w2, %4\n"
+" eor %w1, %w2, %w0, lsr #16\n"
+" cbnz %w1, 2b\n"
+ /* We got the lock. Critical section starts here. */
+"3:"
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
+ : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
+ : "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int tmp;
+ arch_spinlock_t lockval;
asm volatile(
- " ldaxr %w0, %1\n"
- " cbnz %w0, 1f\n"
- " stxr %w0, %w2, %1\n"
- "1:\n"
- : "=&r" (tmp), "+Q" (lock->lock)
- : "r" (1)
- : "cc", "memory");
+" prfm pstl1strm, %2\n"
+"1: ldaxr %w0, %2\n"
+" eor %w1, %w0, %w0, ror #16\n"
+" cbnz %w1, 2f\n"
+" add %w0, %w0, %3\n"
+" stxr %w1, %w0, %2\n"
+" cbnz %w1, 1b\n"
+"2:"
+ : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+ : "I" (1 << TICKET_SHIFT)
+ : "memory");
return !tmp;
}
@@ -73,9 +86,28 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
- " stlr %w1, %0\n"
- : "=Q" (lock->lock) : "r" (0) : "memory");
+" stlrh %w1, %0\n"
+ : "=Q" (lock->owner)
+ : "r" (lock->owner + 1)
+ : "memory");
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.owner == lock.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+ return (lockval.next - lockval.owner) > 1;
}
+#define arch_spin_is_contended arch_spin_is_contended
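
The inline assembly above implements a ticket lock: arch_spin_lock() atomically takes the current 'next' value as its ticket and increments it, then spins (with WFE) until 'owner' catches up; arch_spin_unlock() simply bumps 'owner' with a store-release. Below is a standalone C11 sketch of the same algorithm, illustrative only: plain spinning instead of the WFE/SEVL event machinery, and C11 atomics instead of the ldaxr/stxr sequences.

/* Standalone C11 sketch of the ticket-lock algorithm implemented by the
 * LL/SC assembly above. Not the kernel code. */
#include <stdatomic.h>

struct ticket_lock {
	atomic_ushort owner;	/* ticket currently being served */
	atomic_ushort next;	/* next ticket to hand out */
};

static void ticket_lock(struct ticket_lock *lock)
{
	/* Take a ticket: the previous value of 'next' is our place in line. */
	unsigned short ticket = atomic_fetch_add(&lock->next, 1);

	/* Spin until the owner counter reaches our ticket. */
	while (atomic_load_explicit(&lock->owner, memory_order_acquire) != ticket)
		;
}

static void ticket_unlock(struct ticket_lock *lock)
{
	/* Hand the lock to the next waiter, like the stlrh above. */
	atomic_fetch_add_explicit(&lock->owner, 1, memory_order_release);
}

int main(void)
{
	struct ticket_lock lock = { 0, 0 };

	ticket_lock(&lock);
	/* critical section */
	ticket_unlock(&lock);
	return 0;
}
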
/*
* Write lock implementation.
@@ -100,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
" cbnz %w0, 2b\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -114,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
return !tmp;
}
@@ -155,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
" cbnz %w1, 2b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -169,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
" cbnz %w1, 1b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -184,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
return !tmp2;
}
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 9a494346efed..87692750ed94 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,14 +20,14 @@
# error "please don't include this file directly"
#endif
-/* We only require natural alignment for exclusive accesses. */
-#define __lock_aligned
+#define TICKET_SHIFT 16
typedef struct {
- volatile unsigned int lock;
-} arch_spinlock_t;
+ u16 owner;
+ u16 next;
+} __aligned(4) arch_spinlock_t;
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
typedef struct {
volatile unsigned int lock;
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
new file mode 100644
index 000000000000..fe5e287dc56b
--- /dev/null
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -0,0 +1,38 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary,
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on ARM. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef __ASM_STACKPROTECTOR_H
+#define __ASM_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* __ASM_STACKPROTECTOR_H */
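For illustration, the layout the compiler arranges with -fstack-protector is roughly the sketch below: a copy of __stack_chk_guard sits between local buffers and the saved return state, and it is rechecked before returning. This is a conceptual sketch only (the real prologue/epilogue are emitted by GCC, and __stack_chk_fail() is the runtime's failure hook):

extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);

struct frame_sketch {
	char buf[64];			/* attacker-writable local data      */
	unsigned long canary;		/* copy of __stack_chk_guard         */
	/* saved frame pointer / return address live above this point       */
};

static void check_frame(const struct frame_sketch *f)
{
	/* Epilogue check: a linear overflow of buf must trample the canary
	 * before it can reach the return address. */
	if (f->canary != __stack_chk_guard)
		__stack_chk_fail();
}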
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index 3ee8b303d9a9..64d2d4884a9d 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -22,6 +22,18 @@ extern char *strrchr(const char *, int c);
#define __HAVE_ARCH_STRCHR
extern char *strchr(const char *, int c);
+#define __HAVE_ARCH_STRCMP
+extern int strcmp(const char *, const char *);
+
+#define __HAVE_ARCH_STRNCMP
+extern int strncmp(const char *, const char *, __kernel_size_t);
+
+#define __HAVE_ARCH_STRLEN
+extern __kernel_size_t strlen(const char *);
+
+#define __HAVE_ARCH_STRNLEN
+extern __kernel_size_t strnlen(const char *, __kernel_size_t);
+
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
@@ -34,4 +46,7 @@ extern void *memchr(const void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
+#define __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *, const void *, size_t);
+
#endif
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
new file mode 100644
index 000000000000..e9c149c042e0
--- /dev/null
+++ b/arch/arm64/include/asm/suspend.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_SUSPEND_H
+#define __ASM_SUSPEND_H
+
+#define NR_CTX_REGS 11
+
+/*
+ * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
+ * the stack, which must be 16-byte aligned on v8
+ */
+struct cpu_suspend_ctx {
+ /*
+ * This struct must be kept in sync with
+ * cpu_do_{suspend/resume} in mm/proc.S
+ */
+ u64 ctx_regs[NR_CTX_REGS];
+ u64 sp;
+} __aligned(16);
+
+struct sleep_save_sp {
+ phys_addr_t *save_ptr_stash;
+ phys_addr_t save_ptr_stash_phys;
+};
+
+extern void cpu_resume(void);
+extern int cpu_suspend(unsigned long);
+
+#endif
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 89c047f9a971..383771eb0b87 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -18,6 +18,7 @@
#include <linux/err.h>
+extern const void *sys_call_table[];
static inline int syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
@@ -59,6 +60,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -82,6 +86,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
const unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
new file mode 100644
index 000000000000..5c89df0acbcb
--- /dev/null
+++ b/arch/arm64/include/asm/sysreg.h
@@ -0,0 +1,60 @@
+/*
+ * Macros for accessing system registers with older binutils.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_SYSREG_H
+#define __ASM_SYSREG_H
+
+#define sys_reg(op0, op1, crn, crm, op2) \
+ ((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
+#ifdef __ASSEMBLY__
+
+ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+ .equ __reg_num_x\num, \num
+ .endr
+ .equ __reg_num_xzr, 31
+
+ .macro mrs_s, rt, sreg
+ .inst 0xd5300000|(\sreg)|(__reg_num_\rt)
+ .endm
+
+ .macro msr_s, sreg, rt
+ .inst 0xd5100000|(\sreg)|(__reg_num_\rt)
+ .endm
+
+#else
+
+asm(
+" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
+" .equ __reg_num_x\\num, \\num\n"
+" .endr\n"
+" .equ __reg_num_xzr, 31\n"
+"\n"
+" .macro mrs_s, rt, sreg\n"
+" .inst 0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
+" .endm\n"
+"\n"
+" .macro msr_s, sreg, rt\n"
+" .inst 0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
+" .endm\n"
+);
+
+#endif
+
+#endif /* __ASM_SYSREG_H */
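A worked example of the encoding (the register choice is ours; the arithmetic simply follows the macros above). ID_AA64ISAR0_EL1 is op0=3, op1=0, CRn=0, CRm=6, op2=0, so:

/* sys_reg(3, 0, 0, 6, 0)
 *   = ((3 - 2) << 19) | (0 << 16) | (0 << 12) | (6 << 8) | (0 << 5)
 *   = 0x00080600
 *
 * mrs_s x0, sys_reg(3, 0, 0, 6, 0)
 *   .inst 0xd5300000 | 0x00080600 | __reg_num_x0 (= 0)  ->  0xd5380600
 *
 * which should match the encoding a newer assembler produces for
 * "mrs x0, id_aa64isar0_el1".
 */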
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 3659e460071d..70dd1ac01f16 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -24,10 +24,10 @@
#include <linux/compiler.h>
#ifndef CONFIG_ARM64_64K_PAGES
-#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE_ORDER 2
#endif
-#define THREAD_SIZE 8192
+#define THREAD_SIZE 16384
#define THREAD_START_SP (THREAD_SIZE - 16)
#ifndef __ASSEMBLY__
@@ -69,14 +69,19 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+/*
* how to get the thread information struct from C
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
static inline struct thread_info *current_thread_info(void)
{
- register unsigned long sp asm ("sp");
- return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+ return (struct thread_info *)
+ (current_stack_pointer & ~(THREAD_SIZE - 1));
}
#define thread_saved_pc(tsk) \
@@ -97,6 +102,9 @@ static inline struct thread_info *current_thread_info(void)
/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
+ * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
+ * TIF_SYSCALL_AUDIT - syscall auditing
+ * TIF_SECCOMP - syscall secure computing
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
@@ -107,6 +115,9 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
+#define TIF_SYSCALL_AUDIT 9
+#define TIF_SYSCALL_TRACEPOINT 10
+#define TIF_SECCOMP 11
#define TIF_POLLING_NRFLAG 16
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19
@@ -118,10 +129,17 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+
#endif /* __KERNEL__ */
#endif /* __ASM_THREAD_INFO_H */
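With the stack growing to 16KiB, current_thread_info() still just masks the stack pointer down to the stack base; a purely illustrative address calculation:

/* Illustrative only: THREAD_SIZE = 16384 = 0x4000, so
 *
 *   sp                      = 0xffffffc000f53e70   (example value)
 *   sp & ~(THREAD_SIZE - 1) = 0xffffffc000f50000   -> struct thread_info
 *
 * i.e. the thread_info/stack pair must be 16KiB aligned, which is why
 * THREAD_SIZE_ORDER grows to 2 (four 4KiB pages) above.
 */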
diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h
index b24a31a7e2c9..81a076eb37fa 100644
--- a/arch/arm64/include/asm/timex.h
+++ b/arch/arm64/include/asm/timex.h
@@ -16,14 +16,14 @@
#ifndef __ASM_TIMEX_H
#define __ASM_TIMEX_H
+#include <asm/arch_timer.h>
+
/*
* Use the current timer as a cycle counter since this is what we use for
* the delay loop.
*/
-#define get_cycles() ({ cycles_t c; read_current_timer(&c); c; })
+#define get_cycles() arch_counter_get_cntvct()
#include <asm-generic/timex.h>
-#define ARCH_HAS_READ_CURRENT_TIMER
-
#endif
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 654f0968030b..717031a762c2 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
+ unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
@@ -187,4 +190,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
#define tlb_migrate_finish(mm) do { } while (0)
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+ tlb_add_flush(tlb, addr);
+}
+
#endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 122d6320f745..73f0ce570fb3 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -72,9 +72,9 @@ extern struct cpu_tlb_fns cpu_tlb;
*/
static inline void flush_tlb_all(void)
{
- dsb();
+ dsb(ishst);
asm("tlbi vmalle1is");
- dsb();
+ dsb(ish);
isb();
}
@@ -82,9 +82,9 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
{
unsigned long asid = (unsigned long)ASID(mm) << 48;
- dsb();
+ dsb(ishst);
asm("tlbi aside1is, %0" : : "r" (asid));
- dsb();
+ dsb(ish);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -93,16 +93,60 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr = uaddr >> 12 |
((unsigned long)ASID(vma->vm_mm) << 48);
- dsb();
+ dsb(ishst);
asm("tlbi vae1is, %0" : : "r" (addr));
- dsb();
+ dsb(ish);
+}
+
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
+ unsigned long addr;
+ start = asid | (start >> 12);
+ end = asid | (end >> 12);
+
+ dsb(ishst);
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+ asm("tlbi vae1is, %0" : : "r"(addr));
+ dsb(ish);
+}
+
+static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+ start >>= 12;
+ end >>= 12;
+
+ dsb(ishst);
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+ asm("tlbi vaae1is, %0" : : "r"(addr));
+ dsb(ish);
+ isb();
}
/*
- * Convert calls to our calling convention.
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
*/
-#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
-#define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e)
+#define MAX_TLB_RANGE (1024UL << PAGE_SHIFT)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if ((end - start) <= MAX_TLB_RANGE)
+ __flush_tlb_range(vma, start, end);
+ else
+ flush_tlb_mm(vma->vm_mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ if ((end - start) <= MAX_TLB_RANGE)
+ __flush_tlb_kernel_range(start, end);
+ else
+ flush_tlb_all();
+}
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
@@ -111,12 +155,14 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
/*
- * set_pte() does not have a DSB, so make sure that the page table
- * write is visible.
+ * set_pte() does not have a DSB for user mappings, so make sure that
+ * the page table write is visible.
*/
- dsb();
+ dsb(ishst);
}
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
#endif
#endif
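For scale (our arithmetic, not part of the patch), the MAX_TLB_RANGE cut-off works out as follows:

/* Illustrative threshold arithmetic:
 *   MAX_TLB_RANGE = 1024UL << PAGE_SHIFT
 *     = 1024 * 4KiB  = 4MiB   with 4KiB pages
 *     = 1024 * 64KiB = 64MiB  with 64KiB pages
 * so e.g. a 2MiB munmap() issues per-page "tlbi vae1is" operations,
 * while tearing down a 1GiB mapping takes the single flush_tlb_mm()
 * (or flush_tlb_all()) path instead of millions of TLBIs.
 */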
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
new file mode 100644
index 000000000000..e0171b393a14
--- /dev/null
+++ b/arch/arm64/include/asm/topology.h
@@ -0,0 +1,70 @@
+#ifndef __ASM_TOPOLOGY_H
+#define __ASM_TOPOLOGY_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/cpumask.h>
+
+struct cpu_topology {
+ int thread_id;
+ int core_id;
+ int cluster_id;
+ cpumask_t thread_sibling;
+ cpumask_t core_sibling;
+};
+
+extern struct cpu_topology cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
+#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+
+#define mc_capable() (cpu_topology[0].cluster_id != -1)
+#define smt_capable() (cpu_topology[0].thread_id != -1)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+
+#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
+/* Common values for CPUs */
+#ifndef SD_CPU_INIT
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ \
+ .flags = 0*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+}
+#endif
+#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
+#include <asm-generic/topology.h>
+
+#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 7ecc2b23882e..3bf8f4e99a51 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -83,7 +83,7 @@ static inline void set_fs(mm_segment_t fs)
* Returns 1 if the range is valid, 0 otherwise.
*
* This is equivalent to the following test:
- * (u65)addr + (u65)size < (u65)current->addr_limit
+ * (u65)addr + (u65)size <= current->addr_limit
*
* This needs 65-bit arithmetic.
*/
@@ -91,7 +91,7 @@ static inline void set_fs(mm_segment_t fs)
({ \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
- asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc" \
+ asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
: "=&r" (flag), "=&r" (roksum) \
: "1" (addr), "Ir" (size), \
"r" (current_thread_info()->addr_limit) \
@@ -100,6 +100,7 @@ static inline void set_fs(mm_segment_t fs)
})
#define access_ok(type, addr, size) __range_ok(addr, size)
+#define user_addr_max get_fs
/*
* The "__xxx" versions of the user access functions do not verify the address
@@ -240,9 +241,6 @@ extern unsigned long __must_check __copy_to_user(void __user *to, const void *fr
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
-
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
@@ -276,24 +274,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n;
}
-static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
- res = __strncpy_from_user(dst, src, count);
- return res;
-}
-
-#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
-static inline long __must_check strnlen_user(const char __user *s, long n)
-{
- unsigned long res = 0;
-
- if (__addr_ok(s))
- res = __strnlen_user(s, n);
-
- return res;
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_UACCESS_H */
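The adds/ccmp/cset change above makes the assembly implement a 65-bit "addr + size <= limit" test via the carry flag. A portable C sketch of the same check, using a GCC-style __int128 so the sum cannot wrap (illustrative only; the kernel keeps it in assembly):

static inline int range_ok_sketch(unsigned long addr, unsigned long size,
				  unsigned long limit)
{
	/* 128-bit sum so addr + size cannot overflow; the adds/ccmp/cset
	 * sequence gets the same effect from the carry flag. */
	unsigned __int128 end = (unsigned __int128)addr + size;

	return end <= limit;
}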
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 82ce217e94cf..c335479c2638 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -28,3 +28,5 @@
#endif
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 439827271e3d..7a5df5252dd7 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,7 +18,8 @@
#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H
-#define BOOT_CPU_MODE_EL2 (0x0e12b007)
+#define BOOT_CPU_MODE_EL1 (0xe11)
+#define BOOT_CPU_MODE_EL2 (0xe12)
#ifndef __ASSEMBLY__
@@ -49,6 +50,10 @@ static inline bool is_hyp_mode_mismatched(void)
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
+
#endif /* __ASSEMBLY__ */
#endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..aab5bf09e9d9
--- /dev/null
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_WORD_AT_A_TIME_H
+#define __ASM_WORD_AT_A_TIME_H
+
+#ifndef __AARCH64EB__
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+ const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return fls64(mask) >> 3;
+}
+
+#define zero_bytemask(mask) (mask)
+
+#else /* __AARCH64EB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, offset;
+
+ /* Load word from unaligned pointer addr */
+ asm(
+ "1: ldr %0, %3\n"
+ "2:\n"
+ " .pushsection .fixup,\"ax\"\n"
+ " .align 2\n"
+ "3: and %1, %2, #0x7\n"
+ " bic %2, %2, #0x7\n"
+ " ldr %0, [%2]\n"
+ " lsl %1, %1, #0x3\n"
+#ifndef __AARCH64EB__
+ " lsr %0, %0, %1\n"
+#else
+ " lsl %0, %0, %1\n"
+#endif
+ " b 2b\n"
+ " .popsection\n"
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .quad 1b, 3b\n"
+ " .popsection"
+ : "=&r" (ret), "=&r" (offset)
+ : "r" (addr), "Q" (*(unsigned long *)addr));
+
+ return ret;
+}
+
+#endif /* __ASM_WORD_AT_A_TIME_H */
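The little-endian helpers above are the usual SWAR zero-byte trick: subtracting 0x01 from every byte only borrows through bit 7 of bytes that were zero. A small stand-alone check of the arithmetic (hypothetical test harness, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define ONES	0x0101010101010101ULL	/* REPEAT_BYTE(0x01) */
#define HIGHS	0x8080808080808080ULL	/* REPEAT_BYTE(0x80) */

int main(void)
{
	/* "abc\0defg" read as a little-endian 64-bit word: byte 3 is NUL. */
	uint64_t word = 0x6766656400636261ULL;

	uint64_t bits = (word - ONES) & ~word & HIGHS;		/* has_zero()         */
	uint64_t zmask = ((bits - 1) & ~bits) >> 7;		/* create_zero_mask() */
	int first_zero = (64 - __builtin_clzll(zmask)) >> 3;	/* fls64(zmask) >> 3  */

	printf("first zero byte at index %d\n", first_zero);	/* prints 3 */
	return 0;
}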
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index e4b78bdca19e..942376d37d22 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -9,6 +9,7 @@ header-y += byteorder.h
header-y += fcntl.h
header-y += hwcap.h
header-y += kvm_para.h
+header-y += perf_regs.h
header-y += param.h
header-y += ptrace.h
header-y += setup.h
diff --git a/arch/arm64/include/uapi/asm/byteorder.h b/arch/arm64/include/uapi/asm/byteorder.h
index 2b92046aafc5..dc19e9537f0d 100644
--- a/arch/arm64/include/uapi/asm/byteorder.h
+++ b/arch/arm64/include/uapi/asm/byteorder.h
@@ -16,6 +16,10 @@
#ifndef __ASM_BYTEORDER_H
#define __ASM_BYTEORDER_H
+#ifdef __AARCH64EB__
+#include <linux/byteorder/big_endian.h>
+#else
#include <linux/byteorder/little_endian.h>
+#endif
#endif /* __ASM_BYTEORDER_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index eea497578b87..73cf0f54d57c 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -21,6 +21,11 @@
*/
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
-
+#define HWCAP_EVTSTRM (1 << 2)
+#define HWCAP_AES (1 << 3)
+#define HWCAP_PMULL (1 << 4)
+#define HWCAP_SHA1 (1 << 5)
+#define HWCAP_SHA2 (1 << 6)
+#define HWCAP_CRC32 (1 << 7)
#endif /* _UAPI__ASM_HWCAP_H */
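User space normally reads these bits from the ELF auxiliary vector rather than from this header directly; a minimal (hypothetical) probe using glibc's getauxval():

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), AT_HWCAP */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & (1 << 3))			/* HWCAP_AES   */
		puts("AES instructions available");
	if (hwcap & (1 << 7))			/* HWCAP_CRC32 */
		puts("CRC32 instructions available");
	return 0;
}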
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
new file mode 100644
index 000000000000..8e38878c87c6
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/uapi/asm/kvm.h:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_H__
+#define __ARM_KVM_H__
+
+#define KVM_SPSR_EL1 0
+#define KVM_SPSR_SVC KVM_SPSR_EL1
+#define KVM_SPSR_ABT 1
+#define KVM_SPSR_UND 2
+#define KVM_SPSR_IRQ 3
+#define KVM_SPSR_FIQ 4
+#define KVM_NR_SPSR 5
+
+#ifndef __ASSEMBLY__
+#include <linux/psci.h>
+#include <asm/types.h>
+#include <asm/ptrace.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_REG_SIZE(id) \
+ (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+struct kvm_regs {
+ struct user_pt_regs regs; /* sp = sp_el0 */
+
+ __u64 sp_el1;
+ __u64 elr_el1;
+
+ __u64 spsr[KVM_NR_SPSR];
+
+ struct user_fpsimd_state fp_regs;
+};
+
+/* Supported Processor Types */
+#define KVM_ARM_TARGET_AEM_V8 0
+#define KVM_ARM_TARGET_FOUNDATION_V8 1
+#define KVM_ARM_TARGET_CORTEX_A57 2
+#define KVM_ARM_TARGET_XGENE_POTENZA 3
+#define KVM_ARM_TARGET_CORTEX_A53 4
+
+#define KVM_ARM_NUM_TARGETS 5
+
+/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
+#define KVM_ARM_DEVICE_TYPE_SHIFT 0
+#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_ID_SHIFT 16
+#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+
+/* Supported device IDs */
+#define KVM_ARM_DEVICE_VGIC_V2 0
+
+/* Supported VGIC address types */
+#define KVM_VGIC_V2_ADDR_TYPE_DIST 0
+#define KVM_VGIC_V2_ADDR_TYPE_CPU 1
+
+#define KVM_VGIC_V2_DIST_SIZE 0x1000
+#define KVM_VGIC_V2_CPU_SIZE 0x2000
+
+#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
+#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
+#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
+
+struct kvm_vcpu_init {
+ __u32 target;
+ __u32 features[7];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+struct kvm_sync_regs {
+};
+
+struct kvm_arch_memory_slot {
+};
+
+/* If you need to interpret the index values, here is the key: */
+#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
+#define KVM_REG_ARM_COPROC_SHIFT 16
+
+/* Normal registers are mapped as coprocessor 16. */
+#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / sizeof(__u32))
+
+/* Some registers need more space to represent values. */
+#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
+#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
+#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
+#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
+#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
+
+/* AArch64 system registers */
+#define KVM_REG_ARM64_SYSREG (0x0013 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000
+#define KVM_REG_ARM64_SYSREG_OP0_SHIFT 14
+#define KVM_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800
+#define KVM_REG_ARM64_SYSREG_OP1_SHIFT 11
+#define KVM_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780
+#define KVM_REG_ARM64_SYSREG_CRN_SHIFT 7
+#define KVM_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078
+#define KVM_REG_ARM64_SYSREG_CRM_SHIFT 3
+#define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007
+#define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0
+
+#define ARM64_SYS_REG_SHIFT_MASK(x,n) \
+ (((x) << KVM_REG_ARM64_SYSREG_ ## n ## _SHIFT) & \
+ KVM_REG_ARM64_SYSREG_ ## n ## _MASK)
+
+#define __ARM64_SYS_REG(op0,op1,crn,crm,op2) \
+ (KVM_REG_ARM64 | KVM_REG_ARM64_SYSREG | \
+ ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \
+ ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \
+ ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \
+ ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \
+ ARM64_SYS_REG_SHIFT_MASK(op2, OP2))
+
+#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
+
+#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
+#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
+#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
+
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
+#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
+#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
+#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_TYPE_SHIFT 24
+#define KVM_ARM_IRQ_TYPE_MASK 0xff
+#define KVM_ARM_IRQ_VCPU_SHIFT 16
+#define KVM_ARM_IRQ_VCPU_MASK 0xff
+#define KVM_ARM_IRQ_NUM_SHIFT 0
+#define KVM_ARM_IRQ_NUM_MASK 0xffff
+
+/* irq_type field */
+#define KVM_ARM_IRQ_TYPE_CPU 0
+#define KVM_ARM_IRQ_TYPE_SPI 1
+#define KVM_ARM_IRQ_TYPE_PPI 2
+
+/* out-of-kernel GIC cpu interrupt injection irq_number field */
+#define KVM_ARM_IRQ_CPU_IRQ 0
+#define KVM_ARM_IRQ_CPU_FIQ 1
+
+/* Highest supported SPI, from VGIC_NR_IRQS */
+#define KVM_ARM_IRQ_GIC_MAX 127
+
+/* PSCI interface */
+#define KVM_PSCI_FN_BASE 0x95c1ba5e
+#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
+
+#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
+#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
+#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
+#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
+
+#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS
+#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED
+#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
+#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
+
+#endif
+
+#endif /* __ARM_KVM_H__ */
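A worked example of the resulting register index; KVM_REG_ARM64 and KVM_REG_SIZE_U64 come from the generic <uapi/linux/kvm.h>, and the values quoted for them below are our assumptions, stated only so the arithmetic can be followed:

/* Assuming KVM_REG_ARM64 = 0x6000000000000000 and
 * KVM_REG_SIZE_U64 = 0x0030000000000000:
 *
 *   KVM_REG_ARM_TIMER_CNT = ARM64_SYS_REG(3, 3, 14, 3, 2)
 *     = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG
 *       | (3 << 14) | (3 << 11) | (14 << 7) | (3 << 3) | 2
 *     = 0x603000000013df1a
 *
 * i.e. the id user space would place in struct kvm_one_reg to read the
 * virtual counter with the KVM_GET_ONE_REG ioctl.
 */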
diff --git a/arch/arm64/include/uapi/asm/perf_regs.h b/arch/arm64/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..172b8317ee49
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/perf_regs.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_ARM64_PERF_REGS_H
+#define _ASM_ARM64_PERF_REGS_H
+
+enum perf_event_arm_regs {
+ PERF_REG_ARM64_X0,
+ PERF_REG_ARM64_X1,
+ PERF_REG_ARM64_X2,
+ PERF_REG_ARM64_X3,
+ PERF_REG_ARM64_X4,
+ PERF_REG_ARM64_X5,
+ PERF_REG_ARM64_X6,
+ PERF_REG_ARM64_X7,
+ PERF_REG_ARM64_X8,
+ PERF_REG_ARM64_X9,
+ PERF_REG_ARM64_X10,
+ PERF_REG_ARM64_X11,
+ PERF_REG_ARM64_X12,
+ PERF_REG_ARM64_X13,
+ PERF_REG_ARM64_X14,
+ PERF_REG_ARM64_X15,
+ PERF_REG_ARM64_X16,
+ PERF_REG_ARM64_X17,
+ PERF_REG_ARM64_X18,
+ PERF_REG_ARM64_X19,
+ PERF_REG_ARM64_X20,
+ PERF_REG_ARM64_X21,
+ PERF_REG_ARM64_X22,
+ PERF_REG_ARM64_X23,
+ PERF_REG_ARM64_X24,
+ PERF_REG_ARM64_X25,
+ PERF_REG_ARM64_X26,
+ PERF_REG_ARM64_X27,
+ PERF_REG_ARM64_X28,
+ PERF_REG_ARM64_X29,
+ PERF_REG_ARM64_LR,
+ PERF_REG_ARM64_SP,
+ PERF_REG_ARM64_PC,
+ PERF_REG_ARM64_MAX,
+};
+#endif /* _ASM_ARM64_PERF_REGS_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7b4b564961d4..12618cbd1bab 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -4,20 +4,35 @@
CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) \
+ -I$(src)/../../../scripts/dtc/libfdt
+
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_insn.o = -pg
+CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o
+ hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o return_address.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
+arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o
+arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
+arm64-obj-$(CONFIG_SMP) += topology.o
+arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
-arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
+arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+arm64-obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
+arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
+arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+arm64-obj-$(CONFIG_KGDB) += kgdb.o
+arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 41b4f626d554..a85843ddbde8 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -29,16 +29,14 @@
#include <asm/checksum.h>
- /* user mem (segment) */
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
+ /* user mem (segment) */
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
@@ -46,10 +44,15 @@ EXPORT_SYMBOL(memstart_addr);
/* string / mem functions */
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(memcmp);
/* atomic bitops */
EXPORT_SYMBOL(set_bit);
@@ -58,3 +61,7 @@ EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(test_and_clear_bit);
EXPORT_SYMBOL(change_bit);
EXPORT_SYMBOL(test_and_change_bit);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a2a4d810bea3..9a9fce090d58 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -21,9 +21,12 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <linux/kvm_host.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/cputable.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
#include <asm/vdso_datapage.h>
#include <linux/kbuild.h>
@@ -104,5 +107,59 @@ int main(void)
BLANK();
DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
+ BLANK();
+#ifdef CONFIG_KVM_ARM_HOST
+ DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
+ DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
+ DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
+ DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1));
+ DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1));
+ DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr));
+ DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs));
+ DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
+ DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
+ DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
+ DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags));
+ DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
+ DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
+ DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
+ DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
+ DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
+ DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
+ DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
+ DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
+ DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
+ DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
+ DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+ DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+ DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+ DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+ DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+ DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+ DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
+ DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
+ DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
+ DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
+ DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
+ DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
+ DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
+ DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
+ DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
+ DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
+ DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
+ DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+ DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
+ DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
+ DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
+ DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
+ DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp));
+ DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
+ DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
+#endif
return 0;
}
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
new file mode 100644
index 000000000000..24fb449bc6aa
--- /dev/null
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -0,0 +1,99 @@
+/*
+ * CPU kernel entry/exit control
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/string.h>
+
+extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations cpu_psci_ops;
+
+const struct cpu_operations *cpu_ops[NR_CPUS];
+
+static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_SMP
+ &smp_spin_table_ops,
+#endif
+ &cpu_psci_ops,
+ NULL,
+};
+
+static const struct cpu_operations * __init cpu_get_ops(const char *name)
+{
+ const struct cpu_operations **ops = supported_cpu_ops;
+
+ while (*ops) {
+ if (!strcmp(name, (*ops)->name))
+ return *ops;
+
+ ops++;
+ }
+
+ return NULL;
+}
+
+/*
+ * Read a cpu's enable method from the device tree and record it in cpu_ops.
+ */
+int __init cpu_read_ops(struct device_node *dn, int cpu)
+{
+ const char *enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method) {
+ /*
+ * The boot CPU may not have an enable method (e.g. when
+ * spin-table is used for secondaries). Don't warn spuriously.
+ */
+ if (cpu != 0)
+ pr_err("%s: missing enable-method property\n",
+ dn->full_name);
+ return -ENOENT;
+ }
+
+ cpu_ops[cpu] = cpu_get_ops(enable_method);
+ if (!cpu_ops[cpu]) {
+ pr_warn("%s: unsupported enable-method property: %s\n",
+ dn->full_name, enable_method);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+void __init cpu_read_bootcpu_ops(void)
+{
+ struct device_node *dn = NULL;
+ u64 mpidr = cpu_logical_map(0);
+
+ while ((dn = of_find_node_by_type(dn, "cpu"))) {
+ u64 hwid;
+ const __be32 *prop;
+
+ prop = of_get_property(dn, "reg", NULL);
+ if (!prop)
+ continue;
+
+ hwid = of_read_number(prop, of_n_addr_cells(dn));
+ if (hwid == mpidr) {
+ cpu_read_ops(dn, 0);
+ of_node_put(dn);
+ return;
+ }
+ }
+}
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
new file mode 100644
index 000000000000..19d17f51db37
--- /dev/null
+++ b/arch/arm64/kernel/cpuidle.c
@@ -0,0 +1,31 @@
+/*
+ * ARM64 CPU idle arch support
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/cpuidle.h>
+#include <asm/cpu_ops.h>
+
+int cpu_init_idle(unsigned int cpu)
+{
+ int ret = -EOPNOTSUPP;
+ struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+
+ if (!cpu_node)
+ return -ENODEV;
+
+ if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
+ ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
+
+ of_node_put(cpu_node);
+ return ret;
+}
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
index 63cfc4a43f4e..fd3993cb060f 100644
--- a/arch/arm64/kernel/cputable.c
+++ b/arch/arm64/kernel/cputable.c
@@ -22,7 +22,7 @@
extern unsigned long __cpu_setup(void);
-struct cpu_info __initdata cpu_table[] = {
+struct cpu_info cpu_table[] = {
{
.cpu_id_val = 0x000f0000,
.cpu_id_mask = 0x000f0000,
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f4726dc054b3..e3b37ee9076a 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -24,21 +24,13 @@
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/stat.h>
+#include <linux/uaccess.h>
#include <asm/debug-monitors.h>
#include <asm/local.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
-/* Low-level stepping controls. */
-#define DBG_MDSCR_SS (1 << 0)
-#define DBG_SPSR_SS (1 << 21)
-
-/* MDSCR_EL1 enabling bits */
-#define DBG_MDSCR_KDE (1 << 13)
-#define DBG_MDSCR_MDE (1 << 15)
-#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
-
/* Determine debug architecture. */
u8 debug_monitors_arch(void)
{
@@ -137,7 +129,6 @@ void disable_debug_monitors(enum debug_el el)
static void clear_os_lock(void *unused)
{
asm volatile("msr oslar_el1, %0" : : "r" (0));
- isb();
}
static int __cpuinit os_lock_notify(struct notifier_block *self,
@@ -156,8 +147,9 @@ static struct notifier_block __cpuinitdata os_lock_nb = {
static int __cpuinit debug_monitors_init(void)
{
/* Clear the OS lock. */
- smp_call_function(clear_os_lock, NULL, 1);
- clear_os_lock(NULL);
+ on_each_cpu(clear_os_lock, NULL, 1);
+ isb();
+ local_dbg_enable();
/* Register hotplug handler. */
register_cpu_notifier(&os_lock_nb);
@@ -187,6 +179,48 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
regs->pstate = spsr;
}
+/* EL1 Single Step Handler hooks */
+static LIST_HEAD(step_hook);
+static DEFINE_RWLOCK(step_hook_lock);
+
+void register_step_hook(struct step_hook *hook)
+{
+ write_lock(&step_hook_lock);
+ list_add(&hook->node, &step_hook);
+ write_unlock(&step_hook_lock);
+}
+
+void unregister_step_hook(struct step_hook *hook)
+{
+ write_lock(&step_hook_lock);
+ list_del(&hook->node);
+ write_unlock(&step_hook_lock);
+}
+
+/*
+ * Call the registered single-step handlers.
+ * There is no syndrome information to identify the right handler, so
+ * call each registered handler in turn until one of them handles the
+ * exception (i.e. returns DBG_HOOK_HANDLED).
+ */
+static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct step_hook *hook;
+ int retval = DBG_HOOK_ERROR;
+
+ read_lock(&step_hook_lock);
+
+ list_for_each_entry(hook, &step_hook, node) {
+ retval = hook->fn(regs, esr);
+ if (retval == DBG_HOOK_HANDLED)
+ break;
+ }
+
+ read_unlock(&step_hook_lock);
+
+ return retval;
+}
+
static int single_step_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -214,7 +248,9 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
*/
user_rewind_single_step(current);
} else {
- /* TODO: route to KGDB */
+ if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
pr_warning("Unexpected kernel single-step exception at EL1\n");
/*
* Re-enable stepping since we know that we will be
@@ -226,13 +262,113 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
return 0;
}
-static int __init single_step_init(void)
+/*
+ * The breakpoint handler is re-entrant: another breakpoint can be
+ * hit from within a breakpoint handler, especially with kprobes.
+ * Use reader/writer locks instead of a plain spinlock.
+ */
+static LIST_HEAD(break_hook);
+static DEFINE_RWLOCK(break_hook_lock);
+
+void register_break_hook(struct break_hook *hook)
+{
+ write_lock(&break_hook_lock);
+ list_add(&hook->node, &break_hook);
+ write_unlock(&break_hook_lock);
+}
+
+void unregister_break_hook(struct break_hook *hook)
+{
+ write_lock(&break_hook_lock);
+ list_del(&hook->node);
+ write_unlock(&break_hook_lock);
+}
+
+static int call_break_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct break_hook *hook;
+ int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
+
+ read_lock(&break_hook_lock);
+ list_for_each_entry(hook, &break_hook, node)
+ if ((esr & hook->esr_mask) == hook->esr_val)
+ fn = hook->fn;
+ read_unlock(&break_hook_lock);
+
+ return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
+}
+
+static int brk_handler(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ if (user_mode(regs)) {
+ info = (siginfo_t) {
+ .si_signo = SIGTRAP,
+ .si_errno = 0,
+ .si_code = TRAP_BRKPT,
+ .si_addr = (void __user *)instruction_pointer(regs),
+ };
+
+ force_sig_info(SIGTRAP, &info, current);
+ } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+ pr_warning("Unexpected kernel BRK exception at EL1\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int aarch32_break_handler(struct pt_regs *regs)
+{
+ siginfo_t info;
+ unsigned int instr;
+ bool bp = false;
+ void __user *pc = (void __user *)instruction_pointer(regs);
+
+ if (!compat_user_mode(regs))
+ return -EFAULT;
+
+ if (compat_thumb_mode(regs)) {
+ /* get 16-bit Thumb instruction */
+ get_user(instr, (u16 __user *)pc);
+ if (instr == AARCH32_BREAK_THUMB2_LO) {
+ /* get second half of 32-bit Thumb-2 instruction */
+ get_user(instr, (u16 __user *)(pc + 2));
+ bp = instr == AARCH32_BREAK_THUMB2_HI;
+ } else {
+ bp = instr == AARCH32_BREAK_THUMB;
+ }
+ } else {
+ /* 32-bit ARM instruction */
+ get_user(instr, (u32 __user *)pc);
+ bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+ }
+
+ if (!bp)
+ return -EFAULT;
+
+ info = (siginfo_t) {
+ .si_signo = SIGTRAP,
+ .si_errno = 0,
+ .si_code = TRAP_BRKPT,
+ .si_addr = pc,
+ };
+
+ force_sig_info(SIGTRAP, &info, current);
+ return 0;
+}
+
+static int __init debug_traps_init(void)
{
hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
TRAP_HWBKPT, "single-step handler");
+ hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
+ TRAP_BRKPT, "ptrace BRK handler");
return 0;
}
-arch_initcall(single_step_init);
+arch_initcall(debug_traps_init);
/* Re-enable single step for syscall restarting. */
void user_rewind_single_step(struct task_struct *task)
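A debugger back end (e.g. KGDB or kprobes) would plug into the new hooks roughly as sketched below. The break_hook field names follow their use in call_break_hook() above, and the ESR mask/value are placeholders, not real encodings:

/* Sketch only: claim a particular BRK immediate. */
static int my_brk_handler(struct pt_regs *regs, unsigned int esr)
{
	/* ... inspect regs, decide whether this BRK is ours ... */
	return DBG_HOOK_HANDLED;	/* or DBG_HOOK_ERROR to decline */
}

static struct break_hook my_break_hook = {
	.esr_mask	= 0xffffffff,	/* placeholder match mask */
	.esr_val	= 0xf2000000,	/* placeholder ESR value  */
	.fn		= my_brk_handler,
};

/* register_break_hook(&my_break_hook) from an initcall;
 * unregister_break_hook(&my_break_hook) on teardown.
 * Step hooks work the same way via register_step_hook(). */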
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index fbb6e1843659..ffbbdde7aba1 100644
--- a/arch/arm64/kernel/early_printk.c
+++ b/arch/arm64/kernel/early_printk.c
@@ -26,6 +26,8 @@
#include <linux/amba/serial.h>
#include <linux/serial_reg.h>
+#include <asm/fixmap.h>
+
static void __iomem *early_base;
static void (*printch)(char ch);
@@ -141,8 +143,10 @@ static int __init setup_early_printk(char *buf)
}
/* no options parsing yet */
- if (paddr)
- early_base = early_io_map(paddr, EARLYCON_IOBASE);
+ if (paddr) {
+ set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
+ early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
+ }
printch = match->printch;
early_console = &early_console_dev;
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
new file mode 100644
index 000000000000..66716c9b9e5f
--- /dev/null
+++ b/arch/arm64/kernel/efi-entry.S
@@ -0,0 +1,109 @@
+/*
+ * EFI entry point.
+ *
+ * Copyright (C) 2013, 2014 Red Hat, Inc.
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+
+#define EFI_LOAD_ERROR 0x8000000000000001
+
+ __INIT
+
+ /*
+ * We arrive here from the EFI boot manager with:
+ *
+ * * CPU in little-endian mode
+ * * MMU on with identity-mapped RAM
+ * * Icache and Dcache on
+ *
+ * We will most likely be running from some place other than where
+ * we want to be. The kernel image wants to be placed at TEXT_OFFSET
+ * from start of RAM.
+ */
+ENTRY(efi_stub_entry)
+ /*
+ * Create a stack frame to save FP/LR with extra space
+ * for image_addr variable passed to efi_entry().
+ */
+ stp x29, x30, [sp, #-32]!
+
+ /*
+ * Call efi_entry to do the real work.
+ * x0 and x1 are already set up by firmware. Current runtime
+ * address of image is calculated and passed via *image_addr.
+ *
+ * unsigned long efi_entry(void *handle,
+ * efi_system_table_t *sys_table,
+ * unsigned long *image_addr);
+ */
+ adrp x8, _text
+ add x8, x8, #:lo12:_text
+ add x2, sp, 16
+ str x8, [x2]
+ bl efi_entry
+ cmn x0, #1
+ b.eq efi_load_fail
+
+ /*
+ * efi_entry() will have relocated the kernel image if necessary
+ * and we return here with device tree address in x0 and the kernel
+ * entry point stored at *image_addr. Save those values in registers
+ * which are callee preserved.
+ */
+ mov x20, x0 // DTB address
+ ldr x0, [sp, #16] // relocated _text address
+ mov x21, x0
+
+ /*
+ * Flush dcache covering current runtime addresses
+ * of kernel text/data. Then flush all of icache.
+ */
+ adrp x1, _text
+ add x1, x1, #:lo12:_text
+ adrp x2, _edata
+ add x2, x2, #:lo12:_edata
+ sub x1, x2, x1
+
+ bl __flush_dcache_area
+ ic ialluis
+
+ /* Turn off Dcache and MMU */
+ mrs x0, CurrentEL
+ cmp x0, #PSR_MODE_EL2t
+ ccmp x0, #PSR_MODE_EL2h, #0x4, ne
+ b.ne 1f
+ mrs x0, sctlr_el2
+ bic x0, x0, #1 << 0 // clear SCTLR.M
+ bic x0, x0, #1 << 2 // clear SCTLR.C
+ msr sctlr_el2, x0
+ isb
+ b 2f
+1:
+ mrs x0, sctlr_el1
+ bic x0, x0, #1 << 0 // clear SCTLR.M
+ bic x0, x0, #1 << 2 // clear SCTLR.C
+ msr sctlr_el1, x0
+ isb
+2:
+ /* Jump to kernel entry point */
+ mov x0, x20
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+ br x21
+
+efi_load_fail:
+ mov x0, #EFI_LOAD_ERROR
+ ldp x29, x30, [sp], #32
+ ret
+
+ENDPROC(efi_stub_entry)
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
new file mode 100644
index 000000000000..e786e6cdc400
--- /dev/null
+++ b/arch/arm64/kernel/efi-stub.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org>
+ *
+ * This file implements the EFI boot stub for the arm64 kernel.
+ * Adapted from ARM version by Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+#include <asm/sections.h>
+
+/*
+ * AArch64 requires the DTB to be 8-byte aligned, to sit within the first
+ * 512MiB from the start of the kernel, and not to cross a 2MiB boundary.
+ * We set the alignment to 2MiB so we know it won't cross one.
+ */
+#define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */
+#define MAX_FDT_OFFSET SZ_512M
+
+#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+
+static void efi_char16_printk(efi_system_table_t *sys_table_arg,
+ efi_char16_t *str);
+
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table,
+ void *__image, void **__fh);
+static efi_status_t efi_file_close(void *handle);
+
+static efi_status_t
+efi_file_read(void *handle, unsigned long *size, void *addr);
+
+static efi_status_t
+efi_file_size(efi_system_table_t *sys_table, void *__fh,
+ efi_char16_t *filename_16, void **handle, u64 *file_sz);
+
+/* Include shared EFI stub code */
+#include "../../../drivers/firmware/efi/efi-stub-helper.c"
+#include "../../../drivers/firmware/efi/fdt.c"
+#include "../../../drivers/firmware/efi/arm-stub.c"
+
+
+static efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
+ unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long dram_base,
+ efi_loaded_image_t *image)
+{
+ efi_status_t status;
+ unsigned long kernel_size, kernel_memsize = 0;
+
+ /* Relocate the image, if required. */
+ kernel_size = _edata - _text;
+ if (*image_addr != (dram_base + TEXT_OFFSET)) {
+ kernel_memsize = kernel_size + (_end - _edata);
+ status = efi_relocate_kernel(sys_table, image_addr,
+ kernel_size, kernel_memsize,
+ dram_base + TEXT_OFFSET,
+ PAGE_SIZE);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Failed to relocate kernel\n");
+ return status;
+ }
+ if (*image_addr != (dram_base + TEXT_OFFSET)) {
+ pr_efi_err(sys_table, "Failed to alloc kernel memory\n");
+ efi_free(sys_table, kernel_memsize, *image_addr);
+ return EFI_ERROR;
+ }
+ *image_size = kernel_memsize;
+ }
+
+
+ return EFI_SUCCESS;
+}
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
new file mode 100644
index 000000000000..14db1f6e8d7f
--- /dev/null
+++ b/arch/arm64/kernel/efi.c
@@ -0,0 +1,469 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013, 2014 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+
+struct efi_memory_map memmap;
+
+static efi_runtime_services_t *runtime;
+
+static u64 efi_system_table;
+
+static int uefi_debug __initdata;
+static int __init uefi_debug_setup(char *str)
+{
+ uefi_debug = 1;
+
+ return 0;
+}
+early_param("uefi_debug", uefi_debug_setup);
+
+static int __init is_normal_ram(efi_memory_desc_t *md)
+{
+ if (md->attribute & EFI_MEMORY_WB)
+ return 1;
+ return 0;
+}
+
+static void __init efi_setup_idmap(void)
+{
+ struct memblock_region *r;
+ efi_memory_desc_t *md;
+ u64 paddr, npages, size;
+
+ for_each_memblock(memory, r)
+ create_id_mapping(r->base, r->size, 0);
+
+ /* map runtime io spaces */
+ for_each_efi_memory_desc(&memmap, md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
+ continue;
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+ create_id_mapping(paddr, size, 1);
+ }
+}
+
+static int __init uefi_init(void)
+{
+ efi_char16_t *c16;
+ char vendor[100] = "unknown";
+ int i, retval;
+
+ efi.systab = early_memremap(efi_system_table,
+ sizeof(efi_system_table_t));
+ if (efi.systab == NULL) {
+ pr_warn("Unable to map EFI system table.\n");
+ return -ENOMEM;
+ }
+
+ set_bit(EFI_BOOT, &efi.flags);
+ set_bit(EFI_64BIT, &efi.flags);
+
+ /*
+ * Verify the EFI Table
+ */
+ if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ pr_err("System table signature incorrect\n");
+ return -EINVAL;
+ }
+ if ((efi.systab->hdr.revision >> 16) < 2)
+ pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff);
+
+ /* Show what we know for posterity */
+ c16 = early_memremap(efi.systab->fw_vendor,
+ sizeof(vendor));
+ if (c16) {
+ for (i = 0; i < (int) sizeof(vendor) - 1 && c16[i]; ++i)
+ vendor[i] = c16[i];
+ vendor[i] = '\0';
+ }
+
+ pr_info("EFI v%u.%.02u by %s\n",
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff, vendor);
+
+ retval = efi_config_init(NULL);
+ if (retval == 0)
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
+ early_memunmap(c16, sizeof(vendor));
+ early_memunmap(efi.systab, sizeof(efi_system_table_t));
+
+ return retval;
+}
+
+static __initdata char memory_type_name[][32] = {
+ {"Reserved"},
+ {"Loader Code"},
+ {"Loader Data"},
+ {"Boot Code"},
+ {"Boot Data"},
+ {"Runtime Code"},
+ {"Runtime Data"},
+ {"Conventional Memory"},
+ {"Unusable Memory"},
+ {"ACPI Reclaim Memory"},
+ {"ACPI Memory NVS"},
+ {"Memory Mapped I/O"},
+ {"MMIO Port Space"},
+ {"PAL Code"},
+};
+
+/*
+ * Return true for RAM regions we want to permanently reserve.
+ */
+static __init int is_reserve_region(efi_memory_desc_t *md)
+{
+ if (!is_normal_ram(md))
+ return 0;
+
+ if (md->attribute & EFI_MEMORY_RUNTIME)
+ return 1;
+
+ if (md->type == EFI_ACPI_RECLAIM_MEMORY ||
+ md->type == EFI_RESERVED_TYPE)
+ return 1;
+
+ return 0;
+}
+
+static __init void reserve_regions(void)
+{
+ efi_memory_desc_t *md;
+ u64 paddr, npages, size;
+
+ if (uefi_debug)
+ pr_info("Processing EFI memory map:\n");
+
+ for_each_efi_memory_desc(&memmap, md) {
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+
+ if (uefi_debug)
+ pr_info(" 0x%012llx-0x%012llx [%s]",
+ paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
+ memory_type_name[md->type]);
+
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (is_normal_ram(md))
+ early_init_dt_add_memory_arch(paddr, size);
+
+ if (is_reserve_region(md) ||
+ md->type == EFI_BOOT_SERVICES_CODE ||
+ md->type == EFI_BOOT_SERVICES_DATA) {
+ memblock_reserve(paddr, size);
+ if (uefi_debug)
+ pr_cont("*");
+ }
+
+ if (uefi_debug)
+ pr_cont("\n");
+ }
+}
+
+
+static u64 __init free_one_region(u64 start, u64 end)
+{
+ u64 size = end - start;
+
+ if (uefi_debug)
+ pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);
+
+ free_bootmem_late(start, size);
+ return size;
+}
+
+static u64 __init free_region(u64 start, u64 end)
+{
+ u64 map_start, map_end, total = 0;
+
+ if (end <= start)
+ return total;
+
+ map_start = (u64)memmap.phys_map;
+ map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
+ map_start &= PAGE_MASK;
+
+ if (start < map_end && end > map_start) {
+ /* region overlaps UEFI memmap */
+ if (start < map_start)
+ total += free_one_region(start, map_start);
+
+ if (map_end < end)
+ total += free_one_region(map_end, end);
+ } else
+ total += free_one_region(start, end);
+
+ return total;
+}
+
+static void __init free_boot_services(void)
+{
+ u64 total_freed = 0;
+ u64 keep_end, free_start, free_end;
+ efi_memory_desc_t *md;
+
+ /*
+	 * If the kernel uses larger pages than UEFI, we have to be careful
+	 * not to inadvertently free memory we want to keep if there is
+	 * overlap at the kernel page size alignment. We do not want to
+	 * free is_reserve_region() memory or the UEFI memmap itself.
+ *
+ * The memory map is sorted, so we keep track of the end of
+ * any previous region we want to keep, remember any region
+ * we want to free and defer freeing it until we encounter
+ * the next region we want to keep. This way, before freeing
+ * it, we can clip it as needed to avoid freeing memory we
+ * want to keep for UEFI.
+ */
+
+ keep_end = 0;
+ free_start = 0;
+
+ for_each_efi_memory_desc(&memmap, md) {
+ u64 paddr, npages, size;
+
+ if (is_reserve_region(md)) {
+ /*
+ * We don't want to free any memory from this region.
+ */
+ if (free_start) {
+ /* adjust free_end then free region */
+ if (free_end > md->phys_addr)
+ free_end -= PAGE_SIZE;
+ total_freed += free_region(free_start, free_end);
+ free_start = 0;
+ }
+ keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+ continue;
+ }
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA) {
+ /* no need to free this region */
+ continue;
+ }
+
+ /*
+ * We want to free memory from this region.
+ */
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (free_start) {
+ if (paddr <= free_end)
+ free_end = paddr + size;
+ else {
+ total_freed += free_region(free_start, free_end);
+ free_start = paddr;
+ free_end = paddr + size;
+ }
+ } else {
+ free_start = paddr;
+ free_end = paddr + size;
+ }
+ if (free_start < keep_end) {
+ free_start += PAGE_SIZE;
+ if (free_start >= free_end)
+ free_start = 0;
+ }
+ }
+ if (free_start)
+ total_freed += free_region(free_start, free_end);
+
+ if (total_freed)
+ pr_info("Freed 0x%llx bytes of EFI boot services memory",
+ total_freed);
+}
+
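To make the clipping rule above concrete, here is a minimal sketch (illustrative only, not part of the patch; the helper name is hypothetical) of how a candidate free range is trimmed against a preceding region that must be kept:

	/* Trim [free_start, free_end) against a kept region ending at keep_end. */
	static u64 clip_free_range(u64 keep_end, u64 free_start, u64 free_end)
	{
		if (free_start < keep_end) {
			/* drop the kernel page shared with the kept region */
			free_start += PAGE_SIZE;
			if (free_start >= free_end)
				return 0;	/* nothing left to free */
		}
		return free_end - free_start;	/* freeable bytes */
	}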
+void __init efi_init(void)
+{
+ struct efi_fdt_params params;
+
+ /* Grab UEFI information placed in FDT by stub */
+ if (!efi_get_fdt_params(&params, uefi_debug))
+ return;
+
+ efi_system_table = params.system_table;
+
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
+ memmap.phys_map = (void *)params.mmap;
+ memmap.map = early_memremap(params.mmap, params.mmap_size);
+ memmap.map_end = memmap.map + params.mmap_size;
+ memmap.desc_size = params.desc_size;
+ memmap.desc_version = params.desc_ver;
+
+ if (uefi_init() < 0)
+ return;
+
+ reserve_regions();
+}
+
+void __init efi_idmap_init(void)
+{
+ if (!efi_enabled(EFI_BOOT))
+ return;
+
+ /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
+ efi_setup_idmap();
+}
+
+static int __init remap_region(efi_memory_desc_t *md, void **new)
+{
+ u64 paddr, vaddr, npages, size;
+
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (is_normal_ram(md))
+ vaddr = (__force u64)ioremap_cache(paddr, size);
+ else
+ vaddr = (__force u64)ioremap(paddr, size);
+
+ if (!vaddr) {
+ pr_err("Unable to remap 0x%llx pages @ %p\n",
+ npages, (void *)paddr);
+ return 0;
+ }
+
+	/* adjust for any rounding when the EFI and system page sizes differ */
+ md->virt_addr = vaddr + (md->phys_addr - paddr);
+
+ if (uefi_debug)
+ pr_info(" EFI remap 0x%012llx => %p\n",
+ md->phys_addr, (void *)md->virt_addr);
+
+ memcpy(*new, md, memmap.desc_size);
+ *new += memmap.desc_size;
+
+ return 1;
+}
+
+/*
+ * Switch UEFI from an identity map to a kernel virtual map
+ */
+static int __init arm64_enter_virtual_mode(void)
+{
+ efi_memory_desc_t *md;
+ phys_addr_t virtmap_phys;
+ void *virtmap, *virt_md;
+ efi_status_t status;
+ u64 mapsize;
+ int count = 0;
+ unsigned long flags;
+
+ if (!efi_enabled(EFI_BOOT)) {
+ pr_info("EFI services will not be available.\n");
+ return -1;
+ }
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+ /* replace early memmap mapping with permanent mapping */
+ mapsize = memmap.map_end - memmap.map;
+ early_memunmap(memmap.map, mapsize);
+ memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
+ mapsize);
+ memmap.map_end = memmap.map + mapsize;
+
+ efi.memmap = &memmap;
+
+ /* Map the runtime regions */
+ virtmap = kmalloc(mapsize, GFP_KERNEL);
+ if (!virtmap) {
+ pr_err("Failed to allocate EFI virtual memmap\n");
+ return -1;
+ }
+ virtmap_phys = virt_to_phys(virtmap);
+ virt_md = virtmap;
+
+ for_each_efi_memory_desc(&memmap, md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (remap_region(md, &virt_md))
+ ++count;
+ }
+
+ efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
+ if (efi.systab)
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
+ local_irq_save(flags);
+ cpu_switch_mm(idmap_pg_dir, &init_mm);
+
+ /* Call SetVirtualAddressMap with the physical address of the map */
+ runtime = efi.systab->runtime;
+ efi.set_virtual_address_map = runtime->set_virtual_address_map;
+
+ status = efi.set_virtual_address_map(count * memmap.desc_size,
+ memmap.desc_size,
+ memmap.desc_version,
+ (efi_memory_desc_t *)virtmap_phys);
+ cpu_set_reserved_ttbr0();
+ flush_tlb_all();
+ local_irq_restore(flags);
+
+ kfree(virtmap);
+
+ free_boot_services();
+
+ if (status != EFI_SUCCESS) {
+ pr_err("Failed to set EFI virtual address map! [%lx]\n",
+ status);
+ return -1;
+ }
+
+ /* Set up runtime services function pointers */
+ runtime = efi.systab->runtime;
+ efi.get_time = runtime->get_time;
+ efi.set_time = runtime->set_time;
+ efi.get_wakeup_time = runtime->get_wakeup_time;
+ efi.set_wakeup_time = runtime->set_wakeup_time;
+ efi.get_variable = runtime->get_variable;
+ efi.get_next_variable = runtime->get_next_variable;
+ efi.set_variable = runtime->set_variable;
+ efi.query_variable_info = runtime->query_variable_info;
+ efi.update_capsule = runtime->update_capsule;
+ efi.query_capsule_caps = runtime->query_capsule_caps;
+ efi.get_next_high_mono_count = runtime->get_next_high_mono_count;
+ efi.reset_system = runtime->reset_system;
+
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ return 0;
+}
+early_initcall(arm64_enter_virtual_mode);
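For context, once SetVirtualAddressMap() has succeeded and the pointers above are populated, generic code may call the runtime services through the efi table. A hedged sketch using the standard efi_* types (error handling trimmed):

	efi_time_t tm;
	efi_time_cap_t cap;

	if (efi_enabled(EFI_RUNTIME_SERVICES) &&
	    efi.get_time(&tm, &cap) == EFI_SUCCESS)
		pr_info("UEFI RTC: %04u-%02u-%02u\n", tm.year, tm.month, tm.day);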
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
new file mode 100644
index 000000000000..b051871f2965
--- /dev/null
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -0,0 +1,218 @@
+/*
+ * arch/arm64/kernel/entry-ftrace.S
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+#include <asm/insn.h>
+
+/*
+ * Gcc with -pg will put the following code at the beginning of each function:
+ *	mov x0, x30
+ *	bl _mcount
+ *	[function's body ...]
+ * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
+ * ftrace is enabled.
+ *
+ * Note that the x0 argument is not used here because the lr (x30) of the
+ * instrumented function can be obtained at any time by unwinding the call
+ * stack, as long as the kernel is compiled without -fomit-frame-pointer
+ * (i.e. with CONFIG_FRAME_POINTER, which is forced on arm64).
+ *
+ * stack layout after mcount_enter in _mcount():
+ *
+ * current sp/fp => 0:+-----+
+ * in _mcount() | x29 | -> instrumented function's fp
+ * +-----+
+ * | x30 | -> _mcount()'s lr (= instrumented function's pc)
+ * old sp => +16:+-----+
+ * when instrumented | |
+ * function calls | ... |
+ * _mcount() | |
+ * | |
+ * instrumented => +xx:+-----+
+ * function's fp | x29 | -> parent's fp
+ * +-----+
+ * | x30 | -> instrumented function's lr (= parent's pc)
+ * +-----+
+ * | ... |
+ */
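As a rough C-level rendering of the prologue described above (conceptual only; the real insertion is the two-instruction assembly sequence shown in the comment, and foo() is a hypothetical instrumented function):

	void foo(void)
	{
		_mcount(__builtin_return_address(0));	/* inserted by gcc -pg */
		/* ... function body ... */
	}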
+
+ .macro mcount_enter
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ .endm
+
+ .macro mcount_exit
+ ldp x29, x30, [sp], #16
+ ret
+ .endm
+
+ .macro mcount_adjust_addr rd, rn
+ sub \rd, \rn, #AARCH64_INSN_SIZE
+ .endm
+
+ /* for instrumented function's parent */
+ .macro mcount_get_parent_fp reg
+ ldr \reg, [x29]
+ ldr \reg, [\reg]
+ .endm
+
+ /* for instrumented function */
+ .macro mcount_get_pc0 reg
+ mcount_adjust_addr \reg, x30
+ .endm
+
+ .macro mcount_get_pc reg
+ ldr \reg, [x29, #8]
+ mcount_adjust_addr \reg, \reg
+ .endm
+
+ .macro mcount_get_lr reg
+ ldr \reg, [x29]
+ ldr \reg, [\reg, #8]
+ mcount_adjust_addr \reg, \reg
+ .endm
+
+ .macro mcount_get_lr_addr reg
+ ldr \reg, [x29]
+ add \reg, \reg, #8
+ .endm
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+/*
+ * void _mcount(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function makes calls, if enabled, to:
+ * - tracer function to probe instrumented function's entry,
+ * - ftrace_graph_caller to set up an exit hook
+ */
+ENTRY(_mcount)
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ ldr x0, =ftrace_trace_stop
+ ldr x0, [x0] // if ftrace_trace_stop
+ ret // return;
+#endif
+ mcount_enter
+
+ ldr x0, =ftrace_trace_function
+ ldr x2, [x0]
+ adr x0, ftrace_stub
+ cmp x0, x2 // if (ftrace_trace_function
+ b.eq skip_ftrace_call // != ftrace_stub) {
+
+ mcount_get_pc x0 // function's pc
+ mcount_get_lr x1 // function's lr (= parent's pc)
+ blr x2 // (*ftrace_trace_function)(pc, lr);
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
+skip_ftrace_call: // return;
+ mcount_exit // }
+#else
+ mcount_exit // return;
+ // }
+skip_ftrace_call:
+ ldr x1, =ftrace_graph_return
+ ldr x2, [x1] // if ((ftrace_graph_return
+ cmp x0, x2 // != ftrace_stub)
+ b.ne ftrace_graph_caller
+
+ ldr x1, =ftrace_graph_entry // || (ftrace_graph_entry
+ ldr x2, [x1] // != ftrace_graph_entry_stub))
+ ldr x0, =ftrace_graph_entry_stub
+ cmp x0, x2
+ b.ne ftrace_graph_caller // ftrace_graph_caller();
+
+ mcount_exit
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+ENDPROC(_mcount)
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+/*
+ * _mcount() is used to build the kernel with the -pg option, but all the
+ * branch instructions to _mcount() are replaced with NOPs at kernel start-up.
+ * Later, each NOP is patched to a branch to ftrace_caller() when tracing is
+ * enabled, or back to a NOP when it is disabled, on a per-function basis.
+ */
+ENTRY(_mcount)
+ ret
+ENDPROC(_mcount)
+
+/*
+ * void ftrace_caller(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function is a counterpart of _mcount() in 'static' ftrace, and
+ * makes calls to:
+ * - tracer function to probe instrumented function's entry,
+ * - ftrace_graph_caller to set up an exit hook
+ */
+ENTRY(ftrace_caller)
+ mcount_enter
+
+ mcount_get_pc0 x0 // function's pc
+ mcount_get_lr x1 // function's lr
+
+ .global ftrace_call
+ftrace_call: // tracer(pc, lr);
+ nop // This will be replaced with "bl xxx"
+ // where xxx can be any kind of tracer.
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .global ftrace_graph_call
+ftrace_graph_call: // ftrace_graph_caller();
+ nop // If enabled, this will be replaced
+ // with "b ftrace_graph_caller".
+#endif
+
+ mcount_exit
+ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(ftrace_stub)
+ ret
+ENDPROC(ftrace_stub)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * void ftrace_graph_caller(void)
+ *
+ * Called from _mcount() or ftrace_caller() when function_graph tracer is
+ * selected.
+ * This function w/ prepare_ftrace_return() fakes link register's value on
+ * the call stack in order to intercept instrumented function's return path
+ * and run return_to_handler() later on its exit.
+ */
+ENTRY(ftrace_graph_caller)
+ mcount_get_lr_addr x0 // pointer to function's saved lr
+ mcount_get_pc x1 // function's pc
+ mcount_get_parent_fp x2 // parent's fp
+ bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp)
+
+ mcount_exit
+ENDPROC(ftrace_graph_caller)
+
+/*
+ * void return_to_handler(void)
+ *
+ * Run ftrace_return_to_handler() before going back to parent.
+ * @fp is checked against the value passed by ftrace_graph_caller()
+ * only when CONFIG_FUNCTION_GRAPH_FP_TEST is enabled.
+ */
+ENTRY(return_to_handler)
+ str x0, [sp, #-16]!
+ mov x0, x29 // parent's fp
+ bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
+ mov x30, x0 // restore the original return address
+ ldr x0, [sp], #16
+ ret
+END(return_to_handler)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1d1314280a03..1ee38bd61f03 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -89,6 +89,17 @@
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ldr x23, [sp, #S_SP] // load return stack pointer
+#ifdef CONFIG_ARM64_ERRATUM_845719
+ tbz x22, #4, 1f
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+ mrs x29, contextidr_el1
+ msr contextidr_el1, x29
+1:
+#else
+ msr contextidr_el1, xzr
+1:
+#endif
+#endif
.endif
.if \ret
ldr x1, [sp, #S_X1] // preserve x0 (syscall return)
@@ -121,7 +132,7 @@
.macro get_thread_info, rd
mov \rd, sp
- and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack
+ and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
.endm
/*
@@ -275,7 +286,6 @@ el1_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x0, far_el1
- mov x1, x25
mov x2, sp
b do_sp_pc_abort
el1_undef:
@@ -288,6 +298,8 @@ el1_dbg:
/*
* Debug exception handling
*/
+ cmp x24, #ESR_EL1_EC_BRK64 // if BRK64
+ cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
mrs x0, far_el1
mov x2, sp // struct pt_regs
@@ -311,14 +323,14 @@ el1_irq:
#endif
#ifdef CONFIG_PREEMPT
get_thread_info tsk
- ldr x24, [tsk, #TI_PREEMPT] // get preempt count
- add x0, x24, #1 // increment it
- str x0, [tsk, #TI_PREEMPT]
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ add w0, w24, #1 // increment it
+ str w0, [tsk, #TI_PREEMPT]
#endif
irq_handler
#ifdef CONFIG_PREEMPT
- str x24, [tsk, #TI_PREEMPT] // restore preempt count
- cbnz x24, 1f // preempt count != 0
+ str w24, [tsk, #TI_PREEMPT] // restore preempt count
+ cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
@@ -423,6 +435,7 @@ el0_da:
* Data abort handling
*/
mrs x0, far_el1
+ bic x0, x0, #(0xff << 56)
disable_step x1
isb
enable_dbg
@@ -476,6 +489,8 @@ el0_undef:
* Undefined instruction
*/
mov x0, sp
+ // enable interrupts before calling the main handler
+ enable_irq
b do_undefinstr
el0_dbg:
/*
@@ -506,15 +521,15 @@ el0_irq_naked:
#endif
get_thread_info tsk
#ifdef CONFIG_PREEMPT
- ldr x24, [tsk, #TI_PREEMPT] // get preempt count
- add x23, x24, #1 // increment it
- str x23, [tsk, #TI_PREEMPT]
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ add w23, w24, #1 // increment it
+ str w23, [tsk, #TI_PREEMPT]
#endif
irq_handler
#ifdef CONFIG_PREEMPT
- ldr x0, [tsk, #TI_PREEMPT]
- str x24, [tsk, #TI_PREEMPT]
- cmp x0, x23
+ ldr w0, [tsk, #TI_PREEMPT]
+ str w24, [tsk, #TI_PREEMPT]
+ cmp w0, w23
b.eq 1f
mov x1, #0
str x1, [x1] // BUG
@@ -641,8 +656,9 @@ el0_svc_naked: // compat entry point
enable_irq
get_thread_info tsk
- ldr x16, [tsk, #TI_FLAGS] // check for syscall tracing
- tbnz x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
+ ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
+ tst x16, #_TIF_SYSCALL_WORK
+ b.ne __sys_trace
adr lr, ret_fast_syscall // return address
cmp scno, sc_nr // check upper syscall limit
b.hs ni_sys
@@ -658,9 +674,8 @@ ENDPROC(el0_svc)
* switches, and waiting for our parent to respond.
*/
__sys_trace:
- mov x1, sp
- mov w0, #0 // trace entry
- bl syscall_trace
+ mov x0, sp
+ bl syscall_trace_enter
adr lr, __sys_trace_return // return address
uxtw scno, w0 // syscall number (possibly new)
mov x1, sp // pointer to regs
@@ -675,9 +690,8 @@ __sys_trace:
__sys_trace_return:
str x0, [sp] // save returned x0
- mov x1, sp
- mov w0, #1 // trace exit
- bl syscall_trace
+ mov x0, sp
+ bl syscall_trace_exit
b ret_to_user
/*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index e8b8357aedb4..522df9c7f3a4 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -79,10 +80,72 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
+ preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_load_state(&current->thread.fpsimd_state);
+ preempt_enable();
}
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+ /* Avoid using the NEON in interrupt context */
+ BUG_ON(in_interrupt());
+ preempt_disable();
+
+ if (current->mm)
+ fpsimd_save_state(&current->thread.fpsimd_state);
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+ if (current->mm)
+ fpsimd_load_state(&current->thread.fpsimd_state);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
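A hedged usage sketch of the pair above; my_neon_transform() is a hypothetical NEON-accelerated helper (the real in-tree users are the arch/arm64/crypto drivers added elsewhere in this series):

	void crypto_one_block(u8 *dst, const u8 *src)
	{
		kernel_neon_begin();		/* save FPSIMD state, disable preemption */
		my_neon_transform(dst, src);
		kernel_neon_end();		/* restore FPSIMD state, re-enable preemption */
	}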
+
+#ifdef CONFIG_CPU_PM
+static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ if (current->mm)
+ fpsimd_save_state(&current->thread.fpsimd_state);
+ break;
+ case CPU_PM_EXIT:
+ if (current->mm)
+ fpsimd_load_state(&current->thread.fpsimd_state);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpsimd_cpu_pm_notifier_block = {
+ .notifier_call = fpsimd_cpu_pm_notifier,
+};
+
+static void fpsimd_pm_init(void)
+{
+ cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
+}
+
+#else
+static inline void fpsimd_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
/*
* FP/SIMD support code initialisation.
*/
@@ -101,6 +164,8 @@ static int __init fpsimd_init(void)
else
elf_hwcap |= HWCAP_ASIMD;
+ fpsimd_pm_init();
+
return 0;
}
late_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
new file mode 100644
index 000000000000..649890a3ac4e
--- /dev/null
+++ b/arch/arm64/kernel/ftrace.c
@@ -0,0 +1,177 @@
+/*
+ * arch/arm64/kernel/ftrace.c
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/swab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ftrace.h>
+#include <asm/insn.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Replace a single instruction, which may be a branch or NOP.
+ * If @validate == true, a replaced instruction is checked against 'old'.
+ */
+static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
+ bool validate)
+{
+ u32 replaced;
+
+ /*
+ * Note:
+	 * Due to modules and __init, code can disappear and change, so
+	 * we need to protect against faulting as well as code changing.
+	 * We do this by using aarch64_insn_*(), which are based on
+	 * probe_kernel_*().
+ *
+ * No lock is held here because all the modifications are run
+ * through stop_machine().
+ */
+ if (validate) {
+ if (aarch64_insn_read((void *)pc, &replaced))
+ return -EFAULT;
+
+ if (replaced != old)
+ return -EINVAL;
+ }
+ if (aarch64_insn_patch_text_nosync((void *)pc, new))
+ return -EPERM;
+
+ return 0;
+}
+
+/*
+ * Replace tracer function in ftrace_caller()
+ */
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long pc;
+ u32 new;
+
+ pc = (unsigned long)&ftrace_call;
+ new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
+
+ return ftrace_modify_code(pc, 0, new, false);
+}
+
+/*
+ * Turn on the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old, new;
+
+ old = aarch64_insn_gen_nop();
+ new = aarch64_insn_gen_branch_imm(pc, addr, true);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+/*
+ * Turn off the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old, new;
+
+ old = aarch64_insn_gen_branch_imm(pc, addr, true);
+ new = aarch64_insn_gen_nop();
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+ *(unsigned long *)data = 0;
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * The function_graph tracer expects ftrace_return_to_handler() to be called
+ * on the way back to the parent. For this purpose, this function is called
+ * from _mcount() or ftrace_caller() to replace the return address (*parent)
+ * on the call stack with return_to_handler.
+ *
+ * Note that @frame_pointer is only used for a sanity check later.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long old;
+ struct ftrace_graph_ent trace;
+ int err;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Note:
+ * No protection against faulting at *parent, which may be seen
+ * on other archs. It's unlikely on AArch64.
+ */
+ old = *parent;
+ *parent = return_hooker;
+
+ trace.func = self_addr;
+ trace.depth = current->curr_ret_stack + 1;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ *parent = old;
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+ frame_pointer);
+ if (err == -EBUSY) {
+ *parent = old;
+ return;
+ }
+}
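To illustrate the effect (a conceptual sketch only, not executable tracer logic): for a call parent() -> child(), the return address saved in child()'s frame is swapped so that child() "returns" into return_to_handler():

	void parent(void)
	{
		child();	/* child's saved lr initially points here */
	}
	/*
	 * prepare_ftrace_return() rewrites that saved lr to return_to_handler,
	 * so child() branches there on exit; return_to_handler() then calls
	 * ftrace_return_to_handler() to record the exit and recover the real
	 * return address before branching back into parent().
	 */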
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
+ * depending on @enable.
+ */
+static int ftrace_modify_graph_caller(bool enable)
+{
+ unsigned long pc = (unsigned long)&ftrace_graph_call;
+ u32 branch, nop;
+
+ branch = aarch64_insn_gen_branch_imm(pc,
+ (unsigned long)ftrace_graph_caller, false);
+ nop = aarch64_insn_gen_nop();
+
+ if (enable)
+ return ftrace_modify_code(pc, nop, branch, true);
+ else
+ return ftrace_modify_code(pc, branch, nop, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 53dcae49e729..6254f4a28eed 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -22,10 +22,12 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
+#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
@@ -34,29 +36,21 @@
#include <asm/page.h>
#include <asm/virt.h>
-/*
- * swapper_pg_dir is the virtual address of the initial page table. We place
- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
- * 2 pages and is placed below swapper_pg_dir.
- */
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
-#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
-#error KERNEL_RAM_VADDR must start at 0xXXX80000
+#if (TEXT_OFFSET & 0xfff) != 0
+#error TEXT_OFFSET must be at least 4KB aligned
+#elif (PAGE_OFFSET & 0x1fffff) != 0
+#error PAGE_OFFSET must be at least 2MB aligned
+#elif TEXT_OFFSET > 0x1fffff
+#error TEXT_OFFSET must be less than 2MB
#endif
-#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
-
- .globl swapper_pg_dir
- .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
-
- .globl idmap_pg_dir
- .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
-
- .macro pgtbl, ttb0, ttb1, phys
- add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
- sub \ttb0, \ttb1, #IDMAP_DIR_SIZE
+ .macro pgtbl, ttb0, ttb1, virt_to_phys
+ ldr \ttb1, =swapper_pg_dir
+ ldr \ttb0, =idmap_pg_dir
+ add \ttb1, \ttb1, \virt_to_phys
+ add \ttb0, \ttb0, \virt_to_phys
.endm
#ifdef CONFIG_ARM64_64K_PAGES
@@ -107,16 +101,137 @@
/*
* DO NOT MODIFY. Image header expected by Linux boot-loaders.
*/
+#ifdef CONFIG_EFI
+efi_head:
+ /*
+ * This add instruction has no meaningful effect except that
+ * its opcode forms the magic "MZ" signature required by UEFI.
+ */
+ add x13, x18, #0x16
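+	// (opcode 0x91005a4d; stored little-endian, its first two bytes are
+	// 0x4d 0x5a, i.e. "MZ")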
+ b stext
+#else
b stext // branch to kernel start, magic
.long 0 // reserved
+#endif
.quad TEXT_OFFSET // Image load offset from start of RAM
.quad 0 // reserved
.quad 0 // reserved
+ .quad 0 // reserved
+ .quad 0 // reserved
+ .quad 0 // reserved
+ .byte 0x41 // Magic number, "ARM\x64"
+ .byte 0x52
+ .byte 0x4d
+ .byte 0x64
+#ifdef CONFIG_EFI
+ .long pe_header - efi_head // Offset to the PE header.
+#else
+ .word 0 // reserved
+#endif
+
+#ifdef CONFIG_EFI
+ .align 3
+pe_header:
+ .ascii "PE"
+ .short 0
+coff_header:
+ .short 0xaa64 // AArch64
+ .short 2 // nr_sections
+ .long 0 // TimeDateStamp
+ .long 0 // PointerToSymbolTable
+ .long 1 // NumberOfSymbols
+ .short section_table - optional_header // SizeOfOptionalHeader
+ .short 0x206 // Characteristics.
+ // IMAGE_FILE_DEBUG_STRIPPED |
+ // IMAGE_FILE_EXECUTABLE_IMAGE |
+ // IMAGE_FILE_LINE_NUMS_STRIPPED
+optional_header:
+ .short 0x20b // PE32+ format
+ .byte 0x02 // MajorLinkerVersion
+ .byte 0x14 // MinorLinkerVersion
+ .long _edata - stext // SizeOfCode
+ .long 0 // SizeOfInitializedData
+ .long 0 // SizeOfUninitializedData
+ .long efi_stub_entry - efi_head // AddressOfEntryPoint
+ .long stext - efi_head // BaseOfCode
+
+extra_header_fields:
+ .quad 0 // ImageBase
+ .long 0x20 // SectionAlignment
+ .long 0x8 // FileAlignment
+ .short 0 // MajorOperatingSystemVersion
+ .short 0 // MinorOperatingSystemVersion
+ .short 0 // MajorImageVersion
+ .short 0 // MinorImageVersion
+ .short 0 // MajorSubsystemVersion
+ .short 0 // MinorSubsystemVersion
+ .long 0 // Win32VersionValue
+
+ .long _edata - efi_head // SizeOfImage
+
+ // Everything before the kernel image is considered part of the header
+ .long stext - efi_head // SizeOfHeaders
+ .long 0 // CheckSum
+ .short 0xa // Subsystem (EFI application)
+ .short 0 // DllCharacteristics
+ .quad 0 // SizeOfStackReserve
+ .quad 0 // SizeOfStackCommit
+ .quad 0 // SizeOfHeapReserve
+ .quad 0 // SizeOfHeapCommit
+ .long 0 // LoaderFlags
+ .long 0x6 // NumberOfRvaAndSizes
+
+ .quad 0 // ExportTable
+ .quad 0 // ImportTable
+ .quad 0 // ResourceTable
+ .quad 0 // ExceptionTable
+ .quad 0 // CertificationTable
+ .quad 0 // BaseRelocationTable
+
+ // Section table
+section_table:
+
+ /*
+ * The EFI application loader requires a relocation section
+ * because EFI applications must be relocatable. This is a
+ * dummy section as far as we are concerned.
+ */
+ .ascii ".reloc"
+ .byte 0
+ .byte 0 // end of 0 padding of section name
+ .long 0
+ .long 0
+ .long 0 // SizeOfRawData
+ .long 0 // PointerToRawData
+ .long 0 // PointerToRelocations
+ .long 0 // PointerToLineNumbers
+ .short 0 // NumberOfRelocations
+ .short 0 // NumberOfLineNumbers
+ .long 0x42100040 // Characteristics (section flags)
+
+
+ .ascii ".text"
+ .byte 0
+ .byte 0
+ .byte 0 // end of 0 padding of section name
+ .long _edata - stext // VirtualSize
+ .long stext - efi_head // VirtualAddress
+ .long _edata - stext // SizeOfRawData
+ .long stext - efi_head // PointerToRawData
+
+ .long 0 // PointerToRelocations (0 for executables)
+ .long 0 // PointerToLineNumbers (0 for executables)
+ .short 0 // NumberOfRelocations (0 for executables)
+ .short 0 // NumberOfLineNumbers (0 for executables)
+ .long 0xe0500020 // Characteristics (section flags)
+ .align 5
+#endif
ENTRY(stext)
mov x21, x0 // x21=FDT
+ bl el2_setup // Drop to EL1, w20=cpu_boot_mode
bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
- bl el2_setup // Drop to EL1
+ bl set_cpu_boot_mode_flag
mrs x22, midr_el1 // x22=cpuid
mov x0, x22
bl lookup_processor_type
@@ -142,21 +257,30 @@ ENDPROC(stext)
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
+ *
+ * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
+ * booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
mrs x0, CurrentEL
cmp x0, #PSR_MODE_EL2t
ccmp x0, #PSR_MODE_EL2h, #0x4, ne
- ldr x0, =__boot_cpu_mode // Compute __boot_cpu_mode
- add x0, x0, x28
- b.eq 1f
- str wzr, [x0] // Remember we don't have EL2...
+ b.ne 1f
+ mrs x0, sctlr_el2
+CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
+CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
+ msr sctlr_el2, x0
+ b 2f
+1: mrs x0, sctlr_el1
+CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
+CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
+ msr sctlr_el1, x0
+ mov w20, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
+ isb
ret
/* Hyp configuration. */
-1: ldr w1, =BOOT_CPU_MODE_EL2
- str w1, [x0, #4] // This CPU has EL2
- mov x0, #(1 << 31) // 64-bit EL1
+2: mov x0, #(1 << 31) // 64-bit EL1
msr hcr_el2, x0
/* Generic timers. */
@@ -165,6 +289,23 @@ ENTRY(el2_setup)
msr cnthctl_el2, x0
msr cntvoff_el2, xzr // Clear virtual offset
+#ifdef CONFIG_ARM_GIC_V3
+ /* GICv3 system register access */
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #24, #4
+ cmp x0, #1
+ b.ne 3f
+
+ mrs_s x0, ICC_SRE_EL2
+ orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
+ orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1
+ msr_s ICC_SRE_EL2, x0
+ isb // Make sure SRE is now set
+ msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
+
+3:
+#endif
+
/* Populate ID registers. */
mrs x0, midr_el1
mrs x1, mpidr_el1
@@ -173,7 +314,8 @@ ENTRY(el2_setup)
/* sctlr_el1 */
mov x0, #0x0800 // Set/clear RES{1,0} bits
- movk x0, #0x30d0, lsl #16
+CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
+CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr sctlr_el1, x0
/* Coprocessor traps. */
@@ -196,28 +338,41 @@ ENTRY(el2_setup)
PSR_MODE_EL1h)
msr spsr_el2, x0
msr elr_el2, lr
+ mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
eret
ENDPROC(el2_setup)
/*
+ * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
+ * in x20. See arch/arm64/include/asm/virt.h for more info.
+ */
+ENTRY(set_cpu_boot_mode_flag)
+ ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode
+ add x1, x1, x28
+ cmp w20, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+ add x1, x1, #4
+1: str w20, [x1] // This CPU has booted in EL1
+ dmb sy
+ dc ivac, x1 // Invalidate potentially stale cache line
+ ret
+ENDPROC(set_cpu_boot_mode_flag)
+
+/*
* We need to find out the CPU boot mode long after boot, so we need to
* store it in a writable variable.
*
* This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it.
*/
- .pushsection .data
+ .pushsection .data..cacheline_aligned
ENTRY(__boot_cpu_mode)
+ .align L1_CACHE_SHIFT
.long BOOT_CPU_MODE_EL2
.long 0
.popsection
- .align 3
-2: .quad .
- .quad PAGE_OFFSET
-
#ifdef CONFIG_SMP
- .pushsection .smp.pen.text, "ax"
.align 3
1: .quad .
.quad secondary_holding_pen_release
@@ -227,8 +382,9 @@ ENTRY(__boot_cpu_mode)
* cores are held until we're ready for them to initialise.
*/
ENTRY(secondary_holding_pen)
- bl __calc_phys_offset // x24=phys offset
- bl el2_setup // Drop to EL1
+ bl el2_setup // Drop to EL1, w20=cpu_boot_mode
+ bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+ bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
ldr x1, =MPIDR_HWID_BITMASK
and x0, x0, x1
@@ -242,7 +398,16 @@ pen: ldr x4, [x3]
wfe
b pen
ENDPROC(secondary_holding_pen)
- .popsection
+
+ /*
+ * Secondary entry point that jumps straight into the kernel. Only to
+ * be used where CPUs are brought online dynamically by the kernel.
+ */
+ENTRY(secondary_entry)
+ bl __calc_phys_offset // x2=phys offset
+ bl el2_setup // Drop to EL1
+ b secondary_startup
+ENDPROC(secondary_entry)
ENTRY(secondary_startup)
/*
@@ -254,7 +419,7 @@ ENTRY(secondary_startup)
mov x23, x0 // x23=current cpu_table
cbz x23, __error_p // invalid processor (x23=0)?
- pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1
+ pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1
ldr x12, [x23, #CPU_INFO_SETUP]
add x12, x12, x28 // __virt_to_phys
blr x12 // initialise processor
@@ -296,8 +461,13 @@ ENDPROC(__enable_mmu)
* x27 = *virtual* address to jump to upon completion
*
* other registers depend on the function called upon completion
+ *
+ * We align the entire function to the smallest power of two larger than it to
+ * ensure it fits within a single block map entry. Otherwise, were PHYS_OFFSET
+ * close to the end of a 512MB or 1GB block, we might require an additional
+ * table to map the entire function.
*/
- .align 6
+ .align 4
__turn_mmu_on:
msr sctlr_el1, x0
isb
@@ -340,26 +510,18 @@ ENDPROC(__calc_phys_offset)
* Preserves: tbl, flags
* Corrupts: phys, start, end, pstate
*/
- .macro create_block_map, tbl, flags, phys, start, end, idmap=0
+ .macro create_block_map, tbl, flags, phys, start, end
lsr \phys, \phys, #BLOCK_SHIFT
- .if \idmap
- and \start, \phys, #PTRS_PER_PTE - 1 // table index
- .else
lsr \start, \start, #BLOCK_SHIFT
and \start, \start, #PTRS_PER_PTE - 1 // table index
- .endif
orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
- .ifnc \start,\end
lsr \end, \end, #BLOCK_SHIFT
and \end, \end, #PTRS_PER_PTE - 1 // table end index
- .endif
9999: str \phys, [\tbl, \start, lsl #3] // store the entry
- .ifnc \start,\end
add \start, \start, #1 // next entry
add \phys, \phys, #BLOCK_SIZE // next block
cmp \start, \end
b.ls 9999b
- .endif
.endm
/*
@@ -368,10 +530,19 @@ ENDPROC(__calc_phys_offset)
* - identity mapping to enable the MMU (low address, TTBR0)
* - first few MB of the kernel linear mapping to jump to once the MMU has
* been enabled, including the FDT blob (TTBR1)
- * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
+ * - pgd entry for fixed mappings (TTBR1)
*/
__create_page_tables:
- pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
+ pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
+ mov x27, lr
+
+ /*
+ * Invalidate the idmap and swapper page tables to avoid potential
+ * dirty cache lines being evicted.
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
+ bl __inval_cache_range
/*
* Clear the idmap and swapper page tables.
@@ -391,9 +562,13 @@ __create_page_tables:
* Create the identity mapping.
*/
add x0, x25, #PAGE_SIZE // section table address
- adr x3, __turn_mmu_on // virtual/physical address
+ ldr x3, =KERNEL_START
+ add x3, x3, x28 // __pa(KERNEL_START)
create_pgd_entry x25, x0, x3, x5, x6
- create_block_map x0, x7, x3, x5, x5, idmap=1
+ ldr x6, =KERNEL_END
+ mov x5, x3 // __pa(KERNEL_START)
+ add x6, x6, x28 // __pa(KERNEL_END)
+ create_block_map x0, x7, x3, x5, x6
/*
* Map the kernel image (starting with PHYS_OFFSET).
@@ -401,7 +576,7 @@ __create_page_tables:
add x0, x26, #PAGE_SIZE // section table address
mov x5, #PAGE_OFFSET
create_pgd_entry x26, x0, x5, x3, x6
- ldr x6, =KERNEL_END - 1
+ ldr x6, =KERNEL_END
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
@@ -421,15 +596,16 @@ __create_page_tables:
sub x6, x6, #1 // inclusive range
create_block_map x0, x7, x3, x5, x6
1:
-#ifdef CONFIG_EARLY_PRINTK
/*
- * Create the pgd entry for the UART mapping. The full mapping is done
- * later based earlyprintk kernel parameter.
+ * Since the page tables have been populated with non-cacheable
+ * accesses (MMU disabled), invalidate the idmap and swapper page
+ * tables again to remove any speculatively loaded cache lines.
*/
- ldr x5, =EARLYCON_IOBASE // UART virtual address
- add x0, x26, #2 * PAGE_SIZE // section table address
- create_pgd_entry x26, x0, x5, x6, x7
-#endif
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
+ bl __inval_cache_range
+
+ mov lr, x27
ret
ENDPROC(__create_page_tables)
.ltorg
@@ -438,10 +614,8 @@ ENDPROC(__create_page_tables)
.type __switch_data, %object
__switch_data:
.quad __mmap_switched
- .quad __data_loc // x4
- .quad _data // x5
.quad __bss_start // x6
- .quad _end // x7
+ .quad __bss_stop // x7
.quad processor_id // x4
.quad __fdt_pointer // x5
.quad memstart_addr // x6
@@ -454,15 +628,7 @@ __switch_data:
__mmap_switched:
adr x3, __switch_data + 8
- ldp x4, x5, [x3], #16
ldp x6, x7, [x3], #16
- cmp x4, x5 // Copy data segment if needed
-1: ccmp x5, x6, #4, ne
- b.eq 2f
- ldr x16, [x4], #8
- str x16, [x5], #8
- b 1b
-2:
1: cmp x6, x7
b.hs 2f
str xzr, [x6], #8 // Clear BSS
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 5ab825c59db9..6de3460ede4c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -20,13 +20,14 @@
#define pr_fmt(fmt) "hw-breakpoint: " fmt
+#include <linux/compat.h>
+#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
-#include <asm/compat.h>
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
@@ -169,15 +170,68 @@ static enum debug_el debug_exception_level(int privilege)
}
}
-/*
- * Install a perf counter breakpoint.
+enum hw_breakpoint_ops {
+ HW_BREAKPOINT_INSTALL,
+ HW_BREAKPOINT_UNINSTALL,
+ HW_BREAKPOINT_RESTORE
+};
+
+/**
+ * hw_breakpoint_slot_setup - Find and setup a perf slot according to
+ * operations
+ *
+ * @slots: pointer to array of slots
+ * @max_slots: max number of slots
+ * @bp: perf_event to setup
+ * @ops: operation to be carried out on the slot
+ *
+ * Return:
+ * slot index on success
+ * -ENOSPC if no slot is available/matches
+ * -EINVAL on wrong operations parameter
*/
-int arch_install_hw_breakpoint(struct perf_event *bp)
+static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
+ struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
+{
+ int i;
+ struct perf_event **slot;
+
+ for (i = 0; i < max_slots; ++i) {
+ slot = &slots[i];
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ if (!*slot) {
+ *slot = bp;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ if (*slot == bp) {
+ *slot = NULL;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_RESTORE:
+ if (*slot == bp)
+ return i;
+ break;
+ default:
+ pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
+ return -EINVAL;
+ }
+ }
+ return -ENOSPC;
+}
+
+static int hw_breakpoint_control(struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot, **slots;
+ struct perf_event **slots;
struct debug_info *debug_info = &current->thread.debug;
int i, max_slots, ctrl_reg, val_reg, reg_enable;
+ enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
u32 ctrl;
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
@@ -196,67 +250,54 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
reg_enable = !debug_info->wps_disabled;
}
- for (i = 0; i < max_slots; ++i) {
- slot = &slots[i];
-
- if (!*slot) {
- *slot = bp;
- break;
- }
- }
-
- if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
- return -ENOSPC;
+ i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);
- /* Ensure debug monitors are enabled at the correct exception level. */
- enable_debug_monitors(debug_exception_level(info->ctrl.privilege));
+ if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
+ return i;
- /* Setup the address register. */
- write_wb_reg(val_reg, i, info->address);
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ /*
+ * Ensure debug monitors are enabled at the correct exception
+ * level.
+ */
+ enable_debug_monitors(dbg_el);
+ /* Fall through */
+ case HW_BREAKPOINT_RESTORE:
+ /* Setup the address register. */
+ write_wb_reg(val_reg, i, info->address);
+
+ /* Setup the control register. */
+ ctrl = encode_ctrl_reg(info->ctrl);
+ write_wb_reg(ctrl_reg, i,
+ reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ /* Reset the control register. */
+ write_wb_reg(ctrl_reg, i, 0);
- /* Setup the control register. */
- ctrl = encode_ctrl_reg(info->ctrl);
- write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
+ /*
+ * Release the debug monitors for the correct exception
+ * level.
+ */
+ disable_debug_monitors(dbg_el);
+ break;
+ }
return 0;
}
-void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+/*
+ * Install a perf counter breakpoint.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot, **slots;
- int i, max_slots, base;
-
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
- /* Breakpoint */
- base = AARCH64_DBG_REG_BCR;
- slots = __get_cpu_var(bp_on_reg);
- max_slots = core_num_brps;
- } else {
- /* Watchpoint */
- base = AARCH64_DBG_REG_WCR;
- slots = __get_cpu_var(wp_on_reg);
- max_slots = core_num_wrps;
- }
-
- /* Remove the breakpoint. */
- for (i = 0; i < max_slots; ++i) {
- slot = &slots[i];
-
- if (*slot == bp) {
- *slot = NULL;
- break;
- }
- }
-
- if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
- return;
-
- /* Reset the control register. */
- write_wb_reg(base, i, 0);
+ return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
+}
- /* Release the debug monitors for the correct exception level. */
- disable_debug_monitors(debug_exception_level(info->ctrl.privilege));
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
static int get_hbp_len(u8 hbp_len)
@@ -806,18 +847,36 @@ void hw_breakpoint_thread_switch(struct task_struct *next)
/*
* CPU initialisation.
*/
-static void reset_ctrl_regs(void *unused)
+static void hw_breakpoint_reset(void *unused)
{
int i;
-
- for (i = 0; i < core_num_brps; ++i) {
- write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
- write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
+ struct perf_event **slots;
+ /*
+ * When a CPU goes through cold-boot, it does not have any installed
+ * slot, so it is safe to share the same function for restoring and
+ * resetting breakpoints; when a CPU is hotplugged in, it goes
+ * through the slots, which are all empty, hence it just resets control
+ * and value for debug registers.
+ * When this function is triggered on warm-boot through a CPU PM
+ * notifier some slots might be initialized; if so they are
+ * reprogrammed according to the debug slots content.
+ */
+ for (slots = __get_cpu_var(bp_on_reg), i = 0; i < core_num_brps; ++i) {
+ if (slots[i]) {
+ hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
+ } else {
+ write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
+ write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
+ }
}
- for (i = 0; i < core_num_wrps; ++i) {
- write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
- write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
+ for (slots = __get_cpu_var(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
+ if (slots[i]) {
+ hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
+ } else {
+ write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
+ write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
+ }
}
}
@@ -827,7 +886,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
{
int cpu = (long)hcpu;
if (action == CPU_ONLINE)
- smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1);
+ smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
return NOTIFY_OK;
}
@@ -835,6 +894,14 @@ static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
.notifier_call = hw_breakpoint_reset_notify,
};
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
+#else
+static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+{
+}
+#endif
+
/*
* One-time initialisation.
*/
@@ -850,8 +917,8 @@ static int __init arch_hw_breakpoint_init(void)
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
*/
- smp_call_function(reset_ctrl_regs, NULL, 1);
- reset_ctrl_regs(NULL);
+ smp_call_function(hw_breakpoint_reset, NULL, 1);
+ hw_breakpoint_reset(NULL);
/* Register debug fault handlers. */
hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
@@ -861,6 +928,8 @@ static int __init arch_hw_breakpoint_init(void)
/* Register hotplug notifier. */
register_cpu_notifier(&hw_breakpoint_reset_nb);
+ /* Register cpu_suspend hw breakpoint restore hook */
+ cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
return 0;
}
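For reference, a hedged sketch of how a kernel client exercises this machinery through the generic perf hw-breakpoint API (modelled on samples/hw_breakpoint; the watched symbol is only an example):

	#include <linux/hw_breakpoint.h>
	#include <linux/kallsyms.h>
	#include <linux/perf_event.h>

	static struct perf_event * __percpu *wp;

	static int __init wp_example_init(void)
	{
		struct perf_event_attr attr;

		hw_breakpoint_init(&attr);
		attr.bp_addr = kallsyms_lookup_name("jiffies");	/* example target */
		attr.bp_len  = HW_BREAKPOINT_LEN_8;
		attr.bp_type = HW_BREAKPOINT_W;

		/* NULL handler: rely on the default perf overflow handling */
		wp = register_wide_hw_breakpoint(&attr, NULL, NULL);
		return IS_ERR((void __force *)wp) ? PTR_ERR((void __force *)wp) : 0;
	}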
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 0959611d9ff1..a272f335c289 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
new file mode 100644
index 000000000000..92f36835486b
--- /dev/null
+++ b/arch/arm64/kernel/insn.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/insn.h>
+
+static int aarch64_insn_encoding_class[] = {
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+};
+
+enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
+{
+ return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
+}
+
+/* NOP is an alias of HINT */
+bool __kprobes aarch64_insn_is_nop(u32 insn)
+{
+ if (!aarch64_insn_is_hint(insn))
+ return false;
+
+ switch (insn & 0xFE0) {
+ case AARCH64_INSN_HINT_YIELD:
+ case AARCH64_INSN_HINT_WFE:
+ case AARCH64_INSN_HINT_WFI:
+ case AARCH64_INSN_HINT_SEV:
+ case AARCH64_INSN_HINT_SEVL:
+ return false;
+ default:
+ return true;
+ }
+}
+
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+ int ret;
+ u32 val;
+
+ ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
+ if (!ret)
+ *insnp = le32_to_cpu(val);
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+ insn = cpu_to_le32(insn);
+ return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
+}
+
+static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
+{
+ if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
+ return false;
+
+ return aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_svc(insn) ||
+ aarch64_insn_is_hvc(insn) ||
+ aarch64_insn_is_smc(insn) ||
+ aarch64_insn_is_brk(insn) ||
+ aarch64_insn_is_nop(insn);
+}
+
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section B2.6.5 "Concurrent modification and execution of instructions":
+ * Concurrent modification and execution of instructions can lead to the
+ * resulting instruction performing any behavior that can be achieved by
+ * executing any sequence of instructions that can be executed from the
+ * same Exception level, except where the instruction before modification
+ * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
+ * or SMC instruction.
+ */
+bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
+{
+ return __aarch64_insn_hotpatch_safe(old_insn) &&
+ __aarch64_insn_hotpatch_safe(new_insn);
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+ u32 *tp = addr;
+ int ret;
+
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)tp & 0x3)
+ return -EINVAL;
+
+ ret = aarch64_insn_write(tp, insn);
+ if (ret == 0)
+ flush_icache_range((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+ return ret;
+}
+
+struct aarch64_insn_patch {
+ void **text_addrs;
+ u32 *new_insns;
+ int insn_cnt;
+ atomic_t cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+ int i, ret = 0;
+ struct aarch64_insn_patch *pp = arg;
+
+ /* The first CPU becomes master */
+ if (atomic_inc_return(&pp->cpu_count) == 1) {
+ for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ pp->new_insns[i]);
+ /*
+ * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
+ * which ends with "dsb; isb" pair guaranteeing global
+ * visibility.
+ */
+ atomic_set(&pp->cpu_count, -1);
+ } else {
+ while (atomic_read(&pp->cpu_count) != -1)
+ cpu_relax();
+ isb();
+ }
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+{
+ struct aarch64_insn_patch patch = {
+ .text_addrs = addrs,
+ .new_insns = insns,
+ .insn_cnt = cnt,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (cnt <= 0)
+ return -EINVAL;
+
+ return stop_machine(aarch64_insn_patch_text_cb, &patch,
+ cpu_online_mask);
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+ int ret;
+ u32 insn;
+
+	/* Unsafe to patch multiple instructions without synchronization */
+ if (cnt == 1) {
+ ret = aarch64_insn_read(addrs[0], &insn);
+ if (ret)
+ return ret;
+
+ if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
+ /*
+ * ARMv8 architecture doesn't guarantee all CPUs see
+ * the new instruction after returning from function
+ * aarch64_insn_patch_text_nosync(). So send IPIs to
+ * all other CPUs to achieve instruction
+ * synchronization.
+ */
+ ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
+ kick_all_cpus_sync();
+ return ret;
+ }
+ }
+
+ return aarch64_insn_patch_text_sync(addrs, insns, cnt);
+}
+
+u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm)
+{
+ u32 immlo, immhi, lomask, himask, mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ lomask = 0x3;
+ himask = 0x7ffff;
+ immlo = imm & lomask;
+ imm >>= 2;
+ immhi = imm & himask;
+ imm = (immlo << 24) | (immhi);
+ mask = (lomask << 24) | (himask);
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_26:
+ mask = BIT(26) - 1;
+ shift = 0;
+ break;
+ case AARCH64_INSN_IMM_19:
+ mask = BIT(19) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_16:
+ mask = BIT(16) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_14:
+ mask = BIT(14) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_12:
+ mask = BIT(12) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_9:
+ mask = BIT(9) - 1;
+ shift = 12;
+ break;
+ default:
+ pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
+
+ /* Update the immediate field. */
+ insn &= ~(mask << shift);
+ insn |= (imm & mask) << shift;
+
+ return insn;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
+ /*
+ * PC: A 64-bit Program Counter holding the address of the current
+ * instruction. A64 instructions must be word-aligned.
+ */
+ BUG_ON((pc & 0x3) || (addr & 0x3));
+
+ /*
+	 * B/BL support a [-128M, 128M) offset.
+	 * The arm64 virtual address arrangement guarantees that all kernel
+	 * and module text is within +/-128M.
+ */
+ offset = ((long)addr - (long)pc);
+ BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+
+ if (type == AARCH64_INSN_BRANCH_LINK)
+ insn = aarch64_insn_get_bl_value();
+ else
+ insn = aarch64_insn_get_b_value();
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
+}
+
+u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
+{
+ return aarch64_insn_get_hint_value() | op;
+}
+
+u32 __kprobes aarch64_insn_gen_nop(void)
+{
+ return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
+}
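A hedged usage sketch built only from the helpers added above (the caller is hypothetical; addr and target are placeholders, not real kernel symbols):

	/*
	 * Patch the (word-aligned) instruction at 'addr' to branch to 'target',
	 * keeping all online CPUs in sync.
	 */
	static int patch_branch_example(void *addr, unsigned long target)
	{
		u32 insn = aarch64_insn_gen_branch_imm((unsigned long)addr, target,
						       AARCH64_INSN_BRANCH_NOLINK);

		return aarch64_insn_patch_text(&addr, &insn, 1);
	}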
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index ecb3354292ed..dfa6e3e74fdd 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -81,3 +81,64 @@ void __init init_IRQ(void)
if (!handle_arch_irq)
panic("No interrupt controller found.");
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ const struct cpumask *affinity = d->affinity;
+ struct irq_chip *c;
+ bool ret = false;
+
+ /*
+ * If this is a per-CPU interrupt, or the affinity does not
+ * include this CPU, then we have nothing to do.
+ */
+ if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+ return false;
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
+ ret = true;
+ }
+
+ c = irq_data_get_irq_chip(d);
+ if (!c->irq_set_affinity)
+ pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+ else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
+ cpumask_copy(d->affinity, affinity);
+
+ return ret;
+}
+
+/*
+ * The current CPU has been marked offline. Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void migrate_irqs(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for_each_irq_desc(i, desc) {
+ bool affinity_broken;
+
+ raw_spin_lock(&desc->lock);
+ affinity_broken = migrate_one_irq(desc);
+ raw_spin_unlock(&desc->lock);
+
+ if (affinity_broken)
+ pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ i, smp_processor_id());
+ }
+
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
new file mode 100644
index 000000000000..263a166291fb
--- /dev/null
+++ b/arch/arm64/kernel/jump_label.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/kernel/jump_label.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <asm/insn.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ bool is_static)
+{
+ void *addr = (void *)entry->code;
+ u32 insn;
+
+ if (type == JUMP_LABEL_ENABLE) {
+ insn = aarch64_insn_gen_branch_imm(entry->code,
+ entry->target,
+ AARCH64_INSN_BRANCH_NOLINK);
+ } else {
+ insn = aarch64_insn_gen_nop();
+ }
+
+ if (is_static)
+ aarch64_insn_patch_text_nosync(addr, insn);
+ else
+ aarch64_insn_patch_text(&addr, &insn, 1);
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, false);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, true);
+}
+
+#endif /* HAVE_JUMP_LABEL */
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
new file mode 100644
index 000000000000..75c9cf1aafee
--- /dev/null
+++ b/arch/arm64/kernel/kgdb.c
@@ -0,0 +1,336 @@
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/kernel/kgdb.c
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/kgdb.h>
+#include <asm/traps.h>
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ { "x0", 8, offsetof(struct pt_regs, regs[0])},
+ { "x1", 8, offsetof(struct pt_regs, regs[1])},
+ { "x2", 8, offsetof(struct pt_regs, regs[2])},
+ { "x3", 8, offsetof(struct pt_regs, regs[3])},
+ { "x4", 8, offsetof(struct pt_regs, regs[4])},
+ { "x5", 8, offsetof(struct pt_regs, regs[5])},
+ { "x6", 8, offsetof(struct pt_regs, regs[6])},
+ { "x7", 8, offsetof(struct pt_regs, regs[7])},
+ { "x8", 8, offsetof(struct pt_regs, regs[8])},
+ { "x9", 8, offsetof(struct pt_regs, regs[9])},
+ { "x10", 8, offsetof(struct pt_regs, regs[10])},
+ { "x11", 8, offsetof(struct pt_regs, regs[11])},
+ { "x12", 8, offsetof(struct pt_regs, regs[12])},
+ { "x13", 8, offsetof(struct pt_regs, regs[13])},
+ { "x14", 8, offsetof(struct pt_regs, regs[14])},
+ { "x15", 8, offsetof(struct pt_regs, regs[15])},
+ { "x16", 8, offsetof(struct pt_regs, regs[16])},
+ { "x17", 8, offsetof(struct pt_regs, regs[17])},
+ { "x18", 8, offsetof(struct pt_regs, regs[18])},
+ { "x19", 8, offsetof(struct pt_regs, regs[19])},
+ { "x20", 8, offsetof(struct pt_regs, regs[20])},
+ { "x21", 8, offsetof(struct pt_regs, regs[21])},
+ { "x22", 8, offsetof(struct pt_regs, regs[22])},
+ { "x23", 8, offsetof(struct pt_regs, regs[23])},
+ { "x24", 8, offsetof(struct pt_regs, regs[24])},
+ { "x25", 8, offsetof(struct pt_regs, regs[25])},
+ { "x26", 8, offsetof(struct pt_regs, regs[26])},
+ { "x27", 8, offsetof(struct pt_regs, regs[27])},
+ { "x28", 8, offsetof(struct pt_regs, regs[28])},
+ { "x29", 8, offsetof(struct pt_regs, regs[29])},
+ { "x30", 8, offsetof(struct pt_regs, regs[30])},
+ { "sp", 8, offsetof(struct pt_regs, sp)},
+ { "pc", 8, offsetof(struct pt_regs, pc)},
+ { "pstate", 8, offsetof(struct pt_regs, pstate)},
+ { "v0", 16, -1 },
+ { "v1", 16, -1 },
+ { "v2", 16, -1 },
+ { "v3", 16, -1 },
+ { "v4", 16, -1 },
+ { "v5", 16, -1 },
+ { "v6", 16, -1 },
+ { "v7", 16, -1 },
+ { "v8", 16, -1 },
+ { "v9", 16, -1 },
+ { "v10", 16, -1 },
+ { "v11", 16, -1 },
+ { "v12", 16, -1 },
+ { "v13", 16, -1 },
+ { "v14", 16, -1 },
+ { "v15", 16, -1 },
+ { "v16", 16, -1 },
+ { "v17", 16, -1 },
+ { "v18", 16, -1 },
+ { "v19", 16, -1 },
+ { "v20", 16, -1 },
+ { "v21", 16, -1 },
+ { "v22", 16, -1 },
+ { "v23", 16, -1 },
+ { "v24", 16, -1 },
+ { "v25", 16, -1 },
+ { "v26", 16, -1 },
+ { "v27", 16, -1 },
+ { "v28", 16, -1 },
+ { "v29", 16, -1 },
+ { "v30", 16, -1 },
+ { "v31", 16, -1 },
+ { "fpsr", 4, -1 },
+ { "fpcr", 4, -1 },
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ else
+ memset(mem, 0, dbg_reg_def[regno].size);
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+ struct pt_regs *thread_regs;
+
+ /* Initialize to zero */
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+ thread_regs = task_pt_regs(task);
+ memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->pc = pc;
+}
+
+static int compiled_break;
+
+static void kgdb_arch_update_addr(struct pt_regs *regs,
+ char *remcom_in_buffer)
+{
+ unsigned long addr;
+ char *ptr;
+
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ kgdb_arch_set_pc(regs, addr);
+ else if (compiled_break == 1)
+ kgdb_arch_set_pc(regs, regs->pc + 4);
+
+ compiled_break = 0;
+}
+
+int kgdb_arch_handle_exception(int exception_vector, int signo,
+ int err_code, char *remcom_in_buffer,
+ char *remcom_out_buffer,
+ struct pt_regs *linux_regs)
+{
+ int err;
+
+ switch (remcom_in_buffer[0]) {
+ case 'D':
+ case 'k':
+ /*
+ * Packets D (detach) and k (kill) need no special handling
+ * here; treat them the same as the 'c' packet.
+ */
+ case 'c':
+ /*
+ * Packet c (continue) resumes execution.
+ * Try to read the optional address parameter and, if one is
+ * given, set the pc to it.
+ * If this was a compiled breakpoint, we need to move past it,
+ * otherwise we will hit the same breakpoint over and over
+ * again.
+ */
+ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+ kgdb_single_step = 0;
+
+ /*
+ * Received continue command, disable single step
+ */
+ if (kernel_active_single_step())
+ kernel_disable_single_step();
+
+ err = 0;
+ break;
+ case 's':
+ /*
+ * Update the step address with the address passed in the
+ * step packet.
+ * On return from the debug exception the ELR is written back
+ * to the PC, so it is enough to update the PC here.
+ * If no step address is passed, resume from the address the
+ * PC points to and leave the PC unchanged.
+ */
+ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
+ atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
+ kgdb_single_step = 1;
+
+ /*
+ * Enable single step handling
+ */
+ if (!kernel_active_single_step())
+ kernel_enable_single_step(linux_regs);
+ err = 0;
+ break;
+ default:
+ err = -1;
+ }
+ return err;
+}
+
+static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+ return 0;
+}
+
+static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ compiled_break = 1;
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+
+ return 0;
+}
+
+static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+ return 0;
+}
+
+static struct break_hook kgdb_brkpt_hook = {
+ .esr_mask = 0xffffffff,
+ .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
+ .fn = kgdb_brk_fn
+};
+
+static struct break_hook kgdb_compiled_brkpt_hook = {
+ .esr_mask = 0xffffffff,
+ .esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
+ .fn = kgdb_compiled_brk_fn
+};
+
+static struct step_hook kgdb_step_hook = {
+ .fn = kgdb_step_brk_fn
+};
+
+static void kgdb_call_nmi_hook(void *ignored)
+{
+ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+ local_irq_enable();
+ smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+ local_irq_disable();
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+ struct pt_regs *regs = args->regs;
+
+ if (kgdb_handle_exception(1, args->signr, cmd, regs))
+ return NOTIFY_DONE;
+ return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __kgdb_notify(ptr, cmd);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_notify,
+ /*
+ * Want to be lowest priority
+ */
+ .priority = -INT_MAX,
+};
+
+/*
+ * kgdb_arch_init - Perform any architecture-specific initialization.
+ * This function handles the initialization of any architecture-specific
+ * callbacks.
+ */
+int kgdb_arch_init(void)
+{
+ int ret = register_die_notifier(&kgdb_notifier);
+
+ if (ret != 0)
+ return ret;
+
+ register_break_hook(&kgdb_brkpt_hook);
+ register_break_hook(&kgdb_compiled_brkpt_hook);
+ register_step_hook(&kgdb_step_hook);
+ return 0;
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture-specific uninitialization.
+ * This function handles the uninitialization of any architecture-specific
+ * callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+ unregister_break_hook(&kgdb_brkpt_hook);
+ unregister_break_hook(&kgdb_compiled_brkpt_hook);
+ unregister_step_hook(&kgdb_step_hook);
+ unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * AArch64 instructions are always little-endian, so the break
+ * instruction is encoded in LE byte order.
+ */
+struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {
+ KGDB_DYN_BRK_INS_BYTE0,
+ KGDB_DYN_BRK_INS_BYTE1,
+ KGDB_DYN_BRK_INS_BYTE2,
+ KGDB_DYN_BRK_INS_BYTE3,
+ }
+};
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 8b69ecb1d8bc..7787208e8cc6 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -27,6 +27,9 @@
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
+
+#include <asm/unistd32.h>
+
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -35,33 +38,32 @@ __kuser_cmpxchg64: // 0xffff0f60
.inst 0xe92d00f0 // push {r4, r5, r6, r7}
.inst 0xe1c040d0 // ldrd r4, r5, [r0]
.inst 0xe1c160d0 // ldrd r6, r7, [r1]
- .inst 0xf57ff05f // dmb sy
.inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
.inst 0xe0303004 // eors r3, r0, r4
.inst 0x00313005 // eoreqs r3, r1, r5
- .inst 0x01a23f96 // strexdeq r3, r6, [r2]
+ .inst 0x01a23e96 // stlexdeq r3, r6, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffff9 // beq 1b
- .inst 0xf57ff05f // dmb sy
+ .inst 0xf57ff05b // dmb ish
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
.inst 0xe12fff1e // bx lr
.align 5
__kuser_memory_barrier: // 0xffff0fa0
- .inst 0xf57ff05f // dmb sy
+ .inst 0xf57ff05b // dmb ish
.inst 0xe12fff1e // bx lr
.align 5
__kuser_cmpxchg: // 0xffff0fc0
- .inst 0xf57ff05f // dmb sy
.inst 0xe1923f9f // 1: ldrex r3, [r2]
.inst 0xe0533000 // subs r3, r3, r0
- .inst 0x01823f91 // strexeq r3, r1, [r2]
+ .inst 0x01823e91 // stlexeq r3, r1, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffffa // beq 1b
+ .inst 0xf57ff05b // dmb ish
.inst 0xe2730000 // rsbs r0, r3, #0
- .inst 0xeaffffef // b <__kuser_memory_barrier>
+ .inst 0xe12fff1e // bx lr
.align 5
__kuser_get_tls: // 0xffff0fe0
@@ -75,3 +77,42 @@ __kuser_helper_version: // 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
+
+/*
+ * AArch32 sigreturn code
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+ .globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+ /*
+ * ARM Code
+ */
+ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_sigreturn, 0x27 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0xdf // svc #__NR_compat_sigreturn
+
+ /*
+ * ARM code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x27 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0xdf // svc #__NR_compat_rt_sigreturn
+
+ .globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
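
The .byte sequences above work because the syscall number occupies the immediate byte of each encoding. A small host-side sketch that reproduces the byte layout for an arbitrary syscall number (the 0x77 passed in main() is only an example value, not necessarily __NR_compat_sigreturn):

#include <stdint.h>
#include <stdio.h>

/* Emit the AArch32 sigreturn trampoline bytes for syscall number 'nr'. */
static void emit_sigret(uint8_t nr)
{
	/* A32: mov r7, #nr (0xe3a070NN) then svc #nr (0xef0000NN). */
	uint8_t arm[8]   = { nr, 0x70, 0xa0, 0xe3, nr, 0x00, 0x00, 0xef };
	/* T16: movs r7, #nr (0x27NN) then svc #nr (0xdfNN). */
	uint8_t thumb[4] = { nr, 0x27, nr, 0xdf };

	for (int i = 0; i < 8; i++)
		printf("%02x ", arm[i]);
	printf("\n");
	for (int i = 0; i < 4; i++)
		printf("%02x ", thumb[i]);
	printf("\n");
}

int main(void)
{
	emit_sigret(0x77);	/* example number only */
	return 0;
}
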
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index ca0e3d55da99..df08a6e0287d 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,6 +25,10 @@
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
+#include <asm/insn.h>
+
+#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
+#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
void *module_alloc(unsigned long size)
{
@@ -94,25 +98,18 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
return 0;
}
-enum aarch64_imm_type {
- INSN_IMM_MOVNZ,
- INSN_IMM_MOVK,
- INSN_IMM_ADR,
- INSN_IMM_26,
- INSN_IMM_19,
- INSN_IMM_16,
- INSN_IMM_14,
- INSN_IMM_12,
- INSN_IMM_9,
-};
-
-static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
+static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
+ int lsb, enum aarch64_insn_imm_type imm_type)
{
- u32 immlo, immhi, lomask, himask, mask;
- int shift;
+ u64 imm, limit = 0;
+ s64 sval;
+ u32 insn = le32_to_cpu(*(u32 *)place);
+
+ sval = do_reloc(op, place, val);
+ sval >>= lsb;
+ imm = sval & 0xffff;
- switch (type) {
- case INSN_IMM_MOVNZ:
+ if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
/*
* For signed MOVW relocations, we have to manipulate the
* instruction encoding depending on whether or not the
@@ -131,70 +128,12 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
*/
imm = ~imm;
}
- case INSN_IMM_MOVK:
- mask = BIT(16) - 1;
- shift = 5;
- break;
- case INSN_IMM_ADR:
- lomask = 0x3;
- himask = 0x7ffff;
- immlo = imm & lomask;
- imm >>= 2;
- immhi = imm & himask;
- imm = (immlo << 24) | (immhi);
- mask = (lomask << 24) | (himask);
- shift = 5;
- break;
- case INSN_IMM_26:
- mask = BIT(26) - 1;
- shift = 0;
- break;
- case INSN_IMM_19:
- mask = BIT(19) - 1;
- shift = 5;
- break;
- case INSN_IMM_16:
- mask = BIT(16) - 1;
- shift = 5;
- break;
- case INSN_IMM_14:
- mask = BIT(14) - 1;
- shift = 5;
- break;
- case INSN_IMM_12:
- mask = BIT(12) - 1;
- shift = 10;
- break;
- case INSN_IMM_9:
- mask = BIT(9) - 1;
- shift = 12;
- break;
- default:
- pr_err("encode_insn_immediate: unknown immediate encoding %d\n",
- type);
- return 0;
+ imm_type = AARCH64_INSN_IMM_MOVK;
}
- /* Update the immediate field. */
- insn &= ~(mask << shift);
- insn |= (imm & mask) << shift;
-
- return insn;
-}
-
-static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, enum aarch64_imm_type imm_type)
-{
- u64 imm, limit = 0;
- s64 sval;
- u32 insn = *(u32 *)place;
-
- sval = do_reloc(op, place, val);
- sval >>= lsb;
- imm = sval & 0xffff;
-
/* Update the instruction with the new encoding. */
- *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)place = cpu_to_le32(insn);
/* Shift out the immediate field. */
sval >>= 16;
@@ -203,9 +142,9 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
* For unsigned immediates, the overflow check is straightforward.
* For signed immediates, the sign bit is actually the bit past the
* most significant bit of the field.
- * The INSN_IMM_16 immediate type is unsigned.
+ * The AARCH64_INSN_IMM_16 immediate type is unsigned.
*/
- if (imm_type != INSN_IMM_16) {
+ if (imm_type != AARCH64_INSN_IMM_16) {
sval++;
limit++;
}
@@ -218,11 +157,11 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
}
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, int len, enum aarch64_imm_type imm_type)
+ int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
u64 imm, imm_mask;
s64 sval;
- u32 insn = *(u32 *)place;
+ u32 insn = le32_to_cpu(*(u32 *)place);
/* Calculate the relocation value. */
sval = do_reloc(op, place, val);
@@ -233,7 +172,8 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
imm = sval & imm_mask;
/* Update the instruction's immediate field. */
- *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)place = cpu_to_le32(insn);
/*
* Extract the upper value bits (including the sign bit) and
@@ -315,125 +255,125 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
overflow_check = false;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_SABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_SABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_SABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G0_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G0:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G1_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G1:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G2_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G2:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
/* Immediate instruction relocations. */
case R_AARCH64_LD_PREL_LO19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- INSN_IMM_19);
+ AARCH64_INSN_IMM_19);
break;
case R_AARCH64_ADR_PREL_LO21:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
- INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
- INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST16_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST32_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST64_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST128_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_TSTBR14:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
- INSN_IMM_14);
+ AARCH64_INSN_IMM_14);
break;
case R_AARCH64_CONDBR19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- INSN_IMM_19);
+ AARCH64_INSN_IMM_19);
break;
case R_AARCH64_JUMP26:
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
- INSN_IMM_26);
+ AARCH64_INSN_IMM_26);
break;
default:
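
The aarch64_insn_encode_immediate() calls introduced here take over from the mask/shift table this patch deletes. A condensed user-space sketch of that mask/shift insertion for a few immediate types, with the constants copied from the removed table and the function name invented for illustration:

#include <stdint.h>
#include <stdio.h>

enum imm_type { IMM_26, IMM_19, IMM_16, IMM_12 };

/* Insert 'imm' into the immediate field selected by 'type'. */
static uint32_t encode_imm(enum imm_type type, uint32_t insn, uint64_t imm)
{
	uint32_t mask, shift;

	switch (type) {
	case IMM_26: mask = (1u << 26) - 1; shift = 0;  break;
	case IMM_19: mask = (1u << 19) - 1; shift = 5;  break;
	case IMM_16: mask = (1u << 16) - 1; shift = 5;  break;
	case IMM_12: mask = (1u << 12) - 1; shift = 10; break;
	default:     return insn;
	}

	insn &= ~(mask << shift);			/* clear the old field */
	insn |= ((uint32_t)imm & mask) << shift;	/* insert the new one  */
	return insn;
}

int main(void)
{
	/* MOVZ x0, #0x1234: base opcode 0xd2800000, 16-bit imm at bit 5. */
	printf("0x%08x\n", encode_imm(IMM_16, 0xd2800000u, 0x1234));
	return 0;
}
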
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c40cdf8..dfcd8fadde3c 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -22,6 +22,7 @@
#include <linux/bitmap.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
@@ -107,7 +108,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -317,7 +323,13 @@ validate_event(struct pmu_hw_events *hw_events,
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+ if (is_software_event(event))
+ return 1;
+
+ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ return 1;
+
+ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
@@ -352,26 +364,53 @@ validate_group(struct perf_event *event)
}
static void
+armpmu_disable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ disable_percpu_irq(irq);
+}
+
+static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
- int i, irq, irqs;
+ int irq;
+ unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (!irqs)
+ return;
- for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
- continue;
- irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
- free_irq(irq, armpmu);
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0)
+ return;
+
+ if (irq_is_percpu(irq)) {
+ on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
+ free_percpu_irq(irq, &cpu_hw_events);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq > 0)
+ free_irq(irq, armpmu);
+ }
}
}
+static void
+armpmu_enable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
- int i, err, irq, irqs;
+ int err, irq;
+ unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
if (!pmu_device) {
@@ -380,39 +419,59 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
}
irqs = min(pmu_device->num_resources, num_possible_cpus());
- if (irqs < 1) {
+ if (!irqs) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
}
- for (i = 0; i < irqs; ++i) {
- err = 0;
- irq = platform_get_irq(pmu_device, i);
- if (irq < 0)
- continue;
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0) {
+ pr_err("failed to get valid irq for PMU device\n");
+ return -ENODEV;
+ }
- /*
- * If we have a single PMU interrupt that we can't shift,
- * assume that we're running on a uniprocessor machine and
- * continue. Otherwise, continue without this interrupt.
- */
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
- pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, i);
- continue;
- }
+ if (irq_is_percpu(irq)) {
+ err = request_percpu_irq(irq, armpmu->handle_irq,
+ "arm-pmu", &cpu_hw_events);
- err = request_irq(irq, armpmu->handle_irq,
- IRQF_NOBALANCING,
- "arm-pmu", armpmu);
if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
+ pr_err("unable to request percpu IRQ%d for ARM PMU counters\n",
+ irq);
armpmu_release_hardware(armpmu);
return err;
}
- cpumask_set_cpu(i, &armpmu->active_irqs);
+ on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ err = 0;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq <= 0)
+ continue;
+
+ /*
+ * If we have a single PMU interrupt that we can't shift,
+ * assume that we're running on a uniprocessor machine and
+ * continue. Otherwise, continue without this interrupt.
+ */
+ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, i);
+ continue;
+ }
+
+ err = request_irq(irq, armpmu->handle_irq,
+ IRQF_NOBALANCING,
+ "arm-pmu", armpmu);
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ armpmu_release_hardware(armpmu);
+ return err;
+ }
+
+ cpumask_set_cpu(i, &armpmu->active_irqs);
+ }
}
return 0;
@@ -773,7 +832,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
+#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
/*
@@ -1288,8 +1347,8 @@ early_initcall(init_hw_perf_events);
* Callchain handling code.
*/
struct frame_tail {
- struct frame_tail __user *fp;
- unsigned long lr;
+ struct frame_tail __user *fp;
+ unsigned long lr;
} __attribute__((packed));
/*
@@ -1326,22 +1385,84 @@ user_backtrace(struct frame_tail __user *tail,
return buftail.fp;
}
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+ compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
+ u32 sp;
+ u32 lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+compat_user_backtrace(struct compat_frame_tail __user *tail,
+ struct perf_callchain_entry *entry)
+{
+ struct compat_frame_tail buftail;
+ unsigned long err;
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ perf_callchain_store(entry, buftail.lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail + 1 >= (struct compat_frame_tail __user *)
+ compat_ptr(buftail.fp))
+ return NULL;
+
+ return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
+
void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
- struct frame_tail __user *tail;
-
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->pc);
- tail = (struct frame_tail __user *)regs->regs[29];
- while (entry->nr < PERF_MAX_STACK_DEPTH &&
- tail && !((unsigned long)tail & 0xf))
- tail = user_backtrace(tail, entry);
+ if (!compat_user_mode(regs)) {
+ /* AARCH64 mode */
+ struct frame_tail __user *tail;
+
+ tail = (struct frame_tail __user *)regs->regs[29];
+
+ while (entry->nr < PERF_MAX_STACK_DEPTH &&
+ tail && !((unsigned long)tail & 0xf))
+ tail = user_backtrace(tail, entry);
+ } else {
+#ifdef CONFIG_COMPAT
+ /* AARCH32 compat mode */
+ struct compat_frame_tail __user *tail;
+
+ tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+ tail && !((unsigned long)tail & 0x3))
+ tail = compat_user_backtrace(tail, entry);
+#endif
+ }
}
/*
@@ -1369,6 +1490,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
+
walk_stackframe(&frame, callchain_trace, entry);
}
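
The compat backtrace added above walks user frame records and stops as soon as the frame pointer stops moving towards higher addresses. A toy model of that walk, with the user stack replaced by an in-memory array so the monotonicity check can be exercised without copy_from_user (all names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct frame_tail32 {
	uint32_t fp;	/* "address" (array index) of the previous record */
	uint32_t sp;
	uint32_t lr;
};

/*
 * Walk a chain of AArch32-style frame records, printing each lr.
 * Stops when the frame pointer no longer points strictly higher up
 * the stack, mirroring the check in compat_user_backtrace().
 */
static void walk(const struct frame_tail32 *frames, uint32_t tail_idx,
		 unsigned int max_depth)
{
	while (max_depth--) {
		const struct frame_tail32 *tail = &frames[tail_idx];

		printf("lr = %#x\n", tail->lr);

		/* the next frame must be strictly higher up the stack */
		if (tail->fp <= tail_idx)
			break;
		tail_idx = tail->fp;
	}
}

int main(void)
{
	/* fp here is just an index into the array, used as a fake address */
	struct frame_tail32 frames[4] = {
		[0] = { .fp = 2, .lr = 0x1000 },
		[2] = { .fp = 3, .lr = 0x2000 },
		[3] = { .fp = 0, .lr = 0x3000 },	/* fp goes backwards: stop */
	};

	walk(frames, 0, 16);
	return 0;
}
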
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
new file mode 100644
index 000000000000..422ebd63b619
--- /dev/null
+++ b/arch/arm64/kernel/perf_regs.c
@@ -0,0 +1,46 @@
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+
+#include <asm/compat.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX))
+ return 0;
+
+ /*
+ * Compat (i.e. 32 bit) mode:
+ * - PC has been set in the pt_regs struct in kernel_entry,
+ * - Handle SP and LR here.
+ */
+ if (compat_user_mode(regs)) {
+ if ((u32)idx == PERF_REG_ARM64_SP)
+ return regs->compat_sp;
+ if ((u32)idx == PERF_REG_ARM64_LR)
+ return regs->compat_lr;
+ }
+
+ return regs->regs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ if (is_compat_thread(task_thread_info(task)))
+ return PERF_SAMPLE_REGS_ABI_32;
+ else
+ return PERF_SAMPLE_REGS_ABI_64;
+}
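
REG_RESERVED above is simply the complement of the low PERF_REG_ARM64_MAX bits, so perf_reg_validate() rejects empty masks and masks that touch reserved bits. A quick stand-alone check of that arithmetic, assuming a register count of 33 (x0-x29, lr, sp, pc):

#include <stdint.h>
#include <stdio.h>

#define REG_MAX 33	/* stand-in for PERF_REG_ARM64_MAX */
#define REG_RESERVED (~((1ULL << REG_MAX) - 1))

static int validate(uint64_t mask)
{
	if (!mask || (mask & REG_RESERVED))
		return -1;	/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	printf("%d\n", validate(0));			/* empty mask: rejected   */
	printf("%d\n", validate(1ULL << 0));		/* x0 only: accepted      */
	printf("%d\n", validate(1ULL << REG_MAX));	/* reserved bit: rejected */
	return 0;
}
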
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 46f02c3b5015..a82af49bcc51 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -20,6 +20,7 @@
#include <stdarg.h>
+#include <linux/compat.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -33,6 +34,7 @@
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
+#include <linux/cpuidle.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
@@ -50,29 +52,18 @@
#include <asm/processor.h>
#include <asm/stacktrace.h>
-static void setup_restart(void)
-{
- /*
- * Tell the mm system that we are going to reboot -
- * we may need it to insert some 1:1 mappings so that
- * soft boot works.
- */
- setup_mm_for_reboot();
-
- /* Clean and invalidate caches */
- flush_cache_all();
-
- /* Turn D-cache off */
- cpu_cache_off();
-
- /* Push out any further dirty data, and ensure cache is empty */
- flush_cache_all();
-}
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
void soft_restart(unsigned long addr)
{
- setup_restart();
- cpu_reset(addr);
+ setup_mm_for_reboot();
+ cpu_soft_restart(virt_to_phys(cpu_reset), addr);
+ /* Should never get here */
+ BUG();
}
/*
@@ -98,9 +89,18 @@ void arch_cpu_idle(void)
* This should do all the clock switching and wait for interrupt
* tricks
*/
- cpu_do_idle();
- local_irq_enable();
+ if (cpuidle_idle_call()) {
+ cpu_do_idle();
+ local_irq_enable();
+ }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
}
+#endif
void machine_shutdown(void)
{
@@ -143,15 +143,26 @@ void machine_restart(char *cmd)
void __show_regs(struct pt_regs *regs)
{
- int i;
+ int i, top_reg;
+ u64 lr, sp;
+
+ if (compat_user_mode(regs)) {
+ lr = regs->compat_lr;
+ sp = regs->compat_sp;
+ top_reg = 12;
+ } else {
+ lr = regs->regs[30];
+ sp = regs->sp;
+ top_reg = 29;
+ }
show_regs_print_info(KERN_DEFAULT);
print_symbol("PC is at %s\n", instruction_pointer(regs));
- print_symbol("LR is at %s\n", regs->regs[30]);
+ print_symbol("LR is at %s\n", lr);
printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
- regs->pc, regs->regs[30], regs->pstate);
- printk("sp : %016llx\n", regs->sp);
- for (i = 29; i >= 0; i--) {
+ regs->pc, lr, regs->pstate);
+ printk("sp : %016llx\n", sp);
+ for (i = top_reg; i >= 0; i--) {
printk("x%-2d: %016llx ", i, regs->regs[i]);
if (i % 2 == 0)
printk("\n");
@@ -172,9 +183,27 @@ void exit_thread(void)
{
}
+static void tls_thread_flush(void)
+{
+ asm ("msr tpidr_el0, xzr");
+
+ if (is_compat_task()) {
+ current->thread.tp_value = 0;
+
+ /*
+ * We need to ensure ordering between the shadow state and the
+ * hardware state, so that we don't corrupt the hardware state
+ * with a stale shadow state during context switch.
+ */
+ barrier();
+ asm ("msr tpidrro_el0, xzr");
+ }
+}
+
void flush_thread(void)
{
fpsimd_flush_thread();
+ tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
}
@@ -279,7 +308,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* Complete any pending TLB or cache maintenance on this CPU in case
* the thread migrates to a different CPU.
*/
- dsb();
+ dsb(ish);
/* the actual thread switch */
last = cpu_switch_to(prev, next);
@@ -290,6 +319,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -297,9 +327,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame))
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S
new file mode 100644
index 000000000000..cf83e61cd3b5
--- /dev/null
+++ b/arch/arm64/kernel/psci-call.S
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+
+/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+ENTRY(__invoke_psci_fn_hvc)
+ hvc #0
+ ret
+ENDPROC(__invoke_psci_fn_hvc)
+
+/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+ENTRY(__invoke_psci_fn_smc)
+ smc #0
+ ret
+ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 14f73c445ff5..fc457d0fee9c 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -15,97 +15,109 @@
#define pr_fmt(fmt) "psci: " fmt
+#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+#include <linux/delay.h>
+#include <uapi/linux/psci.h>
#include <asm/compiler.h>
+#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <asm/system_misc.h>
-struct psci_operations psci_ops;
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+ int (*affinity_info)(unsigned long target_affinity,
+ unsigned long lowest_affinity_level);
+ int (*migrate_info_type)(void);
+};
+
+static struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u64, u64, u64, u64);
+typedef int (*psci_initcall_t)(const struct device_node *);
+
+asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64);
+asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
enum psci_function {
PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON,
PSCI_FN_CPU_OFF,
PSCI_FN_MIGRATE,
+ PSCI_FN_AFFINITY_INFO,
+ PSCI_FN_MIGRATE_INFO_TYPE,
PSCI_FN_MAX,
};
-static u32 psci_function_id[PSCI_FN_MAX];
+static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
-#define PSCI_RET_SUCCESS 0
-#define PSCI_RET_EOPNOTSUPP -1
-#define PSCI_RET_EINVAL -2
-#define PSCI_RET_EPERM -3
+static u32 psci_function_id[PSCI_FN_MAX];
static int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
return 0;
- case PSCI_RET_EOPNOTSUPP:
+ case PSCI_RET_NOT_SUPPORTED:
return -EOPNOTSUPP;
- case PSCI_RET_EINVAL:
+ case PSCI_RET_INVALID_PARAMS:
return -EINVAL;
- case PSCI_RET_EPERM:
+ case PSCI_RET_DENIED:
return -EPERM;
};
return -EINVAL;
}
-#define PSCI_POWER_STATE_ID_MASK 0xffff
-#define PSCI_POWER_STATE_ID_SHIFT 0
-#define PSCI_POWER_STATE_TYPE_MASK 0x1
-#define PSCI_POWER_STATE_TYPE_SHIFT 16
-#define PSCI_POWER_STATE_AFFL_MASK 0x3
-#define PSCI_POWER_STATE_AFFL_SHIFT 24
-
static u32 psci_power_state_pack(struct psci_power_state state)
{
- return ((state.id & PSCI_POWER_STATE_ID_MASK)
- << PSCI_POWER_STATE_ID_SHIFT) |
- ((state.type & PSCI_POWER_STATE_TYPE_MASK)
- << PSCI_POWER_STATE_TYPE_SHIFT) |
- ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
- << PSCI_POWER_STATE_AFFL_SHIFT);
+ return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
+ & PSCI_0_2_POWER_STATE_ID_MASK) |
+ ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
+ & PSCI_0_2_POWER_STATE_TYPE_MASK) |
+ ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
+ & PSCI_0_2_POWER_STATE_AFFL_MASK);
}
-/*
- * The following two functions are invoked via the invoke_psci_fn pointer
- * and will not be inlined, allowing us to piggyback on the AAPCS.
- */
-static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1,
- u64 arg2)
+static void psci_power_state_unpack(u32 power_state,
+ struct psci_power_state *state)
{
- asm volatile(
- __asmeq("%0", "x0")
- __asmeq("%1", "x1")
- __asmeq("%2", "x2")
- __asmeq("%3", "x3")
- "hvc #0\n"
- : "+r" (function_id)
- : "r" (arg0), "r" (arg1), "r" (arg2));
-
- return function_id;
+ state->id = (power_state >> PSCI_0_2_POWER_STATE_ID_SHIFT)
+ & PSCI_0_2_POWER_STATE_ID_MASK;
+ state->type = (power_state >> PSCI_0_2_POWER_STATE_TYPE_SHIFT)
+ & PSCI_0_2_POWER_STATE_TYPE_MASK;
+ state->affinity_level = (power_state >> PSCI_0_2_POWER_STATE_AFFL_SHIFT)
+ & PSCI_0_2_POWER_STATE_AFFL_MASK;
}
-static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
- u64 arg2)
+static int psci_get_version(void)
{
- asm volatile(
- __asmeq("%0", "x0")
- __asmeq("%1", "x1")
- __asmeq("%2", "x2")
- __asmeq("%3", "x3")
- "smc #0\n"
- : "+r" (function_id)
- : "r" (arg0), "r" (arg1), "r" (arg2));
+ int err;
- return function_id;
+ err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+ return err;
}
static int psci_cpu_suspend(struct psci_power_state state,
@@ -151,28 +163,36 @@ static int psci_migrate(unsigned long cpuid)
return psci_to_linux_errno(err);
}
-static const struct of_device_id psci_of_match[] __initconst = {
- { .compatible = "arm,psci", },
- {},
-};
+static int psci_affinity_info(unsigned long target_affinity,
+ unsigned long lowest_affinity_level)
+{
+ int err;
+ u32 fn;
-int __init psci_init(void)
+ fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
+ err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
+ return err;
+}
+
+static int psci_migrate_info_type(void)
{
- struct device_node *np;
- const char *method;
- u32 id;
- int err = 0;
+ int err;
+ u32 fn;
- np = of_find_matching_node(NULL, psci_of_match);
- if (!np)
- return -ENODEV;
+ fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
+ err = invoke_psci_fn(fn, 0, 0, 0);
+ return err;
+}
- pr_info("probing function IDs from device-tree\n");
+static int get_set_conduit_method(struct device_node *np)
+{
+ const char *method;
+
+ pr_info("probing for conduit method from DT.\n");
if (of_property_read_string(np, "method", &method)) {
- pr_warning("missing \"method\" property\n");
- err = -ENXIO;
- goto out_put_node;
+ pr_warn("missing \"method\" property\n");
+ return -ENXIO;
}
if (!strcmp("hvc", method)) {
@@ -180,11 +200,99 @@ int __init psci_init(void)
} else if (!strcmp("smc", method)) {
invoke_psci_fn = __invoke_psci_fn_smc;
} else {
- pr_warning("invalid \"method\" property: %s\n", method);
- err = -EINVAL;
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void psci_sys_reset(char str, const char *cmd)
+{
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
+
+static void psci_sys_poweroff(void)
+{
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+/*
+ * PSCI Function IDs for v0.2+ are well defined so use
+ * standard values.
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+ int err, ver;
+
+ err = get_set_conduit_method(np);
+
+ if (err)
+ goto out_put_node;
+
+ ver = psci_get_version();
+
+ if (ver == PSCI_RET_NOT_SUPPORTED) {
+ /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
+ pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
+ err = -EOPNOTSUPP;
goto out_put_node;
+ } else {
+ pr_info("PSCIv%d.%d detected in firmware.\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ if (PSCI_VERSION_MAJOR(ver) == 0 &&
+ PSCI_VERSION_MINOR(ver) < 2) {
+ err = -EINVAL;
+ pr_err("Conflicting PSCI version detected.\n");
+ goto out_put_node;
+ }
}
+ pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
+ psci_ops.cpu_suspend = psci_cpu_suspend;
+
+ psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+ psci_ops.cpu_off = psci_cpu_off;
+
+ psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
+ psci_ops.cpu_on = psci_cpu_on;
+
+ psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
+ psci_ops.migrate = psci_migrate;
+
+ psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
+ psci_ops.affinity_info = psci_affinity_info;
+
+ psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
+ PSCI_0_2_FN_MIGRATE_INFO_TYPE;
+ psci_ops.migrate_info_type = psci_migrate_info_type;
+
+ arm_pm_restart = psci_sys_reset;
+
+ pm_power_off = psci_sys_poweroff;
+
+out_put_node:
+ of_node_put(np);
+ return err;
+}
+
+/*
+ * PSCI < v0.2 get PSCI Function IDs via DT.
+ */
+static int __init psci_0_1_init(struct device_node *np)
+{
+ u32 id;
+ int err;
+
+ err = get_set_conduit_method(np);
+
+ if (err)
+ goto out_put_node;
+
+ pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
if (!of_property_read_u32(np, "cpu_suspend", &id)) {
psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
psci_ops.cpu_suspend = psci_cpu_suspend;
@@ -209,3 +317,136 @@ out_put_node:
of_node_put(np);
return err;
}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+ { .compatible = "arm,psci", .data = psci_0_1_init},
+ { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
+ {},
+};
+
+int __init psci_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *matched_np;
+ psci_initcall_t init_fn;
+
+ np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+ if (!np)
+ return -ENODEV;
+
+ init_fn = (psci_initcall_t)matched_np->data;
+ return init_fn(np);
+}
+
+#ifdef CONFIG_SMP
+
+static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
+{
+ return 0;
+}
+
+static int __init cpu_psci_cpu_prepare(unsigned int cpu)
+{
+ if (!psci_ops.cpu_on) {
+ pr_err("no cpu_on method, not booting CPU%d\n", cpu);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int cpu_psci_cpu_boot(unsigned int cpu)
+{
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+ if (err)
+ pr_err("failed to boot CPU%d (%d)\n", cpu, err);
+
+ return err;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpu_psci_cpu_disable(unsigned int cpu)
+{
+ /* Fail early if we don't have CPU_OFF support */
+ if (!psci_ops.cpu_off)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void cpu_psci_cpu_die(unsigned int cpu)
+{
+ int ret;
+ /*
+ * There are no known implementations of PSCI actually using the
+ * power state field, so pass a sensible default for now.
+ */
+ struct psci_power_state state = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+
+ ret = psci_ops.cpu_off(state);
+
+ pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
+}
+
+static int cpu_psci_cpu_kill(unsigned int cpu)
+{
+ int err, i;
+
+ if (!psci_ops.affinity_info)
+ return 1;
+ /*
+ * cpu_kill could race with cpu_die and we can
+ * potentially end up declaring this cpu undead
+ * while it is dying. So, try again a few times.
+ */
+
+ for (i = 0; i < 10; i++) {
+ err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
+ if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
+ pr_info("CPU%d killed.\n", cpu);
+ return 1;
+ }
+
+ msleep(10);
+ pr_info("Retrying again to check for CPU kill\n");
+ }
+
+ pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
+ cpu, err);
+ /* Make op_cpu_kill() fail. */
+ return 0;
+}
+#endif
+#endif
+
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+static int cpu_psci_cpu_suspend(unsigned long index)
+{
+ struct psci_power_state *state = __get_cpu_var(psci_power_state);
+
+ if (!state)
+ return -EOPNOTSUPP;
+
+ return psci_ops.cpu_suspend(state[index], virt_to_phys(cpu_resume));
+}
+#endif
+
+const struct cpu_operations cpu_psci_ops = {
+ .name = "psci",
+#ifdef CONFIG_SMP
+ .cpu_init = cpu_psci_cpu_init,
+ .cpu_prepare = cpu_psci_cpu_prepare,
+ .cpu_boot = cpu_psci_cpu_boot,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = cpu_psci_cpu_disable,
+ .cpu_die = cpu_psci_cpu_die,
+ .cpu_kill = cpu_psci_cpu_kill,
+#endif
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+ .cpu_suspend = cpu_psci_cpu_suspend,
+#endif
+};
+
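
psci_power_state_pack() above assembles the 32-bit power_state argument from the id, type and affinity-level fields. A stand-alone sketch of that packing; the shift/mask values are written out locally to match the v0.2 layout assumed by this patch rather than taken from a kernel header:

#include <stdint.h>
#include <stdio.h>

/* power_state layout as assumed here: id[15:0], type[16], affl[25:24]. */
#define PS_ID_SHIFT	0
#define PS_ID_MASK	(0xffffu << PS_ID_SHIFT)
#define PS_TYPE_SHIFT	16
#define PS_TYPE_MASK	(0x1u << PS_TYPE_SHIFT)
#define PS_AFFL_SHIFT	24
#define PS_AFFL_MASK	(0x3u << PS_AFFL_SHIFT)

struct power_state {
	uint16_t id;
	uint8_t type;
	uint8_t affinity_level;
};

/* Pack the three fields into the register-sized PSCI argument. */
static uint32_t pack(struct power_state s)
{
	return ((s.id << PS_ID_SHIFT) & PS_ID_MASK) |
	       ((s.type << PS_TYPE_SHIFT) & PS_TYPE_MASK) |
	       ((s.affinity_level << PS_AFFL_SHIFT) & PS_AFFL_MASK);
}

int main(void)
{
	/* power-down request at affinity level 1 */
	struct power_state s = { .id = 0, .type = 1, .affinity_level = 1 };

	printf("power_state = %#x\n", pack(s));	/* expect 0x1010000 */
	return 0;
}
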
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6e1e77f1831c..5cafe0e40cfa 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -19,6 +19,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -41,6 +42,9 @@
#include <asm/traps.h>
#include <asm/system_misc.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
/*
* TODO: does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
@@ -53,28 +57,6 @@ void ptrace_disable(struct task_struct *child)
{
}
-/*
- * Handle hitting a breakpoint.
- */
-static int ptrace_break(struct pt_regs *regs)
-{
- siginfo_t info = {
- .si_signo = SIGTRAP,
- .si_errno = 0,
- .si_code = TRAP_BRKPT,
- .si_addr = (void __user *)instruction_pointer(regs),
- };
-
- force_sig_info(SIGTRAP, &info, current);
- return 0;
-}
-
-static int arm64_break_trap(unsigned long addr, unsigned int esr,
- struct pt_regs *regs)
-{
- return ptrace_break(regs);
-}
-
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* Handle hitting a HW-breakpoint.
@@ -103,7 +85,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
break;
}
}
- for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
+
+ for (i = 0; i < ARM_MAX_WRP; ++i) {
if (current->thread.debug.hbp_watch[i] == bp) {
info.si_errno = -((i << 1) + 1);
break;
@@ -236,31 +219,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
{
int err, len, type, disabled = !ctrl.enabled;
- if (disabled) {
- len = 0;
- type = HW_BREAKPOINT_EMPTY;
- } else {
- err = arch_bp_generic_fields(ctrl, &len, &type);
- if (err)
- return err;
-
- switch (note_type) {
- case NT_ARM_HW_BREAK:
- if ((type & HW_BREAKPOINT_X) != type)
- return -EINVAL;
- break;
- case NT_ARM_HW_WATCH:
- if ((type & HW_BREAKPOINT_RW) != type)
- return -EINVAL;
- break;
- default:
+ attr->disabled = disabled;
+ if (disabled)
+ return 0;
+
+ err = arch_bp_generic_fields(ctrl, &len, &type);
+ if (err)
+ return err;
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ if ((type & HW_BREAKPOINT_X) != type)
return -EINVAL;
- }
+ break;
+ case NT_ARM_HW_WATCH:
+ if ((type & HW_BREAKPOINT_RW) != type)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
}
attr->bp_len = len;
attr->bp_type = type;
- attr->disabled = disabled;
return 0;
}
@@ -658,28 +639,34 @@ static int compat_gpr_get(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- void *reg;
+ compat_ulong_t reg;
switch (idx) {
case 15:
- reg = (void *)&task_pt_regs(target)->pc;
+ reg = task_pt_regs(target)->pc;
break;
case 16:
- reg = (void *)&task_pt_regs(target)->pstate;
+ reg = task_pt_regs(target)->pstate;
break;
case 17:
- reg = (void *)&task_pt_regs(target)->orig_x0;
+ reg = task_pt_regs(target)->orig_x0;
break;
default:
- reg = (void *)&task_pt_regs(target)->regs[idx];
+ reg = task_pt_regs(target)->regs[idx];
}
- ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
+ if (kbuf) {
+ memcpy(kbuf, &reg, sizeof(reg));
+ kbuf += sizeof(reg);
+ } else {
+ ret = copy_to_user(ubuf, &reg, sizeof(reg));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
- if (ret)
- break;
- else
- ubuf += sizeof(compat_ulong_t);
+ ubuf += sizeof(reg);
+ }
}
return ret;
@@ -707,28 +694,35 @@ static int compat_gpr_set(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- void *reg;
+ compat_ulong_t reg;
+
+ if (kbuf) {
+ memcpy(&reg, kbuf, sizeof(reg));
+ kbuf += sizeof(reg);
+ } else {
+ ret = copy_from_user(&reg, ubuf, sizeof(reg));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ubuf += sizeof(reg);
+ }
switch (idx) {
case 15:
- reg = (void *)&newregs.pc;
+ newregs.pc = reg;
break;
case 16:
- reg = (void *)&newregs.pstate;
+ newregs.pstate = reg;
break;
case 17:
- reg = (void *)&newregs.orig_x0;
+ newregs.orig_x0 = reg;
break;
default:
- reg = (void *)&newregs.regs[idx];
+ newregs.regs[idx] = reg;
}
- ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
-
- if (ret)
- goto out;
- else
- ubuf += sizeof(compat_ulong_t);
}
if (valid_user_regs(&newregs.user_regs))
@@ -736,7 +730,6 @@ static int compat_gpr_set(struct task_struct *target,
else
ret = -EINVAL;
-out:
return ret;
}
@@ -817,33 +810,6 @@ static const struct user_regset_view user_aarch32_view = {
.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
-int aarch32_break_trap(struct pt_regs *regs)
-{
- unsigned int instr;
- bool bp = false;
- void __user *pc = (void __user *)instruction_pointer(regs);
-
- if (compat_thumb_mode(regs)) {
- /* get 16-bit Thumb instruction */
- get_user(instr, (u16 __user *)pc);
- if (instr == AARCH32_BREAK_THUMB2_LO) {
- /* get second half of 32-bit Thumb-2 instruction */
- get_user(instr, (u16 __user *)(pc + 2));
- bp = instr == AARCH32_BREAK_THUMB2_HI;
- } else {
- bp = instr == AARCH32_BREAK_THUMB;
- }
- } else {
- /* 32-bit ARM instruction */
- get_user(instr, (u32 __user *)pc);
- bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
- }
-
- if (bp)
- return ptrace_break(regs);
- return 1;
-}
-
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
compat_ulong_t __user *ret)
{
@@ -874,6 +840,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
compat_ulong_t val)
{
int ret;
+ mm_segment_t old_fs = get_fs();
if (off & 3 || off >= COMPAT_USER_SZ)
return -EIO;
@@ -881,10 +848,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
if (off >= sizeof(compat_elf_gregset_t))
return 0;
+ set_fs(KERNEL_DS);
ret = copy_regset_from_user(tsk, &user_aarch32_view,
REGSET_COMPAT_GPR, off,
sizeof(compat_ulong_t),
&val);
+ set_fs(old_fs);
+
return ret;
}
@@ -1111,45 +1081,49 @@ long arch_ptrace(struct task_struct *child, long request,
return ptrace_request(child, request, addr, data);
}
+enum ptrace_syscall_dir {
+ PTRACE_SYSCALL_ENTER = 0,
+ PTRACE_SYSCALL_EXIT,
+};
-static int __init ptrace_break_init(void)
-{
- hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP,
- TRAP_BRKPT, "ptrace BRK handler");
- return 0;
-}
-core_initcall(ptrace_break_init);
-
-
-asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
+static void tracehook_report_syscall(struct pt_regs *regs,
+ enum ptrace_syscall_dir dir)
{
+ int regno;
unsigned long saved_reg;
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return regs->syscallno;
-
- if (is_compat_task()) {
- /* AArch32 uses ip (r12) for scratch */
- saved_reg = regs->regs[12];
- regs->regs[12] = dir;
- } else {
- /*
- * Save X7. X7 is used to denote syscall entry/exit:
- * X7 = 0 -> entry, = 1 -> exit
- */
- saved_reg = regs->regs[7];
- regs->regs[7] = dir;
- }
+ /*
+ * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
+ * used to denote syscall entry/exit:
+ */
+ regno = (is_compat_task() ? 12 : 7);
+ saved_reg = regs->regs[regno];
+ regs->regs[regno] = dir;
- if (dir)
+ if (dir == PTRACE_SYSCALL_EXIT)
tracehook_report_syscall_exit(regs, 0);
else if (tracehook_report_syscall_entry(regs))
regs->syscallno = ~0UL;
- if (is_compat_task())
- regs->regs[12] = saved_reg;
- else
- regs->regs[7] = saved_reg;
+ regs->regs[regno] = saved_reg;
+}
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, regs->syscallno);
return regs->syscallno;
}
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, regs_return_value(regs));
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+}
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
new file mode 100644
index 000000000000..89102a6ffad5
--- /dev/null
+++ b/arch/arm64/kernel/return_address.c
@@ -0,0 +1,55 @@
+/*
+ * arch/arm64/kernel/return_address.c
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/ftrace.h>
+
+#include <asm/stacktrace.h>
+
+struct return_address_data {
+ unsigned int level;
+ void *addr;
+};
+
+static int save_return_addr(struct stackframe *frame, void *d)
+{
+ struct return_address_data *data = d;
+
+ if (!data->level) {
+ data->addr = (void *)frame->pc;
+ return 1;
+ } else {
+ --data->level;
+ return 0;
+ }
+}
+
+void *return_address(unsigned int level)
+{
+ struct return_address_data data;
+ struct stackframe frame;
+ register unsigned long current_sp asm ("sp");
+
+ data.level = level + 2;
+ data.addr = NULL;
+
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.pc = (unsigned long)return_address; /* dummy */
+
+ walk_stackframe(&frame, save_return_addr, &data);
+
+ if (!data.level)
+ return data.addr;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(return_address);
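
save_return_addr() above counts down the requested number of frames and captures the pc of the next one; the '+ 2' accounts for the unwinder's own frames. A minimal array-based model of that counting logic (no real stack walking, names invented):

#include <stdio.h>

struct return_address_data {
	unsigned int level;
	unsigned long addr;
};

/* Callback: skip 'level' frames, then capture the pc of the next one. */
static int save_return_addr(unsigned long pc, void *d)
{
	struct return_address_data *data = d;

	if (!data->level) {
		data->addr = pc;
		return 1;	/* stop walking */
	}
	--data->level;
	return 0;		/* keep walking */
}

int main(void)
{
	/* Fake "stack" of pcs, innermost frame first. */
	unsigned long pcs[] = { 0x1000, 0x1100, 0x1200, 0x1300 };
	struct return_address_data data = { .level = 2, .addr = 0 };

	for (unsigned int i = 0; i < sizeof(pcs) / sizeof(pcs[0]); i++)
		if (save_return_addr(pcs[i], &data))
			break;

	printf("captured pc = %#lx\n", data.addr);	/* 0x1200 */
	return 0;
}
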
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index add6ea616843..9d704df65a6a 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -41,10 +41,14 @@
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
+#include <linux/efi.h>
+#include <linux/personality.h>
+#include <asm/fixmap.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
+#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -53,13 +57,25 @@
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>
+#include <asm/efi.h>
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
-unsigned int elf_hwcap __read_mostly;
+unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP_DEFAULT \
+ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+unsigned int compat_elf_hwcap2 __read_mostly;
+#endif
+
static const char *cpu_name;
static const char *machine_name;
phys_addr_t __fdt_pointer __initdata;
@@ -97,15 +113,108 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
-static void __init setup_processor(void)
+struct cpuinfo_arm64 {
+ struct cpu cpu;
+ u32 reg_midr;
+};
+
+static DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+
+void cpuinfo_store_cpu(void)
{
- struct cpu_info *cpu_info;
+ struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
+ info->reg_midr = read_cpuid_id();
+}
+
+void __init smp_setup_processor_id(void)
+{
+ /*
+	 * clear __my_cpu_offset on the boot CPU to avoid a hang caused by
+	 * using per-cpu variables too early; for example, lockdep will
+	 * access a per-cpu variable inside lock_release()
+ */
+ set_my_cpu_offset(0);
+}
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpu_logical_map(cpu);
+}
+struct mpidr_hash mpidr_hash;
+#ifdef CONFIG_SMP
+/**
+ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
+ * level in order to build a linear index from an
+ *			  MPIDR value. The resulting algorithm is a collision-free
+ *			  hash carried out through shifting and ORing.
+ */
+static void __init smp_build_mpidr_hash(void)
+{
+ u32 i, affinity, fs[4], bits[4], ls;
+ u64 mask = 0;
/*
- * locate processor in the list of supported processor
- * types. The linker builds this table for us from the
- * entries in arch/arm/mm/proc.S
+	 * Pre-scan the list of MPIDRs and filter out bits that do
+	 * not contribute to affinity levels, i.e. they never toggle.
*/
+ for_each_possible_cpu(i)
+ mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
+ pr_debug("mask of set bits %#llx\n", mask);
+ /*
+ * Find and stash the last and first bit set at all affinity levels to
+ * check how many bits are required to represent them.
+ */
+ for (i = 0; i < 4; i++) {
+ affinity = MPIDR_AFFINITY_LEVEL(mask, i);
+ /*
+		 * Find the MSB and LSB positions
+ * to determine how many bits are required
+ * to express the affinity level.
+ */
+ ls = fls(affinity);
+ fs[i] = affinity ? ffs(affinity) - 1 : 0;
+ bits[i] = ls - fs[i];
+ }
+ /*
+ * An index can be created from the MPIDR_EL1 by isolating the
+ * significant bits at each affinity level and by shifting
+	 * them in order to compress the 32-bit value space into a
+	 * smaller set of values. This is equivalent to hashing
+ * the MPIDR_EL1 through shifting and ORing. It is a collision free
+ * hash though not minimal since some levels might contain a number
+ * of CPUs that is not an exact power of 2 and their bit
+ * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
+ */
+ mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
+ mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
+ mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
+ (bits[1] + bits[0]);
+ mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
+ fs[3] - (bits[2] + bits[1] + bits[0]);
+ mpidr_hash.mask = mask;
+ mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
+ pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
+ mpidr_hash.shift_aff[0],
+ mpidr_hash.shift_aff[1],
+ mpidr_hash.shift_aff[2],
+ mpidr_hash.shift_aff[3],
+ mpidr_hash.mask,
+ mpidr_hash.bits);
+ /*
+ * 4x is an arbitrary value used to warn on a hash table much bigger
+ * than expected on most systems.
+ */
+ if (mpidr_hash_size() > 4 * num_possible_cpus())
+ pr_warn("Large number of MPIDR hash buckets detected\n");
+ __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
+}
+#endif
+
+static void __init setup_processor(void)
+{
+ struct cpu_info *cpu_info;
+ u64 features, block;
+
cpu_info = lookup_processor_type(read_cpuid_id());
if (!cpu_info) {
printk("CPU configuration botched (ID %08x), unable to continue.\n",
@@ -118,8 +227,71 @@ static void __init setup_processor(void)
printk("CPU: %s [%08x] revision %d\n",
cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
- sprintf(init_utsname()->machine, "aarch64");
+ sprintf(init_utsname()->machine, ELF_PLATFORM);
elf_hwcap = 0;
+
+ /*
+ * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
+ * The blocks we test below represent incremental functionality
+ * for non-negative values. Negative values are reserved.
+ */
+ features = read_cpuid(ID_AA64ISAR0_EL1);
+ block = (features >> 4) & 0xf;
+ if (!(block & 0x8)) {
+ switch (block) {
+ default:
+ case 2:
+ elf_hwcap |= HWCAP_PMULL;
+ case 1:
+ elf_hwcap |= HWCAP_AES;
+ case 0:
+ break;
+ }
+ }
+
+ block = (features >> 8) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_SHA1;
+
+ block = (features >> 12) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_SHA2;
+
+ block = (features >> 16) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_CRC32;
+
+#ifdef CONFIG_COMPAT
+ /*
+ * ID_ISAR5_EL1 carries similar information as above, but pertaining to
+	 * the AArch32 execution state.
+ */
+ features = read_cpuid(ID_ISAR5_EL1);
+ block = (features >> 4) & 0xf;
+ if (!(block & 0x8)) {
+ switch (block) {
+ default:
+ case 2:
+ compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
+ case 1:
+ compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
+ case 0:
+ break;
+ }
+ }
+
+ block = (features >> 8) & 0xf;
+ if (block && !(block & 0x8))
+ compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
+
+ block = (features >> 12) & 0xf;
+ if (block && !(block & 0x8))
+ compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
+
+ block = (features >> 16) & 0xf;
+ if (block && !(block & 0x8))
+ compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
+#endif
}
static void __init setup_machine_fdt(phys_addr_t dt_phys)
@@ -127,6 +299,8 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
struct boot_param_header *devtree;
unsigned long dt_root;
+ cpuinfo_store_cpu();
+
/* Check we have a non-NULL DT pointer */
if (!dt_phys) {
early_print("\n"
@@ -195,25 +369,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return __va(memblock_alloc(size, align));
}
-/*
- * Limit the memory size that was specified via FDT.
- */
-static int __init early_mem(char *p)
-{
- phys_addr_t limit;
-
- if (!p)
- return 1;
-
- limit = memparse(p, &p) & PAGE_MASK;
- pr_notice("Memory limited to %lldMB\n", limit >> 20);
-
- memblock_enforce_memory_limit(limit);
-
- return 0;
-}
-early_param("mem", early_mem);
-
static void __init request_standard_resources(void)
{
struct memblock_region *region;
@@ -257,20 +412,27 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = boot_command_line;
+ early_ioremap_init();
+
parse_early_param();
+ efi_init();
arm64_memblock_init();
paging_init();
request_standard_resources();
+ efi_idmap_init();
+
unflatten_device_tree();
psci_init();
cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
smp_init_cpus();
+ smp_build_mpidr_hash();
#endif
#ifdef CONFIG_VT
@@ -288,16 +450,14 @@ static int __init arm64_device_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
-arch_initcall(arm64_device_init);
-
-static DEFINE_PER_CPU(struct cpu, cpu_data);
+arch_initcall_sync(arm64_device_init);
static int __init topology_init(void)
{
int i;
for_each_possible_cpu(i) {
- struct cpu *cpu = &per_cpu(cpu_data, i);
+ struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
cpu->hotpluggable = 1;
register_cpu(cpu, i);
}
@@ -309,17 +469,50 @@ subsys_initcall(topology_init);
static const char *hwcap_str[] = {
"fp",
"asimd",
+ "evtstrm",
+ "aes",
+ "pmull",
+ "sha1",
+ "sha2",
+ "crc32",
NULL
};
+#ifdef CONFIG_COMPAT
+static const char *compat_hwcap_str[] = {
+ "swp",
+ "half",
+ "thumb",
+ "26bit",
+ "fastmult",
+ "fpa",
+ "vfp",
+ "edsp",
+ "java",
+ "iwmmxt",
+ "crunch",
+ "thumbee",
+ "neon",
+ "vfpv3",
+ "vfpv3d16",
+ "tls",
+ "vfpv4",
+ "idiva",
+ "idivt",
+ "vfpd32",
+ "lpae",
+ "evtstrm"
+};
+#endif /* CONFIG_COMPAT */
+
static int c_show(struct seq_file *m, void *v)
{
- int i;
-
- seq_printf(m, "Processor\t: %s rev %d (%s)\n",
- cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
+ int i, j;
for_each_online_cpu(i) {
+ struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+ u32 midr = cpuinfo->reg_midr;
+
/*
* glibc reads /proc/cpuinfo to determine the number of
* online processors, looking for lines beginning with
@@ -328,27 +521,37 @@ static int c_show(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
#endif
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
+
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
loops_per_jiffy / (500000UL/HZ),
loops_per_jiffy / (5000UL/HZ) % 100);
- }
-
- /* dump out the processor features */
- seq_puts(m, "Features\t: ");
-
- for (i = 0; hwcap_str[i]; i++)
- if (elf_hwcap & (1 << i))
- seq_printf(m, "%s ", hwcap_str[i]);
-
- seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
- seq_printf(m, "CPU architecture: AArch64\n");
- seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
- seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
- seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
- seq_puts(m, "\n");
-
- seq_printf(m, "Hardware\t: %s\n", machine_name);
+ /*
+ * Dump out the common processor features in a single line.
+ * Userspace should read the hwcaps with getauxval(AT_HWCAP)
+ * rather than attempting to parse this, but there's a body of
+ * software which does already (at least for 32-bit).
+ */
+ seq_puts(m, "Features\t:");
+ if (personality(current->personality) == PER_LINUX32) {
+#ifdef CONFIG_COMPAT
+ for (j = 0; compat_hwcap_str[j]; j++)
+ if (COMPAT_ELF_HWCAP & (1 << j))
+ seq_printf(m, " %s", compat_hwcap_str[j]);
+#endif /* CONFIG_COMPAT */
+ } else {
+ for (j = 0; hwcap_str[j]; j++)
+ if (elf_hwcap & (1 << j))
+ seq_printf(m, " %s", hwcap_str[j]);
+ }
+ seq_puts(m, "\n");
+
+ seq_printf(m, "CPU implementer\t: 0x%02x\n", (midr >> 24));
+ seq_printf(m, "CPU architecture: 8\n");
+ seq_printf(m, "CPU variant\t: 0x%x\n", ((midr >> 20) & 0xf));
+ seq_printf(m, "CPU part\t: 0x%03x\n", ((midr >> 4) & 0xfff));
+ seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf));
+ }
return 0;
}
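
smp_build_mpidr_hash() boils the set of MPIDR_EL1 values down to four per-level shifts so that, later in this diff, cpu_resume can turn an MPIDR into a dense index into the sleep_save_sp array using nothing but masking, shifts and ORs (see the compute_mpidr_hash macro in sleep.S). The standalone sketch below is illustrative only and not part of the patch: it re-derives and applies the hash for a hypothetical two-cluster, four-cores-per-cluster system, with the MPIDR_* helpers re-declared locally to mirror the kernel macros.

/* Standalone model of the MPIDR hash; the topology below is hypothetical. */
#include <stdio.h>
#include <strings.h>		/* ffs() */

#define MPIDR_LEVEL_SHIFT(level)	(((1 << (level)) >> 1) << 3)	/* 0, 8, 16, 32 */
#define MPIDR_AFFINITY_LEVEL(m, level)	(((m) >> MPIDR_LEVEL_SHIFT(level)) & 0xff)

static int fls32(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

int main(void)
{
	/* Aff1 = cluster, Aff0 = core within the cluster. */
	unsigned long long mpidr[] = { 0x000, 0x001, 0x002, 0x003,
				       0x100, 0x101, 0x102, 0x103 };
	unsigned long long mask = 0;
	unsigned int fs[4], bits[4], shift[4], total;
	int i, l, ncpus = 8;

	for (i = 0; i < ncpus; i++)
		mask |= mpidr[i] ^ mpidr[0];	/* keep only the bits that toggle */

	for (l = 0; l < 4; l++) {
		unsigned int aff = MPIDR_AFFINITY_LEVEL(mask, l);

		fs[l] = aff ? ffs(aff) - 1 : 0;
		bits[l] = fls32(aff) - fs[l];
	}
	shift[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	shift[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	shift[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] - (bits[1] + bits[0]);
	shift[3] = MPIDR_LEVEL_SHIFT(3) + fs[3] - (bits[2] + bits[1] + bits[0]);
	total = bits[0] + bits[1] + bits[2] + bits[3];
	printf("mask=%#llx, %u hash buckets\n", mask, 1u << total);

	for (i = 0; i < ncpus; i++) {
		unsigned long long m = mpidr[i] & mask;
		unsigned int idx = 0;

		for (l = 0; l < 4; l++)	/* isolate each level, shift into place, OR */
			idx |= (m & (0xffULL << MPIDR_LEVEL_SHIFT(l))) >> shift[l];
		printf("MPIDR %#5llx -> index %u\n", mpidr[i], idx);
	}
	return 0;
}
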
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 890a591f75dd..e3cf09626245 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
@@ -25,7 +26,6 @@
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
-#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
@@ -100,8 +100,7 @@ static int restore_sigframe(struct pt_regs *regs,
{
sigset_t set;
int i, err;
- struct aux_context __user *aux =
- (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
+ void *aux = sf->uc.uc_mcontext.__reserved;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (err == 0)
@@ -121,8 +120,11 @@ static int restore_sigframe(struct pt_regs *regs,
err |= !valid_user_regs(&regs->user_regs);
- if (err == 0)
- err |= restore_fpsimd_context(&aux->fpsimd);
+ if (err == 0) {
+ struct fpsimd_context *fpsimd_ctx =
+ container_of(aux, struct fpsimd_context, head);
+ err |= restore_fpsimd_context(fpsimd_ctx);
+ }
return err;
}
@@ -167,8 +169,8 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
struct pt_regs *regs, sigset_t *set)
{
int i, err = 0;
- struct aux_context __user *aux =
- (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
+ void *aux = sf->uc.uc_mcontext.__reserved;
+ struct _aarch64_ctx *end;
/* set up the stack frame for unwinding */
__put_user_error(regs->regs[29], &sf->fp, err);
@@ -185,12 +187,17 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
- if (err == 0)
- err |= preserve_fpsimd_context(&aux->fpsimd);
+ if (err == 0) {
+ struct fpsimd_context *fpsimd_ctx =
+ container_of(aux, struct fpsimd_context, head);
+ err |= preserve_fpsimd_context(fpsimd_ctx);
+ aux += sizeof(*fpsimd_ctx);
+ }
/* set the "end" magic */
- __put_user_error(0, &aux->end.magic, err);
- __put_user_error(0, &aux->end.size, err);
+ end = aux;
+ __put_user_error(0, &end->magic, err);
+ __put_user_error(0, &end->size, err);
return err;
}
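
With aux now treated as a plain cursor into uc_mcontext.__reserved, the records in that area are self-describing: each begins with a struct _aarch64_ctx header (magic, size) and the list ends with a zeroed header, which is exactly what the "end magic" above writes. Userspace can walk the same chain from a signal handler; the sketch below is illustrative only (not part of the patch), declares local mirrors of the uapi <asm/sigcontext.h> types, and assumes an AArch64 glibc target whose mcontext_t exposes the __reserved area.

/* Walk the __reserved records delivered with a signal frame; illustrative only. */
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

/* Local mirrors of the uapi <asm/sigcontext.h> definitions. */
struct aarch64_ctx { uint32_t magic; uint32_t size; };
#define FPSIMD_MAGIC	0x46508001	/* value as published in the uapi header */

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	ucontext_t *uc = ucontext;
	uint8_t *p = (uint8_t *)uc->uc_mcontext.__reserved;

	(void)sig; (void)info;
	for (;;) {
		struct aarch64_ctx head;

		memcpy(&head, p, sizeof(head));
		if (!head.magic)		/* the zeroed terminator record */
			break;
		printf("record magic=%#x size=%u%s\n", head.magic, head.size,
		       head.magic == FPSIMD_MAGIC ? " (FPSIMD state)" : "");
		p += head.size;
	}
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);		/* printf from a handler is fine for a demo only */
	return 0;
}
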
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index e393174fe859..96f545cfbb70 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -100,34 +100,6 @@ struct compat_rt_sigframe {
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-/*
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- */
-#define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_compat_sigreturn)
-#define SVC_SYS_SIGRETURN (0xef000000 | __NR_compat_sigreturn)
-#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_compat_rt_sigreturn)
-#define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_compat_rt_sigreturn)
-
-/*
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_compat_sigreturn) << 16) | \
- 0x2700 | __NR_compat_sigreturn)
-#define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
- 0x2700 | __NR_compat_rt_sigreturn)
-
-const compat_ulong_t aarch32_sigret_code[6] = {
- /*
- * AArch32 sigreturn code.
- * We don't construct an OABI SWI - instead we just set the imm24 field
- * to the EABI syscall number so that we create a sane disassembly.
- */
- MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN,
- MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
-};
-
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
compat_sigset_t cset;
@@ -179,8 +151,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
case __SI_TIMER:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
- &to->si_ptr);
+ err |= __put_user(from->si_int, &to->si_int);
break;
case __SI_POLL:
err |= __put_user(from->si_band, &to->si_band);
@@ -209,7 +180,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
case __SI_MESGQ: /* But this is */
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
+ err |= __put_user(from->si_int, &to->si_int);
break;
default: /* this is just in case for now ... */
err |= __put_user(from->si_pid, &to->si_pid);
@@ -474,12 +445,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* Check if the handler is written for ARM or Thumb */
thumb = handler & 1;
- if (thumb) {
+ if (thumb)
spsr |= COMPAT_PSR_T_BIT;
- spsr &= ~COMPAT_PSR_IT_MASK;
- } else {
+ else
spsr &= ~COMPAT_PSR_T_BIT;
- }
+
+ /* The IT state must be cleared for both ARM and Thumb-2 */
+ spsr &= ~COMPAT_PSR_IT_MASK;
if (ka->sa.sa_flags & SA_RESTORER) {
retcode = ptr_to_compat(ka->sa.sa_restorer);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
new file mode 100644
index 000000000000..b1925729c692
--- /dev/null
+++ b/arch/arm64/kernel/sleep.S
@@ -0,0 +1,184 @@
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+ .text
+/*
+ * Implementation of MPIDR_EL1 hash algorithm through shifting
+ * and OR'ing.
+ *
+ * @dst: register containing hash result
+ * @rs0: register containing affinity level 0 bit shift
+ * @rs1: register containing affinity level 1 bit shift
+ * @rs2: register containing affinity level 2 bit shift
+ * @rs3: register containing affinity level 3 bit shift
+ * @mpidr: register containing MPIDR_EL1 value
+ * @mask: register containing MPIDR mask
+ *
+ * Pseudo C-code:
+ *
+ *u32 dst;
+ *
+ *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
+ * u32 aff0, aff1, aff2, aff3;
+ * u64 mpidr_masked = mpidr & mask;
+ * aff0 = mpidr_masked & 0xff;
+ * aff1 = mpidr_masked & 0xff00;
+ * aff2 = mpidr_masked & 0xff0000;
+ *	aff3 = mpidr_masked & 0xff00000000;
+ * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
+ *}
+ * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
+ * Output register: dst
+ * Note: input and output registers must be disjoint register sets
+ *	 (eg: a macro instance with mpidr = x1 and dst = x1 is invalid)
+ */
+ .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
+ and \mpidr, \mpidr, \mask // mask out MPIDR bits
+	and	\dst, \mpidr, #0xff		// dst = aff0
+ lsr \dst ,\dst, \rs0 // dst=aff0>>rs0
+ and \mask, \mpidr, #0xff00 // mask = aff1
+ lsr \mask ,\mask, \rs1
+ orr \dst, \dst, \mask // dst|=(aff1>>rs1)
+ and \mask, \mpidr, #0xff0000 // mask = aff2
+ lsr \mask ,\mask, \rs2
+ orr \dst, \dst, \mask // dst|=(aff2>>rs2)
+ and \mask, \mpidr, #0xff00000000 // mask = aff3
+ lsr \mask ,\mask, \rs3
+ orr \dst, \dst, \mask // dst|=(aff3>>rs3)
+ .endm
+/*
+ * Save CPU state for a suspend. This saves the callee-saved registers, and
+ * allocates space on the kernel stack for the CPU-specific registers plus
+ * some other data for resume.
+ *
+ * x0 = suspend finisher argument
+ */
+ENTRY(__cpu_suspend)
+ stp x29, lr, [sp, #-96]!
+ stp x19, x20, [sp,#16]
+ stp x21, x22, [sp,#32]
+ stp x23, x24, [sp,#48]
+ stp x25, x26, [sp,#64]
+ stp x27, x28, [sp,#80]
+ mov x2, sp
+ sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
+ mov x1, sp
+ /*
+ * x1 now points to struct cpu_suspend_ctx allocated on the stack
+ */
+ str x2, [x1, #CPU_CTX_SP]
+ ldr x2, =sleep_save_sp
+ ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
+#ifdef CONFIG_SMP
+ mrs x7, mpidr_el1
+ ldr x9, =mpidr_hash
+ ldr x10, [x9, #MPIDR_HASH_MASK]
+ /*
+	 * The following code relies on the sizes of the
+	 * struct mpidr_hash members.
+ */
+ ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
+ ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
+ compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
+ add x2, x2, x8, lsl #3
+#endif
+ bl __cpu_suspend_finisher
+ /*
+	 * We never get here unless the suspend fails.
+	 * A successful cpu_suspend returns through cpu_resume; returning
+	 * through this code path is considered an error.
+	 * If the return value is set to 0, force x0 = -EOPNOTSUPP
+	 * to make sure a proper error condition is propagated.
+ */
+ cmp x0, #0
+ mov x3, #-EOPNOTSUPP
+ csel x0, x3, x0, eq
+ add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, lr, [sp], #96
+ ret
+ENDPROC(__cpu_suspend)
+ .ltorg
+
+/*
+ * x0 must contain the sctlr value retrieved from restored context
+ */
+ENTRY(cpu_resume_mmu)
+ ldr x3, =cpu_resume_after_mmu
+ msr sctlr_el1, x0 // restore sctlr_el1
+ isb
+ br x3 // global jump to virtual address
+ENDPROC(cpu_resume_mmu)
+cpu_resume_after_mmu:
+ mov x0, #0 // return zero on success
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, lr, [sp], #96
+ ret
+ENDPROC(cpu_resume_after_mmu)
+
+ .data
+ENTRY(cpu_resume)
+ bl el2_setup // if in EL2 drop to EL1 cleanly
+#ifdef CONFIG_SMP
+ mrs x1, mpidr_el1
+ adr x4, mpidr_hash_ptr
+ ldr x5, [x4]
+ add x8, x4, x5 // x8 = struct mpidr_hash phys address
+ /* retrieve mpidr_hash members to compute the hash */
+ ldr x2, [x8, #MPIDR_HASH_MASK]
+ ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS]
+ ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
+ compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
+ /* x7 contains hash index, let's use it to grab context pointer */
+#else
+ mov x7, xzr
+#endif
+ adr x0, sleep_save_sp
+ ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
+ ldr x0, [x0, x7, lsl #3]
+ /* load sp from context */
+ ldr x2, [x0, #CPU_CTX_SP]
+ adr x1, sleep_idmap_phys
+ /* load physical address of identity map page table in x1 */
+ ldr x1, [x1]
+ mov sp, x2
+ /*
+ * cpu_do_resume expects x0 to contain context physical address
+ * pointer and x1 to contain physical address of 1:1 page tables
+ */
+ bl cpu_do_resume // PC relative jump, MMU off
+ b cpu_resume_mmu // Resume MMU, never returns
+ENDPROC(cpu_resume)
+
+ .align 3
+mpidr_hash_ptr:
+ /*
+ * offset of mpidr_hash symbol from current location
+ * used to obtain run-time mpidr_hash address with MMU off
+ */
+ .quad mpidr_hash - .
+/*
+ * physical address of identity mapped page tables
+ */
+ .type sleep_idmap_phys, #object
+ENTRY(sleep_idmap_phys)
+ .quad 0
+/*
+ * struct sleep_save_sp {
+ * phys_addr_t *save_ptr_stash;
+ * phys_addr_t save_ptr_stash_phys;
+ * };
+ */
+ .type sleep_save_sp, #object
+ENTRY(sleep_save_sp)
+ .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 5d54e3717bf8..9a3c7ef182fd 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,6 +39,7 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -48,76 +49,34 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/arm-ipi.h>
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
* where to place its SVC stack
*/
struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_TIMER,
};
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void __cpuinit write_pen_release(u64 val)
-{
- void *start = (void *)&secondary_holding_pen_release;
- unsigned long size = sizeof(secondary_holding_pen_release);
-
- secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
-}
-
/*
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
*/
static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned long timeout;
-
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- raw_spin_lock(&boot_lock);
-
- /*
- * Update the pen release flag.
- */
- write_pen_release(cpu_logical_map(cpu));
-
- /*
- * Send an event, causing the secondaries to read pen_release.
- */
- sev();
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == INVALID_HWID)
- break;
- udelay(10);
- }
-
- /*
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- raw_spin_unlock(&boot_lock);
+ if (cpu_ops[cpu]->cpu_boot)
+ return cpu_ops[cpu]->cpu_boot(cpu);
- return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+ return -EOPNOTSUPP;
}
static DECLARE_COMPLETION(cpu_running);
@@ -158,6 +117,11 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
return ret;
}
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+ store_cpu_topology(cpuid);
+}
+
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
@@ -167,8 +131,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
- printk("CPU%u: Booted secondary processor\n", cpu);
-
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
@@ -177,6 +139,9 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+ printk("CPU%u: Booted secondary processor\n", cpu);
+
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
@@ -187,24 +152,20 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
preempt_disable();
trace_hardirqs_off();
- /*
- * Let the primary processor know we're out of the
- * pen, then head off into the C entry point
- */
- write_pen_release(INVALID_HWID);
+ if (cpu_ops[cpu]->cpu_postboot)
+ cpu_ops[cpu]->cpu_postboot();
/*
- * Synchronise with the boot thread.
+ * Enable GIC and timers.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ notify_cpu_starting(cpu);
+
+ smp_store_cpu_info(cpu);
/*
- * Enable local interrupts.
+ * Log the CPU info before it is marked online and might get read.
*/
- notify_cpu_starting(cpu);
- local_irq_enable();
- local_fiq_enable();
+ cpuinfo_store_cpu();
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -214,48 +175,149 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
+ local_dbg_enable();
+ local_irq_enable();
+ local_fiq_enable();
+
/*
* OK, it's off to the idle thread for us
*/
cpu_startup_entry(CPUHP_ONLINE);
}
-void __init smp_cpus_done(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
{
- unsigned long bogosum = loops_per_jiffy * num_online_cpus();
+ /*
+ * If we don't have a cpu_die method, abort before we reach the point
+ * of no return. CPU0 may not have an cpu_ops, so test for it.
+	 * of no return. CPU0 may not have a cpu_ops entry, so test for it.
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+ return -EOPNOTSUPP;
- pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
- num_online_cpus(), bogosum / (500000/HZ),
- (bogosum / (5000/HZ)) % 100);
+ /*
+ * We may need to abort a hot unplug for some other mechanism-specific
+ * reason.
+ */
+ if (cpu_ops[cpu]->cpu_disable)
+ return cpu_ops[cpu]->cpu_disable(cpu);
+
+ return 0;
}
-void __init smp_prepare_boot_cpu(void)
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
{
-}
+ unsigned int cpu = smp_processor_id();
+ int ret;
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+ ret = op_cpu_disable(cpu);
+ if (ret)
+ return ret;
-static const struct smp_enable_ops *enable_ops[] __initconst = {
- &smp_spin_table_ops,
- &smp_psci_ops,
- NULL,
-};
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Remove this CPU from the vm mask set of all processes.
+ */
+ clear_tasks_mm_cpumask(cpu);
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
+ return 0;
+}
+
+static int op_cpu_kill(unsigned int cpu)
{
- const struct smp_enable_ops **ops = enable_ops;
+ /*
+ * If we have no means of synchronising with the dying CPU, then assume
+ * that it is really dead. We can only wait for an arbitrary length of
+ * time and hope that it's dead, so let's skip the wait and just hope.
+ */
+ if (!cpu_ops[cpu]->cpu_kill)
+ return 1;
- while (*ops) {
- if (!strcmp(name, (*ops)->name))
- return *ops;
+ return cpu_ops[cpu]->cpu_kill(cpu);
+}
+
+static DECLARE_COMPLETION(cpu_died);
- ops++;
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ pr_crit("CPU%u: cpu didn't die\n", cpu);
+ return;
}
+ pr_notice("CPU%u: shutdown\n", cpu);
+
+ /*
+ * Now that the dying CPU is beyond the point of no return w.r.t.
+	 * in-kernel synchronisation, try to get the firmware to help us
+ * verify that it has really left the kernel before we consider
+ * clobbering anything it might still be using.
+ */
+ if (!op_cpu_kill(cpu))
+ pr_warn("CPU%d may not have shut down cleanly\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ idle_task_exit();
+
+ local_irq_disable();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ complete(&cpu_died);
+
+ /*
+ * Actually shutdown the CPU. This must never fail. The specific hotplug
+ * mechanism must perform all required cache maintenance to ensure that
+ * no dirty lines are lost in the process of shutting down the CPU.
+ */
+ cpu_ops[cpu]->cpu_die(cpu);
- return NULL;
+ BUG();
}
+#endif
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+ unsigned long bogosum = loops_per_jiffy * num_online_cpus();
+
+ pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+ num_online_cpus(), bogosum / (500000/HZ),
+ (bogosum / (5000/HZ)) % 100);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+}
+
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
/*
* Enumerate the possible CPU set from the device tree and build the
@@ -264,9 +326,8 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
*/
void __init smp_init_cpus(void)
{
- const char *enable_method;
struct device_node *dn = NULL;
- int i, cpu = 1;
+ unsigned int i, cpu = 1;
bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -335,25 +396,10 @@ void __init smp_init_cpus(void)
if (cpu >= NR_CPUS)
goto next;
- /*
- * We currently support only the "spin-table" enable-method.
- */
- enable_method = of_get_property(dn, "enable-method", NULL);
- if (!enable_method) {
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
- goto next;
- }
-
- smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
-
- if (!smp_enable_ops[cpu]) {
- pr_err("%s: invalid enable-method property: %s\n",
- dn->full_name, enable_method);
+ if (cpu_read_ops(dn, cpu) != 0)
goto next;
- }
- if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+ if (cpu_ops[cpu]->cpu_init(dn, cpu))
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -383,8 +429,12 @@ next:
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu, err;
- unsigned int ncores = num_possible_cpus();
+ int err;
+ unsigned int cpu, ncores = num_possible_cpus();
+
+ init_cpu_topology();
+
+ smp_store_cpu_info(smp_processor_id());
/*
* are we trying to boot more cores than exist?
@@ -411,10 +461,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (cpu == smp_processor_id())
continue;
- if (!smp_enable_ops[cpu])
+ if (!cpu_ops[cpu])
continue;
- err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+ err = cpu_ops[cpu]->cpu_prepare(cpu);
if (err)
continue;
@@ -445,6 +495,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_TIMER, "Timer broadcast interrupts"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -454,7 +505,7 @@ void show_ipi_list(struct seq_file *p, int prec)
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
prec >= 4 ? " " : "");
- for_each_present_cpu(cpu)
+ for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));
seq_printf(p, " %s\n", ipi_types[i]);
@@ -530,6 +581,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ case IPI_TIMER:
+ irq_enter();
+ tick_receive_broadcast();
+ irq_exit();
+ break;
+#endif
+
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
@@ -542,6 +601,13 @@ void smp_send_reschedule(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_TIMER);
+}
+#endif
+
void smp_send_stop(void)
{
unsigned long timeout;
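
The boot and hotplug paths above no longer hard-code the pen-release dance; they dispatch through a per-CPU cpu_operations table (cpu_init/cpu_prepare/cpu_boot plus the optional hotplug hooks), with NULL checks guarding every optional method. The sketch below is a simplified standalone model of that dispatch pattern, illustrative only and not part of the patch; the struct layout and method bodies are made up to echo the names used here.

/* Simplified model of the cpu_ops[] dispatch used by boot_secondary() and friends. */
#include <errno.h>
#include <stdio.h>

#define NR_CPUS 4

struct cpu_operations {
	const char *name;
	int (*cpu_prepare)(unsigned int cpu);
	int (*cpu_boot)(unsigned int cpu);
	void (*cpu_die)(unsigned int cpu);	/* optional: only if hot-unplug works */
};

static int spin_table_prepare(unsigned int cpu) { printf("prepare CPU%u\n", cpu); return 0; }
static int spin_table_boot(unsigned int cpu)    { printf("boot CPU%u\n", cpu); return 0; }

static const struct cpu_operations spin_table_ops = {
	.name		= "spin-table",
	.cpu_prepare	= spin_table_prepare,
	.cpu_boot	= spin_table_boot,
	/* .cpu_die left NULL: this enable method cannot take CPUs offline */
};

static const struct cpu_operations *cpu_ops[NR_CPUS];

static int boot_secondary(unsigned int cpu)
{
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);
	return -EOPNOTSUPP;			/* same fallback as above */
}

int main(void)
{
	unsigned int cpu;

	/* Normally the enable method is chosen per CPU from the device tree. */
	for (cpu = 1; cpu < NR_CPUS; cpu++)
		cpu_ops[cpu] = &spin_table_ops;

	for (cpu = 1; cpu < NR_CPUS; cpu++)
		if (cpu_ops[cpu]->cpu_prepare(cpu) == 0)
			boot_secondary(cpu);
	return 0;
}
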
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
deleted file mode 100644
index 0c533301be77..000000000000
--- a/arch/arm64/kernel/smp_psci.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * PSCI SMP initialisation
- *
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/smp.h>
-
-#include <asm/psci.h>
-#include <asm/smp_plat.h>
-
-static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
-{
- return 0;
-}
-
-static int __init smp_psci_prepare_cpu(int cpu)
-{
- int err;
-
- if (!psci_ops.cpu_on) {
- pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
- return -ENODEV;
- }
-
- err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
- if (err) {
- pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
- return err;
- }
-
- return 0;
-}
-
-const struct smp_enable_ops smp_psci_ops __initconst = {
- .name = "psci",
- .init_cpu = smp_psci_init_cpu,
- .prepare_cpu = smp_psci_prepare_cpu,
-};
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 7c35fa682f76..0347d38eea29 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -16,15 +16,38 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+extern void secondary_holding_pen(void);
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
-static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not. This is necessary for the hotplug code to work
+ * reliably.
+ */
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
+
+ secondary_holding_pen_release = val;
+ __flush_dcache_area(start, size);
+}
+
+
+static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
{
/*
* Determine the address from which the CPU is polling.
@@ -40,7 +63,7 @@ static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
return 0;
}
-static int __init smp_spin_table_prepare_cpu(int cpu)
+static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
void **release_addr;
@@ -48,7 +71,16 @@ static int __init smp_spin_table_prepare_cpu(int cpu)
return -ENODEV;
release_addr = __va(cpu_release_addr[cpu]);
- release_addr[0] = (void *)__pa(secondary_holding_pen);
+
+ /*
+ * We write the release address as LE regardless of the native
+	 * endianness of the kernel. Therefore, any boot-loaders that
+	 * read this address need to convert it to the
+	 * boot-loader's endianness before jumping. This is mandated by
+ * the boot protocol.
+ */
+ release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
+
__flush_dcache_area(release_addr, sizeof(release_addr[0]));
/*
@@ -59,8 +91,24 @@ static int __init smp_spin_table_prepare_cpu(int cpu)
return 0;
}
-const struct smp_enable_ops smp_spin_table_ops __initconst = {
+static int smp_spin_table_cpu_boot(unsigned int cpu)
+{
+ /*
+ * Update the pen release flag.
+ */
+ write_pen_release(cpu_logical_map(cpu));
+
+ /*
+ * Send an event, causing the secondaries to read pen_release.
+ */
+ sev();
+
+ return 0;
+}
+
+const struct cpu_operations smp_spin_table_ops = {
.name = "spin-table",
- .init_cpu = smp_spin_table_init_cpu,
- .prepare_cpu = smp_spin_table_prepare_cpu,
+ .cpu_init = smp_spin_table_cpu_init,
+ .cpu_prepare = smp_spin_table_cpu_prepare,
+ .cpu_boot = smp_spin_table_cpu_boot,
};
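
cpu_prepare above publishes the physical address of secondary_holding_pen at the firmware-provided cpu-release-addr, always little-endian, and cpu_boot then writes the target CPU's hwid into the pen and issues sev(). The single-threaded sketch below is an illustrative model of the other side of that handshake (what the firmware stub at the release address and the holding pen in head.S effectively do); it is not part of the patch, the addresses and hwid are hypothetical, and a little-endian host is assumed.

/* Single-threaded model of the spin-table handshake; illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define INVALID_HWID	(~0UL)		/* stand-in sentinel */

/* Stand-ins for the firmware's cpu-release-addr and the kernel's holding pen. */
static volatile uint64_t release_addr_le;
static volatile unsigned long pen_release = INVALID_HWID;

/* Kernel side: what cpu_prepare + cpu_boot amount to. */
static void kernel_release_cpu(uint64_t pen_entry_pa, unsigned long target_hwid)
{
	release_addr_le = pen_entry_pa;	/* cpu_to_le64() + dcache flush + sev() for real */
	pen_release = target_hwid;	/* write_pen_release() + sev() for real */
}

/* Secondary side: firmware stub polls the release address, the pen polls the hwid. */
static void secondary_cpu(unsigned long my_hwid)
{
	while (!release_addr_le)
		;			/* wfe between polls on real hardware */
	printf("jump to entry point %#llx\n", (unsigned long long)release_addr_le);

	while (pen_release != my_hwid)
		;			/* secondary_holding_pen loop in head.S */
	printf("CPU %#lx leaves the pen and enters the kernel\n", my_hwid);
}

int main(void)
{
	kernel_release_cpu(0x80001000ULL, 0x1);	/* hypothetical address and hwid */
	secondary_cpu(0x1);
	return 0;
}
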
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index d25459ff57fc..407991bf79f5 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -35,7 +35,7 @@
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
-int unwind_frame(struct stackframe *frame)
+int notrace unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
unsigned long fp = frame->fp;
@@ -43,12 +43,16 @@ int unwind_frame(struct stackframe *frame)
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
- if (fp < low || fp > high || fp & 0xf)
+ if (fp < low || fp > high - 0x18 || fp & 0xf)
return -EINVAL;
frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp);
- frame->pc = *(unsigned long *)(fp + 8);
+ /*
+ * -4 here because we care about the PC at time of bl,
+ * not where the return will go.
+ */
+ frame->pc = *(unsigned long *)(fp + 8) - 4;
return 0;
}
@@ -107,10 +111,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
- register unsigned long current_sp asm("sp");
data.no_sched_functions = 0;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.pc = (unsigned long)save_stack_trace_tsk;
}
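
unwind_frame() leans on the AArch64 frame record convention: [fp] holds the caller's fp and [fp + 8] the saved lr, and the new "- 4" biases the reported PC back onto the bl instruction rather than its return target. The sketch below walks its own frame-pointer chain in the same way; it is illustrative only (not part of the patch) and assumes an AArch64 build that keeps frame pointers (for example -fno-omit-frame-pointer), with only a crude depth cap as sanity checking.

/* Walk our own frame records; AArch64 with frame pointers assumed. */
#include <stdint.h>
#include <stdio.h>

struct frame_record {			/* what [fp] points at */
	struct frame_record *fp;	/* caller's frame record */
	uint64_t lr;			/* saved return address */
};

static void __attribute__((noinline)) dump_stack_here(void)
{
	struct frame_record *fr = __builtin_frame_address(0);
	int depth = 0;

	while (fr && depth < 16) {
		/* -4 points at the bl itself rather than the return target */
		printf("#%d pc=%#llx\n", depth++,
		       (unsigned long long)(fr->lr - 4));
		fr = fr->fp;
	}
}

static void __attribute__((noinline)) level2(void) { dump_stack_here(); }
static void __attribute__((noinline)) level1(void) { level2(); }

int main(void)
{
	level1();
	return 0;
}
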
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
new file mode 100644
index 000000000000..1fa9ce4afd8f
--- /dev/null
+++ b/arch/arm64/kernel/suspend.c
@@ -0,0 +1,140 @@
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/debug-monitors.h>
+#include <asm/pgtable.h>
+#include <asm/memory.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+extern int __cpu_suspend(unsigned long);
+/*
+ * This is called by __cpu_suspend() to save the state, and do whatever
+ * flushing is required to ensure that when the CPU goes to sleep we have
+ * the necessary data available when the caches are not searched.
+ *
+ * @arg: Argument to pass to suspend operations
+ * @ptr: CPU context virtual address
+ * @save_ptr: address of the location where the context physical address
+ * must be saved
+ */
+int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
+ phys_addr_t *save_ptr)
+{
+ int cpu = smp_processor_id();
+
+ *save_ptr = virt_to_phys(ptr);
+
+ cpu_do_suspend(ptr);
+ /*
+ * Only flush the context that must be retrieved with the MMU
+ * off. VA primitives ensure the flush is applied to all
+ * cache levels so context is pushed to DRAM.
+ */
+ __flush_dcache_area(ptr, sizeof(*ptr));
+ __flush_dcache_area(save_ptr, sizeof(*save_ptr));
+
+ return cpu_ops[cpu]->cpu_suspend(arg);
+}
+
+/*
+ * This hook is provided so that cpu_suspend code can restore HW
+ * breakpoints as early as possible in the resume path, before reenabling
+ * breakpoints as early as possible in the resume path, before re-enabling
+ * debug exceptions. This cannot be done from a CPU PM notifier since, by the
+ * time the notifier runs, debug exceptions might have been enabled already,
+ * with the HW breakpoint registers' contents still in an unknown state.
+void (*hw_breakpoint_restore)(void *);
+void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+{
+ /* Prevent multiple restore hook initializations */
+ if (WARN_ON(hw_breakpoint_restore))
+ return;
+ hw_breakpoint_restore = hw_bp_restore;
+}
+
+/**
+ * cpu_suspend
+ *
+ * @arg: argument to pass to the finisher function
+ */
+int cpu_suspend(unsigned long arg)
+{
+ struct mm_struct *mm = current->active_mm;
+ int ret, cpu = smp_processor_id();
+ unsigned long flags;
+
+ /*
+ * If cpu_ops have not been registered or suspend
+ * has not been initialized, cpu_suspend call fails early.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
+ return -EOPNOTSUPP;
+
+ /*
+ * From this point debug exceptions are disabled to prevent
+ * updates to mdscr register (saved and restored along with
+ * general purpose registers) from kernel debuggers.
+ */
+ local_dbg_save(flags);
+
+ /*
+	 * The mm context is saved on the stack; it will be restored when
+	 * the cpu comes out of reset through the identity-mapped
+	 * page tables, so that the thread address space is properly
+	 * set up on function return.
+ */
+ ret = __cpu_suspend(arg);
+ if (ret == 0) {
+ cpu_switch_mm(mm->pgd, mm);
+ flush_tlb_all();
+
+ /*
+ * Restore per-cpu offset before any kernel
+ * subsystem relying on it has a chance to run.
+ */
+ set_my_cpu_offset(per_cpu_offset(cpu));
+
+ /*
+ * Restore HW breakpoint registers to sane values
+ * before debug exceptions are possibly reenabled
+ * through local_dbg_restore.
+ */
+ if (hw_breakpoint_restore)
+ hw_breakpoint_restore(NULL);
+ }
+
+ /*
+	 * Restore pstate flags. OS lock and mdscr have already been
+	 * restored, so from this point onwards, debugging is fully
+	 * re-enabled if it was enabled when the core started shutting down.
+ */
+ local_dbg_restore(flags);
+
+ return ret;
+}
+
+extern struct sleep_save_sp sleep_save_sp;
+extern phys_addr_t sleep_idmap_phys;
+
+static int cpu_suspend_init(void)
+{
+ void *ctx_ptr;
+
+ /* ctx_ptr is an array of physical addresses */
+ ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
+
+ if (WARN_ON(!ctx_ptr))
+ return -ENOMEM;
+
+ sleep_save_sp.save_ptr_stash = ctx_ptr;
+ sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
+ sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
+ __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
+ __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
+
+ return 0;
+}
+early_initcall(cpu_suspend_init);
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index a1b19ed7467c..423a5b3fc2be 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -59,48 +59,48 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
* extension.
*/
compat_sys_pread64_wrapper:
- orr x3, x4, x5, lsl #32
+ regs_to_64 x3, x4, x5
b sys_pread64
ENDPROC(compat_sys_pread64_wrapper)
compat_sys_pwrite64_wrapper:
- orr x3, x4, x5, lsl #32
+ regs_to_64 x3, x4, x5
b sys_pwrite64
ENDPROC(compat_sys_pwrite64_wrapper)
compat_sys_truncate64_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
b sys_truncate
ENDPROC(compat_sys_truncate64_wrapper)
compat_sys_ftruncate64_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
b sys_ftruncate
ENDPROC(compat_sys_ftruncate64_wrapper)
compat_sys_readahead_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
mov w2, w4
b sys_readahead
ENDPROC(compat_sys_readahead_wrapper)
compat_sys_fadvise64_64_wrapper:
mov w6, w1
- orr x1, x2, x3, lsl #32
- orr x2, x4, x5, lsl #32
+ regs_to_64 x1, x2, x3
+ regs_to_64 x2, x4, x5
mov w3, w6
b sys_fadvise64_64
ENDPROC(compat_sys_fadvise64_64_wrapper)
compat_sys_sync_file_range2_wrapper:
- orr x2, x2, x3, lsl #32
- orr x3, x4, x5, lsl #32
+ regs_to_64 x2, x2, x3
+ regs_to_64 x3, x4, x5
b sys_sync_file_range2
ENDPROC(compat_sys_sync_file_range2_wrapper)
compat_sys_fallocate_wrapper:
- orr x2, x2, x3, lsl #32
- orr x3, x4, x5, lsl #32
+ regs_to_64 x2, x2, x3
+ regs_to_64 x3, x4, x5
b sys_fallocate
ENDPROC(compat_sys_fallocate_wrapper)
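
Each of these compat wrappers rebuilds a 64-bit argument that AArch32 userspace passed as a pair of 32-bit registers. The open-coded "orr xN, lo, hi, lsl #32" is only right for a little-endian kernel: per the AAPCS the pair is laid out like the in-memory representation of the value, so a big-endian kernel must treat the other register of the pair as the low half, which is what the regs_to_64 assembler macro hides. The C sketch below is illustrative only (not part of the patch); BIG_ENDIAN_KERNEL is a stand-in for CONFIG_CPU_BIG_ENDIAN and the sample values are made up.

/* Reassemble a 64-bit compat syscall argument from its 32-bit register pair. */
#include <stdint.h>
#include <stdio.h>

#define BIG_ENDIAN_KERNEL 0	/* stand-in for CONFIG_CPU_BIG_ENDIAN */

static uint64_t regs_to_64(uint32_t first_reg, uint32_t second_reg)
{
	/* first_reg is the lower-numbered register of the pair (e.g. w4 of w4/w5) */
	if (BIG_ENDIAN_KERNEL)
		return ((uint64_t)first_reg << 32) | second_reg;
	return ((uint64_t)second_reg << 32) | first_reg;
}

int main(void)
{
	/* e.g. a compat pread64 offset of 0x123456789 split across w4/w5 (LE) */
	uint32_t w4 = 0x23456789, w5 = 0x00000001;

	printf("offset = %#llx\n", (unsigned long long)regs_to_64(w4, w5));
	return 0;
}
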
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 26e9c4eeaba8..78039927c807 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
case __ARM_NR_compat_set_tls:
current->thread.tp_value = regs->regs[0];
+
+ /*
+ * Protect against register corruption from context switch.
+ * See comment in tls_thread_flush.
+ */
+ barrier();
asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
return 0;
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index a551f88ae2c1..1ddb2bd5932a 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -18,6 +18,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clockchips.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -68,18 +69,14 @@ unsigned long long notrace sched_clock(void)
return arch_timer_read_counter() * sched_clock_mult;
}
-int read_current_timer(unsigned long *timer_value)
-{
- *timer_value = arch_timer_read_counter();
- return 0;
-}
-
void __init time_init(void)
{
u32 arch_timer_rate;
clocksource_of_init();
+ tick_setup_hrtimer_broadcast();
+
arch_timer_rate = arch_timer_get_rate();
if (!arch_timer_rate)
panic("Unable to initialise architected timer.\n");
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
new file mode 100644
index 000000000000..db8bb29c3852
--- /dev/null
+++ b/arch/arm64/kernel/topology.c
@@ -0,0 +1,590 @@
+/*
+ * arch/arm64/kernel/topology.c
+ *
+ * Copyright (C) 2011,2013,2014 Linaro Limited.
+ *
+ * Based on the arm32 version written by Vincent Guittot in turn based on
+ * arch/sh/kernel/topology.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/cputype.h>
+#include <asm/topology.h>
+#include <asm/smp_plat.h>
+
+
+/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account during load balance. A per cpu structure
+ * is preferred because each CPU updates its own cpu_power field during the
+ * load balance except for idle cores. One idle core is selected to run the
+ * rebalance_domains for all idle cores and the cpu_power can be updated
+ * during this sequence.
+ */
+static DEFINE_PER_CPU(unsigned long, cpu_scale);
+
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+ per_cpu(cpu_scale, cpu) = power;
+}
+
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ of_node_put(cpu_node);
+ return cpu;
+ }
+ }
+
+ pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+
+ of_node_put(cpu_node);
+ return -1;
+}
+
+static int __init parse_core(struct device_node *core, int cluster_id,
+ int core_id)
+{
+ char name[10];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else {
+ pr_err("%s: Can't get CPU for thread\n",
+ t->full_name);
+ of_node_put(t);
+ return -EINVAL;
+ }
+ of_node_put(t);
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%s: Core has both threads and CPU\n",
+ core->full_name);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf) {
+ pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+ char name[10];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ static int cluster_id __initdata;
+ int core_id = 0;
+ int i, ret;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ leaf = false;
+ ret = parse_cluster(c, depth + 1);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%s: cpu-map children should be clusters\n",
+ c->full_name);
+ of_node_put(c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, cluster_id, core_id++);
+ } else {
+ pr_err("%s: Non-leaf cluster with core %s\n",
+ cluster->full_name, name);
+ ret = -EINVAL;
+ }
+
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%s: empty cluster\n", cluster->full_name);
+
+ if (leaf)
+ cluster_id++;
+
+ return 0;
+}
+
+struct cpu_efficiency {
+ const char *compatible;
+ unsigned long efficiency;
+};
+
+/*
+ * Table of relative efficiency of each processor type.
+ * The efficiency value must fit in 20 bits and the final
+ * cpu_scale value must be in the range
+ * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2
+ * in order to return at most 1 when DIV_ROUND_CLOSEST
+ * is used to compute the capacity of a CPU.
+ * Processors that are not defined in the table
+ * use the default SCHED_POWER_SCALE value for cpu_scale.
+ */
+static const struct cpu_efficiency table_efficiency[] = {
+ { "arm,cortex-a57", 3891 },
+ { "arm,cortex-a53", 2048 },
+ { NULL, },
+};
+
+static unsigned long *__cpu_capacity;
+#define cpu_capacity(cpu) __cpu_capacity[cpu]
+
+static unsigned long middle_capacity = 1;
+
+/*
+ * Iterate over all CPU descriptors in the DT and compute the efficiency
+ * (as per table_efficiency). Also calculate a middle efficiency
+ * as close as possible to (max{eff_i} + min{eff_i}) / 2.
+ * This is later used to scale the cpu_power field such that an
+ * 'average' CPU is of middle power. Also see the comments near
+ * table_efficiency[] and update_cpu_power().
+ */
+static int __init parse_dt_topology(void)
+{
+ struct device_node *cn, *map;
+ int ret = 0;
+ int cpu;
+
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ map = of_get_child_by_name(cn, "cpu-map");
+ if (!map)
+ goto out;
+
+ ret = parse_cluster(map, 0);
+ if (ret != 0)
+ goto out_map;
+
+ /*
+ * Check that all cores are in the topology; the SMP code will
+ * only mark cores described in the DT as possible.
+ */
+ for_each_possible_cpu(cpu) {
+ if (cpu_topology[cpu].cluster_id == -1) {
+ pr_err("CPU%d: No topology information specified\n",
+ cpu);
+ ret = -EINVAL;
+ }
+ }
+
+out_map:
+ of_node_put(map);
+out:
+ of_node_put(cn);
+ return ret;
+}
+
+static void __init parse_dt_cpu_power(void)
+{
+ const struct cpu_efficiency *cpu_eff;
+ struct device_node *cn;
+ unsigned long min_capacity = ULONG_MAX;
+ unsigned long max_capacity = 0;
+ unsigned long capacity = 0;
+ int cpu;
+
+ __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
+ GFP_NOWAIT);
+
+ for_each_possible_cpu(cpu) {
+ const u32 *rate;
+ int len;
+
+ /* Too early to use cpu->of_node */
+ cn = of_get_cpu_node(cpu, NULL);
+ if (!cn) {
+ pr_err("Missing device node for CPU %d\n", cpu);
+ continue;
+ }
+
+ for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
+ if (of_device_is_compatible(cn, cpu_eff->compatible))
+ break;
+
+ if (cpu_eff->compatible == NULL) {
+ pr_warn("%s: Unknown CPU type\n", cn->full_name);
+ continue;
+ }
+
+ rate = of_get_property(cn, "clock-frequency", &len);
+ if (!rate || len != 4) {
+ pr_err("%s: Missing clock-frequency property\n",
+ cn->full_name);
+ continue;
+ }
+
+ capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+
+ /* Save min capacity of the system */
+ if (capacity < min_capacity)
+ min_capacity = capacity;
+
+ /* Save max capacity of the system */
+ if (capacity > max_capacity)
+ max_capacity = capacity;
+
+ cpu_capacity(cpu) = capacity;
+ }
+
+ /* If min and max capacities are equal we bypass the update of the
+ * cpu_scale because all CPUs have the same capacity. Otherwise, we
+ * compute a middle_capacity factor that will ensure that the capacity
+ * of an 'average' CPU of the system will be as close as possible to
+ * SCHED_POWER_SCALE, which is the default value, but with the
+ * constraint explained near table_efficiency[].
+ */
+ if (min_capacity == max_capacity)
+ return;
+ else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
+ middle_capacity = (min_capacity + max_capacity)
+ >> (SCHED_POWER_SHIFT+1);
+ else
+ middle_capacity = ((max_capacity / 3)
+ >> (SCHED_POWER_SHIFT-1)) + 1;
+}
+
+/*
+ * Look up the custom capacity of a CPU in the cpu_topo_data table during
+ * boot. The update of all CPUs is in O(n^2) for heterogeneous systems, but
+ * the function returns immediately for homogeneous (SMP) systems.
+ */
+static void update_cpu_power(unsigned int cpu)
+{
+ if (!cpu_capacity(cpu))
+ return;
+
+ set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+
+ pr_info("CPU%u: update cpu_power %lu\n",
+ cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+/*
+ * cpu topology table
+ */
+struct cpu_topology cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+ return &cpu_topology[cpu].core_sibling;
+}
+
+static void update_siblings_masks(unsigned int cpuid)
+{
+ struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+ int cpu;
+
+ if (cpuid_topo->cluster_id == -1) {
+ /*
+ * DT does not contain topology information for this cpu.
+ */
+ pr_debug("CPU%u: No topology information configured\n", cpuid);
+ return;
+ }
+
+ /* update core and thread sibling masks */
+ for_each_possible_cpu(cpu) {
+ cpu_topo = &cpu_topology[cpu];
+
+ if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->core_id != cpu_topo->core_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+ }
+}
+
+#ifdef CONFIG_SCHED_HMP
+
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ * - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+ int cpu;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (cpu_logical_map(cpu) == mpidr)
+ return cpu;
+ return -EINVAL;
+}
+
+static const char * const little_cores[] = {
+ "arm,cortex-a53",
+ NULL,
+};
+
+static bool is_little_cpu(struct device_node *cn)
+{
+ const char * const *lc;
+ for (lc = little_cores; *lc; lc++)
+ if (of_device_is_compatible(cn, *lc))
+ return true;
+ return false;
+}
+
+void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+ struct cpumask *slow)
+{
+ struct device_node *cn = NULL;
+ int cpu;
+
+ cpumask_clear(fast);
+ cpumask_clear(slow);
+
+ /*
+ * Use the config options if they are given. This helps testing
+ * HMP scheduling on systems without a big.LITTLE architecture.
+ */
+ if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+ if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
+ WARN(1, "Failed to parse HMP fast cpu mask!\n");
+ if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
+ WARN(1, "Failed to parse HMP slow cpu mask!\n");
+ return;
+ }
+
+ /*
+ * Else, parse device tree for little cores.
+ */
+ while ((cn = of_find_node_by_type(cn, "cpu"))) {
+
+ const u32 *mpidr;
+ int len;
+
+ mpidr = of_get_property(cn, "reg", &len);
+ if (!mpidr || len != 8) {
+ pr_err("%s missing reg property\n", cn->full_name);
+ continue;
+ }
+
+ cpu = get_logical_index(be32_to_cpup(mpidr+1));
+ if (cpu == -EINVAL) {
+ pr_err("couldn't get logical index for mpidr %x\n",
+ be32_to_cpup(mpidr+1));
+ break;
+ }
+
+ if (is_little_cpu(cn))
+ cpumask_set_cpu(cpu, slow);
+ else
+ cpumask_set_cpu(cpu, fast);
+ }
+
+ if (!cpumask_empty(fast) && !cpumask_empty(slow))
+ return;
+
+ /*
+ * We didn't find both big and little cores so let's call all cores
+ * fast as this will keep the system running, with all cores being
+ * treated equally.
+ */
+ cpumask_setall(fast);
+ cpumask_clear(slow);
+}
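+
+/*
+ * Example only (not part of this patch): the two Kconfig strings above are
+ * parsed with cpulist_parse(), so they use the usual cpulist syntax, e.g.
+ * CONFIG_HMP_FAST_CPU_MASK="0-1" and CONFIG_HMP_SLOW_CPU_MASK="2-5" would
+ * mark CPUs 0-1 as fast and CPUs 2-5 as slow without consulting the DT.
+ */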
+
+struct cpumask hmp_slow_cpu_mask;
+
+void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
+{
+ struct cpumask hmp_fast_cpu_mask;
+ struct hmp_domain *domain;
+
+ arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
+
+ /*
+ * Initialize hmp_domains
+ * Must be ordered with respect to compute capacity.
+ * Fastest domain at head of list.
+ */
+ if(!cpumask_empty(&hmp_slow_cpu_mask)) {
+ domain = (struct hmp_domain *)
+ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
+ cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
+ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+ }
+ domain = (struct hmp_domain *)
+ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
+ cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
+ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+}
+#endif /* CONFIG_SCHED_HMP */
+
+/*
+ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
+ * @socket_id: cluster HW identifier
+ * @cluster_mask: the cpumask location to be initialized, modified by the
+ * function only if return value == 0
+ *
+ * Return:
+ *
+ * 0 on success
+ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
+ */
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
+{
+ int cpu;
+
+ if (!cluster_mask)
+ return -EINVAL;
+
+ for_each_online_cpu(cpu) {
+ if (socket_id == topology_physical_package_id(cpu)) {
+ cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+void store_cpu_topology(unsigned int cpuid)
+{
+ update_siblings_masks(cpuid);
+ update_cpu_power(cpuid);
+}
+
+static void __init reset_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = 0;
+ cpu_topo->cluster_id = -1;
+
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+ }
+}
+
+static void __init reset_cpu_power(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ set_power_scale(cpu, SCHED_POWER_SCALE);
+}
+
+void __init init_cpu_topology(void)
+{
+ reset_cpu_topology();
+
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
+ */
+ if (parse_dt_topology())
+ reset_cpu_topology();
+
+ reset_cpu_power();
+ parse_dt_cpu_power();
+}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index f30852d28590..12cd34ff35d0 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -32,6 +32,7 @@
#include <linux/syscalls.h>
#include <asm/atomic.h>
+#include <asm/debug-monitors.h>
#include <asm/traps.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
@@ -131,7 +132,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- const register unsigned long current_sp asm ("sp");
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -144,7 +144,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.pc = regs->pc;
} else if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
} else {
/*
@@ -155,7 +155,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.pc = thread_saved_pc(tsk);
}
- printk("Call trace:\n");
+ pr_emerg("Call trace:\n");
while (1) {
unsigned long where = frame.pc;
int ret;
@@ -261,11 +261,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
-#ifdef CONFIG_COMPAT
/* check for AArch32 breakpoint instructions */
- if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0)
+ if (!aarch32_break_handler(regs))
return;
-#endif
if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
printk_ratelimit()) {
@@ -329,17 +327,17 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
void __pte_error(const char *file, int line, unsigned long val)
{
- printk("%s:%d: bad pte %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pte %016lx.\n", file, line, val);
}
void __pmd_error(const char *file, int line, unsigned long val)
{
- printk("%s:%d: bad pmd %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
}
void __pgd_error(const char *file, int line, unsigned long val)
{
- printk("%s:%d: bad pgd %016lx.\n", file, line, val);
+ pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
}
void __init trap_init(void)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 6a389dc1bd49..84cafbc3eb54 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -58,7 +58,10 @@ static struct page *vectors_page[1];
static int alloc_vectors_page(void)
{
extern char __kuser_helper_start[], __kuser_helper_end[];
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long vpage;
vpage = get_zeroed_page(GFP_ATOMIC);
@@ -72,7 +75,7 @@ static int alloc_vectors_page(void)
/* sigreturn code */
memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- aarch32_sigret_code, sizeof(aarch32_sigret_code));
+ __aarch32_sigret_code_start, sigret_sz);
flush_icache_range(vpage, vpage + PAGE_SIZE);
vectors_page[0] = virt_to_page(vpage);
@@ -103,49 +106,31 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
static int __init vdso_init(void)
{
- struct page *pg;
- char *vbase;
- int i, ret = 0;
+ int i;
+
+ if (memcmp(&vdso_start, "\177ELF", 4)) {
+ pr_err("vDSO is not a valid ELF object!\n");
+ return -EINVAL;
+ }
vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
vdso_pages + 1, vdso_pages, 1L, &vdso_start);
/* Allocate the vDSO pagelist, plus a page for the data. */
- vdso_pagelist = kzalloc(sizeof(struct page *) * (vdso_pages + 1),
+ vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
GFP_KERNEL);
- if (vdso_pagelist == NULL) {
- pr_err("Failed to allocate vDSO pagelist!\n");
+ if (vdso_pagelist == NULL)
return -ENOMEM;
- }
/* Grab the vDSO code pages. */
- for (i = 0; i < vdso_pages; i++) {
- pg = virt_to_page(&vdso_start + i*PAGE_SIZE);
- ClearPageReserved(pg);
- get_page(pg);
- vdso_pagelist[i] = pg;
- }
-
- /* Sanity check the shared object header. */
- vbase = vmap(vdso_pagelist, 1, 0, PAGE_KERNEL);
- if (vbase == NULL) {
- pr_err("Failed to map vDSO pagelist!\n");
- return -ENOMEM;
- } else if (memcmp(vbase, "\177ELF", 4)) {
- pr_err("vDSO is not a valid ELF object!\n");
- ret = -EINVAL;
- goto unmap;
- }
+ for (i = 0; i < vdso_pages; i++)
+ vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
/* Grab the vDSO data page. */
- pg = virt_to_page(vdso_data);
- get_page(pg);
- vdso_pagelist[i] = pg;
+ vdso_pagelist[i] = virt_to_page(vdso_data);
-unmap:
- vunmap(vbase);
- return ret;
+ return 0;
}
arch_initcall(vdso_init);
@@ -153,11 +138,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp)
{
struct mm_struct *mm = current->mm;
- unsigned long vdso_base, vdso_mapping_len;
+ unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
int ret;
+ vdso_text_len = vdso_pages << PAGE_SHIFT;
/* Be sure to map the data page */
- vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+ vdso_mapping_len = vdso_text_len + PAGE_SIZE;
down_write(&mm->mmap_sem);
vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
@@ -167,35 +153,52 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
}
mm->context.vdso = (void *)vdso_base;
- ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
+ ret = install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdso_pagelist);
- if (ret) {
- mm->context.vdso = NULL;
+ if (ret)
+ goto up_fail;
+
+ vdso_base += vdso_text_len;
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ VM_READ|VM_MAYREAD,
+ vdso_pagelist + vdso_pages);
+ if (ret)
goto up_fail;
- }
-up_fail:
up_write(&mm->mmap_sem);
+ return 0;
+up_fail:
+ mm->context.vdso = NULL;
+ up_write(&mm->mmap_sem);
return ret;
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
+ unsigned long vdso_text;
+
+ if (!vma->vm_mm)
+ return NULL;
+
+ vdso_text = (unsigned long)vma->vm_mm->context.vdso;
+
/*
* We can re-use the vdso pointer in mm_context_t for identifying
* the vectors page for compat applications. The vDSO will always
* sit above TASK_UNMAPPED_BASE and so we don't need to worry about
* it conflicting with the vectors base.
*/
- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
+ if (vma->vm_start == vdso_text) {
#ifdef CONFIG_COMPAT
if (vma->vm_start == AARCH32_VECTORS_BASE)
return "[vectors]";
#endif
return "[vdso]";
+ } else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
+ return "[vvar]";
}
return NULL;
@@ -235,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->use_syscall = use_syscall;
vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
vdso_data->cs_cycle_last = tk->clock->cycle_last;
@@ -242,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->xtime_clock_nsec = tk->xtime_nsec;
vdso_data->cs_mult = tk->mult;
vdso_data->cs_shift = tk->shift;
- vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
- vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
}
smp_wmb();
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d8064af42e62..ff3bddea482d 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -43,13 +43,13 @@ $(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
# Assembly rules for the .S files
-$(obj-vdso): %.o: %.S
+$(obj-vdso): %.o: %.S FORCE
$(call if_changed_dep,vdsoas)
# Actual build commands
-quiet_cmd_vdsold = VDSOL $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
-quiet_cmd_vdsoas = VDSOA $@
+quiet_cmd_vdsold = VDSOL $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+quiet_cmd_vdsoas = VDSOA $@
cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
# Install commands for the unstripped file
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index f0a6d10b5211..fe652ffd34c2 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
bl __do_get_tspec
seqcnt_check w9, 1b
+ mov x30, x2
+
cmp w0, #CLOCK_MONOTONIC
b.ne 6f
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
b.ne 8f
+ /* xtime_coarse_nsec is already right-shifted */
+ mov x12, #0
+
/* Get coarse timespec. */
adr vdso_data, _vdso_data
3: seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
lsr x11, x11, x12
stp x10, x11, [x1, #TSPEC_TV_SEC]
mov x0, xzr
- ret x2
+ ret
7:
mov x30, x2
8: /* Syscall fallback. */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 3fae2be8b016..de93a4bbdb7c 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -13,10 +13,23 @@
#define ARM_EXIT_DISCARD(x) x
OUTPUT_ARCH(aarch64)
-ENTRY(stext)
+ENTRY(_text)
jiffies = jiffies_64;
+#define HYPERVISOR_TEXT \
+ /* \
+ * Force the alignment to be compatible with \
+ * the vectors requirements \
+ */ \
+ . = ALIGN(2048); \
+ VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
+ *(.hyp.idmap.text) \
+ VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
+ VMLINUX_SYMBOL(__hyp_text_start) = .; \
+ *(.hyp.text) \
+ VMLINUX_SYMBOL(__hyp_text_end) = .;
+
SECTIONS
{
/*
@@ -41,7 +54,6 @@ SECTIONS
}
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- *(.smp.pen.text)
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
@@ -49,6 +61,7 @@ SECTIONS
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
+ HYPERVISOR_TEXT
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
@@ -56,7 +69,8 @@ SECTIONS
}
RO_DATA(PAGE_SIZE)
-
+ EXCEPTION_TABLE(8)
+ NOTES
_etext = .; /* End of text and rodata section */
. = ALIGN(PAGE_SIZE);
@@ -82,45 +96,29 @@ SECTIONS
PERCPU_SECTION(64)
__init_end = .;
- . = ALIGN(THREAD_SIZE);
- __data_loc = .;
-
- .data : AT(__data_loc) {
- _data = .; /* address in memory */
- _sdata = .;
-
- /*
- * first, the init task union, aligned
- * to an 8192 byte boundary.
- */
- INIT_TASK_DATA(THREAD_SIZE)
- NOSAVE_DATA
- CACHELINE_ALIGNED_DATA(64)
- READ_MOSTLY_DATA(64)
-
- /*
- * The exception fixup table (might need resorting at runtime)
- */
- . = ALIGN(32);
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
-
- /*
- * and the usual data section
- */
- DATA_DATA
- CONSTRUCTORS
-
- _edata = .;
- }
- _edata_loc = __data_loc + SIZEOF(.data);
- NOTES
+ . = ALIGN(PAGE_SIZE);
+ _data = .;
+ _sdata = .;
+ RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
BSS_SECTION(0, 0, 0)
+
+ . = ALIGN(PAGE_SIZE);
+ idmap_pg_dir = .;
+ . += IDMAP_DIR_SIZE;
+ swapper_pg_dir = .;
+ . += SWAPPER_DIR_SIZE;
+
_end = .;
STABS_DEBUG
.comment 0 : { *(.comment) }
}
+
+/*
+ * The HYP init code can't be more than a page long.
+ */
+ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
+ "HYP init code too big")
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
new file mode 100644
index 000000000000..8ba85e9ea388
--- /dev/null
+++ b/arch/arm64/kvm/Kconfig
@@ -0,0 +1,63 @@
+#
+# KVM configuration
+#
+
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ ---help---
+ Say Y here to get to see options for using your Linux host to run
+ other operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ bool "Kernel-based Virtual Machine (KVM) support"
+ select MMU_NOTIFIER
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select KVM_MMIO
+ select KVM_ARM_HOST
+ select KVM_ARM_VGIC
+ select KVM_ARM_TIMER
+ ---help---
+ Support hosting virtualized guest machines.
+
+ If unsure, say N.
+
+config KVM_ARM_HOST
+ bool
+ ---help---
+ Provides host support for ARM processors.
+
+config KVM_ARM_MAX_VCPUS
+ int "Number maximum supported virtual CPUs per VM"
+ depends on KVM_ARM_HOST
+ default 4
+ help
+ Static maximum number of virtual CPUs supported per VM.
+
+ If you choose a high number, the vcpu structures will be quite
+ large, so only choose a reasonable number that you expect to
+ actually use.
+
+config KVM_ARM_VGIC
+ bool
+ depends on KVM_ARM_HOST && OF
+ select HAVE_KVM_IRQCHIP
+ ---help---
+ Adds support for hardware-assisted, in-kernel GIC emulation.
+
+config KVM_ARM_TIMER
+ bool
+ depends on KVM_ARM_VGIC
+ ---help---
+ Adds support for the Architected Timers in virtual machines.
+
+endif # VIRTUALIZATION
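+
+# Example .config fragment (illustration only): enabling KVM here pulls in
+# the helper symbols via the select statements above, so a minimal
+# configuration is
+#   CONFIG_VIRTUALIZATION=y
+#   CONFIG_KVM=y
+# with KVM_ARM_HOST, KVM_ARM_VGIC and KVM_ARM_TIMER enabled as a result.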
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
new file mode 100644
index 000000000000..32a096174b94
--- /dev/null
+++ b/arch/arm64/kvm/Makefile
@@ -0,0 +1,27 @@
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm
+CFLAGS_arm.o := -I.
+CFLAGS_mmu.o := -I.
+
+KVM=../../../virt/kvm
+ARM=../../../arch/arm/kvm
+
+obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
+
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
+
+kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
+kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
+kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
+
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
+kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c
new file mode 100644
index 000000000000..124418d17049
--- /dev/null
+++ b/arch/arm64/kvm/emulate.c
@@ -0,0 +1,158 @@
+/*
+ * (not much of an) Emulation layer for 32bit guests.
+ *
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * based on arch/arm/kvm/emulate.c
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+
+/*
+ * stolen from arch/arm/kernel/opcodes.c
+ *
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+ 0xF0F0, /* EQ == Z set */
+ 0x0F0F, /* NE */
+ 0xCCCC, /* CS == C set */
+ 0x3333, /* CC */
+ 0xFF00, /* MI == N set */
+ 0x00FF, /* PL */
+ 0xAAAA, /* VS == V set */
+ 0x5555, /* VC */
+ 0x0C0C, /* HI == C set && Z clear */
+ 0xF3F3, /* LS == C clear || Z set */
+ 0xAA55, /* GE == (N==V) */
+ 0x55AA, /* LT == (N!=V) */
+ 0x0A05, /* GT == (!Z && (N==V)) */
+ 0xF5FA, /* LE == (Z || (N!=V)) */
+ 0xFFFF, /* AL always */
+ 0 /* NV */
+};
+
+static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+ u32 esr = kvm_vcpu_get_hsr(vcpu);
+
+ if (esr & ESR_EL2_CV)
+ return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
+
+ return -1;
+}
+
+/*
+ * Check if a trapped instruction should have been executed or not.
+ */
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr;
+ u32 cpsr_cond;
+ int cond;
+
+ /* Top two bits non-zero? Unconditional. */
+ if (kvm_vcpu_get_hsr(vcpu) >> 30)
+ return true;
+
+ /* Is condition field valid? */
+ cond = kvm_vcpu_get_condition(vcpu);
+ if (cond == 0xE)
+ return true;
+
+ cpsr = *vcpu_cpsr(vcpu);
+
+ if (cond < 0) {
+ /* This can happen in Thumb mode: examine IT state. */
+ unsigned long it;
+
+ it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+ /* it == 0 => unconditional. */
+ if (it == 0)
+ return true;
+
+ /* The cond for this insn works out as the top 4 bits. */
+ cond = (it >> 4);
+ }
+
+ cpsr_cond = cpsr >> 28;
+
+ if (!((cc_map[cond] >> cpsr_cond) & 1))
+ return false;
+
+ return true;
+}
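+
+/*
+ * Worked example (illustration only): for a trapped EQ-conditional
+ * instruction, cond == 0 and cc_map[0] == 0xF0F0. With only the Z flag set
+ * in the guest CPSR, cpsr >> 28 == 0b0100 == 4, and bit 4 of 0xF0F0 is 1,
+ * so the instruction is reported as one that would have executed.
+ */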
+
+/**
+ * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in an IT-block
+ * @vcpu: The VCPU pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb IF-THEN
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
+ * to do this little bit of work manually. The fields map like this:
+ *
+ * IT[7:0] -> CPSR[26:25],CPSR[15:10]
+ */
+static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+{
+ unsigned long itbits, cond;
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
+
+ BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK));
+
+ if (!(cpsr & COMPAT_PSR_IT_MASK))
+ return;
+
+ cond = (cpsr & 0xe000) >> 13;
+ itbits = (cpsr & 0x1c00) >> (10 - 2);
+ itbits |= (cpsr & (0x3 << 25)) >> 25;
+
+ /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
+ if ((itbits & 0x7) == 0)
+ itbits = cond = 0;
+ else
+ itbits = (itbits << 1) & 0x1f;
+
+ cpsr &= ~COMPAT_PSR_IT_MASK;
+ cpsr |= cond << 13;
+ cpsr |= (itbits & 0x1c) << (10 - 2);
+ cpsr |= (itbits & 0x3) << 25;
+ *vcpu_cpsr(vcpu) = cpsr;
+}
+
+/**
+ * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * @vcpu: The vcpu pointer
+ */
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+ bool is_thumb;
+
+ is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
+ if (is_thumb && !is_wide_instr)
+ *vcpu_pc(vcpu) += 2;
+ else
+ *vcpu_pc(vcpu) += 4;
+ kvm_adjust_itstate(vcpu);
+}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
new file mode 100644
index 000000000000..76794692c20b
--- /dev/null
+++ b/arch/arm64/kvm/guest.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/guest.c:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/cputype.h>
+#include <asm/uaccess.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { NULL }
+};
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+ return 0;
+}
+
+static u64 core_reg_offset_from_id(u64 id)
+{
+ return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
+}
+
+static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ /*
+ * Because the kvm_regs structure is a mix of 32, 64 and
+ * 128bit fields, we index it as if it was a 32bit
+ * array. Hence below, nr_regs is the number of entries, and
+ * off the index in the "array".
+ */
+ __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
+ struct kvm_regs *regs = vcpu_gp_regs(vcpu);
+ int nr_regs = sizeof(*regs) / sizeof(__u32);
+ u32 off;
+
+ /* Our ID is an index into the kvm_regs struct. */
+ off = core_reg_offset_from_id(reg->id);
+ if (off >= nr_regs ||
+ (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
+ return -ENOENT;
+
+ if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ return 0;
+}
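+
+/*
+ * Sketch of the indexing above (illustration only, assuming the usual
+ * KVM_REG_ARM_CORE_REG(x) definition of
+ * offsetof(struct kvm_regs, x) / sizeof(__u32)): a request for
+ * regs.regs[1] carries that word offset in the low bits of reg->id, so
+ * "off" points at the second X register inside the flattened kvm_regs and
+ * KVM_REG_SIZE(reg->id) gives the number of bytes to copy out.
+ */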
+
+static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
+ struct kvm_regs *regs = vcpu_gp_regs(vcpu);
+ int nr_regs = sizeof(*regs) / sizeof(__u32);
+ __uint128_t tmp;
+ void *valp = &tmp;
+ u64 off;
+ int err = 0;
+
+ /* Our ID is an index into the kvm_regs struct. */
+ off = core_reg_offset_from_id(reg->id);
+ if (off >= nr_regs ||
+ (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
+ return -ENOENT;
+
+ if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
+ u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+ switch (mode) {
+ case COMPAT_PSR_MODE_USR:
+ case COMPAT_PSR_MODE_FIQ:
+ case COMPAT_PSR_MODE_IRQ:
+ case COMPAT_PSR_MODE_SVC:
+ case COMPAT_PSR_MODE_ABT:
+ case COMPAT_PSR_MODE_UND:
+ case PSR_MODE_EL0t:
+ case PSR_MODE_EL1t:
+ case PSR_MODE_EL1h:
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+out:
+ return err;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ return -EINVAL;
+}
+
+static unsigned long num_core_regs(void)
+{
+ return sizeof(struct kvm_regs) / sizeof(__u32);
+}
+
+/**
+ * ARM64 versions of the TIMER registers, always available on arm64
+ */
+
+#define NUM_TIMER_REGS 3
+
+static bool is_timer_reg(u64 index)
+{
+ switch (index) {
+ case KVM_REG_ARM_TIMER_CTL:
+ case KVM_REG_ARM_TIMER_CNT:
+ case KVM_REG_ARM_TIMER_CVAL:
+ return true;
+ }
+ return false;
+}
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
+ return -EFAULT;
+ uindices++;
+ if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
+ return -EFAULT;
+ uindices++;
+ if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+ int ret;
+
+ ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
+ if (ret != 0)
+ return -EFAULT;
+
+ return kvm_arm_timer_set_reg(vcpu, reg->id, val);
+}
+
+static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+
+ val = kvm_arm_timer_get_reg(vcpu, reg->id);
+ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+}
+
+/**
+ * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
+ *
+ * This is for all registers.
+ */
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
+{
+ return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
+ + NUM_TIMER_REGS;
+}
+
+/**
+ * kvm_arm_copy_reg_indices - get indices of all registers.
+ *
+ * We do core registers right here, then the timer regs, and finally we
+ * append the system regs.
+ */
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ unsigned int i;
+ const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
+ int ret;
+
+ for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+ if (put_user(core_reg | i, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ ret = copy_timer_indices(vcpu, uindices);
+ if (ret)
+ return ret;
+ uindices += NUM_TIMER_REGS;
+
+ return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
+}
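+
+/*
+ * Typical userspace flow (illustration only): KVM_GET_REG_LIST returns
+ * kvm_arm_num_regs() indices in the order built above (core regs, then
+ * timer regs, then system regs); each index can then be passed to
+ * KVM_GET_ONE_REG / KVM_SET_ONE_REG in a struct kvm_one_reg.
+ */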
+
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ /* We currently use nothing arch-specific in upper 32 bits */
+ if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
+ return -EINVAL;
+
+ /* Register group 16 means we want a core register. */
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+ return get_core_reg(vcpu, reg);
+
+ if (is_timer_reg(reg->id))
+ return get_timer_reg(vcpu, reg);
+
+ return kvm_arm_sys_reg_get_reg(vcpu, reg);
+}
+
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ /* We currently use nothing arch-specific in upper 32 bits */
+ if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
+ return -EINVAL;
+
+ /* Register group 16 means we set a core register. */
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+ return set_core_reg(vcpu, reg);
+
+ if (is_timer_reg(reg->id))
+ return set_timer_reg(vcpu, reg);
+
+ return kvm_arm_sys_reg_set_reg(vcpu, reg);
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -EINVAL;
+}
+
+int __attribute_const__ kvm_target_cpu(void)
+{
+ unsigned long implementor = read_cpuid_implementor();
+ unsigned long part_number = read_cpuid_part_number();
+
+ switch (implementor) {
+ case ARM_CPU_IMP_ARM:
+ switch (part_number) {
+ case ARM_CPU_PART_AEM_V8:
+ return KVM_ARM_TARGET_AEM_V8;
+ case ARM_CPU_PART_FOUNDATION:
+ return KVM_ARM_TARGET_FOUNDATION_V8;
+ case ARM_CPU_PART_CORTEX_A53:
+ return KVM_ARM_TARGET_CORTEX_A53;
+ case ARM_CPU_PART_CORTEX_A57:
+ return KVM_ARM_TARGET_CORTEX_A57;
+ };
+ break;
+ case ARM_CPU_IMP_APM:
+ switch (part_number) {
+ case APM_CPU_PART_POTENZA:
+ return KVM_ARM_TARGET_XGENE_POTENZA;
+ };
+ break;
+ };
+
+ return -EINVAL;
+}
+
+int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ unsigned int i;
+ int phys_target = kvm_target_cpu();
+
+ if (init->target != phys_target)
+ return -EINVAL;
+
+ vcpu->arch.target = phys_target;
+ bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+
+ /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+ for (i = 0; i < sizeof(init->features) * 8; i++) {
+ if (init->features[i / 32] & (1 << (i % 32))) {
+ if (i >= KVM_VCPU_MAX_FEATURES)
+ return -ENOENT;
+ set_bit(i, vcpu->arch.features);
+ }
+ }
+
+ /* Now we know what it is, we can reset it. */
+ return kvm_reset_vcpu(vcpu);
+}
+
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+{
+ int target = kvm_target_cpu();
+
+ if (target < 0)
+ return -ENODEV;
+
+ memset(init, 0, sizeof(*init));
+
+ /*
+ * For now, we don't return any features.
+ * In future, we might use features to return target
+ * specific features available for the preferred
+ * target type.
+ */
+ init->target = (__u32)target;
+
+ return 0;
+}
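+
+/*
+ * Typical use (illustration only): userspace invokes the
+ * KVM_ARM_PREFERRED_TARGET vm ioctl to obtain the struct kvm_vcpu_init
+ * filled in here, optionally sets feature bits, and then passes it to
+ * KVM_ARM_VCPU_INIT for each vcpu it creates.
+ */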
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ return -EINVAL;
+}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
new file mode 100644
index 000000000000..e28be510380c
--- /dev/null
+++ b/arch/arm64/kvm/handle_exit.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/handle_exit.c:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_psci.h>
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ int ret;
+
+ ret = kvm_psci_call(vcpu);
+ if (ret < 0) {
+ kvm_inject_undefined(vcpu);
+ return 1;
+ }
+
+ return ret;
+}
+
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+/**
+ * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
+ * instruction executed by a guest
+ *
+ * @vcpu: the vcpu pointer
+ *
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * world-switches and schedule other host processes until there is an
+ * incoming IRQ or FIQ to the VM.
+ */
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
+ kvm_vcpu_on_spin(vcpu);
+ else
+ kvm_vcpu_block(vcpu);
+
+ return 1;
+}
+
+static exit_handle_fn arm_exit_handlers[] = {
+ [ESR_EL2_EC_WFI] = kvm_handle_wfx,
+ [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
+ [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
+ [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32,
+ [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
+ [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64,
+ [ESR_EL2_EC_HVC32] = handle_hvc,
+ [ESR_EL2_EC_SMC32] = handle_smc,
+ [ESR_EL2_EC_HVC64] = handle_hvc,
+ [ESR_EL2_EC_SMC64] = handle_smc,
+ [ESR_EL2_EC_SYS64] = kvm_handle_sys_reg,
+ [ESR_EL2_EC_IABT] = kvm_handle_guest_abort,
+ [ESR_EL2_EC_DABT] = kvm_handle_guest_abort,
+};
+
+static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
+{
+ u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+ if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
+ !arm_exit_handlers[hsr_ec]) {
+ kvm_err("Unknown exception class: hsr: %#08x\n",
+ (unsigned int)kvm_vcpu_get_hsr(vcpu));
+ BUG();
+ }
+
+ return arm_exit_handlers[hsr_ec];
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to userspace.
+ */
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index)
+{
+ exit_handle_fn exit_handler;
+
+ switch (exception_index) {
+ case ARM_EXCEPTION_IRQ:
+ return 1;
+ case ARM_EXCEPTION_TRAP:
+ /*
+ * See ARM ARM B1.14.1: "Hyp traps on instructions
+ * that fail their condition code check"
+ */
+ if (!kvm_condition_valid(vcpu)) {
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ return 1;
+ }
+
+ exit_handler = kvm_get_exit_handler(vcpu);
+
+ return exit_handler(vcpu, run);
+ default:
+ kvm_pr_unimpl("Unsupported exception type: %d",
+ exception_index);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return 0;
+ }
+}
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
new file mode 100644
index 000000000000..d968796f4b2d
--- /dev/null
+++ b/arch/arm64/kvm/hyp-init.S
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+ .text
+ .pushsection .hyp.idmap.text, "ax"
+
+ .align 11
+
+ENTRY(__kvm_hyp_init)
+ ventry __invalid // Synchronous EL2t
+ ventry __invalid // IRQ EL2t
+ ventry __invalid // FIQ EL2t
+ ventry __invalid // Error EL2t
+
+ ventry __invalid // Synchronous EL2h
+ ventry __invalid // IRQ EL2h
+ ventry __invalid // FIQ EL2h
+ ventry __invalid // Error EL2h
+
+ ventry __do_hyp_init // Synchronous 64-bit EL1
+ ventry __invalid // IRQ 64-bit EL1
+ ventry __invalid // FIQ 64-bit EL1
+ ventry __invalid // Error 64-bit EL1
+
+ ventry __invalid // Synchronous 32-bit EL1
+ ventry __invalid // IRQ 32-bit EL1
+ ventry __invalid // FIQ 32-bit EL1
+ ventry __invalid // Error 32-bit EL1
+
+__invalid:
+ b .
+
+ /*
+ * x0: HYP boot pgd
+ * x1: HYP pgd
+ * x2: HYP stack
+ * x3: HYP vectors
+ */
+__do_hyp_init:
+
+ msr ttbr0_el2, x0
+
+ mrs x4, tcr_el1
+ ldr x5, =TCR_EL2_MASK
+ and x4, x4, x5
+ ldr x5, =TCR_EL2_FLAGS
+ orr x4, x4, x5
+ msr tcr_el2, x4
+
+ ldr x4, =VTCR_EL2_FLAGS
+ /*
+ * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
+ * VTCR_EL2.
+ */
+ mrs x5, ID_AA64MMFR0_EL1
+ bfi x4, x5, #16, #3
+ msr vtcr_el2, x4
+
+ mrs x4, mair_el1
+ msr mair_el2, x4
+ isb
+
+ mrs x4, sctlr_el2
+ and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2
+ ldr x5, =SCTLR_EL2_FLAGS
+ orr x4, x4, x5
+ msr sctlr_el2, x4
+ isb
+
+ /* MMU is now enabled. Get ready for the trampoline dance */
+ ldr x4, =TRAMPOLINE_VA
+ adr x5, target
+ bfi x4, x5, #0, #PAGE_SHIFT
+ br x4
+
+target: /* We're now in the trampoline code, switch page tables */
+ msr ttbr0_el2, x1
+ isb
+
+ /* Invalidate the old TLBs */
+ tlbi alle2
+ dsb sy
+
+ /* Set the stack and new vectors */
+ kern_hyp_va x2
+ mov sp, x2
+ kern_hyp_va x3
+ msr vbar_el2, x3
+
+ /* Hello, World! */
+ eret
+ENDPROC(__kvm_hyp_init)
+
+ .ltorg
+
+ .popsection
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
new file mode 100644
index 000000000000..b72aa9f9215c
--- /dev/null
+++ b/arch/arm64/kvm/hyp.S
@@ -0,0 +1,1274 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/debug-monitors.h>
+#include <asm/fpsimdmacros.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
+#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
+#define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x)
+
+ .text
+ .pushsection .hyp.text, "ax"
+ .align PAGE_SHIFT
+
+.macro save_common_regs
+ // x2: base address for cpu context
+ // x3: tmp register
+
+ add x3, x2, #CPU_XREG_OFFSET(19)
+ stp x19, x20, [x3]
+ stp x21, x22, [x3, #16]
+ stp x23, x24, [x3, #32]
+ stp x25, x26, [x3, #48]
+ stp x27, x28, [x3, #64]
+ stp x29, lr, [x3, #80]
+
+ mrs x19, sp_el0
+ mrs x20, elr_el2 // EL1 PC
+ mrs x21, spsr_el2 // EL1 pstate
+
+ stp x19, x20, [x3, #96]
+ str x21, [x3, #112]
+
+ mrs x22, sp_el1
+ mrs x23, elr_el1
+ mrs x24, spsr_el1
+
+ str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
+ str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
+ str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
+.endm
+
+.macro restore_common_regs
+ // x2: base address for cpu context
+ // x3: tmp register
+
+ ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
+ ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
+ ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
+
+ msr sp_el1, x22
+ msr elr_el1, x23
+ msr spsr_el1, x24
+
+ add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0
+ ldp x19, x20, [x3]
+ ldr x21, [x3, #16]
+
+ msr sp_el0, x19
+ msr elr_el2, x20 // EL1 PC
+ msr spsr_el2, x21 // EL1 pstate
+
+ add x3, x2, #CPU_XREG_OFFSET(19)
+ ldp x19, x20, [x3]
+ ldp x21, x22, [x3, #16]
+ ldp x23, x24, [x3, #32]
+ ldp x25, x26, [x3, #48]
+ ldp x27, x28, [x3, #64]
+ ldp x29, lr, [x3, #80]
+.endm
+
+.macro save_host_regs
+ save_common_regs
+.endm
+
+.macro restore_host_regs
+ restore_common_regs
+.endm
+
+.macro save_fpsimd
+ // x2: cpu context address
+ // x3, x4: tmp regs
+ add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+ fpsimd_save x3, 4
+.endm
+
+.macro restore_fpsimd
+ // x2: cpu context address
+ // x3, x4: tmp regs
+ add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+ fpsimd_restore x3, 4
+.endm
+
+.macro save_guest_regs
+ // x0 is the vcpu address
+ // x1 is the return code, do not corrupt!
+ // x2 is the cpu context
+ // x3 is a tmp register
+ // Guest's x0-x3 are on the stack
+
+ // Compute base to save registers
+ add x3, x2, #CPU_XREG_OFFSET(4)
+ stp x4, x5, [x3]
+ stp x6, x7, [x3, #16]
+ stp x8, x9, [x3, #32]
+ stp x10, x11, [x3, #48]
+ stp x12, x13, [x3, #64]
+ stp x14, x15, [x3, #80]
+ stp x16, x17, [x3, #96]
+ str x18, [x3, #112]
+
+ pop x6, x7 // x2, x3
+ pop x4, x5 // x0, x1
+
+ add x3, x2, #CPU_XREG_OFFSET(0)
+ stp x4, x5, [x3]
+ stp x6, x7, [x3, #16]
+
+ save_common_regs
+.endm
+
+.macro restore_guest_regs
+ // x0 is the vcpu address.
+ // x2 is the cpu context
+ // x3 is a tmp register
+
+ // Prepare x0-x3 for later restore
+ add x3, x2, #CPU_XREG_OFFSET(0)
+ ldp x4, x5, [x3]
+ ldp x6, x7, [x3, #16]
+ push x4, x5 // Push x0-x3 on the stack
+ push x6, x7
+
+ // x4-x18
+ ldp x4, x5, [x3, #32]
+ ldp x6, x7, [x3, #48]
+ ldp x8, x9, [x3, #64]
+ ldp x10, x11, [x3, #80]
+ ldp x12, x13, [x3, #96]
+ ldp x14, x15, [x3, #112]
+ ldp x16, x17, [x3, #128]
+ ldr x18, [x3, #144]
+
+ // x19-x29, lr, sp*, elr*, spsr*
+ restore_common_regs
+
+ // Last bits of the 64bit state
+ pop x2, x3
+ pop x0, x1
+
+ // Do not touch any register after this!
+.endm
+
+/*
+ * Macros to perform system register save/restore.
+ *
+ * Ordering here is absolutely critical, and must be kept consistent
+ * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
+ * and in kvm_asm.h.
+ *
+ * In other words, don't touch any of these unless you know what
+ * you are doing.
+ */
+.macro save_sysregs
+ // x2: base address for cpu context
+ // x3: tmp register
+
+ add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
+
+ mrs x4, vmpidr_el2
+ mrs x5, csselr_el1
+ mrs x6, sctlr_el1
+ mrs x7, actlr_el1
+ mrs x8, cpacr_el1
+ mrs x9, ttbr0_el1
+ mrs x10, ttbr1_el1
+ mrs x11, tcr_el1
+ mrs x12, esr_el1
+ mrs x13, afsr0_el1
+ mrs x14, afsr1_el1
+ mrs x15, far_el1
+ mrs x16, mair_el1
+ mrs x17, vbar_el1
+ mrs x18, contextidr_el1
+ mrs x19, tpidr_el0
+ mrs x20, tpidrro_el0
+ mrs x21, tpidr_el1
+ mrs x22, amair_el1
+ mrs x23, cntkctl_el1
+ mrs x24, par_el1
+ mrs x25, mdscr_el1
+
+ stp x4, x5, [x3]
+ stp x6, x7, [x3, #16]
+ stp x8, x9, [x3, #32]
+ stp x10, x11, [x3, #48]
+ stp x12, x13, [x3, #64]
+ stp x14, x15, [x3, #80]
+ stp x16, x17, [x3, #96]
+ stp x18, x19, [x3, #112]
+ stp x20, x21, [x3, #128]
+ stp x22, x23, [x3, #144]
+ stp x24, x25, [x3, #160]
+.endm
+
+.macro save_debug
+ // x2: base address for cpu context
+ // x3: tmp register
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ mrs x20, dbgbcr15_el1
+ mrs x19, dbgbcr14_el1
+ mrs x18, dbgbcr13_el1
+ mrs x17, dbgbcr12_el1
+ mrs x16, dbgbcr11_el1
+ mrs x15, dbgbcr10_el1
+ mrs x14, dbgbcr9_el1
+ mrs x13, dbgbcr8_el1
+ mrs x12, dbgbcr7_el1
+ mrs x11, dbgbcr6_el1
+ mrs x10, dbgbcr5_el1
+ mrs x9, dbgbcr4_el1
+ mrs x8, dbgbcr3_el1
+ mrs x7, dbgbcr2_el1
+ mrs x6, dbgbcr1_el1
+ mrs x5, dbgbcr0_el1
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ mrs x20, dbgbvr15_el1
+ mrs x19, dbgbvr14_el1
+ mrs x18, dbgbvr13_el1
+ mrs x17, dbgbvr12_el1
+ mrs x16, dbgbvr11_el1
+ mrs x15, dbgbvr10_el1
+ mrs x14, dbgbvr9_el1
+ mrs x13, dbgbvr8_el1
+ mrs x12, dbgbvr7_el1
+ mrs x11, dbgbvr6_el1
+ mrs x10, dbgbvr5_el1
+ mrs x9, dbgbvr4_el1
+ mrs x8, dbgbvr3_el1
+ mrs x7, dbgbvr2_el1
+ mrs x6, dbgbvr1_el1
+ mrs x5, dbgbvr0_el1
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ mrs x20, dbgwcr15_el1
+ mrs x19, dbgwcr14_el1
+ mrs x18, dbgwcr13_el1
+ mrs x17, dbgwcr12_el1
+ mrs x16, dbgwcr11_el1
+ mrs x15, dbgwcr10_el1
+ mrs x14, dbgwcr9_el1
+ mrs x13, dbgwcr8_el1
+ mrs x12, dbgwcr7_el1
+ mrs x11, dbgwcr6_el1
+ mrs x10, dbgwcr5_el1
+ mrs x9, dbgwcr4_el1
+ mrs x8, dbgwcr3_el1
+ mrs x7, dbgwcr2_el1
+ mrs x6, dbgwcr1_el1
+ mrs x5, dbgwcr0_el1
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ mrs x20, dbgwvr15_el1
+ mrs x19, dbgwvr14_el1
+ mrs x18, dbgwvr13_el1
+ mrs x17, dbgwvr12_el1
+ mrs x16, dbgwvr11_el1
+ mrs x15, dbgwvr10_el1
+ mrs x14, dbgwvr9_el1
+ mrs x13, dbgwvr8_el1
+ mrs x12, dbgwvr7_el1
+ mrs x11, dbgwvr6_el1
+ mrs x10, dbgwvr5_el1
+ mrs x9, dbgwvr4_el1
+ mrs x8, dbgwvr3_el1
+ mrs x7, dbgwvr2_el1
+ mrs x6, dbgwvr1_el1
+ mrs x5, dbgwvr0_el1
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ mrs x21, mdccint_el1
+ str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+.endm
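+
+// Note on the computed branches above (explanatory, not functional): each
+// mrs/str in the lists is a 4-byte instruction, so adding the number of
+// slots to skip, shifted left by 2, to the label address jumps straight
+// past the breakpoint/watchpoint registers this CPU does not implement.
+// For example, on a CPU with six breakpoints the BRPs field reads 5, x24
+// becomes 10 and the first ten slots of each list are skipped.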
+
+.macro restore_sysregs
+ // x2: base address for cpu context
+ // x3: tmp register
+
+ add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
+
+ ldp x4, x5, [x3]
+ ldp x6, x7, [x3, #16]
+ ldp x8, x9, [x3, #32]
+ ldp x10, x11, [x3, #48]
+ ldp x12, x13, [x3, #64]
+ ldp x14, x15, [x3, #80]
+ ldp x16, x17, [x3, #96]
+ ldp x18, x19, [x3, #112]
+ ldp x20, x21, [x3, #128]
+ ldp x22, x23, [x3, #144]
+ ldp x24, x25, [x3, #160]
+
+ msr vmpidr_el2, x4
+ msr csselr_el1, x5
+ msr sctlr_el1, x6
+ msr actlr_el1, x7
+ msr cpacr_el1, x8
+ msr ttbr0_el1, x9
+ msr ttbr1_el1, x10
+ msr tcr_el1, x11
+ msr esr_el1, x12
+ msr afsr0_el1, x13
+ msr afsr1_el1, x14
+ msr far_el1, x15
+ msr mair_el1, x16
+ msr vbar_el1, x17
+ msr contextidr_el1, x18
+ msr tpidr_el0, x19
+ msr tpidrro_el0, x20
+ msr tpidr_el1, x21
+ msr amair_el1, x22
+ msr cntkctl_el1, x23
+ msr par_el1, x24
+ msr mdscr_el1, x25
+.endm
+
+.macro restore_debug
+ // x2: base address for cpu context
+ // x3: tmp register
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ msr dbgbcr15_el1, x20
+ msr dbgbcr14_el1, x19
+ msr dbgbcr13_el1, x18
+ msr dbgbcr12_el1, x17
+ msr dbgbcr11_el1, x16
+ msr dbgbcr10_el1, x15
+ msr dbgbcr9_el1, x14
+ msr dbgbcr8_el1, x13
+ msr dbgbcr7_el1, x12
+ msr dbgbcr6_el1, x11
+ msr dbgbcr5_el1, x10
+ msr dbgbcr4_el1, x9
+ msr dbgbcr3_el1, x8
+ msr dbgbcr2_el1, x7
+ msr dbgbcr1_el1, x6
+ msr dbgbcr0_el1, x5
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ msr dbgbvr15_el1, x20
+ msr dbgbvr14_el1, x19
+ msr dbgbvr13_el1, x18
+ msr dbgbvr12_el1, x17
+ msr dbgbvr11_el1, x16
+ msr dbgbvr10_el1, x15
+ msr dbgbvr9_el1, x14
+ msr dbgbvr8_el1, x13
+ msr dbgbvr7_el1, x12
+ msr dbgbvr6_el1, x11
+ msr dbgbvr5_el1, x10
+ msr dbgbvr4_el1, x9
+ msr dbgbvr3_el1, x8
+ msr dbgbvr2_el1, x7
+ msr dbgbvr1_el1, x6
+ msr dbgbvr0_el1, x5
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ msr dbgwcr15_el1, x20
+ msr dbgwcr14_el1, x19
+ msr dbgwcr13_el1, x18
+ msr dbgwcr12_el1, x17
+ msr dbgwcr11_el1, x16
+ msr dbgwcr10_el1, x15
+ msr dbgwcr9_el1, x14
+ msr dbgwcr8_el1, x13
+ msr dbgwcr7_el1, x12
+ msr dbgwcr6_el1, x11
+ msr dbgwcr5_el1, x10
+ msr dbgwcr4_el1, x9
+ msr dbgwcr3_el1, x8
+ msr dbgwcr2_el1, x7
+ msr dbgwcr1_el1, x6
+ msr dbgwcr0_el1, x5
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ msr dbgwvr15_el1, x20
+ msr dbgwvr14_el1, x19
+ msr dbgwvr13_el1, x18
+ msr dbgwvr12_el1, x17
+ msr dbgwvr11_el1, x16
+ msr dbgwvr10_el1, x15
+ msr dbgwvr9_el1, x14
+ msr dbgwvr8_el1, x13
+ msr dbgwvr7_el1, x12
+ msr dbgwvr6_el1, x11
+ msr dbgwvr5_el1, x10
+ msr dbgwvr4_el1, x9
+ msr dbgwvr3_el1, x8
+ msr dbgwvr2_el1, x7
+ msr dbgwvr1_el1, x6
+ msr dbgwvr0_el1, x5
+
+ ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+ msr mdccint_el1, x21
+.endm
+
+.macro skip_32bit_state tmp, target
+ // Skip 32bit state if not needed
+ mrs \tmp, hcr_el2
+ tbnz \tmp, #HCR_RW_SHIFT, \target
+.endm
+
+.macro skip_tee_state tmp, target
+ // Skip ThumbEE state if not needed
+ mrs \tmp, id_pfr0_el1
+ tbz \tmp, #12, \target
+.endm
+
+.macro skip_debug_state tmp, target
+ ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
+ tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
+.endm
+
+.macro compute_debug_state target
+ // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
+ // is set, we do a full save/restore cycle and disable trapping.
+ add x25, x0, #VCPU_CONTEXT
+
+ // Check the state of MDSCR_EL1
+ ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
+ and x26, x25, #DBG_MDSCR_KDE
+ and x25, x25, #DBG_MDSCR_MDE
+ adds xzr, x25, x26
+ b.eq 9998f // Nothing to see there
+
+ // If any interesting bits were set, we must set the flag
+ mov x26, #KVM_ARM64_DEBUG_DIRTY
+ str x26, [x0, #VCPU_DEBUG_FLAGS]
+ b 9999f // Don't skip restore
+
+9998:
+ // Otherwise load the flags from memory in case we recently
+ // trapped
+ skip_debug_state x25, \target
+9999:
+.endm
+
+.macro save_guest_32bit_state
+ skip_32bit_state x3, 1f
+
+ add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
+ mrs x4, spsr_abt
+ mrs x5, spsr_und
+ mrs x6, spsr_irq
+ mrs x7, spsr_fiq
+ stp x4, x5, [x3]
+ stp x6, x7, [x3, #16]
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
+ mrs x4, dacr32_el2
+ mrs x5, ifsr32_el2
+ mrs x6, fpexc32_el2
+ stp x4, x5, [x3]
+ str x6, [x3, #16]
+
+ skip_debug_state x8, 2f
+ mrs x7, dbgvcr32_el2
+ str x7, [x3, #24]
+2:
+ skip_tee_state x8, 1f
+
+ add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
+ mrs x4, teecr32_el1
+ mrs x5, teehbr32_el1
+ stp x4, x5, [x3]
+1:
+.endm
+
+.macro restore_guest_32bit_state
+ skip_32bit_state x3, 1f
+
+ add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
+ ldp x4, x5, [x3]
+ ldp x6, x7, [x3, #16]
+ msr spsr_abt, x4
+ msr spsr_und, x5
+ msr spsr_irq, x6
+ msr spsr_fiq, x7
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
+ ldp x4, x5, [x3]
+ ldr x6, [x3, #16]
+ msr dacr32_el2, x4
+ msr ifsr32_el2, x5
+ msr fpexc32_el2, x6
+
+ skip_debug_state x8, 2f
+ ldr x7, [x3, #24]
+ msr dbgvcr32_el2, x7
+2:
+ skip_tee_state x8, 1f
+
+ add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
+ ldp x4, x5, [x3]
+ msr teecr32_el1, x4
+ msr teehbr32_el1, x5
+1:
+.endm
+
+.macro activate_traps
+ ldr x2, [x0, #VCPU_HCR_EL2]
+ msr hcr_el2, x2
+ ldr x2, =(CPTR_EL2_TTA)
+ msr cptr_el2, x2
+
+ ldr x2, =(1 << 15) // Trap CP15 Cr=15
+ msr hstr_el2, x2
+
+ mrs x2, mdcr_el2
+ and x2, x2, #MDCR_EL2_HPMN_MASK
+ orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
+ orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
+
+ // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
+ // if not dirty.
+ ldr x3, [x0, #VCPU_DEBUG_FLAGS]
+ tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
+ orr x2, x2, #MDCR_EL2_TDA
+1:
+ msr mdcr_el2, x2
+.endm
+
+.macro deactivate_traps
+ mov x2, #HCR_RW
+ msr hcr_el2, x2
+ msr cptr_el2, xzr
+ msr hstr_el2, xzr
+
+ mrs x2, mdcr_el2
+ and x2, x2, #MDCR_EL2_HPMN_MASK
+ msr mdcr_el2, x2
+.endm
+
+.macro activate_vm
+ ldr x1, [x0, #VCPU_KVM]
+ kern_hyp_va x1
+ ldr x2, [x1, #KVM_VTTBR]
+ msr vttbr_el2, x2
+.endm
+
+.macro deactivate_vm
+ msr vttbr_el2, xzr
+.endm
+
+/*
+ * Call into the vgic backend for state saving
+ */
+.macro save_vgic_state
+ adr x24, __vgic_sr_vectors
+ ldr x24, [x24, VGIC_SAVE_FN]
+ kern_hyp_va x24
+ blr x24
+ mrs x24, hcr_el2
+ mov x25, #HCR_INT_OVERRIDE
+ neg x25, x25
+ and x24, x24, x25
+ msr hcr_el2, x24
+.endm
+
+/*
+ * Call into the vgic backend for state restoring
+ */
+.macro restore_vgic_state
+ mrs x24, hcr_el2
+ ldr x25, [x0, #VCPU_IRQ_LINES]
+ orr x24, x24, #HCR_INT_OVERRIDE
+ orr x24, x24, x25
+ msr hcr_el2, x24
+ adr x24, __vgic_sr_vectors
+ ldr x24, [x24, #VGIC_RESTORE_FN]
+ kern_hyp_va x24
+ blr x24
+.endm
+
+.macro save_timer_state
+ // x0: vcpu pointer
+ ldr x2, [x0, #VCPU_KVM]
+ kern_hyp_va x2
+ ldr w3, [x2, #KVM_TIMER_ENABLED]
+ cbz w3, 1f
+
+ mrs x3, cntv_ctl_el0
+ and x3, x3, #3
+ str w3, [x0, #VCPU_TIMER_CNTV_CTL]
+ bic x3, x3, #1 // Clear Enable
+ msr cntv_ctl_el0, x3
+
+ isb
+
+ mrs x3, cntv_cval_el0
+ str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
+
+1:
+ // Allow physical timer/counter access for the host
+ mrs x2, cnthctl_el2
+ orr x2, x2, #3
+ msr cnthctl_el2, x2
+
+ // Clear cntvoff for the host
+ msr cntvoff_el2, xzr
+.endm
+
+.macro restore_timer_state
+ // x0: vcpu pointer
+ // Disallow physical timer access for the guest
+ // Physical counter access is allowed
+ mrs x2, cnthctl_el2
+ orr x2, x2, #1
+ bic x2, x2, #2
+ msr cnthctl_el2, x2
+
+ ldr x2, [x0, #VCPU_KVM]
+ kern_hyp_va x2
+ ldr w3, [x2, #KVM_TIMER_ENABLED]
+ cbz w3, 1f
+
+ ldr x3, [x2, #KVM_TIMER_CNTVOFF]
+ msr cntvoff_el2, x3
+ ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL]
+ msr cntv_cval_el0, x2
+ isb
+
+ ldr w2, [x0, #VCPU_TIMER_CNTV_CTL]
+ and x2, x2, #3
+ msr cntv_ctl_el0, x2
+1:
+.endm
+
+__save_sysregs:
+ save_sysregs
+ ret
+
+__restore_sysregs:
+ restore_sysregs
+ ret
+
+__save_debug:
+ save_debug
+ ret
+
+__restore_debug:
+ restore_debug
+ ret
+
+__save_fpsimd:
+ save_fpsimd
+ ret
+
+__restore_fpsimd:
+ restore_fpsimd
+ ret
+
+/*
+ * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+ *
+ * This is the world switch. The first half of the function
+ * deals with entering the guest, and anything from __kvm_vcpu_return
+ * to the end of the function deals with reentering the host.
+ * On the enter path, only x0 (vcpu pointer) must be preserved until
+ * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
+ * code) must both be preserved until the epilogue.
+ * In both cases, x2 points to the CPU context we're saving/restoring from/to.
+ */
+ENTRY(__kvm_vcpu_run)
+ kern_hyp_va x0
+ msr tpidr_el2, x0 // Save the vcpu register
+
+ // Host context
+ ldr x2, [x0, #VCPU_HOST_CONTEXT]
+ kern_hyp_va x2
+
+ save_host_regs
+ bl __save_fpsimd
+ bl __save_sysregs
+
+ compute_debug_state 1f
+ bl __save_debug
+1:
+ activate_traps
+ activate_vm
+
+ restore_vgic_state
+ restore_timer_state
+
+ // Guest context
+ add x2, x0, #VCPU_CONTEXT
+
+ bl __restore_sysregs
+ bl __restore_fpsimd
+
+ skip_debug_state x3, 1f
+ bl __restore_debug
+1:
+ restore_guest_32bit_state
+ restore_guest_regs
+
+ // That's it, no more messing around.
+ eret
+
+__kvm_vcpu_return:
+ // Assume x0 is the vcpu pointer, x1 the return code
+ // Guest's x0-x3 are on the stack
+
+ // Guest context
+ add x2, x0, #VCPU_CONTEXT
+
+ save_guest_regs
+ bl __save_fpsimd
+ bl __save_sysregs
+
+ skip_debug_state x3, 1f
+ bl __save_debug
+1:
+ save_guest_32bit_state
+
+ save_timer_state
+ save_vgic_state
+
+ deactivate_traps
+ deactivate_vm
+
+ // Host context
+ ldr x2, [x0, #VCPU_HOST_CONTEXT]
+ kern_hyp_va x2
+
+ bl __restore_sysregs
+ bl __restore_fpsimd
+
+ skip_debug_state x3, 1f
+ // Clear the dirty flag for the next run, as all the state has
+ // already been saved. Note that we nuke the whole 64bit word.
+ // If we ever add more flags, we'll have to be more careful...
+ str xzr, [x0, #VCPU_DEBUG_FLAGS]
+ bl __restore_debug
+1:
+ restore_host_regs
+
+ mov x0, x1
+ ret
+END(__kvm_vcpu_run)
+
+// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+ENTRY(__kvm_tlb_flush_vmid_ipa)
+ dsb ishst
+
+ kern_hyp_va x0
+ ldr x2, [x0, #KVM_VTTBR]
+ msr vttbr_el2, x2
+ isb
+
+ /*
+ * We could do so much better if we had the VA as well.
+ * Instead, we invalidate Stage-2 for this IPA, and the
+ * whole of Stage-1. Weep...
+ */
+ tlbi ipas2e1is, x1
+ /*
+ * We have to ensure completion of the invalidation at Stage-2,
+ * since a table walk on another CPU could refill a TLB with a
+ * complete (S1 + S2) walk based on the old Stage-2 mapping if
+ * the Stage-1 invalidation happened first.
+ */
+ dsb ish
+ tlbi vmalle1is
+ dsb ish
+ isb
+
+ msr vttbr_el2, xzr
+ ret
+ENDPROC(__kvm_tlb_flush_vmid_ipa)
+
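+// Invalidate all guest TLB entries (all VMIDs, Inner Shareable) and the
+// entire instruction cache. Typically called when the VMID generation
+// rolls over and every VMID must be considered stale.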
+ENTRY(__kvm_flush_vm_context)
+ dsb ishst
+ tlbi alle1is
+ ic ialluis
+ dsb ish
+ ret
+ENDPROC(__kvm_flush_vm_context)
+
+	// struct vgic_sr_vectors __vgic_sr_vectors;
+ .align 3
+ENTRY(__vgic_sr_vectors)
+ .skip VGIC_SR_VECTOR_SZ
+ENDPROC(__vgic_sr_vectors)
+
+__kvm_hyp_panic:
+ // Guess the context by looking at VTTBR:
+	// If zero, then we're already in the host context.
+	// Otherwise restore a minimal host context before panicking.
+ mrs x0, vttbr_el2
+ cbz x0, 1f
+
+ mrs x0, tpidr_el2
+
+ deactivate_traps
+ deactivate_vm
+
+ ldr x2, [x0, #VCPU_HOST_CONTEXT]
+ kern_hyp_va x2
+
+ bl __restore_sysregs
+
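+	// Translate the HYP VA of the panic string into a kernel VA by
+	// subtracting HYP_PAGE_OFFSET and adding PAGE_OFFSET (both taken
+	// from the literal words at 2f below), since panic() runs with the
+	// kernel mapping once we eret to EL1.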
+1: adr x0, __hyp_panic_str
+ adr x1, 2f
+ ldp x2, x3, [x1]
+ sub x0, x0, x2
+ add x0, x0, x3
+ mrs x1, spsr_el2
+ mrs x2, elr_el2
+ mrs x3, esr_el2
+ mrs x4, far_el2
+ mrs x5, hpfar_el2
+ mrs x6, par_el1
+ mrs x7, tpidr_el2
+
+ mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
+ PSR_MODE_EL1h)
+ msr spsr_el2, lr
+ ldr lr, =panic
+ msr elr_el2, lr
+ eret
+
+ .align 3
+2: .quad HYP_PAGE_OFFSET
+ .quad PAGE_OFFSET
+ENDPROC(__kvm_hyp_panic)
+
+__hyp_panic_str:
+ .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+
+ .align 2
+
+/*
+ * u64 kvm_call_hyp(void *hypfn, ...);
+ *
+ * This is not really a variadic function in the classic C way, and care must
+ * be taken when calling it to ensure parameters are passed in registers
+ * only, since the stack will change between the caller and the callee.
+ *
+ * Call the function with the first argument containing a pointer to the
+ * function you wish to call in Hyp mode, and subsequent arguments will be
+ * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
+ * function pointer can be passed). The function being called must be mapped
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
+ * passed in x0 and x1.
+ *
+ * A function pointer with a value of 0 has a special meaning, and is
+ * used to implement __hyp_get_vectors in the same way as in
+ * arch/arm64/kernel/hyp_stub.S.
+ */
+ENTRY(kvm_call_hyp)
+ hvc #0
+ ret
+ENDPROC(kvm_call_hyp)
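+
+/*
+ * As an illustration of the convention above, the rest of the KVM code
+ * typically calls into HYP along the lines of:
+ *
+ *	kvm_call_hyp(__kvm_flush_vm_context);
+ *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+ *
+ * with the extra arguments ending up in x0/x1 as described.
+ */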
+
+.macro invalid_vector label, target
+ .align 2
+\label:
+ b \target
+ENDPROC(\label)
+.endm
+
+ /* None of these should ever happen */
+ invalid_vector el2t_sync_invalid, __kvm_hyp_panic
+ invalid_vector el2t_irq_invalid, __kvm_hyp_panic
+ invalid_vector el2t_fiq_invalid, __kvm_hyp_panic
+ invalid_vector el2t_error_invalid, __kvm_hyp_panic
+ invalid_vector el2h_sync_invalid, __kvm_hyp_panic
+ invalid_vector el2h_irq_invalid, __kvm_hyp_panic
+ invalid_vector el2h_fiq_invalid, __kvm_hyp_panic
+ invalid_vector el2h_error_invalid, __kvm_hyp_panic
+ invalid_vector el1_sync_invalid, __kvm_hyp_panic
+ invalid_vector el1_irq_invalid, __kvm_hyp_panic
+ invalid_vector el1_fiq_invalid, __kvm_hyp_panic
+ invalid_vector el1_error_invalid, __kvm_hyp_panic
+
+el1_sync: // Guest trapped into EL2
+ push x0, x1
+ push x2, x3
+
+ mrs x1, esr_el2
+ lsr x2, x1, #ESR_EL2_EC_SHIFT
+
+ cmp x2, #ESR_EL2_EC_HVC64
+ b.ne el1_trap
+
+ mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
+ cbnz x3, el1_trap // called HVC
+
+ /* Here, we're pretty sure the host called HVC. */
+ pop x2, x3
+ pop x0, x1
+
+ /* Check for __hyp_get_vectors */
+ cbnz x0, 1f
+ mrs x0, vbar_el2
+ b 2f
+
+1: push lr, xzr
+
+ /*
+ * Compute the function address in EL2, and shuffle the parameters.
+ */
+ kern_hyp_va x0
+ mov lr, x0
+ mov x0, x1
+ mov x1, x2
+ mov x2, x3
+ blr lr
+
+ pop lr, xzr
+2: eret
+
+el1_trap:
+ /*
+ * x1: ESR
+ * x2: ESR_EC
+ */
+ cmp x2, #ESR_EL2_EC_DABT
+ mov x0, #ESR_EL2_EC_IABT
+ ccmp x2, x0, #4, ne
+ b.ne 1f // Not an abort we care about
+
+ /* This is an abort. Check for permission fault */
+ and x2, x1, #ESR_EL2_FSC_TYPE
+ cmp x2, #FSC_PERM
+ b.ne 1f // Not a permission fault
+
+ /*
+ * Check for Stage-1 page table walk, which is guaranteed
+ * to give a valid HPFAR_EL2.
+ */
+ tbnz x1, #7, 1f // S1PTW is set
+
+ /* Preserve PAR_EL1 */
+ mrs x3, par_el1
+ push x3, xzr
+
+ /*
+ * Permission fault, HPFAR_EL2 is invalid.
+ * Resolve the IPA the hard way using the guest VA.
+ * Stage-1 translation already validated the memory access rights.
+ * As such, we can use the EL1 translation regime, and don't have
+ * to distinguish between EL0 and EL1 access.
+ */
+ mrs x2, far_el2
+ at s1e1r, x2
+ isb
+
+ /* Read result */
+ mrs x3, par_el1
+ pop x0, xzr // Restore PAR_EL1 from the stack
+ msr par_el1, x0
+ tbnz x3, #0, 3f // Bail out if we failed the translation
+ ubfx x3, x3, #12, #36 // Extract IPA
+ lsl x3, x3, #4 // and present it like HPFAR
+ b 2f
+
+1: mrs x3, hpfar_el2
+ mrs x2, far_el2
+
+2: mrs x0, tpidr_el2
+ str w1, [x0, #VCPU_ESR_EL2]
+ str x2, [x0, #VCPU_FAR_EL2]
+ str x3, [x0, #VCPU_HPFAR_EL2]
+
+ mov x1, #ARM_EXCEPTION_TRAP
+ b __kvm_vcpu_return
+
+ /*
+ * Translation failed. Just return to the guest and
+ * let it fault again. Another CPU is probably playing
+ * behind our back.
+ */
+3: pop x2, x3
+ pop x0, x1
+
+ eret
+
+el1_irq:
+ push x0, x1
+ push x2, x3
+ mrs x0, tpidr_el2
+ mov x1, #ARM_EXCEPTION_IRQ
+ b __kvm_vcpu_return
+
+ .ltorg
+
+ .align 11
+
+ENTRY(__kvm_hyp_vector)
+ ventry el2t_sync_invalid // Synchronous EL2t
+ ventry el2t_irq_invalid // IRQ EL2t
+ ventry el2t_fiq_invalid // FIQ EL2t
+ ventry el2t_error_invalid // Error EL2t
+
+ ventry el2h_sync_invalid // Synchronous EL2h
+ ventry el2h_irq_invalid // IRQ EL2h
+ ventry el2h_fiq_invalid // FIQ EL2h
+ ventry el2h_error_invalid // Error EL2h
+
+ ventry el1_sync // Synchronous 64-bit EL1
+ ventry el1_irq // IRQ 64-bit EL1
+ ventry el1_fiq_invalid // FIQ 64-bit EL1
+ ventry el1_error_invalid // Error 64-bit EL1
+
+ ventry el1_sync // Synchronous 32-bit EL1
+ ventry el1_irq // IRQ 32-bit EL1
+ ventry el1_fiq_invalid // FIQ 32-bit EL1
+ ventry el1_error_invalid // Error 32-bit EL1
+ENDPROC(__kvm_hyp_vector)
+
+ .popsection
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
new file mode 100644
index 000000000000..81a02a8762b0
--- /dev/null
+++ b/arch/arm64/kvm/inject_fault.c
@@ -0,0 +1,203 @@
+/*
+ * Fault injection for both 32 and 64bit guests.
+ *
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Based on arch/arm/kvm/emulate.c
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/esr.h>
+
+#define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
+ PSR_I_BIT | PSR_D_BIT)
+#define EL1_EXCEPT_SYNC_OFFSET 0x200
+
+static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+{
+ unsigned long cpsr;
+ unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+ bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+ u32 return_offset = (is_thumb) ? 4 : 0;
+ u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+
+ cpsr = mode | COMPAT_PSR_I_BIT;
+
+ if (sctlr & (1 << 30))
+ cpsr |= COMPAT_PSR_T_BIT;
+ if (sctlr & (1 << 25))
+ cpsr |= COMPAT_PSR_E_BIT;
+
+ *vcpu_cpsr(vcpu) = cpsr;
+
+ /* Note: These now point to the banked copies */
+ *vcpu_spsr(vcpu) = new_spsr_value;
+ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+
+ /* Branch to exception vector */
+ if (sctlr & (1 << 13))
+ vect_offset += 0xffff0000;
+ else /* always have security exceptions */
+ vect_offset += vcpu_cp15(vcpu, c12_VBAR);
+
+ *vcpu_pc(vcpu) = vect_offset;
+}
+
+static void inject_undef32(struct kvm_vcpu *vcpu)
+{
+ prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+}
+
+/*
+ * Modelled after the TakeDataAbortException() and TakePrefetchAbortException()
+ * pseudocode.
+ */
+static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
+ unsigned long addr)
+{
+ u32 vect_offset;
+ u32 *far, *fsr;
+ bool is_lpae;
+
+ if (is_pabt) {
+ vect_offset = 12;
+ far = &vcpu_cp15(vcpu, c6_IFAR);
+ fsr = &vcpu_cp15(vcpu, c5_IFSR);
+ } else { /* !iabt */
+ vect_offset = 16;
+ far = &vcpu_cp15(vcpu, c6_DFAR);
+ fsr = &vcpu_cp15(vcpu, c5_DFSR);
+ }
+
+ prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+
+ *far = addr;
+
+ /* Give the guest an IMPLEMENTATION DEFINED exception */
+ is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
+ if (is_lpae)
+ *fsr = 1 << 9 | 0x34;
+ else
+ *fsr = 0x14;
+}
+
+static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
+{
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ bool is_aarch32;
+ u32 esr = 0;
+
+ is_aarch32 = vcpu_mode_is_32bit(vcpu);
+
+ *vcpu_spsr(vcpu) = cpsr;
+ *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
+
+ *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+ *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
+
+ vcpu_sys_reg(vcpu, FAR_EL1) = addr;
+
+ /*
+ * Build an {i,d}abort, depending on the level and the
+ * instruction set. Report an external synchronous abort.
+ */
+ if (kvm_vcpu_trap_il_is32bit(vcpu))
+ esr |= ESR_EL1_IL;
+
+ /*
+ * Here, the guest runs in AArch64 mode when in EL1. If we get
+ * an AArch32 fault, it means we managed to trap an EL0 fault.
+ */
+ if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
+ esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
+ else
+ esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);
+
+ if (!is_iabt)
+ esr |= ESR_EL1_EC_DABT_EL0;
+
+ vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
+}
+
+static void inject_undef64(struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);
+
+ *vcpu_spsr(vcpu) = cpsr;
+ *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
+
+ *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+ *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
+
+ /*
+ * Build an unknown exception, depending on the instruction
+ * set.
+ */
+ if (kvm_vcpu_trap_il_is32bit(vcpu))
+ esr |= ESR_EL1_IL;
+
+ vcpu_sys_reg(vcpu, ESR_EL1) = esr;
+}
+
+/**
+ * kvm_inject_dabt - inject a data abort into the guest
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ inject_abt32(vcpu, false, addr);
+
+ inject_abt64(vcpu, false, addr);
+}
+
+/**
+ * kvm_inject_pabt - inject a prefetch abort into the guest
+ * @vcpu: The VCPU to receive the prefetch abort
+ * @addr: The address to report in the IFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ inject_abt32(vcpu, true, addr);
+
+ inject_abt64(vcpu, true, addr);
+}
+
+/**
+ * kvm_inject_undefined - inject an undefined instruction exception into the guest
+ * @vcpu: The VCPU to receive the undefined exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ inject_undef32(vcpu);
+
+ inject_undef64(vcpu);
+}
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
new file mode 100644
index 000000000000..bbc6ae32e4af
--- /dev/null
+++ b/arch/arm64/kvm/regmap.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/emulate.c:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/ptrace.h>
+
+#define VCPU_NR_MODES 6
+#define REG_OFFSET(_reg) \
+ (offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))
+
+#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))
+
+static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
+ /* USR Registers */
+ {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+ USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+ USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
+ REG_OFFSET(pc)
+ },
+
+ /* FIQ Registers */
+ {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7),
+ REG_OFFSET(compat_r8_fiq), /* r8 */
+ REG_OFFSET(compat_r9_fiq), /* r9 */
+ REG_OFFSET(compat_r10_fiq), /* r10 */
+ REG_OFFSET(compat_r11_fiq), /* r11 */
+ REG_OFFSET(compat_r12_fiq), /* r12 */
+ REG_OFFSET(compat_sp_fiq), /* r13 */
+ REG_OFFSET(compat_lr_fiq), /* r14 */
+ REG_OFFSET(pc)
+ },
+
+ /* IRQ Registers */
+ {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+ USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+ USR_REG_OFFSET(12),
+ REG_OFFSET(compat_sp_irq), /* r13 */
+ REG_OFFSET(compat_lr_irq), /* r14 */
+ REG_OFFSET(pc)
+ },
+
+ /* SVC Registers */
+ {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+ USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+ USR_REG_OFFSET(12),
+ REG_OFFSET(compat_sp_svc), /* r13 */
+ REG_OFFSET(compat_lr_svc), /* r14 */
+ REG_OFFSET(pc)
+ },
+
+ /* ABT Registers */
+ {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+ USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+ USR_REG_OFFSET(12),
+ REG_OFFSET(compat_sp_abt), /* r13 */
+ REG_OFFSET(compat_lr_abt), /* r14 */
+ REG_OFFSET(pc)
+ },
+
+ /* UND Registers */
+ {
+ USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+ USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+ USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+ USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+ USR_REG_OFFSET(12),
+ REG_OFFSET(compat_sp_und), /* r13 */
+ REG_OFFSET(compat_lr_und), /* r14 */
+ REG_OFFSET(pc)
+ },
+};
+
+/*
+ * Return a pointer to the AArch32 register reg_num, as banked for the
+ * current mode of the virtual CPU.
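+ * For example, in SVC mode reg 13 resolves to the banked compat_sp_svc,
+ * while regs 0-12 map to the shared user registers.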
+ */
+unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
+{
+ unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
+ unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
+
+ switch (mode) {
+ case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
+ mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
+ break;
+
+ case COMPAT_PSR_MODE_ABT:
+ mode = 4;
+ break;
+
+ case COMPAT_PSR_MODE_UND:
+ mode = 5;
+ break;
+
+ case COMPAT_PSR_MODE_SYS:
+ mode = 0; /* SYS maps to USR */
+ break;
+
+ default:
+ BUG();
+ }
+
+ return reg_array + vcpu_reg_offsets[mode][reg_num];
+}
+
+/*
+ * Return the SPSR for the current mode of the virtual CPU.
+ */
+unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu)
+{
+ unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
+ switch (mode) {
+ case COMPAT_PSR_MODE_SVC:
+ mode = KVM_SPSR_SVC;
+ break;
+ case COMPAT_PSR_MODE_ABT:
+ mode = KVM_SPSR_ABT;
+ break;
+ case COMPAT_PSR_MODE_UND:
+ mode = KVM_SPSR_UND;
+ break;
+ case COMPAT_PSR_MODE_IRQ:
+ mode = KVM_SPSR_IRQ;
+ break;
+ case COMPAT_PSR_MODE_FIQ:
+ mode = KVM_SPSR_FIQ;
+ break;
+ default:
+ BUG();
+ }
+
+ return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode];
+}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
new file mode 100644
index 000000000000..70a7816535cd
--- /dev/null
+++ b/arch/arm64/kvm/reset.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/reset.c
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/errno.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+
+#include <kvm/arm_arch_timer.h>
+
+#include <asm/cputype.h>
+#include <asm/ptrace.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_coproc.h>
+
+/*
+ * ARMv8 Reset Values
+ */
+static const struct kvm_regs default_regs_reset = {
+ .regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
+ PSR_F_BIT | PSR_D_BIT),
+};
+
+static const struct kvm_regs default_regs_reset32 = {
+ .regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
+ COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
+};
+
+static const struct kvm_irq_level default_vtimer_irq = {
+ .irq = 27,
+ .level = 1,
+};
+
+static bool cpu_has_32bit_el1(void)
+{
+ u64 pfr0;
+
+ pfr0 = read_cpuid(ID_AA64PFR0_EL1);
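+	/* ID_AA64PFR0_EL1.EL1 (bits [7:4]) reads 0b0010 when EL1 can also
+	 * run in AArch32 state, which is what bit 5 below tests for. */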
+ return !!(pfr0 & 0x20);
+}
+
+int kvm_arch_dev_ioctl_check_extension(long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_ARM_EL1_32BIT:
+ r = cpu_has_32bit_el1();
+ break;
+ default:
+ r = 0;
+ }
+
+ return r;
+}
+
+/**
+ * kvm_reset_vcpu - sets core registers and sys_regs to reset value
+ * @vcpu: The VCPU pointer
+ *
+ * This function finds the right table above and sets the registers on
+ * the virtual CPU struct to their architecturally defined reset
+ * values.
+ */
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+ const struct kvm_irq_level *cpu_vtimer_irq;
+ const struct kvm_regs *cpu_reset;
+
+ switch (vcpu->arch.target) {
+ default:
+ if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+ if (!cpu_has_32bit_el1())
+ return -EINVAL;
+ cpu_reset = &default_regs_reset32;
+ vcpu->arch.hcr_el2 &= ~HCR_RW;
+ } else {
+ cpu_reset = &default_regs_reset;
+ }
+
+ cpu_vtimer_irq = &default_vtimer_irq;
+ break;
+ }
+
+ /* Reset core registers */
+ memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));
+
+ /* Reset system registers */
+ kvm_reset_sys_regs(vcpu);
+
+ /* Reset timer */
+ kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
+
+ return 0;
+}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
new file mode 100644
index 000000000000..4cc3b719208e
--- /dev/null
+++ b/arch/arm64/kvm/sys_regs.c
@@ -0,0 +1,1528 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/coproc.c:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Rusty Russell <rusty@rustcorp.com.au>
+ * Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/debug-monitors.h>
+#include <trace/events/kvm.h>
+
+#include "sys_regs.h"
+
+/*
+ * All of this file is extremely similar to the ARM coproc.c, but the
+ * types are different. My gut feeling is that it should be pretty
+ * easy to merge, but that would be an ABI breakage -- again. VFP
+ * would also need to be abstracted.
+ *
+ * For AArch32, we only take care of what is being trapped. Anything
+ * that has to do with init and userspace access has to go via the
+ * 64bit interface.
+ */
+
+/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
+static u32 cache_levels;
+
+/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
+#define CSSELR_MAX 12
+
+/* Which cache CCSIDR represents depends on CSSELR value. */
+static u32 get_ccsidr(u32 csselr)
+{
+ u32 ccsidr;
+
+	/* Make sure no one else changes CSSELR during this! */
+ local_irq_disable();
+ /* Put value into CSSELR */
+ asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
+ isb();
+ /* Read result out of CCSIDR */
+ asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
+ local_irq_enable();
+
+ return ccsidr;
+}
+
+static void do_dc_cisw(u32 val)
+{
+ asm volatile("dc cisw, %x0" : : "r" (val));
+ dsb(ish);
+}
+
+static void do_dc_csw(u32 val)
+{
+ asm volatile("dc csw, %x0" : : "r" (val));
+ dsb(ish);
+}
+
+/* See note at ARM ARM B1.14.4 */
+static bool access_dcsw(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ unsigned long val;
+ int cpu;
+
+ if (!p->is_write)
+ return read_from_write_only(vcpu, p);
+
+ cpu = get_cpu();
+
+ cpumask_setall(&vcpu->arch.require_dcache_flush);
+ cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
+
+ /* If we were already preempted, take the long way around */
+ if (cpu != vcpu->arch.last_pcpu) {
+ flush_cache_all();
+ goto done;
+ }
+
+ val = *vcpu_reg(vcpu, p->Rt);
+
+ switch (p->CRm) {
+ case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
+ case 14: /* DCCISW */
+ do_dc_cisw(val);
+ break;
+
+ case 10: /* DCCSW */
+ do_dc_csw(val);
+ break;
+ }
+
+done:
+ put_cpu();
+
+ return true;
+}
+
+/*
+ * Generic accessor for VM registers. Only called as long as HCR_TVM
+ * is set.
+ */
+static bool access_vm_reg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ unsigned long val;
+
+ BUG_ON(!p->is_write);
+
+ val = *vcpu_reg(vcpu, p->Rt);
+ if (!p->is_aarch32) {
+ vcpu_sys_reg(vcpu, r->reg) = val;
+ } else {
+ if (!p->is_32bit)
+ vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
+ vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+ }
+
+ return true;
+}
+
+/*
+ * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
+ * guest enables the MMU, we stop trapping the VM sys_regs and leave
+ * it in complete control of the caches.
+ */
+static bool access_sctlr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ access_vm_reg(vcpu, p, r);
+
+ if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
+ vcpu->arch.hcr_el2 &= ~HCR_TVM;
+ stage2_flush_vm(vcpu->kvm);
+ }
+
+ return true;
+}
+
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+ else
+ return read_zero(vcpu, p);
+}
+
+static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ return ignore_write(vcpu, p);
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = (1 << 3);
+ return true;
+ }
+}
+
+static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ return ignore_write(vcpu, p);
+ } else {
+ u32 val;
+ asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
+ *vcpu_reg(vcpu, p->Rt) = val;
+ return true;
+ }
+}
+
+/*
+ * We want to avoid world-switching all the DBG registers all the
+ * time:
+ *
+ * - If we've touched any debug register, it is likely that we're
+ * going to touch more of them. It then makes sense to disable the
+ * traps and start doing the save/restore dance
+ * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
+ * then mandatory to save/restore the registers, as the guest
+ * depends on them.
+ *
+ * For this, we use a DIRTY bit, indicating the guest has modified the
+ * debug registers, used as follows:
+ *
+ * On guest entry:
+ * - If the dirty bit is set (because we're coming back from trapping),
+ * disable the traps, save host registers, restore guest registers.
+ * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
+ * set the dirty bit, disable the traps, save host registers,
+ * restore guest registers.
+ * - Otherwise, enable the traps
+ *
+ * On guest exit:
+ * - If the dirty bit is set, save guest registers, restore host
+ *   registers and clear the dirty bit. This ensures that the host can
+ * now use the debug registers.
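+ *
+ * As an example, the first guest write to a DBGBCRn_EL1 register traps
+ * to trap_debug_regs() below, which sets KVM_ARM64_DEBUG_DIRTY; on the
+ * next world switch the hyp code stops trapping debug accesses and
+ * switches to a full save/restore of the debug state.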
+ */
+static bool trap_debug_regs(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+ }
+
+ return true;
+}
+
+static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ u64 amair;
+
+ asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
+ vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
+}
+
+static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ /*
+ * Simply map the vcpu_id into the Aff0 field of the MPIDR.
+ */
+ vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
+}
+
+/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
+#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
+ /* DBGBVRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
+ trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \
+ /* DBGBCRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
+ trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \
+ /* DBGWVRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
+ trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \
+ /* DBGWCRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
+ trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
+
+/*
+ * Architected system registers.
+ * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
+ *
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+ *
+ * Therefore we tell the guest we have 0 counters. Unfortunately, we
+ * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
+ * all PM registers, which doesn't crash the guest kernel at least.
+ *
+ * Debug handling: We do trap most, if not all, debug-related system
+ * registers. The implementation is good enough to ensure that a guest
+ * can use these with minimal performance degradation. The drawback is
+ * that we don't implement any of the external debug nor the OSLock
+ * protocol. This should be revisited if we ever encounter a
+ * more demanding guest...
+ */
+static const struct sys_reg_desc sys_reg_descs[] = {
+ /* DC ISW */
+ { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
+ access_dcsw },
+ /* DC CSW */
+ { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
+ access_dcsw },
+ /* DC CISW */
+ { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
+ access_dcsw },
+
+ DBG_BCR_BVR_WCR_WVR_EL1(0),
+ DBG_BCR_BVR_WCR_WVR_EL1(1),
+ /* MDCCINT_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
+ trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
+ /* MDSCR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
+ trap_debug_regs, reset_val, MDSCR_EL1, 0 },
+ DBG_BCR_BVR_WCR_WVR_EL1(2),
+ DBG_BCR_BVR_WCR_WVR_EL1(3),
+ DBG_BCR_BVR_WCR_WVR_EL1(4),
+ DBG_BCR_BVR_WCR_WVR_EL1(5),
+ DBG_BCR_BVR_WCR_WVR_EL1(6),
+ DBG_BCR_BVR_WCR_WVR_EL1(7),
+ DBG_BCR_BVR_WCR_WVR_EL1(8),
+ DBG_BCR_BVR_WCR_WVR_EL1(9),
+ DBG_BCR_BVR_WCR_WVR_EL1(10),
+ DBG_BCR_BVR_WCR_WVR_EL1(11),
+ DBG_BCR_BVR_WCR_WVR_EL1(12),
+ DBG_BCR_BVR_WCR_WVR_EL1(13),
+ DBG_BCR_BVR_WCR_WVR_EL1(14),
+ DBG_BCR_BVR_WCR_WVR_EL1(15),
+
+ /* MDRAR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
+ trap_raz_wi },
+ /* OSLAR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
+ trap_raz_wi },
+ /* OSLSR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
+ trap_oslsr_el1 },
+ /* OSDLR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
+ trap_raz_wi },
+ /* DBGPRCR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
+ trap_raz_wi },
+ /* DBGCLAIMSET_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
+ trap_raz_wi },
+ /* DBGCLAIMCLR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
+ trap_raz_wi },
+ /* DBGAUTHSTATUS_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
+ trap_dbgauthstatus_el1 },
+
+ /* TEECR32_EL1 */
+ { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
+ NULL, reset_val, TEECR32_EL1, 0 },
+ /* TEEHBR32_EL1 */
+ { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
+ NULL, reset_val, TEEHBR32_EL1, 0 },
+
+ /* MDCCSR_EL1 */
+ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
+ trap_raz_wi },
+ /* DBGDTR_EL0 */
+ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
+ trap_raz_wi },
+ /* DBGDTR[TR]X_EL0 */
+ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
+ trap_raz_wi },
+
+ /* DBGVCR32_EL2 */
+ { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
+ NULL, reset_val, DBGVCR32_EL2, 0 },
+
+ /* MPIDR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
+ NULL, reset_mpidr, MPIDR_EL1 },
+ /* SCTLR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
+ access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+ /* CPACR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
+ NULL, reset_val, CPACR_EL1, 0 },
+ /* TTBR0_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
+ access_vm_reg, reset_unknown, TTBR0_EL1 },
+ /* TTBR1_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
+ access_vm_reg, reset_unknown, TTBR1_EL1 },
+ /* TCR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
+ access_vm_reg, reset_val, TCR_EL1, 0 },
+
+ /* AFSR0_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
+ access_vm_reg, reset_unknown, AFSR0_EL1 },
+ /* AFSR1_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
+ access_vm_reg, reset_unknown, AFSR1_EL1 },
+ /* ESR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
+ access_vm_reg, reset_unknown, ESR_EL1 },
+ /* FAR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
+ access_vm_reg, reset_unknown, FAR_EL1 },
+ /* PAR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+ NULL, reset_unknown, PAR_EL1 },
+
+ /* PMINTENSET_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
+ trap_raz_wi },
+ /* PMINTENCLR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
+ trap_raz_wi },
+
+ /* MAIR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
+ access_vm_reg, reset_unknown, MAIR_EL1 },
+ /* AMAIR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
+ access_vm_reg, reset_amair_el1, AMAIR_EL1 },
+
+ /* VBAR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
+ NULL, reset_val, VBAR_EL1, 0 },
+ /* CONTEXTIDR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
+ access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
+ /* TPIDR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
+ NULL, reset_unknown, TPIDR_EL1 },
+
+ /* CNTKCTL_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
+ NULL, reset_val, CNTKCTL_EL1, 0},
+
+ /* CSSELR_EL1 */
+ { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
+ NULL, reset_unknown, CSSELR_EL1 },
+
+ /* PMCR_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
+ trap_raz_wi },
+ /* PMCNTENSET_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
+ trap_raz_wi },
+ /* PMCNTENCLR_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
+ trap_raz_wi },
+ /* PMOVSCLR_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
+ trap_raz_wi },
+ /* PMSWINC_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
+ trap_raz_wi },
+ /* PMSELR_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
+ trap_raz_wi },
+ /* PMCEID0_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
+ trap_raz_wi },
+ /* PMCEID1_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
+ trap_raz_wi },
+ /* PMCCNTR_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
+ trap_raz_wi },
+ /* PMXEVTYPER_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
+ trap_raz_wi },
+ /* PMXEVCNTR_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
+ trap_raz_wi },
+ /* PMUSERENR_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
+ trap_raz_wi },
+ /* PMOVSSET_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
+ trap_raz_wi },
+
+ /* TPIDR_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
+ NULL, reset_unknown, TPIDR_EL0 },
+ /* TPIDRRO_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
+ NULL, reset_unknown, TPIDRRO_EL0 },
+
+ /* DACR32_EL2 */
+ { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
+ NULL, reset_unknown, DACR32_EL2 },
+ /* IFSR32_EL2 */
+ { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
+ NULL, reset_unknown, IFSR32_EL2 },
+ /* FPEXC32_EL2 */
+ { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
+ NULL, reset_val, FPEXC32_EL2, 0x70 },
+};
+
+static bool trap_dbgidr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ return ignore_write(vcpu, p);
+ } else {
+ u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
+ u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
+ u32 el3 = !!((pfr >> 12) & 0xf);
+
+ *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
+ (((dfr >> 12) & 0xf) << 24) |
+ (((dfr >> 28) & 0xf) << 20) |
+ (6 << 16) | (el3 << 14) | (el3 << 12));
+ return true;
+ }
+}
+
+static bool trap_debug32(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+ }
+
+ return true;
+}
+
+#define DBG_BCR_BVR_WCR_WVR(n) \
+ /* DBGBVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \
+ NULL, (cp14_DBGBVR0 + (n) * 2) }, \
+ /* DBGBCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \
+ NULL, (cp14_DBGBCR0 + (n) * 2) }, \
+ /* DBGWVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \
+ NULL, (cp14_DBGWVR0 + (n) * 2) }, \
+ /* DBGWCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \
+ NULL, (cp14_DBGWCR0 + (n) * 2) }
+
+#define DBGBXVR(n) \
+ { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \
+ NULL, cp14_DBGBXVR0 + n * 2 }
+
+/*
+ * Trapped cp14 registers. We generally ignore most of the external
+ * debug, on the principle that they don't really make sense to a
+ * guest. Revisit this one day, should this principle change.
+ */
+static const struct sys_reg_desc cp14_regs[] = {
+ /* DBGIDR */
+ { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
+ /* DBGDTRRXext */
+ { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
+
+ DBG_BCR_BVR_WCR_WVR(0),
+ /* DBGDSCRint */
+ { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
+ DBG_BCR_BVR_WCR_WVR(1),
+ /* DBGDCCINT */
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
+ /* DBGDSCRext */
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
+ DBG_BCR_BVR_WCR_WVR(2),
+ /* DBGDTR[RT]Xint */
+ { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
+ /* DBGDTR[RT]Xext */
+ { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
+ DBG_BCR_BVR_WCR_WVR(3),
+ DBG_BCR_BVR_WCR_WVR(4),
+ DBG_BCR_BVR_WCR_WVR(5),
+ /* DBGWFAR */
+ { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
+ /* DBGOSECCR */
+ { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
+ DBG_BCR_BVR_WCR_WVR(6),
+ /* DBGVCR */
+ { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
+ DBG_BCR_BVR_WCR_WVR(7),
+ DBG_BCR_BVR_WCR_WVR(8),
+ DBG_BCR_BVR_WCR_WVR(9),
+ DBG_BCR_BVR_WCR_WVR(10),
+ DBG_BCR_BVR_WCR_WVR(11),
+ DBG_BCR_BVR_WCR_WVR(12),
+ DBG_BCR_BVR_WCR_WVR(13),
+ DBG_BCR_BVR_WCR_WVR(14),
+ DBG_BCR_BVR_WCR_WVR(15),
+
+ /* DBGDRAR (32bit) */
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
+
+ DBGBXVR(0),
+ /* DBGOSLAR */
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
+ DBGBXVR(1),
+ /* DBGOSLSR */
+ { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
+ DBGBXVR(2),
+ DBGBXVR(3),
+ /* DBGOSDLR */
+ { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
+ DBGBXVR(4),
+ /* DBGPRCR */
+ { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
+ DBGBXVR(5),
+ DBGBXVR(6),
+ DBGBXVR(7),
+ DBGBXVR(8),
+ DBGBXVR(9),
+ DBGBXVR(10),
+ DBGBXVR(11),
+ DBGBXVR(12),
+ DBGBXVR(13),
+ DBGBXVR(14),
+ DBGBXVR(15),
+
+ /* DBGDSAR (32bit) */
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
+
+ /* DBGDEVID2 */
+ { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
+ /* DBGDEVID1 */
+ { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
+ /* DBGDEVID */
+ { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
+ /* DBGCLAIMSET */
+ { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
+ /* DBGCLAIMCLR */
+ { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
+ /* DBGAUTHSTATUS */
+ { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
+};
+
+/* Trapped cp14 64bit registers */
+static const struct sys_reg_desc cp14_64_regs[] = {
+ /* DBGDRAR (64bit) */
+ { Op1( 0), CRm( 1), .access = trap_raz_wi },
+
+ /* DBGDSAR (64bit) */
+ { Op1( 0), CRm( 2), .access = trap_raz_wi },
+};
+
+/*
+ * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
+ * depending on the way they are accessed (as a 32bit or a 64bit
+ * register).
+ */
+static const struct sys_reg_desc cp15_regs[] = {
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
+ { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
+ { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
+ { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
+ { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
+ { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
+ { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
+ { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
+
+ /*
+ * DC{C,I,CI}SW operations:
+ */
+ { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
+ { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
+ { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
+
+ /* PMU */
+ { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
+
+ { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
+ { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
+ { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
+ { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
+ { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+};
+
+static const struct sys_reg_desc cp15_64_regs[] = {
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+ { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
+};
+
+/* Target specific emulation tables */
+static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
+
+void kvm_register_target_sys_reg_table(unsigned int target,
+ struct kvm_sys_reg_target_table *table)
+{
+ target_tables[target] = table;
+}
+
+/* Get specific register table for this target. */
+static const struct sys_reg_desc *get_target_table(unsigned target,
+ bool mode_is_64,
+ size_t *num)
+{
+ struct kvm_sys_reg_target_table *table;
+
+ table = target_tables[target];
+ if (mode_is_64) {
+ *num = table->table64.num;
+ return table->table64.table;
+ } else {
+ *num = table->table32.num;
+ return table->table32.table;
+ }
+}
+
+static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
+ const struct sys_reg_desc table[],
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ const struct sys_reg_desc *r = &table[i];
+
+ if (params->Op0 != r->Op0)
+ continue;
+ if (params->Op1 != r->Op1)
+ continue;
+ if (params->CRn != r->CRn)
+ continue;
+ if (params->CRm != r->CRm)
+ continue;
+ if (params->Op2 != r->Op2)
+ continue;
+
+ return r;
+ }
+ return NULL;
+}
+
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+/*
+ * emulate_cp -- tries to match a sys_reg access in a handling table, and
+ * call the corresponding trap handler.
+ *
+ * @params: pointer to the descriptor of the access
+ * @table: array of trap descriptors
+ * @num: size of the trap descriptor array
+ *
+ * Return 0 if the access has been handled, and -1 if not.
+ */
+static int emulate_cp(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *params,
+ const struct sys_reg_desc *table,
+ size_t num)
+{
+ const struct sys_reg_desc *r;
+
+ if (!table)
+ return -1; /* Not handled */
+
+ r = find_reg(params, table, num);
+
+ if (r) {
+ /*
+ * Not having an accessor means that we have
+ * configured a trap that we don't know how to
+ * handle. This certainly qualifies as a gross bug
+ * that should be fixed right away.
+ */
+ BUG_ON(!r->access);
+
+ if (likely(r->access(vcpu, params, r))) {
+ /* Skip instruction, since it was emulated */
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ }
+
+ /* Handled */
+ return 0;
+ }
+
+ /* Not handled */
+ return -1;
+}
+
+static void unhandled_cp_access(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *params)
+{
+ u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+ int cp;
+
+ switch(hsr_ec) {
+ case ESR_EL2_EC_CP15_32:
+ case ESR_EL2_EC_CP15_64:
+ cp = 15;
+ break;
+ case ESR_EL2_EC_CP14_MR:
+ case ESR_EL2_EC_CP14_64:
+ cp = 14;
+ break;
+ default:
+ WARN_ON((cp = -1));
+ }
+
+ kvm_err("Unsupported guest CP%d access at: %08lx\n",
+ cp, *vcpu_pc(vcpu));
+ print_sys_reg_instr(params);
+ kvm_inject_undefined(vcpu);
+}
+
+/**
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
+ * @vcpu: The VCPU pointer
+ */
+static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *global,
+ size_t nr_global,
+ const struct sys_reg_desc *target_specific,
+ size_t nr_specific)
+{
+ struct sys_reg_params params;
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ int Rt2 = (hsr >> 10) & 0xf;
+
+ params.is_aarch32 = true;
+ params.is_32bit = false;
+ params.CRm = (hsr >> 1) & 0xf;
+ params.Rt = (hsr >> 5) & 0xf;
+ params.is_write = ((hsr & 1) == 0);
+
+ params.Op0 = 0;
+ params.Op1 = (hsr >> 16) & 0xf;
+ params.Op2 = 0;
+ params.CRn = 0;
+
+ /*
+ * Massive hack here. Store Rt2 in the top 32bits so we only
+ * have one register to deal with. As we use the same trap
+ * backends between AArch32 and AArch64, we get away with it.
+ */
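+	/*
+	 * For instance, a guest "mcrr p15, 0, r2, r3, c2" (a 64bit TTBR0
+	 * write) traps with Rt = 2 and Rt2 = 3; the backend then sees the
+	 * single 64bit value (r3 << 32) | r2 in vcpu_reg(vcpu, 2).
+	 */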
+ if (params.is_write) {
+ u64 val = *vcpu_reg(vcpu, params.Rt);
+ val &= 0xffffffff;
+ val |= *vcpu_reg(vcpu, Rt2) << 32;
+ *vcpu_reg(vcpu, params.Rt) = val;
+ }
+
+ if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
+ goto out;
+ if (!emulate_cp(vcpu, &params, global, nr_global))
+ goto out;
+
+ unhandled_cp_access(vcpu, &params);
+
+out:
+ /* Do the opposite hack for the read side */
+ if (!params.is_write) {
+ u64 val = *vcpu_reg(vcpu, params.Rt);
+ val >>= 32;
+ *vcpu_reg(vcpu, Rt2) = val;
+ }
+
+ return 1;
+}
+
+/**
+ * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
+ * @vcpu: The VCPU pointer
+ */
+static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *global,
+ size_t nr_global,
+ const struct sys_reg_desc *target_specific,
+ size_t nr_specific)
+{
+ struct sys_reg_params params;
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+ params.is_aarch32 = true;
+ params.is_32bit = true;
+ params.CRm = (hsr >> 1) & 0xf;
+ params.Rt = (hsr >> 5) & 0xf;
+ params.is_write = ((hsr & 1) == 0);
+ params.CRn = (hsr >> 10) & 0xf;
+ params.Op0 = 0;
+ params.Op1 = (hsr >> 14) & 0x7;
+ params.Op2 = (hsr >> 17) & 0x7;
+
+ if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
+ return 1;
+ if (!emulate_cp(vcpu, &params, global, nr_global))
+ return 1;
+
+ unhandled_cp_access(vcpu, &params);
+ return 1;
+}
+
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ const struct sys_reg_desc *target_specific;
+ size_t num;
+
+ target_specific = get_target_table(vcpu->arch.target, false, &num);
+ return kvm_handle_cp_64(vcpu,
+ cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
+ target_specific, num);
+}
+
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ const struct sys_reg_desc *target_specific;
+ size_t num;
+
+ target_specific = get_target_table(vcpu->arch.target, false, &num);
+ return kvm_handle_cp_32(vcpu,
+ cp15_regs, ARRAY_SIZE(cp15_regs),
+ target_specific, num);
+}
+
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ return kvm_handle_cp_64(vcpu,
+ cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
+ NULL, 0);
+}
+
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ return kvm_handle_cp_32(vcpu,
+ cp14_regs, ARRAY_SIZE(cp14_regs),
+ NULL, 0);
+}
+
+static int emulate_sys_reg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *params)
+{
+ size_t num;
+ const struct sys_reg_desc *table, *r;
+
+ table = get_target_table(vcpu->arch.target, true, &num);
+
+ /* Search target-specific then generic table. */
+ r = find_reg(params, table, num);
+ if (!r)
+ r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+
+ if (likely(r)) {
+ /*
+ * Not having an accessor means that we have
+ * configured a trap that we don't know how to
+ * handle. This certainly qualifies as a gross bug
+ * that should be fixed right away.
+ */
+ BUG_ON(!r->access);
+
+ if (likely(r->access(vcpu, params, r))) {
+ /* Skip instruction, since it was emulated */
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ return 1;
+ }
+ /* If access function fails, it should complain. */
+ } else {
+ kvm_err("Unsupported guest sys_reg access at: %lx\n",
+ *vcpu_pc(vcpu));
+ print_sys_reg_instr(params);
+ }
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *table, size_t num)
+{
+ unsigned long i;
+
+ for (i = 0; i < num; i++)
+ if (table[i].reset)
+ table[i].reset(vcpu, &table[i]);
+}
+
+/**
+ * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ struct sys_reg_params params;
+ unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+
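+	/*
+	 * ESR_EL2 ISS layout for a trapped MSR/MRS/system instruction:
+	 * Op0[21:20], Op2[19:17], Op1[16:14], CRn[13:10], Rt[9:5],
+	 * CRm[4:1] and the direction in bit 0 (0 = write, 1 = read),
+	 * which is what the field extraction below implements.
+	 */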
+ params.is_aarch32 = false;
+ params.is_32bit = false;
+ params.Op0 = (esr >> 20) & 3;
+ params.Op1 = (esr >> 14) & 0x7;
+ params.CRn = (esr >> 10) & 0xf;
+ params.CRm = (esr >> 1) & 0xf;
+ params.Op2 = (esr >> 17) & 0x7;
+ params.Rt = (esr >> 5) & 0x1f;
+ params.is_write = !(esr & 1);
+
+ return emulate_sys_reg(vcpu, &params);
+}
+
+/******************************************************************************
+ * Userspace API
+ *****************************************************************************/
+
+static bool index_to_params(u64 id, struct sys_reg_params *params)
+{
+ switch (id & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U64:
+		/* Any unused index bits mean it's not valid. */
+ if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
+ | KVM_REG_ARM_COPROC_MASK
+ | KVM_REG_ARM64_SYSREG_OP0_MASK
+ | KVM_REG_ARM64_SYSREG_OP1_MASK
+ | KVM_REG_ARM64_SYSREG_CRN_MASK
+ | KVM_REG_ARM64_SYSREG_CRM_MASK
+ | KVM_REG_ARM64_SYSREG_OP2_MASK))
+ return false;
+ params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
+ >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
+ params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
+ >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
+ params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
+ >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
+ params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
+ >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
+ params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
+ >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Decode an index value, and find the sys_reg_desc entry. */
+static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
+ u64 id)
+{
+ size_t num;
+ const struct sys_reg_desc *table, *r;
+ struct sys_reg_params params;
+
+ /* We only do sys_reg for now. */
+ if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
+ return NULL;
+
+ if (!index_to_params(id, &params))
+ return NULL;
+
+ table = get_target_table(vcpu->arch.target, true, &num);
+ r = find_reg(&params, table, num);
+ if (!r)
+ r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+
+ /* Not saved in the sys_reg array? */
+ if (r && !r->reg)
+ r = NULL;
+
+ return r;
+}
+
+/*
+ * These are the invariant sys_reg registers: we let the guest see the
+ * host versions of these, so they're part of the guest state.
+ *
+ * A future CPU may provide a mechanism to present different values to
+ * the guest, or a future kvm may trap them.
+ */
+
+#define FUNCTION_INVARIANT(reg) \
+ static void get_##reg(struct kvm_vcpu *v, \
+ const struct sys_reg_desc *r) \
+ { \
+ u64 val; \
+ \
+ asm volatile("mrs %0, " __stringify(reg) "\n" \
+ : "=r" (val)); \
+ ((struct sys_reg_desc *)r)->val = val; \
+ }
+
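+/*
+ * Each invocation below expands to a get_<reg>() helper that snapshots
+ * the corresponding host register into the descriptor's ->val (see the
+ * invariant_sys_regs table below).
+ */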
+FUNCTION_INVARIANT(midr_el1)
+FUNCTION_INVARIANT(ctr_el0)
+FUNCTION_INVARIANT(revidr_el1)
+FUNCTION_INVARIANT(id_pfr0_el1)
+FUNCTION_INVARIANT(id_pfr1_el1)
+FUNCTION_INVARIANT(id_dfr0_el1)
+FUNCTION_INVARIANT(id_afr0_el1)
+FUNCTION_INVARIANT(id_mmfr0_el1)
+FUNCTION_INVARIANT(id_mmfr1_el1)
+FUNCTION_INVARIANT(id_mmfr2_el1)
+FUNCTION_INVARIANT(id_mmfr3_el1)
+FUNCTION_INVARIANT(id_isar0_el1)
+FUNCTION_INVARIANT(id_isar1_el1)
+FUNCTION_INVARIANT(id_isar2_el1)
+FUNCTION_INVARIANT(id_isar3_el1)
+FUNCTION_INVARIANT(id_isar4_el1)
+FUNCTION_INVARIANT(id_isar5_el1)
+FUNCTION_INVARIANT(clidr_el1)
+FUNCTION_INVARIANT(aidr_el1)
+
+/* ->val is filled in by kvm_sys_reg_table_init() */
+static struct sys_reg_desc invariant_sys_regs[] = {
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
+ NULL, get_midr_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
+ NULL, get_revidr_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
+ NULL, get_id_pfr0_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
+ NULL, get_id_pfr1_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
+ NULL, get_id_dfr0_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
+ NULL, get_id_afr0_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
+ NULL, get_id_mmfr0_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
+ NULL, get_id_mmfr1_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
+ NULL, get_id_mmfr2_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
+ NULL, get_id_mmfr3_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
+ NULL, get_id_isar0_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
+ NULL, get_id_isar1_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
+ NULL, get_id_isar2_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
+ NULL, get_id_isar3_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
+ NULL, get_id_isar4_el1 },
+ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
+ NULL, get_id_isar5_el1 },
+ { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
+ NULL, get_clidr_el1 },
+ { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
+ NULL, get_aidr_el1 },
+ { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
+ NULL, get_ctr_el0 },
+};
+
+static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
+{
+ if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
+{
+ if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_invariant_sys_reg(u64 id, void __user *uaddr)
+{
+ struct sys_reg_params params;
+ const struct sys_reg_desc *r;
+
+ if (!index_to_params(id, &params))
+ return -ENOENT;
+
+ r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
+ if (!r)
+ return -ENOENT;
+
+ return reg_to_user(uaddr, &r->val, id);
+}
+
+static int set_invariant_sys_reg(u64 id, void __user *uaddr)
+{
+ struct sys_reg_params params;
+ const struct sys_reg_desc *r;
+ int err;
+ u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
+
+ if (!index_to_params(id, &params))
+ return -ENOENT;
+ r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
+ if (!r)
+ return -ENOENT;
+
+ err = reg_from_user(&val, uaddr, id);
+ if (err)
+ return err;
+
+ /* This is what we mean by invariant: you can't change it. */
+ if (r->val != val)
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool is_valid_cache(u32 val)
+{
+ u32 level, ctype;
+
+ if (val >= CSSELR_MAX)
+ return false;
+
+ /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
+ level = (val >> 1);
+ ctype = (cache_levels >> (level * 3)) & 7;
+
+ switch (ctype) {
+ case 0: /* No cache */
+ return false;
+ case 1: /* Instruction cache only */
+ return (val & 1);
+ case 2: /* Data cache only */
+ case 4: /* Unified cache */
+ return !(val & 1);
+ case 3: /* Separate instruction and data caches */
+ return true;
+ default: /* Reserved: we can't know instruction or data. */
+ return false;
+ }
+}
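An illustrative plain-C sketch of the same decode (not part of the patch): for a hypothetical CPU with separate L1 I and D caches (Ctype1 = 0b011) and a unified L2 (Ctype2 = 0b100), cache_levels is 0b100011 = 0x23 and the only valid CSSELR values are 0 (L1 D), 1 (L1 I) and 2 (L2 unified).

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of is_valid_cache(): bit 0 selects I or D, bits [3:1] the level. */
    static bool csselr_valid(unsigned int csselr, unsigned long long cache_levels)
    {
            unsigned int level = csselr >> 1;
            unsigned int ctype = (cache_levels >> (level * 3)) & 7;

            switch (ctype) {
            case 1:  return csselr & 1;        /* instruction cache only   */
            case 2:
            case 4:  return !(csselr & 1);     /* data or unified cache    */
            case 3:  return true;              /* separate I and D caches  */
            default: return false;             /* no cache / reserved      */
            }
    }

    int main(void)
    {
            unsigned int i;

            for (i = 0; i < 8; i++)            /* prints 0, 1 and 2        */
                    if (csselr_valid(i, 0x23))
                            printf("%u\n", i);
            return 0;
    }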
+
+static int demux_c15_get(u64 id, void __user *uaddr)
+{
+ u32 val;
+ u32 __user *uval = uaddr;
+
+ /* Fail if we have unknown bits set. */
+ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+ | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+ return -ENOENT;
+
+ switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
+ case KVM_REG_ARM_DEMUX_ID_CCSIDR:
+ if (KVM_REG_SIZE(id) != 4)
+ return -ENOENT;
+ val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
+ >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
+ if (!is_valid_cache(val))
+ return -ENOENT;
+
+ return put_user(get_ccsidr(val), uval);
+ default:
+ return -ENOENT;
+ }
+}
+
+static int demux_c15_set(u64 id, void __user *uaddr)
+{
+ u32 val, newval;
+ u32 __user *uval = uaddr;
+
+ /* Fail if we have unknown bits set. */
+ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+ | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+ return -ENOENT;
+
+ switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
+ case KVM_REG_ARM_DEMUX_ID_CCSIDR:
+ if (KVM_REG_SIZE(id) != 4)
+ return -ENOENT;
+ val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
+ >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
+ if (!is_valid_cache(val))
+ return -ENOENT;
+
+ if (get_user(newval, uval))
+ return -EFAULT;
+
+ /* This is also invariant: you can't change it. */
+ if (newval != get_ccsidr(val))
+ return -EINVAL;
+ return 0;
+ default:
+ return -ENOENT;
+ }
+}
+
+int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ const struct sys_reg_desc *r;
+ void __user *uaddr = (void __user *)(unsigned long)reg->addr;
+
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
+ return demux_c15_get(reg->id, uaddr);
+
+ if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
+ return -ENOENT;
+
+ r = index_to_sys_reg_desc(vcpu, reg->id);
+ if (!r)
+ return get_invariant_sys_reg(reg->id, uaddr);
+
+ return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
+}
+
+int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ const struct sys_reg_desc *r;
+ void __user *uaddr = (void __user *)(unsigned long)reg->addr;
+
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
+ return demux_c15_set(reg->id, uaddr);
+
+ if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
+ return -ENOENT;
+
+ r = index_to_sys_reg_desc(vcpu, reg->id);
+ if (!r)
+ return set_invariant_sys_reg(reg->id, uaddr);
+
+ return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
+}
+
+static unsigned int num_demux_regs(void)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < CSSELR_MAX; i++)
+ if (is_valid_cache(i))
+ count++;
+
+ return count;
+}
+
+static int write_demux_regids(u64 __user *uindices)
+{
+ u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
+ unsigned int i;
+
+ val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
+ for (i = 0; i < CSSELR_MAX; i++) {
+ if (!is_valid_cache(i))
+ continue;
+ if (put_user(val | i, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ return 0;
+}
+
+static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
+{
+ return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
+ KVM_REG_ARM64_SYSREG |
+ (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
+ (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
+ (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
+ (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
+ (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
+}
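On the userspace side, a VMM composes the same 64-bit index when calling KVM_GET_ONE_REG. A minimal sketch, assuming the uapi definitions pulled in via <linux/kvm.h> and a vcpu file descriptor obtained from KVM_CREATE_VCPU; MIDR_EL1 is Op0=3, Op1=0, CRn=0, CRm=0, Op2=0, matching the first invariant_sys_regs entry above.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Build an index with the same layout as sys_reg_to_index() above. */
    static uint64_t sysreg_index(uint64_t op0, uint64_t op1, uint64_t crn,
                                 uint64_t crm, uint64_t op2)
    {
            return KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
                   (op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
                   (op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
                   (crn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
                   (crm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
                   (op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT);
    }

    /* Read the (invariant) MIDR_EL1 value that the guest sees. */
    static int read_guest_midr(int vcpu_fd, uint64_t *midr)
    {
            struct kvm_one_reg reg = {
                    .id   = sysreg_index(3, 0, 0, 0, 0),
                    .addr = (uint64_t)(unsigned long)midr,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }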
+
+static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
+{
+ if (!*uind)
+ return true;
+
+ if (put_user(sys_reg_to_index(reg), *uind))
+ return false;
+
+ (*uind)++;
+ return true;
+}
+
+/* Assumed ordered tables, see kvm_sys_reg_table_init. */
+static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
+{
+ const struct sys_reg_desc *i1, *i2, *end1, *end2;
+ unsigned int total = 0;
+ size_t num;
+
+ /* We check for duplicates here, to allow arch-specific overrides. */
+ i1 = get_target_table(vcpu->arch.target, true, &num);
+ end1 = i1 + num;
+ i2 = sys_reg_descs;
+ end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
+
+ BUG_ON(i1 == end1 || i2 == end2);
+
+ /* Walk carefully, as both tables may refer to the same register. */
+ while (i1 || i2) {
+ int cmp = cmp_sys_reg(i1, i2);
+ /* target-specific overrides generic entry. */
+ if (cmp <= 0) {
+ /* Ignore registers we trap but don't save. */
+ if (i1->reg) {
+ if (!copy_reg_to_user(i1, &uind))
+ return -EFAULT;
+ total++;
+ }
+ } else {
+ /* Ignore registers we trap but don't save. */
+ if (i2->reg) {
+ if (!copy_reg_to_user(i2, &uind))
+ return -EFAULT;
+ total++;
+ }
+ }
+
+ if (cmp <= 0 && ++i1 == end1)
+ i1 = NULL;
+ if (cmp >= 0 && ++i2 == end2)
+ i2 = NULL;
+ }
+ return total;
+}
+
+unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
+{
+ return ARRAY_SIZE(invariant_sys_regs)
+ + num_demux_regs()
+ + walk_sys_regs(vcpu, (u64 __user *)NULL);
+}
+
+int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ unsigned int i;
+ int err;
+
+ /* Then give them all the invariant registers' indices. */
+ for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
+ if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ err = walk_sys_regs(vcpu, uindices);
+ if (err < 0)
+ return err;
+ uindices += err;
+
+ return write_demux_regids(uindices);
+}
+
+static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 1; i < n; i++) {
+ if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
+ kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+void kvm_sys_reg_table_init(void)
+{
+ unsigned int i;
+ struct sys_reg_desc clidr;
+
+ /* Make sure tables are unique and in order. */
+ BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
+ BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
+ BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
+ BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
+ BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
+ BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
+
+ /* We abuse the reset function to overwrite the table itself. */
+ for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
+ invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
+
+ /*
+ * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
+ *
+ * If software reads the Cache Type fields from Ctype1
+ * upwards, once it has seen a value of 0b000, no caches
+ * exist at further-out levels of the hierarchy. So, for
+ * example, if Ctype3 is the first Cache Type field with a
+ * value of 0b000, the values of Ctype4 to Ctype7 must be
+ * ignored.
+ */
+ get_clidr_el1(NULL, &clidr); /* Ugly... */
+ cache_levels = clidr.val;
+ for (i = 0; i < 7; i++)
+ if (((cache_levels >> (i*3)) & 7) == 0)
+ break;
+ /* Clear all higher bits. */
+ cache_levels &= (1 << (i*3))-1;
+}
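A short worked sketch of the cache_levels clean-up, using a hypothetical CLIDR value: with Ctype1 = 0b011 (separate L1 I/D), Ctype2 = 0b100 (unified L2) and Ctype3 = 0b000, the loop stops at i = 2 and the mask keeps only the two meaningful fields.

    /* Sketch for clidr = 0x0a200023 (example value). */
    static unsigned long clean_cache_levels(unsigned long clidr)
    {
            unsigned long cache_levels = clidr;
            int i;

            for (i = 0; i < 7; i++)
                    if (((cache_levels >> (i * 3)) & 7) == 0)
                            break;                          /* i = 2 here   */
            return cache_levels & ((1UL << (i * 3)) - 1);   /* -> 0x23      */
    }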
+
+/**
+ * kvm_reset_sys_regs - sets system registers to reset value
+ * @vcpu: The VCPU pointer
+ *
+ * This function finds the right table above and sets the registers on the
+ * virtual CPU struct to their architecturally defined reset values.
+ */
+void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
+{
+ size_t num;
+ const struct sys_reg_desc *table;
+
+ /* Catch someone adding a register without putting in a reset entry. */
+ memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
+
+ /* Generic chip reset first (so target could override). */
+ reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+
+ table = get_target_table(vcpu->arch.target, true, &num);
+ reset_sys_reg_descs(vcpu, table, num);
+
+ for (num = 1; num < NR_SYS_REGS; num++)
+ if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
+ panic("Didn't reset vcpu_sys_reg(%zi)", num);
+}
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
new file mode 100644
index 000000000000..d411e251412c
--- /dev/null
+++ b/arch/arm64/kvm/sys_regs.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/kvm/coproc.h
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
+#define __ARM64_KVM_SYS_REGS_LOCAL_H__
+
+struct sys_reg_params {
+ u8 Op0;
+ u8 Op1;
+ u8 CRn;
+ u8 CRm;
+ u8 Op2;
+ u8 Rt;
+ bool is_write;
+ bool is_aarch32;
+ bool is_32bit; /* Only valid if is_aarch32 is true */
+};
+
+struct sys_reg_desc {
+ /* MRS/MSR instruction which accesses it. */
+ u8 Op0;
+ u8 Op1;
+ u8 CRn;
+ u8 CRm;
+ u8 Op2;
+
+ /* Trapped access from guest, if non-NULL. */
+ bool (*access)(struct kvm_vcpu *,
+ const struct sys_reg_params *,
+ const struct sys_reg_desc *);
+
+ /* Initialization for vcpu. */
+ void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);
+
+ /* Index into sys_reg[], or 0 if we don't need to save it. */
+ int reg;
+
+ /* Value (usually reset value) */
+ u64 val;
+};
+
+static inline void print_sys_reg_instr(const struct sys_reg_params *p)
+{
+ /* Look, we even formatted it for you to paste into the table! */
+ kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
+ p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
+}
+
+static inline bool ignore_write(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p)
+{
+ return true;
+}
+
+static inline bool read_zero(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p)
+{
+ *vcpu_reg(vcpu, p->Rt) = 0;
+ return true;
+}
+
+static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *params)
+{
+ kvm_debug("sys_reg write to read-only register at: %lx\n",
+ *vcpu_pc(vcpu));
+ print_sys_reg_instr(params);
+ return false;
+}
+
+static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *params)
+{
+ kvm_debug("sys_reg read to write-only register at: %lx\n",
+ *vcpu_pc(vcpu));
+ print_sys_reg_instr(params);
+ return false;
+}
+
+/* Reset functions */
+static inline void reset_unknown(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ BUG_ON(!r->reg);
+ BUG_ON(r->reg >= NR_SYS_REGS);
+ vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
+}
+
+static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ BUG_ON(!r->reg);
+ BUG_ON(r->reg >= NR_SYS_REGS);
+ vcpu_sys_reg(vcpu, r->reg) = r->val;
+}
+
+static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
+ const struct sys_reg_desc *i2)
+{
+ BUG_ON(i1 == i2);
+ if (!i1)
+ return 1;
+ else if (!i2)
+ return -1;
+ if (i1->Op0 != i2->Op0)
+ return i1->Op0 - i2->Op0;
+ if (i1->Op1 != i2->Op1)
+ return i1->Op1 - i2->Op1;
+ if (i1->CRn != i2->CRn)
+ return i1->CRn - i2->CRn;
+ if (i1->CRm != i2->CRm)
+ return i1->CRm - i2->CRm;
+ return i1->Op2 - i2->Op2;
+}
+
+
+#define Op0(_x) .Op0 = _x
+#define Op1(_x) .Op1 = _x
+#define CRn(_x) .CRn = _x
+#define CRm(_x) .CRm = _x
+#define Op2(_x) .Op2 = _x
+
+#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
new file mode 100644
index 000000000000..475fd2929310
--- /dev/null
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Based on arch/arm/kvm/coproc_a15.c:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Rusty Russell <rusty@rustcorp.au>
+ * Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kvm_host.h>
+#include <asm/cputype.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <linux/init.h>
+
+#include "sys_regs.h"
+
+static bool access_actlr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
+ return true;
+}
+
+static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ u64 actlr;
+
+ asm volatile("mrs %0, actlr_el1\n" : "=r" (actlr));
+ vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr;
+}
+
+/*
+ * Implementation specific sys-reg registers.
+ * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
+ */
+static const struct sys_reg_desc genericv8_sys_regs[] = {
+ /* ACTLR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
+ access_actlr, reset_actlr, ACTLR_EL1 },
+};
+
+static const struct sys_reg_desc genericv8_cp15_regs[] = {
+ /* ACTLR */
+ { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
+ access_actlr },
+};
+
+static struct kvm_sys_reg_target_table genericv8_target_table = {
+ .table64 = {
+ .table = genericv8_sys_regs,
+ .num = ARRAY_SIZE(genericv8_sys_regs),
+ },
+ .table32 = {
+ .table = genericv8_cp15_regs,
+ .num = ARRAY_SIZE(genericv8_cp15_regs),
+ },
+};
+
+static int __init sys_reg_genericv8_init(void)
+{
+ unsigned int i;
+
+ for (i = 1; i < ARRAY_SIZE(genericv8_sys_regs); i++)
+ BUG_ON(cmp_sys_reg(&genericv8_sys_regs[i-1],
+ &genericv8_sys_regs[i]) >= 0);
+
+ kvm_register_target_sys_reg_table(KVM_ARM_TARGET_AEM_V8,
+ &genericv8_target_table);
+ kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOUNDATION_V8,
+ &genericv8_target_table);
+ kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A53,
+ &genericv8_target_table);
+ kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57,
+ &genericv8_target_table);
+ kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
+ &genericv8_target_table);
+
+ return 0;
+}
+late_initcall(sys_reg_genericv8_init);
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
new file mode 100644
index 000000000000..ae211772f991
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+ENTRY(__save_vgic_v2_state)
+__save_vgic_v2_state:
+ /* Get VGIC VCTRL base into x2 */
+ ldr x2, [x0, #VCPU_KVM]
+ kern_hyp_va x2
+ ldr x2, [x2, #KVM_VGIC_VCTRL]
+ kern_hyp_va x2
+ cbz x2, 2f // disabled
+
+ /* Compute the address of struct vgic_cpu */
+ add x3, x0, #VCPU_VGIC_CPU
+
+ /* Save all interesting registers */
+ ldr w4, [x2, #GICH_HCR]
+ ldr w5, [x2, #GICH_VMCR]
+ ldr w6, [x2, #GICH_MISR]
+ ldr w7, [x2, #GICH_EISR0]
+ ldr w8, [x2, #GICH_EISR1]
+ ldr w9, [x2, #GICH_ELRSR0]
+ ldr w10, [x2, #GICH_ELRSR1]
+ ldr w11, [x2, #GICH_APR]
+CPU_BE( rev w4, w4 )
+CPU_BE( rev w5, w5 )
+CPU_BE( rev w6, w6 )
+CPU_BE( rev w7, w7 )
+CPU_BE( rev w8, w8 )
+CPU_BE( rev w9, w9 )
+CPU_BE( rev w10, w10 )
+CPU_BE( rev w11, w11 )
+
+ str w4, [x3, #VGIC_V2_CPU_HCR]
+ str w5, [x3, #VGIC_V2_CPU_VMCR]
+ str w6, [x3, #VGIC_V2_CPU_MISR]
+ str w7, [x3, #VGIC_V2_CPU_EISR]
+ str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
+ str w9, [x3, #VGIC_V2_CPU_ELRSR]
+ str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+ str w11, [x3, #VGIC_V2_CPU_APR]
+
+ /* Clear GICH_HCR */
+ str wzr, [x2, #GICH_HCR]
+
+ /* Save list registers */
+ add x2, x2, #GICH_LR0
+ ldr w4, [x3, #VGIC_CPU_NR_LR]
+ add x3, x3, #VGIC_V2_CPU_LR
+1: ldr w5, [x2], #4
+CPU_BE( rev w5, w5 )
+ str w5, [x3], #4
+ sub w4, w4, #1
+ cbnz w4, 1b
+2:
+ ret
+ENDPROC(__save_vgic_v2_state)
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+ENTRY(__restore_vgic_v2_state)
+__restore_vgic_v2_state:
+ /* Get VGIC VCTRL base into x2 */
+ ldr x2, [x0, #VCPU_KVM]
+ kern_hyp_va x2
+ ldr x2, [x2, #KVM_VGIC_VCTRL]
+ kern_hyp_va x2
+ cbz x2, 2f // disabled
+
+ /* Compute the address of struct vgic_cpu */
+ add x3, x0, #VCPU_VGIC_CPU
+
+ /* We only restore a minimal set of registers */
+ ldr w4, [x3, #VGIC_V2_CPU_HCR]
+ ldr w5, [x3, #VGIC_V2_CPU_VMCR]
+ ldr w6, [x3, #VGIC_V2_CPU_APR]
+CPU_BE( rev w4, w4 )
+CPU_BE( rev w5, w5 )
+CPU_BE( rev w6, w6 )
+
+ str w4, [x2, #GICH_HCR]
+ str w5, [x2, #GICH_VMCR]
+ str w6, [x2, #GICH_APR]
+
+ /* Restore list registers */
+ add x2, x2, #GICH_LR0
+ ldr w4, [x3, #VGIC_CPU_NR_LR]
+ add x3, x3, #VGIC_V2_CPU_LR
+1: ldr w5, [x3], #4
+CPU_BE( rev w5, w5 )
+ str w5, [x2], #4
+ sub w4, w4, #1
+ cbnz w4, 1b
+2:
+ ret
+ENDPROC(__restore_vgic_v2_state)
+
+ .popsection
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
new file mode 100644
index 000000000000..d16046999e06
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+/*
+ * We store LRs in reverse order to let the CPU deal with streaming
+ * access. Use this macro to make it look saner...
+ */
+#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8)
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+.macro save_vgic_v3_state
+ // Compute the address of struct vgic_cpu
+ add x3, x0, #VCPU_VGIC_CPU
+
+ // Make sure stores to the GIC via the memory mapped interface
+ // are now visible to the system register interface
+ dsb st
+
+ // Save all interesting registers
+ mrs_s x4, ICH_HCR_EL2
+ mrs_s x5, ICH_VMCR_EL2
+ mrs_s x6, ICH_MISR_EL2
+ mrs_s x7, ICH_EISR_EL2
+ mrs_s x8, ICH_ELSR_EL2
+
+ str w4, [x3, #VGIC_V3_CPU_HCR]
+ str w5, [x3, #VGIC_V3_CPU_VMCR]
+ str w6, [x3, #VGIC_V3_CPU_MISR]
+ str w7, [x3, #VGIC_V3_CPU_EISR]
+ str w8, [x3, #VGIC_V3_CPU_ELRSR]
+
+ msr_s ICH_HCR_EL2, xzr
+
+ mrs_s x21, ICH_VTR_EL2
+ mvn w22, w21
+ ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ mrs_s x20, ICH_LR15_EL2
+ mrs_s x19, ICH_LR14_EL2
+ mrs_s x18, ICH_LR13_EL2
+ mrs_s x17, ICH_LR12_EL2
+ mrs_s x16, ICH_LR11_EL2
+ mrs_s x15, ICH_LR10_EL2
+ mrs_s x14, ICH_LR9_EL2
+ mrs_s x13, ICH_LR8_EL2
+ mrs_s x12, ICH_LR7_EL2
+ mrs_s x11, ICH_LR6_EL2
+ mrs_s x10, ICH_LR5_EL2
+ mrs_s x9, ICH_LR4_EL2
+ mrs_s x8, ICH_LR3_EL2
+ mrs_s x7, ICH_LR2_EL2
+ mrs_s x6, ICH_LR1_EL2
+ mrs_s x5, ICH_LR0_EL2
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ str x20, [x3, #LR_OFFSET(15)]
+ str x19, [x3, #LR_OFFSET(14)]
+ str x18, [x3, #LR_OFFSET(13)]
+ str x17, [x3, #LR_OFFSET(12)]
+ str x16, [x3, #LR_OFFSET(11)]
+ str x15, [x3, #LR_OFFSET(10)]
+ str x14, [x3, #LR_OFFSET(9)]
+ str x13, [x3, #LR_OFFSET(8)]
+ str x12, [x3, #LR_OFFSET(7)]
+ str x11, [x3, #LR_OFFSET(6)]
+ str x10, [x3, #LR_OFFSET(5)]
+ str x9, [x3, #LR_OFFSET(4)]
+ str x8, [x3, #LR_OFFSET(3)]
+ str x7, [x3, #LR_OFFSET(2)]
+ str x6, [x3, #LR_OFFSET(1)]
+ str x5, [x3, #LR_OFFSET(0)]
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ mrs_s x20, ICH_AP0R3_EL2
+ str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+ mrs_s x19, ICH_AP0R2_EL2
+ str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+6: mrs_s x18, ICH_AP0R1_EL2
+ str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+5: mrs_s x17, ICH_AP0R0_EL2
+ str w17, [x3, #VGIC_V3_CPU_AP0R]
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ mrs_s x20, ICH_AP1R3_EL2
+ str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+ mrs_s x19, ICH_AP1R2_EL2
+ str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+6: mrs_s x18, ICH_AP1R1_EL2
+ str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+5: mrs_s x17, ICH_AP1R0_EL2
+ str w17, [x3, #VGIC_V3_CPU_AP1R]
+
+ // Restore SRE_EL1 access and re-enable SRE at EL1.
+ mrs_s x5, ICC_SRE_EL2
+ orr x5, x5, #ICC_SRE_EL2_ENABLE
+ msr_s ICC_SRE_EL2, x5
+ isb
+ mov x5, #1
+ msr_s ICC_SRE_EL1, x5
+.endm
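The adr/br sequence above skips the save slots of unimplemented list registers. A C sketch of the offset computation, assuming ICH_VTR_EL2[4:0] holds the number of implemented list registers minus one and each table entry is a single 4-byte instruction:

    /* Byte offset into the 4-byte-per-entry LR table, as computed by mvn+ubfiz. */
    static unsigned int lr_table_skip(unsigned int ich_vtr_el2)
    {
            /* ((~vtr) & 0xf) << 2  ==  (15 - (vtr & 0xf)) * 4 */
            return ((~ich_vtr_el2) & 0xf) << 2;
    }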
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+.macro restore_vgic_v3_state
+ // Disable SRE_EL1 access. Necessary, otherwise
+ // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
+ msr_s ICC_SRE_EL1, xzr
+ isb
+
+ // Compute the address of struct vgic_cpu
+ add x3, x0, #VCPU_VGIC_CPU
+
+ // Restore all interesting registers
+ ldr w4, [x3, #VGIC_V3_CPU_HCR]
+ ldr w5, [x3, #VGIC_V3_CPU_VMCR]
+
+ msr_s ICH_HCR_EL2, x4
+ msr_s ICH_VMCR_EL2, x5
+
+ mrs_s x21, ICH_VTR_EL2
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+ msr_s ICH_AP1R3_EL2, x20
+ ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+ msr_s ICH_AP1R2_EL2, x19
+6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+ msr_s ICH_AP1R1_EL2, x18
+5: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
+ msr_s ICH_AP1R0_EL2, x17
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+ msr_s ICH_AP0R3_EL2, x20
+ ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+ msr_s ICH_AP0R2_EL2, x19
+6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+ msr_s ICH_AP0R1_EL2, x18
+5: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
+ msr_s ICH_AP0R0_EL2, x17
+
+ and w22, w21, #0xf
+ mvn w22, w21
+ ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ ldr x20, [x3, #LR_OFFSET(15)]
+ ldr x19, [x3, #LR_OFFSET(14)]
+ ldr x18, [x3, #LR_OFFSET(13)]
+ ldr x17, [x3, #LR_OFFSET(12)]
+ ldr x16, [x3, #LR_OFFSET(11)]
+ ldr x15, [x3, #LR_OFFSET(10)]
+ ldr x14, [x3, #LR_OFFSET(9)]
+ ldr x13, [x3, #LR_OFFSET(8)]
+ ldr x12, [x3, #LR_OFFSET(7)]
+ ldr x11, [x3, #LR_OFFSET(6)]
+ ldr x10, [x3, #LR_OFFSET(5)]
+ ldr x9, [x3, #LR_OFFSET(4)]
+ ldr x8, [x3, #LR_OFFSET(3)]
+ ldr x7, [x3, #LR_OFFSET(2)]
+ ldr x6, [x3, #LR_OFFSET(1)]
+ ldr x5, [x3, #LR_OFFSET(0)]
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ msr_s ICH_LR15_EL2, x20
+ msr_s ICH_LR14_EL2, x19
+ msr_s ICH_LR13_EL2, x18
+ msr_s ICH_LR12_EL2, x17
+ msr_s ICH_LR11_EL2, x16
+ msr_s ICH_LR10_EL2, x15
+ msr_s ICH_LR9_EL2, x14
+ msr_s ICH_LR8_EL2, x13
+ msr_s ICH_LR7_EL2, x12
+ msr_s ICH_LR6_EL2, x11
+ msr_s ICH_LR5_EL2, x10
+ msr_s ICH_LR4_EL2, x9
+ msr_s ICH_LR3_EL2, x8
+ msr_s ICH_LR2_EL2, x7
+ msr_s ICH_LR1_EL2, x6
+ msr_s ICH_LR0_EL2, x5
+
+ // Ensure that the above will have reached the
+ // (re)distributors. This ensures the guest will read
+ // the correct values from the memory-mapped interface.
+ isb
+ dsb sy
+
+ // Prevent the guest from touching the GIC system registers
+ mrs_s x5, ICC_SRE_EL2
+ and x5, x5, #~ICC_SRE_EL2_ENABLE
+ msr_s ICC_SRE_EL2, x5
+.endm
+
+ENTRY(__save_vgic_v3_state)
+ save_vgic_v3_state
+ ret
+ENDPROC(__save_vgic_v3_state)
+
+ENTRY(__restore_vgic_v3_state)
+ restore_vgic_v3_state
+ ret
+ENDPROC(__restore_vgic_v3_state)
+
+ENTRY(__vgic_v3_get_ich_vtr_el2)
+ mrs_s x0, ICH_VTR_EL2
+ ret
+ENDPROC(__vgic_v3_get_ich_vtr_el2)
+
+ .popsection
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 59acc0ef0462..d98d3e39879e 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,6 +1,5 @@
-lib-y := bitops.o delay.o \
- strncpy_from_user.o strnlen_user.o clear_user.o \
- copy_from_user.o copy_to_user.o copy_in_user.o \
- copy_page.o clear_page.o \
- memchr.o memcpy.o memmove.o memset.o \
+lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
+ copy_to_user.o copy_in_user.o copy_page.o \
+ clear_page.o memchr.o memcpy.o memmove.o memset.o \
+ memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
strchr.o strrchr.o
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index e5db797790d3..7dac371cc9a2 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -46,11 +46,12 @@ ENTRY( \name )
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x4, x2, x3 // Create mask
-1: ldaxr x2, [x1]
+1: ldxr x2, [x1]
lsr x0, x2, x3 // Save old value of bit
\instr x2, x2, x4 // toggle bit
stlxr w5, x2, [x1]
cbnz w5, 1b
+ dmb ish
and x0, x0, #1
3: ret
ENDPROC(\name )
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 6e0ed93d51fe..c17967fdf5f6 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 )
sub x1, x1, #2
4: adds x1, x1, #1
b.mi 5f
- strb wzr, [x0]
+USER(9f, strb wzr, [x0] )
5: mov x0, #0
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
new file mode 100644
index 000000000000..6ea0776ba6de
--- /dev/null
+++ b/arch/arm64/lib/memcmp.S
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+* compare memory areas (when the two areas' offsets differ, the
+* alignment is handled by the hardware)
+*
+* Parameters:
+* x0 - const memory area 1 pointer
+* x1 - const memory area 2 pointer
+* x2 - the maximal number of bytes to compare
+* Returns:
+* x0 - a compare result, which may be less than, equal to, or greater than ZERO
+*/
+
+/* Parameters and result. */
+src1 .req x0
+src2 .req x1
+limit .req x2
+result .req x0
+
+/* Internal variables. */
+data1 .req x3
+data1w .req w3
+data2 .req x4
+data2w .req w4
+has_nul .req x5
+diff .req x6
+endloop .req x7
+tmp1 .req x8
+tmp2 .req x9
+tmp3 .req x10
+pos .req x11
+limit_wd .req x12
+mask .req x13
+
+ENTRY(memcmp)
+ cbz limit, .Lret0
+ eor tmp1, src1, src2
+ tst tmp1, #7
+ b.ne .Lmisaligned8
+ ands tmp1, src1, #7
+ b.ne .Lmutual_align
+ sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+ lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
+ /*
+ * The input source addresses are at alignment boundary.
+ * Directly compare eight bytes each time.
+ */
+.Lloop_aligned:
+ ldr data1, [src1], #8
+ ldr data2, [src2], #8
+.Lstart_realigned:
+ subs limit_wd, limit_wd, #1
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ csinv endloop, diff, xzr, cs /* Last Dword or differences. */
+ cbz endloop, .Lloop_aligned
+
+ /* Not reached the limit, must have found a diff. */
+ tbz limit_wd, #63, .Lnot_limit
+
+ /* Limit % 8 == 0 => the diff is in the last 8 bytes. */
+ ands limit, limit, #7
+ b.eq .Lnot_limit
+ /*
+ * Fewer than 8 bytes remain. Extract the valid data from the last
+ * eight bytes of the intended memory range.
+ */
+ lsl limit, limit, #3 /* bytes-> bits. */
+ mov mask, #~0
+CPU_BE( lsr mask, mask, limit )
+CPU_LE( lsl mask, mask, limit )
+ bic data1, data1, mask
+ bic data2, data2, mask
+
+ orr diff, diff, mask
+ b .Lnot_limit
+
+.Lmutual_align:
+ /*
+ * Sources are mutually aligned, but are not currently at an
+ * alignment boundary. Round down the addresses and then mask off
+ * the bytes that precede the start point.
+ */
+ bic src1, src1, #7
+ bic src2, src2, #7
+ ldr data1, [src1], #8
+ ldr data2, [src2], #8
+ /*
+ * We cannot add the alignment offset (tmp1) to limit here, since the
+ * addition could overflow the limit.
+ */
+ sub limit_wd, limit, #1/*limit != 0, so no underflow.*/
+ and tmp3, limit_wd, #7
+ lsr limit_wd, limit_wd, #3
+ add tmp3, tmp3, tmp1
+ add limit_wd, limit_wd, tmp3, lsr #3
+ add limit, limit, tmp1/* Adjust the limit for the extra. */
+
+ lsl tmp1, tmp1, #3/* Bytes beyond alignment -> bits.*/
+ neg tmp1, tmp1/* Bits to alignment -64. */
+ mov tmp2, #~0
+ /*mask off the non-intended bytes before the start address.*/
+CPU_BE( lsl tmp2, tmp2, tmp1 )/*Big-endian.Early bytes are at MSB*/
+ /* Little-endian. Early bytes are at LSB. */
+CPU_LE( lsr tmp2, tmp2, tmp1 )
+
+ orr data1, data1, tmp2
+ orr data2, data2, tmp2
+ b .Lstart_realigned
+
+ /* src1 and src2 have different alignment offsets. */
+.Lmisaligned8:
+ cmp limit, #8
+ b.lo .Ltiny8proc /*limit < 8: compare byte by byte*/
+
+ and tmp1, src1, #7
+ neg tmp1, tmp1
+ add tmp1, tmp1, #8/*valid length in the first 8 bytes of src1*/
+ and tmp2, src2, #7
+ neg tmp2, tmp2
+ add tmp2, tmp2, #8/*valid length in the first 8 bytes of src2*/
+ subs tmp3, tmp1, tmp2
+ csel pos, tmp1, tmp2, hi /*Choose the maximum.*/
+
+ sub limit, limit, pos
+ /* compare the leading bytes in the first 8-byte segment. */
+.Ltinycmp:
+ ldrb data1w, [src1], #1
+ ldrb data2w, [src2], #1
+ subs pos, pos, #1
+ ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
+ b.eq .Ltinycmp
+ cbnz pos, 1f /*diff occurred before the last byte.*/
+ cmp data1w, data2w
+ b.eq .Lstart_align
+1:
+ sub result, data1, data2
+ ret
+
+.Lstart_align:
+ lsr limit_wd, limit, #3
+ cbz limit_wd, .Lremain8
+
+ ands xzr, src1, #7
+ b.eq .Lrecal_offset
+ /*process more leading bytes to make src1 aligned...*/
+ add src1, src1, tmp3 /*backwards src1 to alignment boundary*/
+ add src2, src2, tmp3
+ sub limit, limit, tmp3
+ lsr limit_wd, limit, #3
+ cbz limit_wd, .Lremain8
+ /*load 8 bytes from aligned SRC1..*/
+ ldr data1, [src1], #8
+ ldr data2, [src2], #8
+
+ subs limit_wd, limit_wd, #1
+ eor diff, data1, data2 /*Non-zero if differences found.*/
+ csinv endloop, diff, xzr, ne
+ cbnz endloop, .Lunequal_proc
+ /*How far is the current SRC2 from the alignment boundary...*/
+ and tmp3, tmp3, #7
+
+.Lrecal_offset:/*src1 is aligned now..*/
+ neg pos, tmp3
+.Lloopcmp_proc:
+ /*
+ * Divide the eight bytes into two parts. First, move src2 back to an
+ * alignment boundary, load eight bytes and compare from the SRC2
+ * alignment boundary. If all 8 bytes are equal, start the second
+ * part's comparison; otherwise finish the comparison.
+ * This special handling guarantees that all accesses stay within the
+ * thread/task address space, avoiding any out-of-range access.
+ */
+ ldr data1, [src1,pos]
+ ldr data2, [src2,pos]
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ cbnz diff, .Lnot_limit
+
+ /* The second part of the comparison. */
+ ldr data1, [src1], #8
+ ldr data2, [src2], #8
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ subs limit_wd, limit_wd, #1
+ csinv endloop, diff, xzr, ne /* if limit_wd is 0, the comparison will finish */
+ cbz endloop, .Lloopcmp_proc
+.Lunequal_proc:
+ cbz diff, .Lremain8
+
+/* A difference occurred in the latest comparison. */
+.Lnot_limit:
+/*
+* For little-endian, byte-reverse the data so the low-order equal bits
+* move to the MSB end; the following CLZ can then find how many equal
+* bits exist.
+*/
+CPU_LE( rev diff, diff )
+CPU_LE( rev data1, data1 )
+CPU_LE( rev data2, data2 )
+
+ /*
+ * The MS-non-zero bit of DIFF marks either the first bit
+ * that is different, or the end of the significant data.
+ * Shifting left now will bring the critical information into the
+ * top bits.
+ */
+ clz pos, diff
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /*
+ * We need to zero-extend (char is unsigned) the value and then
+ * perform a signed subtraction.
+ */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+
+.Lremain8:
+ /* Limit % 8 == 0 => all data are equal. */
+ ands limit, limit, #7
+ b.eq .Lret0
+
+.Ltiny8proc:
+ ldrb data1w, [src1], #1
+ ldrb data2w, [src2], #1
+ subs limit, limit, #1
+
+ ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
+ b.eq .Ltiny8proc
+ sub result, data1, data2
+ ret
+.Lret0:
+ mov result, #0
+ ret
+ENDPROC(memcmp)
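The result extraction at .Lnot_limit can be sketched in C for the little-endian case: byte-reversing makes the first differing memory byte the most significant one, so a count-leading-zeros of the reversed XOR locates it, and the shifted top bytes give the sign of the result. The sketch uses GCC builtins in place of the rev/clz instructions and assumes the two words actually differ.

    #include <stdint.h>

    /* data1/data2 are the 8-byte words as loaded; data1 ^ data2 must be non-zero. */
    static int memcmp_tail(uint64_t data1, uint64_t data2)
    {
            uint64_t r1 = __builtin_bswap64(data1);                 /* "rev data1" */
            uint64_t r2 = __builtin_bswap64(data2);                 /* "rev data2" */
            int pos = __builtin_clzll(__builtin_bswap64(data1 ^ data2));

            /* Shift the first differing byte to the top, then compare top bytes. */
            return (int)((r1 << pos) >> 56) - (int)((r2 << pos) >> 56);
    }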
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index 27b5003609b6..8a9a96d3ddae 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -1,5 +1,13 @@
/*
* Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,6 +24,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>
/*
* Copy a buffer from src to dest (alignment handled by the hardware)
@@ -27,27 +36,166 @@
* Returns:
* x0 - dest
*/
+dstin .req x0
+src .req x1
+count .req x2
+tmp1 .req x3
+tmp1w .req w3
+tmp2 .req x4
+tmp2w .req w4
+tmp3 .req x5
+tmp3w .req w5
+dst .req x6
+
+A_l .req x7
+A_h .req x8
+B_l .req x9
+B_h .req x10
+C_l .req x11
+C_h .req x12
+D_l .req x13
+D_h .req x14
+
ENTRY(memcpy)
- mov x4, x0
- subs x2, x2, #8
- b.mi 2f
-1: ldr x3, [x1], #8
- subs x2, x2, #8
- str x3, [x4], #8
- b.pl 1b
-2: adds x2, x2, #4
- b.mi 3f
- ldr w3, [x1], #4
- sub x2, x2, #4
- str w3, [x4], #4
-3: adds x2, x2, #2
- b.mi 4f
- ldrh w3, [x1], #2
- sub x2, x2, #2
- strh w3, [x4], #2
-4: adds x2, x2, #1
- b.mi 5f
- ldrb w3, [x1]
- strb w3, [x4]
-5: ret
+ mov dst, dstin
+ cmp count, #16
+ /* When the length is less than 16, the accesses are not aligned. */
+ b.lo .Ltiny15
+
+ neg tmp2, src
+ ands tmp2, tmp2, #15/* Bytes to reach alignment. */
+ b.eq .LSrcAligned
+ sub count, count, tmp2
+ /*
+ * Copy the leading data from src to dst in increasing address order.
+ * This eliminates the risk of overwriting the source data when the
+ * distance between src and dst is less than 16. The memory accesses
+ * here are aligned.
+ */
+ tbz tmp2, #0, 1f
+ ldrb tmp1w, [src], #1
+ strb tmp1w, [dst], #1
+1:
+ tbz tmp2, #1, 2f
+ ldrh tmp1w, [src], #2
+ strh tmp1w, [dst], #2
+2:
+ tbz tmp2, #2, 3f
+ ldr tmp1w, [src], #4
+ str tmp1w, [dst], #4
+3:
+ tbz tmp2, #3, .LSrcAligned
+ ldr tmp1, [src],#8
+ str tmp1, [dst],#8
+
+.LSrcAligned:
+ cmp count, #64
+ b.ge .Lcpy_over64
+ /*
+ * Deal with small copies quickly by dropping straight into the
+ * exit block.
+ */
+.Ltail63:
+ /*
+ * Copy up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate.
+ */
+ ands tmp1, count, #0x30
+ b.eq .Ltiny15
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp A_l, A_h, [src], #16
+ stp A_l, A_h, [dst], #16
+1:
+ ldp A_l, A_h, [src], #16
+ stp A_l, A_h, [dst], #16
+2:
+ ldp A_l, A_h, [src], #16
+ stp A_l, A_h, [dst], #16
+.Ltiny15:
+ /*
+ * Prefer to break one ldp/stp into several loads/stores that access
+ * memory in increasing address order, rather than loading/storing 16
+ * bytes from (src-16) to (dst-16) and moving src back to an aligned
+ * address, as the original cortex memcpy does. If the original memcpy
+ * process were kept here, memmove would need the precondition that the
+ * src address is at least 16 bytes above the dst address; otherwise
+ * some source data would be overwritten when memmove calls memcpy
+ * directly. To keep memmove simple and decouple it from memcpy's
+ * internals, the original process was dropped.
+ */
+ tbz count, #3, 1f
+ ldr tmp1, [src], #8
+ str tmp1, [dst], #8
+1:
+ tbz count, #2, 2f
+ ldr tmp1w, [src], #4
+ str tmp1w, [dst], #4
+2:
+ tbz count, #1, 3f
+ ldrh tmp1w, [src], #2
+ strh tmp1w, [dst], #2
+3:
+ tbz count, #0, .Lexitfunc
+ ldrb tmp1w, [src]
+ strb tmp1w, [dst]
+
+.Lexitfunc:
+ ret
+
+.Lcpy_over64:
+ subs count, count, #128
+ b.ge .Lcpy_body_large
+ /*
+ * Less than 128 bytes to copy, so handle 64 here and then jump
+ * to the tail.
+ */
+ ldp A_l, A_h, [src],#16
+ stp A_l, A_h, [dst],#16
+ ldp B_l, B_h, [src],#16
+ ldp C_l, C_h, [src],#16
+ stp B_l, B_h, [dst],#16
+ stp C_l, C_h, [dst],#16
+ ldp D_l, D_h, [src],#16
+ stp D_l, D_h, [dst],#16
+
+ tst count, #0x3f
+ b.ne .Ltail63
+ ret
+
+ /*
+ * Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line.
+ */
+ .p2align L1_CACHE_SHIFT
+.Lcpy_body_large:
+ /* pre-load 64 bytes of data. */
+ ldp A_l, A_h, [src],#16
+ ldp B_l, B_h, [src],#16
+ ldp C_l, C_h, [src],#16
+ ldp D_l, D_h, [src],#16
+1:
+ /*
+ * Interleave the load of the next 64-byte block with the store of the
+ * last loaded 64 bytes of data.
+ */
+ stp A_l, A_h, [dst],#16
+ ldp A_l, A_h, [src],#16
+ stp B_l, B_h, [dst],#16
+ ldp B_l, B_h, [src],#16
+ stp C_l, C_h, [dst],#16
+ ldp C_l, C_h, [src],#16
+ stp D_l, D_h, [dst],#16
+ ldp D_l, D_h, [src],#16
+ subs count, count, #64
+ b.ge 1b
+ stp A_l, A_h, [dst],#16
+ stp B_l, B_h, [dst],#16
+ stp C_l, C_h, [dst],#16
+ stp D_l, D_h, [dst],#16
+
+ tst count, #0x3f
+ b.ne .Ltail63
+ ret
ENDPROC(memcpy)
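The .Ltiny15 tail handling follows a simple pattern that is easier to see in C: each set bit of the residual count selects one naturally-sized copy, so at most four load/store pairs finish the job without a loop. A sketch:

    #include <stddef.h>
    #include <string.h>

    /* Copy a residue of fewer than 16 bytes, mirroring the tbz-based tail. */
    static void copy_tail15(unsigned char *dst, const unsigned char *src,
                            size_t count)
    {
            if (count & 8) { memcpy(dst, src, 8); dst += 8; src += 8; }
            if (count & 4) { memcpy(dst, src, 4); dst += 4; src += 4; }
            if (count & 2) { memcpy(dst, src, 2); dst += 2; src += 2; }
            if (count & 1) { *dst = *src; }
    }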
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
index b79fdfa42d39..57b19ea2dad4 100644
--- a/arch/arm64/lib/memmove.S
+++ b/arch/arm64/lib/memmove.S
@@ -1,5 +1,13 @@
/*
* Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,6 +24,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>
/*
* Move a buffer from src to dest (alignment handled by the hardware).
@@ -28,30 +37,161 @@
* Returns:
* x0 - dest
*/
+dstin .req x0
+src .req x1
+count .req x2
+tmp1 .req x3
+tmp1w .req w3
+tmp2 .req x4
+tmp2w .req w4
+tmp3 .req x5
+tmp3w .req w5
+dst .req x6
+
+A_l .req x7
+A_h .req x8
+B_l .req x9
+B_h .req x10
+C_l .req x11
+C_h .req x12
+D_l .req x13
+D_h .req x14
+
ENTRY(memmove)
- cmp x0, x1
- b.ls memcpy
- add x4, x0, x2
- add x1, x1, x2
- subs x2, x2, #8
- b.mi 2f
-1: ldr x3, [x1, #-8]!
- subs x2, x2, #8
- str x3, [x4, #-8]!
- b.pl 1b
-2: adds x2, x2, #4
- b.mi 3f
- ldr w3, [x1, #-4]!
- sub x2, x2, #4
- str w3, [x4, #-4]!
-3: adds x2, x2, #2
- b.mi 4f
- ldrh w3, [x1, #-2]!
- sub x2, x2, #2
- strh w3, [x4, #-2]!
-4: adds x2, x2, #1
- b.mi 5f
- ldrb w3, [x1, #-1]
- strb w3, [x4, #-1]
-5: ret
+ cmp dstin, src
+ b.lo memcpy
+ add tmp1, src, count
+ cmp dstin, tmp1
+ b.hs memcpy /* No overlap. */
+
+ add dst, dstin, count
+ add src, src, count
+ cmp count, #16
+ b.lo .Ltail15 /* probably unaligned accesses. */
+
+ ands tmp2, src, #15 /* Bytes to reach alignment. */
+ b.eq .LSrcAligned
+ sub count, count, tmp2
+ /*
+ * Process the alignment offset first so that src becomes aligned.
+ * The cost of these extra instructions is acceptable, and it makes
+ * the subsequent accesses aligned.
+ */
+ tbz tmp2, #0, 1f
+ ldrb tmp1w, [src, #-1]!
+ strb tmp1w, [dst, #-1]!
+1:
+ tbz tmp2, #1, 2f
+ ldrh tmp1w, [src, #-2]!
+ strh tmp1w, [dst, #-2]!
+2:
+ tbz tmp2, #2, 3f
+ ldr tmp1w, [src, #-4]!
+ str tmp1w, [dst, #-4]!
+3:
+ tbz tmp2, #3, .LSrcAligned
+ ldr tmp1, [src, #-8]!
+ str tmp1, [dst, #-8]!
+
+.LSrcAligned:
+ cmp count, #64
+ b.ge .Lcpy_over64
+
+ /*
+ * Deal with small copies quickly by dropping straight into the
+ * exit block.
+ */
+.Ltail63:
+ /*
+ * Copy up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate.
+ */
+ ands tmp1, count, #0x30
+ b.eq .Ltail15
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp A_l, A_h, [src, #-16]!
+ stp A_l, A_h, [dst, #-16]!
+1:
+ ldp A_l, A_h, [src, #-16]!
+ stp A_l, A_h, [dst, #-16]!
+2:
+ ldp A_l, A_h, [src, #-16]!
+ stp A_l, A_h, [dst, #-16]!
+
+.Ltail15:
+ tbz count, #3, 1f
+ ldr tmp1, [src, #-8]!
+ str tmp1, [dst, #-8]!
+1:
+ tbz count, #2, 2f
+ ldr tmp1w, [src, #-4]!
+ str tmp1w, [dst, #-4]!
+2:
+ tbz count, #1, 3f
+ ldrh tmp1w, [src, #-2]!
+ strh tmp1w, [dst, #-2]!
+3:
+ tbz count, #0, .Lexitfunc
+ ldrb tmp1w, [src, #-1]
+ strb tmp1w, [dst, #-1]
+
+.Lexitfunc:
+ ret
+
+.Lcpy_over64:
+ subs count, count, #128
+ b.ge .Lcpy_body_large
+ /*
+ * Less than 128 bytes to copy, so handle 64 bytes here and then jump
+ * to the tail.
+ */
+ ldp A_l, A_h, [src, #-16]
+ stp A_l, A_h, [dst, #-16]
+ ldp B_l, B_h, [src, #-32]
+ ldp C_l, C_h, [src, #-48]
+ stp B_l, B_h, [dst, #-32]
+ stp C_l, C_h, [dst, #-48]
+ ldp D_l, D_h, [src, #-64]!
+ stp D_l, D_h, [dst, #-64]!
+
+ tst count, #0x3f
+ b.ne .Ltail63
+ ret
+
+ /*
+ * Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line.
+ */
+ .p2align L1_CACHE_SHIFT
+.Lcpy_body_large:
+ /* pre-load 64 bytes data. */
+ ldp A_l, A_h, [src, #-16]
+ ldp B_l, B_h, [src, #-32]
+ ldp C_l, C_h, [src, #-48]
+ ldp D_l, D_h, [src, #-64]!
+1:
+ /*
+ * Interleave the load of the next 64-byte block with the store of the
+ * last loaded 64 bytes of data.
+ */
+ stp A_l, A_h, [dst, #-16]
+ ldp A_l, A_h, [src, #-16]
+ stp B_l, B_h, [dst, #-32]
+ ldp B_l, B_h, [src, #-32]
+ stp C_l, C_h, [dst, #-48]
+ ldp C_l, C_h, [src, #-48]
+ stp D_l, D_h, [dst, #-64]!
+ ldp D_l, D_h, [src, #-64]!
+ subs count, count, #64
+ b.ge 1b
+ stp A_l, A_h, [dst, #-16]
+ stp B_l, B_h, [dst, #-32]
+ stp C_l, C_h, [dst, #-48]
+ stp D_l, D_h, [dst, #-64]!
+
+ tst count, #0x3f
+ b.ne .Ltail63
+ ret
ENDPROC(memmove)
diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
index 87e4a68fbbbc..7c72dfd36b63 100644
--- a/arch/arm64/lib/memset.S
+++ b/arch/arm64/lib/memset.S
@@ -1,5 +1,13 @@
/*
* Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,6 +24,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>
/*
* Fill in the buffer with character c (alignment handled by the hardware)
@@ -27,27 +36,181 @@
* Returns:
* x0 - buf
*/
+
+dstin .req x0
+val .req w1
+count .req x2
+tmp1 .req x3
+tmp1w .req w3
+tmp2 .req x4
+tmp2w .req w4
+zva_len_x .req x5
+zva_len .req w5
+zva_bits_x .req x6
+
+A_l .req x7
+A_lw .req w7
+dst .req x8
+tmp3w .req w9
+tmp3 .req x9
+
ENTRY(memset)
- mov x4, x0
- and w1, w1, #0xff
- orr w1, w1, w1, lsl #8
- orr w1, w1, w1, lsl #16
- orr x1, x1, x1, lsl #32
- subs x2, x2, #8
- b.mi 2f
-1: str x1, [x4], #8
- subs x2, x2, #8
- b.pl 1b
-2: adds x2, x2, #4
- b.mi 3f
- sub x2, x2, #4
- str w1, [x4], #4
-3: adds x2, x2, #2
- b.mi 4f
- sub x2, x2, #2
- strh w1, [x4], #2
-4: adds x2, x2, #1
- b.mi 5f
- strb w1, [x4]
-5: ret
+ mov dst, dstin /* Preserve return value. */
+ and A_lw, val, #255
+ orr A_lw, A_lw, A_lw, lsl #8
+ orr A_lw, A_lw, A_lw, lsl #16
+ orr A_l, A_l, A_l, lsl #32
+
+ cmp count, #15
+ b.hi .Lover16_proc
+ /* All stores may be unaligned. */
+ tbz count, #3, 1f
+ str A_l, [dst], #8
+1:
+ tbz count, #2, 2f
+ str A_lw, [dst], #4
+2:
+ tbz count, #1, 3f
+ strh A_lw, [dst], #2
+3:
+ tbz count, #0, 4f
+ strb A_lw, [dst]
+4:
+ ret
+
+.Lover16_proc:
+ /* Check whether the start address is 16-byte aligned. */
+ neg tmp2, dst
+ ands tmp2, tmp2, #15
+ b.eq .Laligned
+/*
+* The count is at least 16, so we can use stp to store the first 16
+* bytes and then advance dst to a 16-byte boundary. This leaves the
+* current memory address at an alignment boundary.
+*/
+ stp A_l, A_l, [dst] /*non-aligned store..*/
+ /*make the dst aligned..*/
+ sub count, count, tmp2
+ add dst, dst, tmp2
+
+.Laligned:
+ cbz A_l, .Lzero_mem
+
+.Ltail_maybe_long:
+ cmp count, #64
+ b.ge .Lnot_short
+.Ltail63:
+ ands tmp1, count, #0x30
+ b.eq 3f
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ stp A_l, A_l, [dst], #16
+1:
+ stp A_l, A_l, [dst], #16
+2:
+ stp A_l, A_l, [dst], #16
+/*
+* The remaining length is less than 16, so use stp to write the last 16
+* bytes. This writes some bytes twice, and the access is unaligned.
+*/
+3:
+ ands count, count, #15
+ cbz count, 4f
+ add dst, dst, count
+ stp A_l, A_l, [dst, #-16] /* Repeat some/all of last store. */
+4:
+ ret
+
+ /*
+ * Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line, this ensures the entire loop is in one line.
+ */
+ .p2align L1_CACHE_SHIFT
+.Lnot_short:
+ sub dst, dst, #16/* Pre-bias. */
+ sub count, count, #64
+1:
+ stp A_l, A_l, [dst, #16]
+ stp A_l, A_l, [dst, #32]
+ stp A_l, A_l, [dst, #48]
+ stp A_l, A_l, [dst, #64]!
+ subs count, count, #64
+ b.ge 1b
+ tst count, #0x3f
+ add dst, dst, #16
+ b.ne .Ltail63
+.Lexitfunc:
+ ret
+
+ /*
+ * For zeroing memory, check to see if we can use the ZVA feature to
+ * zero entire 'cache' lines.
+ */
+.Lzero_mem:
+ cmp count, #63
+ b.le .Ltail63
+ /*
+ * For zeroing small amounts of memory, it's not worth setting up
+ * the line-clear code.
+ */
+ cmp count, #128
+ b.lt .Lnot_short /*count is at least 128 bytes*/
+
+ mrs tmp1, dczid_el0
+ tbnz tmp1, #4, .Lnot_short
+ mov tmp3w, #4
+ and zva_len, tmp1w, #15 /* Safety: other bits reserved. */
+ lsl zva_len, tmp3w, zva_len
+
+ ands tmp3w, zva_len, #63
+ /*
+ * ensure the zva_len is not less than 64.
+ * It is not meaningful to use ZVA if the block size is less than 64.
+ */
+ b.ne .Lnot_short
+.Lzero_by_line:
+ /*
+ * Compute how far we need to go to become suitably aligned. We're
+ * already at quad-word alignment.
+ */
+ cmp count, zva_len_x
+ b.lt .Lnot_short /* Not enough to reach alignment. */
+ sub zva_bits_x, zva_len_x, #1
+ neg tmp2, dst
+ ands tmp2, tmp2, zva_bits_x
+ b.eq 2f /* Already aligned. */
+ /* Not aligned, check that there's enough to copy after alignment.*/
+ sub tmp1, count, tmp2
+ /*
+ * Guarantee that the remaining length to be zeroed with ZVA is at
+ * least 64 bytes, so the block at 2f does not run past the end of
+ * the memory range.*/
+ cmp tmp1, #64
+ ccmp tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */
+ b.lt .Lnot_short
+ /*
+ * We know that there's at least 64 bytes to zero and that it's safe
+ * to overrun by 64 bytes.
+ */
+ mov count, tmp1
+1:
+ stp A_l, A_l, [dst]
+ stp A_l, A_l, [dst, #16]
+ stp A_l, A_l, [dst, #32]
+ subs tmp2, tmp2, #64
+ stp A_l, A_l, [dst, #48]
+ add dst, dst, #64
+ b.ge 1b
+ /* We've overrun a bit, so adjust dst downwards.*/
+ add dst, dst, tmp2
+2:
+ sub count, count, zva_len_x
+3:
+ dc zva, dst
+ add dst, dst, zva_len_x
+ subs count, count, zva_len_x
+ b.ge 3b
+ ands count, count, zva_bits_x
+ b.ne .Ltail_maybe_long
+ ret
ENDPROC(memset)
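The DC ZVA setup above reads DCZID_EL0. A C sketch of the decode, assuming the architectural layout where bits [3:0] (BS) give log2 of the block size in 4-byte words and bit 4 (DZP) prohibits data zeroing:

    /* Returns the DC ZVA block size in bytes, or 0 if DC ZVA must not be used. */
    static unsigned int zva_block_bytes(unsigned long dczid_el0)
    {
            if (dczid_el0 & (1UL << 4))             /* DZP set: prohibited     */
                    return 0;
            return 4U << (dczid_el0 & 0xf);         /* e.g. BS = 4 -> 64 bytes */
    }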
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
new file mode 100644
index 000000000000..42f828b06c59
--- /dev/null
+++ b/arch/arm64/lib/strcmp.S
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * compare two strings
+ *
+ * Parameters:
+ * x0 - const string 1 pointer
+ * x1 - const string 2 pointer
+ * Returns:
+ * x0 - an integer less than, equal to, or greater than zero
+ * if s1 is found, respectively, to be less than, to match,
+ * or be greater than s2.
+ */
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+#define REP8_80 0x8080808080808080
+
+/* Parameters and result. */
+src1 .req x0
+src2 .req x1
+result .req x0
+
+/* Internal variables. */
+data1 .req x2
+data1w .req w2
+data2 .req x3
+data2w .req w3
+has_nul .req x4
+diff .req x5
+syndrome .req x6
+tmp1 .req x7
+tmp2 .req x8
+tmp3 .req x9
+zeroones .req x10
+pos .req x11
+
+ENTRY(strcmp)
+ eor tmp1, src1, src2
+ mov zeroones, #REP8_01
+ tst tmp1, #7
+ b.ne .Lmisaligned8
+ ands tmp1, src1, #7
+ b.ne .Lmutual_align
+
+ /*
+ * NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ * can be done in parallel across the entire word.
+ */
+.Lloop_aligned:
+ ldr data1, [src1], #8
+ ldr data2, [src2], #8
+.Lstart_realigned:
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ orr syndrome, diff, has_nul
+ cbz syndrome, .Lloop_aligned
+ b .Lcal_cmpresult
+
+.Lmutual_align:
+ /*
+ * Sources are mutually aligned, but are not currently at an
+ * alignment boundary. Round down the addresses and then mask off
+ * the bytes that precede the start point.
+ */
+ bic src1, src1, #7
+ bic src2, src2, #7
+ lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
+ ldr data1, [src1], #8
+ neg tmp1, tmp1 /* Bits to alignment -64. */
+ ldr data2, [src2], #8
+ mov tmp2, #~0
+ /* Big-endian. Early bytes are at MSB. */
+CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+ /* Little-endian. Early bytes are at LSB. */
+CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+
+ orr data1, data1, tmp2
+ orr data2, data2, tmp2
+ b .Lstart_realigned
+
+.Lmisaligned8:
+ /*
+ * Get the alignment offsets so the leading bytes can be compared one
+ * at a time. After this process, one string's address will be aligned.
+ */
+ and tmp1, src1, #7
+ neg tmp1, tmp1
+ add tmp1, tmp1, #8
+ and tmp2, src2, #7
+ neg tmp2, tmp2
+ add tmp2, tmp2, #8
+ subs tmp3, tmp1, tmp2
+ csel pos, tmp1, tmp2, hi /*Choose the maximum. */
+.Ltinycmp:
+ ldrb data1w, [src1], #1
+ ldrb data2w, [src2], #1
+ subs pos, pos, #1
+ ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.eq .Ltinycmp
+ cbnz pos, 1f /* found a NUL or unequal byte before the last one... */
+ cmp data1w, #1
+ ccmp data1w, data2w, #0, cs
+ b.eq .Lstart_align /*the last bytes are equal....*/
+1:
+ sub result, data1, data2
+ ret
+
+.Lstart_align:
+ ands xzr, src1, #7
+ b.eq .Lrecal_offset
+ /*process more leading bytes to make str1 aligned...*/
+ add src1, src1, tmp3
+ add src2, src2, tmp3
+ /*load 8 bytes from aligned str1 and non-aligned str2..*/
+ ldr data1, [src1], #8
+ ldr data2, [src2], #8
+
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ bic has_nul, tmp1, tmp2
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ orr syndrome, diff, has_nul
+ cbnz syndrome, .Lcal_cmpresult
+ /*How far is the current str2 from the alignment boundary...*/
+ and tmp3, tmp3, #7
+.Lrecal_offset:
+ neg pos, tmp3
+.Lloopcmp_proc:
+ /*
+ * Divide the eight bytes into two parts. First, move src2 back to an
+ * alignment boundary, load eight bytes from the SRC2 alignment
+ * boundary, then compare them with the corresponding bytes from SRC1.
+ * If all 8 bytes are equal, start the second part's comparison;
+ * otherwise finish the comparison.
+ * This special handling guarantees that all accesses stay within the
+ * thread/task address space, avoiding any out-of-range access.
+ */
+ ldr data1, [src1,pos]
+ ldr data2, [src2,pos]
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ bic has_nul, tmp1, tmp2
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ orr syndrome, diff, has_nul
+ cbnz syndrome, .Lcal_cmpresult
+
+ /* The second part of the comparison. */
+ ldr data1, [src1], #8
+ ldr data2, [src2], #8
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ bic has_nul, tmp1, tmp2
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ orr syndrome, diff, has_nul
+ cbz syndrome, .Lloopcmp_proc
+
+.Lcal_cmpresult:
+ /*
+ * Reverse the byte order (to big-endian) so that CLZ on the syndrome
+ * locates the first differing or NUL byte.
+ */
+CPU_LE( rev syndrome, syndrome )
+CPU_LE( rev data1, data1 )
+CPU_LE( rev data2, data2 )
+
+ /*
+ * For big-endian we cannot use the trick with the syndrome value
+ * as carry-propagation can corrupt the upper bits if the trailing
+ * bytes in the string contain 0x01.
+ * However, if there is no NUL byte in the dword, we can generate
+ * the result directly. We cannot just subtract the bytes as the
+ * MSB might be significant.
+ */
+CPU_BE( cbnz has_nul, 1f )
+CPU_BE( cmp data1, data2 )
+CPU_BE( cset result, ne )
+CPU_BE( cneg result, result, lo )
+CPU_BE( ret )
+CPU_BE( 1: )
+ /*Re-compute the NUL-byte detection, using a byte-reversed value. */
+CPU_BE( rev tmp3, data1 )
+CPU_BE( sub tmp1, tmp3, zeroones )
+CPU_BE( orr tmp2, tmp3, #REP8_7f )
+CPU_BE( bic has_nul, tmp1, tmp2 )
+CPU_BE( rev has_nul, has_nul )
+CPU_BE( orr syndrome, diff, has_nul )
+
+ clz pos, syndrome
+ /*
+ * The MS-non-zero bit of the syndrome marks either the first bit
+ * that is different, or the top bit of the first zero byte.
+ * Shifting left now will bring the critical information into the
+ * top bits.
+ */
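+ /*
+ * Illustrative example: if the words first differ in byte 2, the rev
+ * above puts that byte in bits 47..40 of the syndrome, so pos lands
+ * in 16..23 (pos >> 3 == 2) and the shifts below move the
+ * interesting bytes to the top before the final subtraction.
+ */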
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /*
+ * But we need to zero-extend (char is unsigned) the value and then
+ * perform a signed 32-bit subtraction.
+ */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+ENDPROC(strcmp)
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
new file mode 100644
index 000000000000..987b68b9ce44
--- /dev/null
+++ b/arch/arm64/lib/strlen.S
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * calculate the length of a string
+ *
+ * Parameters:
+ * x0 - const string pointer
+ * Returns:
+ * x0 - the length of the string
+ */
+
+/* Arguments and results. */
+srcin .req x0
+len .req x0
+
+/* Locals and temporaries. */
+src .req x1
+data1 .req x2
+data2 .req x3
+data2a .req x4
+has_nul1 .req x5
+has_nul2 .req x6
+tmp1 .req x7
+tmp2 .req x8
+tmp3 .req x9
+tmp4 .req x10
+zeroones .req x11
+pos .req x12
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+#define REP8_80 0x8080808080808080
+
+ENTRY(strlen)
+ mov zeroones, #REP8_01
+ bic src, srcin, #15
+ ands tmp1, srcin, #15
+ b.ne .Lmisaligned
+ /*
+ * NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ * can be done in parallel across the entire word.
+ */
+ /*
+ * The inner loop deals with two Dwords at a time. This has a
+ * slightly higher start-up cost, but we should win quite quickly,
+ * especially on cores with a high number of issue slots per
+ * cycle, as we get much better parallelism out of the operations.
+ */
+.Lloop:
+ ldp data1, data2, [src], #16
+.Lrealigned:
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ sub tmp3, data2, zeroones
+ orr tmp4, data2, #REP8_7f
+ bic has_nul1, tmp1, tmp2
+ bics has_nul2, tmp3, tmp4
+ ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
+ b.eq .Lloop
+
+ sub len, src, srcin
+ cbz has_nul1, .Lnul_in_data2
+CPU_BE( mov data2, data1 ) /* Prepare data to re-calculate the syndrome. */
+ sub len, len, #8
+ mov has_nul2, has_nul1
+.Lnul_in_data2:
+ /*
+ * For big-endian, carry propagation (if the final byte in the
+ * string is 0x01) means we cannot use has_nul directly. The
+ * easiest way to get the correct byte is to byte-swap the data
+ * and calculate the syndrome a second time.
+ */
+CPU_BE( rev data2, data2 )
+CPU_BE( sub tmp1, data2, zeroones )
+CPU_BE( orr tmp2, data2, #REP8_7f )
+CPU_BE( bic has_nul2, tmp1, tmp2 )
+
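+ /*
+ * Illustrative example (little-endian): if the NUL is byte 3 of
+ * data2, its 0x80 marker sits at bit 31 of has_nul2; the rev below
+ * moves it to bit 39, clz returns 24, and pos >> 3 adds the
+ * remaining 3 bytes to the length.
+ */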
+ sub len, len, #8
+ rev has_nul2, has_nul2
+ clz pos, has_nul2
+ add len, len, pos, lsr #3 /* Bits to bytes. */
+ ret
+
+.Lmisaligned:
+ cmp tmp1, #8
+ neg tmp1, tmp1
+ ldp data1, data2, [src], #16
+ lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
+ mov tmp2, #~0
+ /* Big-endian. Early bytes are at MSB. */
+CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+ /* Little-endian. Early bytes are at LSB. */
+CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+
+ orr data1, data1, tmp2
+ orr data2a, data2, tmp2
+ csinv data1, data1, xzr, le
+ csel data2, data2, data2a, le
+ b .Lrealigned
+ENDPROC(strlen)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
new file mode 100644
index 000000000000..0224cf5a5533
--- /dev/null
+++ b/arch/arm64/lib/strncmp.S
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * compare two strings
+ *
+ * Parameters:
+ * x0 - const string 1 pointer
+ * x1 - const string 2 pointer
+ * x2 - the maximum number of bytes to compare
+ * Returns:
+ * x0 - an integer less than, equal to, or greater than zero if s1 is found,
+ * respectively, to be less than, to match, or be greater than s2.
+ */
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+#define REP8_80 0x8080808080808080
+
+/* Parameters and result. */
+src1 .req x0
+src2 .req x1
+limit .req x2
+result .req x0
+
+/* Internal variables. */
+data1 .req x3
+data1w .req w3
+data2 .req x4
+data2w .req w4
+has_nul .req x5
+diff .req x6
+syndrome .req x7
+tmp1 .req x8
+tmp2 .req x9
+tmp3 .req x10
+zeroones .req x11
+pos .req x12
+limit_wd .req x13
+mask .req x14
+endloop .req x15
+
+ENTRY(strncmp)
+ cbz limit, .Lret0
+ eor tmp1, src1, src2
+ mov zeroones, #REP8_01
+ tst tmp1, #7
+ b.ne .Lmisaligned8
+ ands tmp1, src1, #7
+ b.ne .Lmutual_align
+ /* Calculate the number of full and partial words -1. */
+ /*
+ * When limit is a multiple of 8, subtracting 1 first ensures that
+ * the last Dword is judged correctly.
+ */
+ sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+ lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
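+ /*
+ * Illustrative example: limit = 16 gives limit_wd = 1, so the subs
+ * in the loop below only goes negative after exactly two Dwords
+ * (16 bytes); without the "- 1" a third Dword would be examined.
+ */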
+
+ /*
+ * NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ * can be done in parallel across the entire word.
+ */
+.Lloop_aligned:
+ ldr data1, [src1], #8
+ ldr data2, [src2], #8
+.Lstart_realigned:
+ subs limit_wd, limit_wd, #1
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ csinv endloop, diff, xzr, pl /* Last Dword or differences.*/
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ ccmp endloop, #0, #0, eq
+ b.eq .Lloop_aligned
+
+ /* Not hit the limit; must have found the end or a difference. */
+ tbz limit_wd, #63, .Lnot_limit
+
+ /* Limit % 8 == 0 => all bytes significant. */
+ ands limit, limit, #7
+ b.eq .Lnot_limit
+
+ lsl limit, limit, #3 /* Bits -> bytes. */
+ mov mask, #~0
+CPU_BE( lsr mask, mask, limit )
+CPU_LE( lsl mask, mask, limit )
+ bic data1, data1, mask
+ bic data2, data2, mask
+
+ /* Make sure that the NUL byte is marked in the syndrome. */
+ orr has_nul, has_nul, mask
+
+.Lnot_limit:
+ orr syndrome, diff, has_nul
+ b .Lcal_cmpresult
+
+.Lmutual_align:
+ /*
+ * Sources are mutually aligned, but are not currently at an
+ * alignment boundary. Round down the addresses and then mask off
+ * the bytes that precede the start point.
+ * We also need to adjust the limit calculations, but without
+ * overflowing if the limit is near ULONG_MAX.
+ */
+ bic src1, src1, #7
+ bic src2, src2, #7
+ ldr data1, [src1], #8
+ neg tmp3, tmp1, lsl #3 /* 64 - bits(bytes beyond align). */
+ ldr data2, [src2], #8
+ mov tmp2, #~0
+ sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+ /* Big-endian. Early bytes are at MSB. */
+CPU_BE( lsl tmp2, tmp2, tmp3 ) /* Shift (tmp1 & 63). */
+ /* Little-endian. Early bytes are at LSB. */
+CPU_LE( lsr tmp2, tmp2, tmp3 ) /* Shift (tmp1 & 63). */
+
+ and tmp3, limit_wd, #7
+ lsr limit_wd, limit_wd, #3
+ /* Adjust the limit. Only low 3 bits used, so overflow irrelevant.*/
+ add limit, limit, tmp1
+ add tmp3, tmp3, tmp1
+ orr data1, data1, tmp2
+ orr data2, data2, tmp2
+ add limit_wd, limit_wd, tmp3, lsr #3
+ b .Lstart_realigned
+
+/* When the src1 offset is not equal to the src2 offset. */
+.Lmisaligned8:
+ cmp limit, #8
+ b.lo .Ltiny8proc /*limit < 8... */
+ /*
+ * Work out how many bytes must be compared one at a time so that,
+ * afterwards, one string's address is aligned.
+ */
+ and tmp1, src1, #7
+ neg tmp1, tmp1
+ add tmp1, tmp1, #8
+ and tmp2, src2, #7
+ neg tmp2, tmp2
+ add tmp2, tmp2, #8
+ subs tmp3, tmp1, tmp2
+ csel pos, tmp1, tmp2, hi /*Choose the maximum. */
+ /*
+ * Here, limit is not less than 8, so run .Ltinycmp directly
+ * without checking the limit.
+ */
+ sub limit, limit, pos
+.Ltinycmp:
+ ldrb data1w, [src1], #1
+ ldrb data2w, [src2], #1
+ subs pos, pos, #1
+ ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.eq .Ltinycmp
+ cbnz pos, 1f /* Found a NUL or an unequal byte. */
+ cmp data1w, #1
+ ccmp data1w, data2w, #0, cs
+ b.eq .Lstart_align /* The last bytes are equal. */
+1:
+ sub result, data1, data2
+ ret
+
+.Lstart_align:
+ lsr limit_wd, limit, #3
+ cbz limit_wd, .Lremain8
+ /* Process more leading bytes to make str1 aligned. */
+ ands xzr, src1, #7
+ b.eq .Lrecal_offset
+ add src1, src1, tmp3 /*tmp3 is positive in this branch.*/
+ add src2, src2, tmp3
+ ldr data1, [src1], #8
+ ldr data2, [src2], #8
+
+ sub limit, limit, tmp3
+ lsr limit_wd, limit, #3
+ subs limit_wd, limit_wd, #1
+
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ csinv endloop, diff, xzr, ne /* If limit_wd is 0, the comparison will finish. */
+ bics has_nul, tmp1, tmp2
+ ccmp endloop, #0, #0, eq /* has_nul is zero: no NUL byte. */
+ b.ne .Lunequal_proc
+ /* How far is the current str2 from the alignment boundary? */
+ and tmp3, tmp3, #7
+.Lrecal_offset:
+ neg pos, tmp3
+.Lloopcmp_proc:
+ /*
+ * Divide the eight bytes into two parts. First, wind src2 back to
+ * an alignment boundary, load eight bytes from that aligned SRC2
+ * address and compare them with the corresponding bytes from SRC1.
+ * If all 8 bytes are equal, start the second part of the comparison;
+ * otherwise finish the comparison here.
+ * This special handling guarantees that every access stays within
+ * the thread/task address space and never overruns the buffer.
+ */
+ ldr data1, [src1,pos]
+ ldr data2, [src2,pos]
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ csinv endloop, diff, xzr, eq
+ cbnz endloop, .Lunequal_proc
+
+ /* The second part of the comparison. */
+ ldr data1, [src1], #8
+ ldr data2, [src2], #8
+ subs limit_wd, limit_wd, #1
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ csinv endloop, diff, xzr, ne /* If limit_wd is 0, the comparison will finish. */
+ bics has_nul, tmp1, tmp2
+ ccmp endloop, #0, #0, eq /* has_nul is zero: no NUL byte. */
+ b.eq .Lloopcmp_proc
+
+.Lunequal_proc:
+ orr syndrome, diff, has_nul
+ cbz syndrome, .Lremain8
+.Lcal_cmpresult:
+ /*
+ * Reverse the byte order (to big-endian) so that CLZ on the syndrome
+ * locates the first differing or NUL byte.
+ */
+CPU_LE( rev syndrome, syndrome )
+CPU_LE( rev data1, data1 )
+CPU_LE( rev data2, data2 )
+ /*
+ * For big-endian we cannot use the trick with the syndrome value
+ * as carry-propagation can corrupt the upper bits if the trailing
+ * bytes in the string contain 0x01.
+ * However, if there is no NUL byte in the dword, we can generate
+ * the result directly. We can't just subtract the bytes as the
+ * MSB might be significant.
+ */
+CPU_BE( cbnz has_nul, 1f )
+CPU_BE( cmp data1, data2 )
+CPU_BE( cset result, ne )
+CPU_BE( cneg result, result, lo )
+CPU_BE( ret )
+CPU_BE( 1: )
+ /* Re-compute the NUL-byte detection, using a byte-reversed value.*/
+CPU_BE( rev tmp3, data1 )
+CPU_BE( sub tmp1, tmp3, zeroones )
+CPU_BE( orr tmp2, tmp3, #REP8_7f )
+CPU_BE( bic has_nul, tmp1, tmp2 )
+CPU_BE( rev has_nul, has_nul )
+CPU_BE( orr syndrome, diff, has_nul )
+ /*
+ * The MS-non-zero bit of the syndrome marks either the first bit
+ * that is different, or the top bit of the first zero byte.
+ * Shifting left now will bring the critical information into the
+ * top bits.
+ */
+ clz pos, syndrome
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /*
+ * But we need to zero-extend (char is unsigned) the value and then
+ * perform a signed 32-bit subtraction.
+ */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+
+.Lremain8:
+ /* Limit % 8 == 0 => all bytes significant. */
+ ands limit, limit, #7
+ b.eq .Lret0
+.Ltiny8proc:
+ ldrb data1w, [src1], #1
+ ldrb data2w, [src2], #1
+ subs limit, limit, #1
+
+ ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.eq .Ltiny8proc
+ sub result, data1, data2
+ ret
+
+.Lret0:
+ mov result, #0
+ ret
+ENDPROC(strncmp)
diff --git a/arch/arm64/lib/strncpy_from_user.S b/arch/arm64/lib/strncpy_from_user.S
deleted file mode 100644
index 56e448a831a0..000000000000
--- a/arch/arm64/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Based on arch/arm/lib/strncpy_from_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/*
- * Copy a string from user space to kernel space.
- * x0 = dst, x1 = src, x2 = byte length
- * returns the number of characters copied (strlen of copied string),
- * -EFAULT on exception, or "len" if we fill the whole buffer
- */
-ENTRY(__strncpy_from_user)
- mov x4, x1
-1: subs x2, x2, #1
- bmi 2f
-USER(9f, ldrb w3, [x1], #1 )
- strb w3, [x0], #1
- cbnz w3, 1b
- sub x1, x1, #1 // take NUL character out of count
-2: sub x0, x1, x4
- ret
-ENDPROC(__strncpy_from_user)
-
- .section .fixup,"ax"
- .align 0
-9: strb wzr, [x0] // null terminate
- mov x0, #-EFAULT
- ret
- .previous
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
new file mode 100644
index 000000000000..2ca665711bf2
--- /dev/null
+++ b/arch/arm64/lib/strnlen.S
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * determine the length of a fixed-size string
+ *
+ * Parameters:
+ * x0 - const string pointer
+ * x1 - maximum string length
+ * Returns:
+ * x0 - the length of the string, limited to the given maximum
+ */
+
+/* Arguments and results. */
+srcin .req x0
+len .req x0
+limit .req x1
+
+/* Locals and temporaries. */
+src .req x2
+data1 .req x3
+data2 .req x4
+data2a .req x5
+has_nul1 .req x6
+has_nul2 .req x7
+tmp1 .req x8
+tmp2 .req x9
+tmp3 .req x10
+tmp4 .req x11
+zeroones .req x12
+pos .req x13
+limit_wd .req x14
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+#define REP8_80 0x8080808080808080
+
+ENTRY(strnlen)
+ cbz limit, .Lhit_limit
+ mov zeroones, #REP8_01
+ bic src, srcin, #15
+ ands tmp1, srcin, #15
+ b.ne .Lmisaligned
+ /* Calculate the number of full and partial words -1. */
+ sub limit_wd, limit, #1 /* Limit != 0, so no underflow. */
+ lsr limit_wd, limit_wd, #4 /* Convert to Qwords. */
+
+ /*
+ * NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ * can be done in parallel across the entire word.
+ */
+ /*
+ * The inner loop deals with two Dwords at a time. This has a
+ * slightly higher start-up cost, but we should win quite quickly,
+ * especially on cores with a high number of issue slots per
+ * cycle, as we get much better parallelism out of the operations.
+ */
+.Lloop:
+ ldp data1, data2, [src], #16
+.Lrealigned:
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ sub tmp3, data2, zeroones
+ orr tmp4, data2, #REP8_7f
+ bic has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ subs limit_wd, limit_wd, #1
+ orr tmp1, has_nul1, has_nul2
+ ccmp tmp1, #0, #0, pl /* NZCV = 0000 */
+ b.eq .Lloop
+
+ cbz tmp1, .Lhit_limit /* No null in final Qword. */
+
+ /*
+ * We know there's a null in the final Qword. The easiest thing
+ * to do now is work out the length of the string and return
+ * MIN (len, limit).
+ */
+ sub len, src, srcin
+ cbz has_nul1, .Lnul_in_data2
+CPU_BE( mov data2, data1 ) /* Prepare data to re-calculate the syndrome. */
+
+ sub len, len, #8
+ mov has_nul2, has_nul1
+.Lnul_in_data2:
+ /*
+ * For big-endian, carry propagation (if the final byte in the
+ * string is 0x01) means we cannot use has_nul directly. The
+ * easiest way to get the correct byte is to byte-swap the data
+ * and calculate the syndrome a second time.
+ */
+CPU_BE( rev data2, data2 )
+CPU_BE( sub tmp1, data2, zeroones )
+CPU_BE( orr tmp2, data2, #REP8_7f )
+CPU_BE( bic has_nul2, tmp1, tmp2 )
+
+ sub len, len, #8
+ rev has_nul2, has_nul2
+ clz pos, has_nul2
+ add len, len, pos, lsr #3 /* Bits to bytes. */
+ cmp len, limit
+ csel len, len, limit, ls /* Return the lower value. */
+ ret
+
+.Lmisaligned:
+ /*
+ * Deal with a partial first word.
+ * We're doing two things in parallel here;
+ * 1) Calculate the number of words (but avoiding overflow if
+ * limit is near ULONG_MAX) - to do this we need to work out
+ * limit + tmp1 - 1 as a 65-bit value before shifting it;
+ * 2) Load and mask the initial data words - we force the bytes
+ * before the ones we are interested in to 0xff - this ensures
+ * early bytes will not hit any zero detection.
+ */
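+ /*
+ * Illustrative example: srcin at offset 5 within its 16-byte block
+ * gives tmp1 = 5 and tmp4 = -40, so the little-endian lsr below
+ * shifts the all-ones mask by (-40 & 63) = 24, leaving ones in the
+ * low five bytes; the orr forces those bytes (which precede the
+ * string) to 0xff so they cannot look like a NUL.
+ */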
+ ldp data1, data2, [src], #16
+
+ sub limit_wd, limit, #1
+ and tmp3, limit_wd, #15
+ lsr limit_wd, limit_wd, #4
+
+ add tmp3, tmp3, tmp1
+ add limit_wd, limit_wd, tmp3, lsr #4
+
+ neg tmp4, tmp1
+ lsl tmp4, tmp4, #3 /* Bytes beyond alignment -> bits. */
+
+ mov tmp2, #~0
+ /* Big-endian. Early bytes are at MSB. */
+CPU_BE( lsl tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */
+ /* Little-endian. Early bytes are at LSB. */
+CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */
+
+ cmp tmp1, #8
+
+ orr data1, data1, tmp2
+ orr data2a, data2, tmp2
+
+ csinv data1, data1, xzr, le
+ csel data2, data2, data2a, le
+ b .Lrealigned
+
+.Lhit_limit:
+ mov len, limit
+ ret
+ENDPROC(strnlen)
diff --git a/arch/arm64/lib/strnlen_user.S b/arch/arm64/lib/strnlen_user.S
deleted file mode 100644
index 7f7b176a5646..000000000000
--- a/arch/arm64/lib/strnlen_user.S
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Based on arch/arm/lib/strnlen_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/* Prototype: unsigned long __strnlen_user(const char *str, long n)
- * Purpose : get length of a string in user memory
- * Params : str - address of string in user memory
- * Returns : length of string *including terminator*
- * or zero on exception, or n if too long
- */
-ENTRY(__strnlen_user)
- mov x2, x0
-1: subs x1, x1, #1
- b.mi 2f
-USER(9f, ldrb w3, [x0], #1 )
- cbnz w3, 1b
-2: sub x0, x0, x2
- ret
-ENDPROC(__strnlen_user)
-
- .section .fixup,"ax"
- .align 0
-9: mov x0, #0
- ret
- .previous
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 3140a2abcdc2..c56179ed2c09 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,4 +1,5 @@
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
- context.o tlb.o proc.o
+ context.o proc.o pageattr.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 48a386094fa3..23663837acff 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -30,8 +30,8 @@
*
* Corrupted registers: x0-x7, x9-x11
*/
-ENTRY(__flush_dcache_all)
- dsb sy // ensure ordering with previous memory accesses
+__flush_dcache_all:
+ dmb sy // ensure ordering with previous memory accesses
mrs x0, clidr_el1 // read clidr
and x3, x0, #0x7000000 // extract loc from clidr
lsr x3, x3, #23 // left align loc bit field
@@ -128,7 +128,7 @@ USER(9f, dc cvau, x4 ) // clean D line to PoU
add x4, x4, x2
cmp x4, x1
b.lo 1b
- dsb sy
+ dsb ish
icache_line_size x2, x3
sub x3, x2, #1
@@ -139,14 +139,14 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
cmp x4, x1
b.lo 1b
9: // ignore any faulting cache operation
- dsb sy
+ dsb ish
isb
ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
/*
- * __flush_kern_dcache_page(kaddr)
+ * __flush_dcache_area(kaddr, size)
*
* Ensure that the data held in the page kaddr is written back to the
* page in question.
@@ -166,3 +166,97 @@ ENTRY(__flush_dcache_area)
dsb sy
ret
ENDPROC(__flush_dcache_area)
+
+/*
+ * __inval_cache_range(start, end)
+ * - start - start address of region
+ * - end - end address of region
+ */
+ENTRY(__inval_cache_range)
+ /* FALLTHROUGH */
+
+/*
+ * __dma_inv_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
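+/*
+ * Illustrative note: an unaligned start or end may share its cache line
+ * with adjacent data, so those lines are cleaned and invalidated
+ * (dc civac) rather than just invalidated, to avoid discarding that data.
+ */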
+__dma_inv_range:
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ tst x1, x3 // end cache line aligned?
+ bic x1, x1, x3
+ b.eq 1f
+ dc civac, x1 // clean & invalidate D / U line
+1: tst x0, x3 // start cache line aligned?
+ bic x0, x0, x3
+ b.eq 2f
+ dc civac, x0 // clean & invalidate D / U line
+ b 3f
+2: dc ivac, x0 // invalidate D / U line
+3: add x0, x0, x2
+ cmp x0, x1
+ b.lo 2b
+ dsb sy
+ ret
+ENDPROC(__inval_cache_range)
+ENDPROC(__dma_inv_range)
+
+/*
+ * __dma_clean_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+__dma_clean_range:
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc cvac, x0 // clean D / U line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__dma_clean_range)
+
+/*
+ * __dma_flush_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(__dma_flush_range)
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc civac, x0 // clean & invalidate D / U line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__dma_flush_range)
+
+/*
+ * __dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(__dma_map_area)
+ add x1, x1, x0
+ cmp w2, #DMA_FROM_DEVICE
+ b.eq __dma_inv_range
+ b __dma_clean_range
+ENDPROC(__dma_map_area)
+
+/*
+ * __dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(__dma_unmap_area)
+ add x1, x1, x0
+ cmp w2, #DMA_TO_DEVICE
+ b.ne __dma_inv_range
+ ret
+ENDPROC(__dma_unmap_area)
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 9aecbace4128..13bbc3be6f5a 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -27,8 +27,10 @@ void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
copy_page(kto, kfrom);
__flush_dcache_area(kto, PAGE_SIZE);
}
+EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
{
clear_page(kaddr);
}
+EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4bd7579ec9e6..eeb1cf3ff299 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -20,35 +20,333 @@
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
+#include <linux/amba/bus.h>
#include <asm/cacheflush.h>
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
-static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+ bool coherent)
{
- if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ return pgprot_writecombine(prot);
+ else if (!coherent)
+ return pgprot_dmacoherent(prot);
+ return prot;
+}
+
+static struct gen_pool *atomic_pool;
+
+#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+
+static int __init early_coherent_pool(char *p)
+{
+ atomic_pool_size = memparse(p, &p);
+ return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
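+/*
+ * e.g. booting with "coherent_pool=1M" on the kernel command line overrides
+ * the 256 KiB default above.
+ */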
+
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
+{
+ unsigned long val;
+ void *ptr = NULL;
+
+ if (!atomic_pool) {
+ WARN(1, "coherent pool not initialised!\n");
+ return NULL;
+ }
+
+ val = gen_pool_alloc(atomic_pool, size);
+ if (val) {
+ phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+ *ret_page = phys_to_page(phys);
+ ptr = (void *)val;
+ }
+
+ return ptr;
+}
+
+static bool __in_atomic_pool(void *start, size_t size)
+{
+ return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+}
+
+static int __free_from_pool(void *start, size_t size)
+{
+ if (!__in_atomic_pool(start, size))
+ return 0;
+
+ gen_pool_free(atomic_pool, (unsigned long)start, size);
+
+ return 1;
+}
+
+static void *__dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ if (IS_ENABLED(CONFIG_ZONE_DMA) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
- flags |= GFP_DMA32;
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ flags |= GFP_DMA;
+ if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
+ struct page *page;
+
+ size = PAGE_ALIGN(size);
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size));
+ if (!page)
+ return NULL;
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ return page_address(page);
+ } else {
+ return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ }
+}
+
+static void __dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ bool freed;
+ phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return;
+ }
+
+ freed = dma_release_from_contiguous(dev,
+ phys_to_page(paddr),
+ size >> PAGE_SHIFT);
+ if (!freed)
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
-static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ struct page *page;
+ void *ptr, *coherent_ptr;
+
+ size = PAGE_ALIGN(size);
+
+ if (!(flags & __GFP_WAIT)) {
+ struct page *page = NULL;
+ void *addr = __alloc_from_pool(size, &page);
+
+ if (addr)
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+
+ return addr;
+
+ }
+
+ ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+ if (!ptr)
+ goto no_mem;
+
+ /* remove any dirty cache lines on the kernel alias */
+ __dma_flush_range(ptr, ptr + size);
+
+ /* create a coherent mapping */
+ page = virt_to_page(ptr);
+ coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+ __get_dma_pgprot(attrs,
+ __pgprot(PROT_NORMAL_NC), false),
+ NULL);
+ if (!coherent_ptr)
+ goto no_map;
+
+ return coherent_ptr;
+
+no_map:
+ __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+no_mem:
+ *dma_handle = ~0;
+ return NULL;
+}
+
+static void __dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+
+ if (__free_from_pool(vaddr, size))
+ return;
+ vunmap(vaddr);
+ __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+}
+
+static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t dev_addr;
+
+ dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+
+ return dev_addr;
+}
+
+
+static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i, ret;
+
+ ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
+ for_each_sg(sgl, sg, ret, i)
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+
+ return ret;
+}
+
+static void __swiotlb_unmap_sg_attrs(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+ swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
-static struct dma_map_ops arm64_swiotlb_dma_ops = {
- .alloc = arm64_swiotlb_alloc_coherent,
- .free = arm64_swiotlb_free_coherent,
+static void __swiotlb_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
+}
+
+static void __swiotlb_sync_single_for_device(struct device *dev,
+ dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+}
+
+static void __swiotlb_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+ swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
+}
+
+static void __swiotlb_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+}
+
+/* vma->vm_page_prot must be set appropriately before calling this function */
+static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ int ret = -ENXIO;
+ unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
+ PAGE_SHIFT;
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
+ unsigned long off = vma->vm_pgoff;
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
+static int __swiotlb_mmap_noncoherent(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
+ return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+static int __swiotlb_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ /* Just use whatever page_prot attributes were specified */
+ return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+struct dma_map_ops noncoherent_swiotlb_dma_ops = {
+ .alloc = __dma_alloc_noncoherent,
+ .free = __dma_free_noncoherent,
+ .mmap = __swiotlb_mmap_noncoherent,
+ .map_page = __swiotlb_map_page,
+ .unmap_page = __swiotlb_unmap_page,
+ .map_sg = __swiotlb_map_sg_attrs,
+ .unmap_sg = __swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = __swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = __swiotlb_sync_sg_for_device,
+ .dma_supported = swiotlb_dma_supported,
+ .mapping_error = swiotlb_dma_mapping_error,
+};
+EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
+
+struct dma_map_ops coherent_swiotlb_dma_ops = {
+ .alloc = __dma_alloc_coherent,
+ .free = __dma_free_coherent,
+ .mmap = __swiotlb_mmap_coherent,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = swiotlb_map_sg_attrs,
@@ -60,12 +358,118 @@ static struct dma_map_ops arm64_swiotlb_dma_ops = {
.dma_supported = swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
};
+EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
+
+static int dma_bus_notifier(struct notifier_block *nb,
+ unsigned long event, void *_dev)
+{
+ struct device *dev = _dev;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ if (of_property_read_bool(dev->of_node, "dma-coherent"))
+ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+
+ return NOTIFY_OK;
+}
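+/*
+ * Illustrative note: a device whose DT node carries the standard boolean
+ * property
+ *
+ *	dma-coherent;
+ *
+ * is given the cache-coherent DMA ops above when it is added to its bus.
+ */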
+
+static struct notifier_block platform_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
+static struct notifier_block amba_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
+extern int swiotlb_late_init_with_default_size(size_t default_size);
-void __init arm64_swiotlb_init(void)
+static int __init atomic_pool_init(void)
{
- dma_ops = &arm64_swiotlb_dma_ops;
- swiotlb_init(1);
+ pgprot_t prot = __pgprot(PROT_NORMAL_NC);
+ unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
+ struct page *page;
+ void *addr;
+ unsigned int pool_size_order = get_order(atomic_pool_size);
+
+ if (dev_get_cma_area(NULL))
+ page = dma_alloc_from_contiguous(NULL, nr_pages,
+ pool_size_order);
+ else
+ page = alloc_pages(GFP_DMA, pool_size_order);
+
+ if (page) {
+ int ret;
+ void *page_addr = page_address(page);
+
+ memset(page_addr, 0, atomic_pool_size);
+ __dma_flush_range(page_addr, page_addr + atomic_pool_size);
+
+ atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!atomic_pool)
+ goto free_page;
+
+ addr = dma_common_contiguous_remap(page, atomic_pool_size,
+ VM_USERMAP, prot, atomic_pool_init);
+
+ if (!addr)
+ goto destroy_genpool;
+
+ ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
+ page_to_phys(page),
+ atomic_pool_size, -1);
+ if (ret)
+ goto remove_mapping;
+
+ gen_pool_set_algo(atomic_pool,
+ gen_pool_first_fit_order_align,
+ (void *)PAGE_SHIFT);
+
+ pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
+ atomic_pool_size / 1024);
+ return 0;
+ }
+ goto out;
+
+remove_mapping:
+ dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+destroy_genpool:
+ gen_pool_destroy(atomic_pool);
+ atomic_pool = NULL;
+free_page:
+ if (!dma_release_from_contiguous(NULL, page, nr_pages))
+ __free_pages(page, pool_size_order);
+out:
+ pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
+ atomic_pool_size / 1024);
+ return -ENOMEM;
+}
+
+static int __init swiotlb_late_init(void)
+{
+ size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+
+ /*
+ * These must be registered before of_platform_populate().
+ */
+ bus_register_notifier(&platform_bus_type, &platform_bus_nb);
+ bus_register_notifier(&amba_bustype, &amba_bus_nb);
+
+ dma_ops = &noncoherent_swiotlb_dma_ops;
+
+ return swiotlb_late_init_with_default_size(swiotlb_size);
+}
+
+static int __init arm64_dma_init(void)
+{
+ int ret = 0;
+
+ ret |= swiotlb_late_init();
+ ret |= atomic_pool_init();
+
+ return ret;
}
+arch_initcall(arm64_dma_init);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1426468b77f3..c23751b06120 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -130,7 +130,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
force_sig_info(sig, &si, tsk);
}
-void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
#define ESR_CM (1 << 8)
#define ESR_LNX_EXEC (1 << 24)
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
- unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
- if (esr & ESR_WRITE)
- mask = VM_WRITE;
- if (esr & ESR_LNX_EXEC)
- mask = VM_EXEC;
-
- return vma->vm_flags & mask ? false : true;
-}
-
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
- unsigned int esr, unsigned int flags,
+ unsigned int mm_flags, unsigned long vm_flags,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
* it.
*/
good_area:
- if (access_error(esr, vma)) {
+ /*
+ * Check that the permissions on the VMA allow for the fault which
+ * occurred. If we encountered a write or exec fault, we must have
+ * appropriate permissions, otherwise we allow any permission.
+ */
+ if (!(vma->vm_flags & vm_flags)) {
fault = VM_FAULT_BADACCESS;
goto out;
}
- return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+ return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
- bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (write ? FAULT_FLAG_WRITE : 0);
+ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+ unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
tsk = current;
mm = tsk->mm;
@@ -226,6 +213,16 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (in_atomic() || !mm)
goto no_context;
+ if (user_mode(regs))
+ mm_flags |= FAULT_FLAG_USER;
+
+ if (esr & ESR_LNX_EXEC) {
+ vm_flags = VM_EXEC;
+ } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+ vm_flags = VM_WRITE;
+ mm_flags |= FAULT_FLAG_WRITE;
+ }
+
/*
* As per x86, we may deadlock here. However, since the kernel only
* validly references user space from well defined areas of the code,
@@ -248,7 +245,7 @@ retry:
#endif
}
- fault = __do_page_fault(mm, addr, esr, flags, tsk);
+ fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
/*
* If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +262,7 @@ retry:
*/
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +277,7 @@ retry:
* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
* starvation.
*/
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
@@ -294,6 +291,13 @@ retry:
VM_FAULT_BADACCESS))))
return 0;
+ /*
+ * If we are in kernel mode at this point, we have no context to
+ * handle this fault with.
+ */
+ if (!user_mode(regs))
+ goto no_context;
+
if (fault & VM_FAULT_OOM) {
/*
* We ran out of memory, call the OOM killer, and return to
@@ -304,13 +308,6 @@ retry:
return 0;
}
- /*
- * If we are in kernel mode at this point, we have no context to
- * handle this fault with.
- */
- if (!user_mode(regs))
- goto no_context;
-
if (fault & VM_FAULT_SIGBUS) {
/*
* We had some memory, but were unable to successfully fix up
@@ -365,17 +362,6 @@ static int __kprobes do_translation_fault(unsigned long addr,
}
/*
- * Some section permission faults need to be handled gracefully. They can
- * happen due to a __{get,put}_user during an oops.
- */
-static int do_sect_fault(unsigned long addr, unsigned int esr,
- struct pt_regs *regs)
-{
- do_bad_area(addr, esr, regs);
- return 0;
-}
-
-/*
* This abort handler always returns "fault".
*/
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
@@ -398,12 +384,12 @@ static struct fault_info {
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "reserved access flag fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
{ do_bad, SIGBUS, 0, "reserved permission fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
- { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_bad, SIGBUS, 0, "synchronous external abort" },
{ do_bad, SIGBUS, 0, "asynchronous external abort" },
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 88611c3a421a..0d64089d28b5 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -70,23 +70,17 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
#endif
}
-void __flush_dcache_page(struct page *page)
-{
- __flush_dcache_area(page_address(page), PAGE_SIZE);
-}
-
void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
- unsigned long pfn;
- struct page *page;
+ struct page *page = pte_page(pte);
- pfn = pte_pfn(pte);
- if (!pfn_valid(pfn))
+ /* no flushing needed for anonymous pages */
+ if (!page_mapping(page))
return;
- page = pfn_to_page(pfn);
if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
- __flush_dcache_page(page);
+ __flush_dcache_area(page_address(page),
+ PAGE_SIZE << compound_order(page));
__flush_icache_all();
} else if (icache_is_aivivt()) {
__flush_icache_all();
@@ -94,28 +88,14 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
}
/*
- * Ensure cache coherency between kernel mapping and userspace mapping of this
- * page.
+ * This function is called when a page has been modified by the kernel. Mark
+ * it as dirty for later flushing when mapped in user space (if executable,
+ * see __sync_icache_dcache).
*/
void flush_dcache_page(struct page *page)
{
- struct address_space *mapping;
-
- /*
- * The zero page is never written to, so never has any dirty cache
- * lines, and therefore never needs to be flushed.
- */
- if (page == ZERO_PAGE(0))
- return;
-
- mapping = page_mapping(page);
- if (mapping && mapping_mapped(mapping)) {
- __flush_dcache_page(page);
- __flush_icache_all();
- set_bit(PG_dcache_clean, &page->flags);
- } else {
+ if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
- }
}
EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
new file mode 100644
index 000000000000..023747bf4dd7
--- /dev/null
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -0,0 +1,74 @@
+/*
+ * arch/arm64/mm/hugetlbpage.c
+ *
+ * Copyright (C) 2013 Linaro Ltd.
+ *
+ * Based on arch/x86/mm/hugetlbpage.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+#endif
+
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ int write)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return !(pmd_val(pmd) & PMD_TABLE_BIT);
+}
+
+int pud_huge(pud_t pud)
+{
+#ifndef __PAGETABLE_PMD_FOLDED
+ return !(pud_val(pud) & PUD_TABLE_BIT);
+#else
+ return 0;
+#endif
+}
+
+static __init int setup_hugepagesz(char *opt)
+{
+ unsigned long ps = memparse(opt, &opt);
+ if (ps == PMD_SIZE) {
+ hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
+ } else if (ps == PUD_SIZE) {
+ hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ } else {
+ pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
+ return 0;
+ }
+ return 1;
+}
+__setup("hugepagesz=", setup_hugepagesz);
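+
+/*
+ * Illustrative note (assuming 4K pages): PMD_SIZE is 2M and PUD_SIZE is 1G,
+ * so e.g. "hugepagesz=2M" or "hugepagesz=1G" on the kernel command line
+ * selects one of the hstates registered above.
+ */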
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f497ca77925a..f2bded0254c3 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -30,6 +30,8 @@
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include <asm/prom.h>
#include <asm/sections.h>
@@ -44,8 +46,7 @@ static unsigned long phys_initrd_size __initdata = 0;
phys_addr_t memstart_addr __read_mostly = 0;
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
phys_initrd_start = start;
phys_initrd_size = end - start;
@@ -67,22 +68,31 @@ static int __init early_initrd(char *p)
}
early_param("initrd", early_initrd);
-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+ phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+ return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
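+/*
+ * Illustrative example: DRAM starting at 0x8000000000 gives an offset of
+ * 0x8000000000, so ZONE_DMA is capped at 0x8100000000 (or at the end of
+ * DRAM, whichever is lower).
+ */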
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- unsigned long max_dma32 = min;
+ unsigned long max_dma = min;
memset(zone_size, 0, sizeof(zone_size));
-#ifdef CONFIG_ZONE_DMA32
/* 4GB maximum for 32-bit only capable devices */
- max_dma32 = max(min, min(max, MAX_DMA32_PFN));
- zone_size[ZONE_DMA32] = max_dma32 - min;
-#endif
- zone_size[ZONE_NORMAL] = max - max_dma32;
+ if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+ max_dma = PFN_DOWN(max_zone_dma_phys());
+ zone_size[ZONE_DMA] = max_dma - min;
+ }
+ zone_size[ZONE_NORMAL] = max - max_dma;
memcpy(zhole_size, zone_size, sizeof(zhole_size));
@@ -92,15 +102,15 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (start >= max)
continue;
-#ifdef CONFIG_ZONE_DMA32
- if (start < max_dma32) {
- unsigned long dma_end = min(end, max_dma32);
- zhole_size[ZONE_DMA32] -= dma_end - start;
+
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
+ unsigned long dma_end = min(end, max_dma);
+ zhole_size[ZONE_DMA] -= dma_end - start;
}
-#endif
- if (end > max_dma32) {
+
+ if (end > max_dma) {
unsigned long normal_end = min(end, max);
- unsigned long normal_start = max(start, max_dma32);
+ unsigned long normal_start = max(start, max_dma);
zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
}
}
@@ -131,11 +141,34 @@ static void arm64_memory_present(void)
}
#endif
+static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;
+
+/*
+ * Limit the memory size that was specified via FDT.
+ */
+static int __init early_mem(char *p)
+{
+ if (!p)
+ return 1;
+
+ memory_limit = memparse(p, &p) & PAGE_MASK;
+ pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);
+
+ return 0;
+}
+early_param("mem", early_mem);
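+/* e.g. booting with "mem=512M" caps usable RAM at 512 MiB. */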
+
void __init arm64_memblock_init(void)
{
u64 *reserve_map, base, size;
+ phys_addr_t dma_phys_limit = 0;
- /* Register the kernel text, kernel data and initrd with memblock */
+ memblock_enforce_memory_limit(memory_limit);
+
+ /*
+ * Register the kernel text, kernel data, initrd, and initial
+ * pagetables with memblock.
+ */
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size) {
@@ -147,13 +180,6 @@ void __init arm64_memblock_init(void)
}
#endif
- /*
- * Reserve the page tables. These are already in use,
- * and can only be in node 0.
- */
- memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
- memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
-
/* Reserve the dtb region */
memblock_reserve(virt_to_phys(initial_boot_params),
be32_to_cpu(initial_boot_params->totalsize));
@@ -173,6 +199,13 @@ void __init arm64_memblock_init(void)
memblock_reserve(base, size);
}
+ early_init_fdt_scan_reserved_mem();
+
+ /* 4GB maximum for 32-bit only capable devices */
+ if (IS_ENABLED(CONFIG_ZONE_DMA))
+ dma_phys_limit = max_zone_dma_phys();
+ dma_contiguous_reserve(dma_phys_limit);
+
memblock_allow_resize();
memblock_dump_all();
}
@@ -283,8 +316,6 @@ void __init mem_init(void)
unsigned long reserved_pages, free_pages;
struct memblock_region *reg;
- arm64_swiotlb_init();
-
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#ifndef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 1725cd6db37a..cf25fa9aa68f 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -25,6 +25,10 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
pgprot_t prot, void *caller)
{
@@ -82,3 +86,105 @@ void __iounmap(volatile void __iomem *io_addr)
vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);
+
+void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
+{
+ /* For normal memory we already have a cacheable mapping. */
+ if (pfn_valid(__phys_to_pfn(phys_addr)))
+ return (void __iomem *)__phys_to_virt(phys_addr);
+
+ return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#ifndef CONFIG_ARM64_64K_PAGES
+static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+
+static inline pud_t * __init early_ioremap_pud(unsigned long addr)
+{
+ pgd_t *pgd;
+
+ pgd = pgd_offset_k(addr);
+ BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+
+ return pud_offset(pgd, addr);
+}
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+ pud_t *pud = early_ioremap_pud(addr);
+
+ BUG_ON(pud_none(*pud) || pud_bad(*pud));
+
+ return pmd_offset(pud, addr);
+}
+
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
+{
+ pmd_t *pmd = early_ioremap_pmd(addr);
+
+ BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
+
+ return pte_offset_kernel(pmd, addr);
+}
+
+void __init early_ioremap_init(void)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);
+
+ pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pud_populate(&init_mm, pud, bm_pmd);
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+ * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+ != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+ if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+ WARN_ON(1);
+ pr_warn("pmd %p != %p\n",
+ pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
+ pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+ fix_to_virt(FIX_BTMAP_BEGIN));
+ pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+ fix_to_virt(FIX_BTMAP_END));
+
+ pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+ pr_warn("FIX_BTMAP_BEGIN: %d\n",
+ FIX_BTMAP_BEGIN);
+ }
+
+ early_ioremap_setup();
+}
+
+void __init __early_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *pte;
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ pte = early_ioremap_pte(addr);
+
+ if (pgprot_val(flags))
+ set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+ else {
+ pte_clear(&init_mm, addr, pte);
+ flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+ }
+}
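
Editor's note: the early fixmap support added above hangs a statically allocated pte/pmd pair (bm_pte/bm_pmd) under a fixed virtual window and maps a fixmap slot index to an address inside that window. A minimal sketch of the index-to-address arithmetic behind fix_to_virt()/__fix_to_virt(); the FIXADDR_TOP value here is illustrative only, not the real arm64 layout.

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT   12
	#define FIXADDR_TOP  0xfffffbfffe000000ULL   /* illustrative value only */

	/* Slots grow downwards from FIXADDR_TOP, one page per index. */
	static uint64_t fix_to_virt(unsigned int idx)
	{
		return FIXADDR_TOP - ((uint64_t)idx << PAGE_SHIFT);
	}

	int main(void)
	{
		unsigned int idx;

		for (idx = 0; idx < 4; idx++)
			printf("fixmap slot %u -> 0x%016llx\n", idx,
			       (unsigned long long)fix_to_virt(idx));
		return 0;
	}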
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index 916701e6d040..d519f4f50c8c 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1,3 +1,2 @@
-extern void __flush_dcache_page(struct page *page);
extern void __init bootmem_init(void);
extern void __init arm64_swiotlb_init(void);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index eeecc9c8ed68..a2155ca6921c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
struct cachepolicy {
const char policy[16];
u64 mair;
@@ -122,33 +117,6 @@ static int __init early_cachepolicy(char *p)
}
early_param("cachepolicy", early_cachepolicy);
-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-static void __init init_mem_pgprot(void)
-{
- pteval_t default_pgprot;
- int i;
-
- default_pgprot = PTE_ATTRINDX(MT_NORMAL);
- prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
- /*
- * Mark memory with the "shared" attribute for SMP systems
- */
- default_pgprot |= PTE_SHARED;
- prot_sect_kernel |= PMD_SECT_S;
-#endif
-
- for (i = 0; i < 16; i++) {
- unsigned long v = pgprot_val(protection_map[i]);
- protection_map[i] = __pgprot(v | default_pgprot);
- }
-
- pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -168,7 +136,8 @@ static void __init *early_alloc(unsigned long sz)
}
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
- unsigned long end, unsigned long pfn)
+ unsigned long end, unsigned long pfn,
+ pgprot_t prot)
{
pte_t *pte;
@@ -180,16 +149,28 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte = pte_offset_kernel(pmd, addr);
do {
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ set_pte(pte, pfn_pte(pfn, prot));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys)
+ unsigned long end, phys_addr_t phys,
+ int map_io)
{
pmd_t *pmd;
unsigned long next;
+ pmdval_t prot_sect;
+ pgprot_t prot_pte;
+
+ if (map_io) {
+ prot_sect = PMD_TYPE_SECT | PMD_SECT_AF |
+ PMD_ATTRINDX(MT_DEVICE_nGnRE);
+ prot_pte = __pgprot(PROT_DEVICE_nGnRE);
+ } else {
+ prot_sect = PROT_SECT_NORMAL_EXEC;
+ prot_pte = PAGE_KERNEL_EXEC;
+ }
/*
* Check for initial section mappings in the pgd/pud and remove them.
@@ -203,23 +184,33 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0)
- set_pmd(pmd, __pmd(phys | prot_sect_kernel));
- else
- alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+ if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+ pmd_t old_pmd = *pmd;
+ set_pmd(pmd, __pmd(phys | prot_sect));
+ /*
+ * Check for previous table entries created during
+ * boot (__create_page_tables) and flush them.
+ */
+ if (!pmd_none(old_pmd))
+ flush_tlb_all();
+ } else {
+ alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+ prot_pte);
+ }
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys)
+ unsigned long end, unsigned long phys,
+ int map_io)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
- alloc_init_pmd(pud, addr, next, phys);
+ alloc_init_pmd(pud, addr, next, phys, map_io);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -228,74 +219,60 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
* Create the page directory entries and any necessary page tables for the
* mapping specified by 'md'.
*/
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
- phys_addr_t size)
+static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ int map_io)
{
unsigned long addr, length, end, next;
- pgd_t *pgd;
-
- if (virt < VMALLOC_START) {
- pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
- phys, virt);
- return;
- }
addr = virt & PAGE_MASK;
length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
- pgd = pgd_offset_k(addr);
end = addr + length;
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys);
+ alloc_init_pud(pgd, addr, next, phys, map_io);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
-#ifdef CONFIG_EARLY_PRINTK
-/*
- * Create an early I/O mapping using the pgd/pmd entries already populated
- * in head.S as this function is called too early to allocate any memory. The
- * mapping size is 2MB with 4KB pages or 64KB with 64KB pages.
- */
-void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size)
{
- unsigned long size, mask;
- bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- /*
- * No early pte entries with !ARM64_64K_PAGES configuration, so using
- * sections (pmd).
- */
- size = page64k ? PAGE_SIZE : SECTION_SIZE;
- mask = ~(size - 1);
-
- pgd = pgd_offset_k(virt);
- pud = pud_offset(pgd, virt);
- if (pud_none(*pud))
- return NULL;
- pmd = pmd_offset(pud, virt);
-
- if (page64k) {
- if (pmd_none(*pmd))
- return NULL;
- pte = pte_offset_kernel(pmd, virt);
- set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
- } else {
- set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
+ if (virt < VMALLOC_START) {
+ pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+ &phys, virt);
+ return;
}
+ __create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
+}
- return (void __iomem *)((virt & mask) + (phys & ~mask));
+void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
+{
+ if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
+ pr_warn("BUG: not creating id mapping for %pa\n", &addr);
+ return;
+ }
+ __create_mapping(&idmap_pg_dir[pgd_index(addr)],
+ addr, addr, size, map_io);
}
-#endif
static void __init map_mem(void)
{
struct memblock_region *reg;
+ phys_addr_t limit;
+
+ /*
+ * Temporarily limit the memblock range. We need to do this as
+ * create_mapping requires puds, pmds and ptes to be allocated from
+ * memory addressable from the initial direct kernel mapping.
+ *
+ * The initial direct kernel mapping, located at swapper_pg_dir,
+ * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
+ * aligned to 2MB as per Documentation/arm64/booting.txt).
+ */
+ limit = PHYS_OFFSET + PGDIR_SIZE;
+ memblock_set_current_limit(limit);
/* map all the memory banks */
for_each_memblock(memory, reg) {
@@ -305,8 +282,27 @@ static void __init map_mem(void)
if (start >= end)
break;
+#ifndef CONFIG_ARM64_64K_PAGES
+ /*
+ * For the first memory bank align the start address and
+ * current memblock limit to prevent create_mapping() from
+ * allocating pte page tables from unmapped memory.
+ * When 64K pages are enabled, the pte page table for the
+ * first PGDIR_SIZE is already present in swapper_pg_dir.
+ */
+ if (start < limit)
+ start = ALIGN(start, PMD_SIZE);
+ if (end < limit) {
+ limit = end & PMD_MASK;
+ memblock_set_current_limit(limit);
+ }
+#endif
+
create_mapping(start, __phys_to_virt(start), end - start);
}
+
+ /* Limit no longer required. */
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
/*
@@ -317,13 +313,6 @@ void __init paging_init(void)
{
void *zero_page;
- /*
- * Maximum PGDIR_SIZE addressable via the initial direct kernel
- * mapping in swapper_pg_dir.
- */
- memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);
-
- init_mem_pgprot();
map_mem();
/*
@@ -339,7 +328,6 @@ void __init paging_init(void)
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
- __flush_dcache_page(empty_zero_page);
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
@@ -383,6 +371,9 @@ int kern_addr_valid(unsigned long addr)
if (pmd_none(*pmd))
return 0;
+ if (pmd_sect(*pmd))
+ return pfn_valid(pmd_pfn(*pmd));
+
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
return 0;
@@ -423,7 +414,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
if (!p)
return -ENOMEM;
- set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+ set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmd, node, addr, next);
} while (addr = next, addr != end);
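
Editor's note: alloc_init_pmd() above chooses between a 2MB section (block) mapping and a table of ptes by checking that the virtual range and the physical address are all section-aligned: ((addr | next | phys) & ~SECTION_MASK) == 0. A small sketch of that predicate, assuming 4KB pages (SECTION_SHIFT = 21); the addresses in main() are made up.

	#include <stdio.h>
	#include <stdint.h>

	#define SECTION_SHIFT 21                    /* 2MB sections with 4KB pages */
	#define SECTION_SIZE  (1ULL << SECTION_SHIFT)
	#define SECTION_MASK  (~(SECTION_SIZE - 1))

	/* True when [addr, next) can be covered by whole sections backed at phys. */
	static int can_use_section(uint64_t addr, uint64_t next, uint64_t phys)
	{
		return ((addr | next | phys) & ~SECTION_MASK) == 0;
	}

	int main(void)
	{
		/* 2MB-aligned VA/PA over a 2MB span: section mapping is possible. */
		printf("%d\n", can_use_section(0xffffffc000200000ULL,
					       0xffffffc000400000ULL,
					       0x80200000ULL));
		/* Unaligned physical address: fall back to pte mappings. */
		printf("%d\n", can_use_section(0xffffffc000200000ULL,
					       0xffffffc000400000ULL,
					       0x80201000ULL));
		return 0;
	}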
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
new file mode 100644
index 000000000000..75e744e4cec5
--- /dev/null
+++ b/arch/arm64/mm/pageattr.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page_change_data {
+ pgprot_t set_mask;
+ pgprot_t clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page_change_data *cdata = data;
+ pte_t pte = *ptep;
+
+ pte = clear_pte_bit(pte, cdata->clear_mask);
+ pte = set_pte_bit(pte, cdata->set_mask);
+
+ set_pte(ptep, pte);
+ return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+ pgprot_t set_mask, pgprot_t clear_mask)
+{
+ unsigned long start = addr;
+ unsigned long size = PAGE_SIZE*numpages;
+ unsigned long end = start + size;
+ int ret;
+ struct page_change_data data;
+
+ if (!IS_ALIGNED(addr, PAGE_SIZE)) {
+ addr &= PAGE_MASK;
+ WARN_ON_ONCE(1);
+ }
+
+ if (!is_module_address(start) || !is_module_address(end - 1))
+ return -EINVAL;
+
+ data.set_mask = set_mask;
+ data.clear_mask = clear_mask;
+
+ ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+ &data);
+
+ flush_tlb_kernel_range(start, end);
+ return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(PTE_RDONLY),
+ __pgprot(PTE_WRITE));
+}
+EXPORT_SYMBOL_GPL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(PTE_WRITE),
+ __pgprot(PTE_RDONLY));
+}
+EXPORT_SYMBOL_GPL(set_memory_rw);
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(PTE_PXN),
+ __pgprot(0));
+}
+EXPORT_SYMBOL_GPL(set_memory_nx);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(0),
+ __pgprot(PTE_PXN));
+}
+EXPORT_SYMBOL_GPL(set_memory_x);
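
Editor's note: change_page_range() above applies a set mask and a clear mask to each pte in the range; set_memory_ro() and set_memory_rw() are simply opposite pairings of PTE_RDONLY and PTE_WRITE. The sketch below shows the mask arithmetic on a bare integer pte value; the bit positions are made up for illustration and are not the real arm64 descriptor encoding.

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative bit positions only, not the arm64 PTE layout. */
	#define PTE_WRITE   (1ULL << 0)
	#define PTE_RDONLY  (1ULL << 1)

	static uint64_t change_pte(uint64_t pte, uint64_t set, uint64_t clear)
	{
		pte &= ~clear;          /* clear_pte_bit() */
		pte |= set;             /* set_pte_bit()   */
		return pte;
	}

	int main(void)
	{
		uint64_t pte = PTE_WRITE;                     /* writable page */

		pte = change_pte(pte, PTE_RDONLY, PTE_WRITE); /* set_memory_ro() */
		printf("ro: 0x%llx\n", (unsigned long long)pte);

		pte = change_pte(pte, PTE_WRITE, PTE_RDONLY); /* set_memory_rw() */
		printf("rw: 0x%llx\n", (unsigned long long)pte);
		return 0;
	}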
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cdada657..62c6101df260 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *new_pgd;
-
if (PGD_SIZE == PAGE_SIZE)
- new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ return (pgd_t *)get_zeroed_page(GFP_KERNEL);
else
- new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
- if (!new_pgd)
- return NULL;
-
- return new_pgd;
+ return kzalloc(PGD_SIZE, GFP_KERNEL);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 8957b822010b..005d29e2977d 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -38,8 +38,7 @@
*/
.macro dcache_line_size, reg, tmp
mrs \tmp, ctr_el0 // read CTR
- lsr \tmp, \tmp, #16
- and \tmp, \tmp, #0xf // cache line size encoding
+ ubfm \tmp, \tmp, #16, #19 // cache line size encoding
mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size
.endm
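
Editor's note: the reworked dcache_line_size macro extracts CTR_EL0 bits [19:16] (DminLine, the log2 of the smallest D-cache line in words) with a single ubfm instead of lsr+and, then multiplies by 4 bytes per word. The same computation in C, on a sample CTR value chosen for illustration:

	#include <stdio.h>
	#include <stdint.h>

	/* Cache line size in bytes from a CTR_EL0 value: 4 << DminLine. */
	static unsigned int dcache_line_size(uint64_t ctr)
	{
		unsigned int dminline = (ctr >> 16) & 0xf;   /* bits [19:16] */

		return 4u << dminline;
	}

	int main(void)
	{
		/* DminLine = 4, i.e. 16 words -> 64-byte cache lines. */
		printf("%u bytes\n", dcache_line_size(0x8444c004ULL));
		return 0;
	}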
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a82ae8868077..c133ae53ed86 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -69,6 +69,21 @@ ENTRY(cpu_reset)
ret x0
ENDPROC(cpu_reset)
+ENTRY(cpu_soft_restart)
+ /* Save address of cpu_reset() and reset address */
+ mov x19, x0
+ mov x20, x1
+
+ /* Turn D-cache off */
+ bl cpu_cache_off
+
+ /* Push out all dirty data, and ensure cache is empty */
+ bl flush_cache_all
+
+ mov x0, x20
+ ret x19
+ENDPROC(cpu_soft_restart)
+
/*
* cpu_do_idle()
*
@@ -80,8 +95,77 @@ ENTRY(cpu_do_idle)
ret
ENDPROC(cpu_do_idle)
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+/**
+ * cpu_do_suspend - save CPU registers context
+ *
+ * x0: virtual address of context pointer
+ */
+ENTRY(cpu_do_suspend)
+ mrs x2, tpidr_el0
+ mrs x3, tpidrro_el0
+ mrs x4, contextidr_el1
+ mrs x5, mair_el1
+ mrs x6, cpacr_el1
+ mrs x7, ttbr1_el1
+ mrs x8, tcr_el1
+ mrs x9, vbar_el1
+ mrs x10, mdscr_el1
+ mrs x11, oslsr_el1
+ mrs x12, sctlr_el1
+ stp x2, x3, [x0]
+ stp x4, x5, [x0, #16]
+ stp x6, x7, [x0, #32]
+ stp x8, x9, [x0, #48]
+ stp x10, x11, [x0, #64]
+ str x12, [x0, #80]
+ ret
+ENDPROC(cpu_do_suspend)
+
+/**
+ * cpu_do_resume - restore CPU register context
+ *
+ * x0: Physical address of context pointer
+ * x1: ttbr0_el1 to be restored
+ *
+ * Returns:
+ * sctlr_el1 value in x0
+ */
+ENTRY(cpu_do_resume)
+ /*
+ * Invalidate local tlb entries before turning on MMU
+ */
+ tlbi vmalle1
+ ldp x2, x3, [x0]
+ ldp x4, x5, [x0, #16]
+ ldp x6, x7, [x0, #32]
+ ldp x8, x9, [x0, #48]
+ ldp x10, x11, [x0, #64]
+ ldr x12, [x0, #80]
+ msr tpidr_el0, x2
+ msr tpidrro_el0, x3
+ msr contextidr_el1, x4
+ msr mair_el1, x5
+ msr cpacr_el1, x6
+ msr ttbr0_el1, x1
+ msr ttbr1_el1, x7
+ msr tcr_el1, x8
+ msr vbar_el1, x9
+ msr mdscr_el1, x10
+ /*
+ * Restore oslsr_el1 by writing oslar_el1
+ */
+ ubfx x11, x11, #1, #1
+ msr oslar_el1, x11
+ mov x0, x12
+ dsb nsh // Make sure local tlb invalidation completed
+ isb
+ ret
+ENDPROC(cpu_do_resume)
+#endif
+
/*
- * cpu_switch_mm(pgd_phys, tsk)
+ * cpu_do_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys.
*
@@ -95,10 +179,6 @@ ENTRY(cpu_do_switch_mm)
ret
ENDPROC(cpu_do_switch_mm)
-cpu_name:
- .ascii "AArch64 Processor"
- .align
-
.section ".text.init", #alloc, #execinstr
/*
@@ -108,19 +188,13 @@ cpu_name:
* value of the SCTLR_EL1 register.
*/
ENTRY(__cpu_setup)
- /*
- * Preserve the link register across the function call.
- */
- mov x28, lr
- bl __flush_dcache_all
- mov lr, x28
ic iallu // I+BTB cache invalidate
- dsb sy
+ tlbi vmalle1is // invalidate I + D TLBs
+ dsb ish
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
msr mdscr_el1, xzr // Reset mdscr_el1
- tlbi vmalle1is // invalidate I + D TLBs
/*
* Memory region attributes for LPAE:
*
@@ -150,8 +224,14 @@ ENTRY(__cpu_setup)
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
* both user and kernel.
*/
- ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
- TCR_ASID16 | (1 << 31)
+ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | \
+ TCR_ASID16 | TCR_TBI0 | (1 << 31)
+ /*
+ * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
+ * TCR_EL1.
+ */
+ mrs x9, ID_AA64MMFR0_EL1
+ bfi x10, x9, #32, #3
#ifdef CONFIG_ARM64_64K_PAGES
orr x10, x10, TCR_TG0_64K
orr x10, x10, TCR_TG1_64K
@@ -166,9 +246,9 @@ ENDPROC(__cpu_setup)
* CE0 XWHW CZ ME TEEA S
* .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
* 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
- * .... .100 .... 01.1 11.1 ..01 0001 1101 < software settings
+ * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
*/
.type crval, #object
crval:
- .word 0x030802e2 // clear
+ .word 0x000802e2 // clear
.word 0x0405d11d // set
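
Editor's note: __cpu_setup now reads the PARange field of ID_AA64MMFR0_EL1 and copies it into the IPS field of TCR_EL1 with "bfi x10, x9, #32, #3" instead of hard-coding a 40-bit intermediate physical address size. A sketch of that bitfield insert; the 3-bit width and bit-32 position follow the asm above, while the register values in main() are made up.

	#include <stdio.h>
	#include <stdint.h>

	/* Insert the low 3 bits of ID_AA64MMFR0_EL1 (PARange) into TCR bits
	 * [34:32] (IPS), mirroring "bfi x10, x9, #32, #3".
	 */
	static uint64_t set_tcr_ips(uint64_t tcr, uint64_t mmfr0)
	{
		uint64_t parange = mmfr0 & 0x7;

		tcr &= ~(0x7ULL << 32);
		tcr |= parange << 32;
		return tcr;
	}

	int main(void)
	{
		uint64_t tcr = 0;        /* placeholder TCR_EL1 value */
		uint64_t mmfr0 = 0x2;    /* PARange = 0b010: 40-bit PA, for example */

		printf("TCR_EL1 = 0x%016llx\n",
		       (unsigned long long)set_tcr_ips(tcr, mmfr0));
		return 0;
	}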
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
deleted file mode 100644
index 8ae80a18e8ec..000000000000
--- a/arch/arm64/mm/tlb.S
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Based on arch/arm/mm/tlb.S
- *
- * Copyright (C) 1997-2002 Russell King
- * Copyright (C) 2012 ARM Ltd.
- * Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-#include "proc-macros.S"
-
-/*
- * __cpu_flush_user_tlb_range(start, end, vma)
- *
- * Invalidate a range of TLB entries in the specified address space.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- * - vma - vma_struct describing address range
- */
-ENTRY(__cpu_flush_user_tlb_range)
- vma_vm_mm x3, x2 // get vma->vm_mm
- mmid x3, x3 // get vm_mm->context.id
- dsb sy
- lsr x0, x0, #12 // align address
- lsr x1, x1, #12
- bfi x0, x3, #48, #16 // start VA and ASID
- bfi x1, x3, #48, #16 // end VA and ASID
-1: tlbi vae1is, x0 // TLB invalidate by address and ASID
- add x0, x0, #1
- cmp x0, x1
- b.lo 1b
- dsb sy
- ret
-ENDPROC(__cpu_flush_user_tlb_range)
-
-/*
- * __cpu_flush_kern_tlb_range(start,end)
- *
- * Invalidate a range of kernel TLB entries.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- */
-ENTRY(__cpu_flush_kern_tlb_range)
- dsb sy
- lsr x0, x0, #12 // align address
- lsr x1, x1, #12
-1: tlbi vaae1is, x0 // TLB invalidate by address
- add x0, x0, #1
- cmp x0, x1
- b.lo 1b
- dsb sy
- isb
- ret
-ENDPROC(__cpu_flush_kern_tlb_range)
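
Editor's note: for reference, the deleted __cpu_flush_user_tlb_range built its "tlbi vae1is" operand by shifting the VA right by 12 bits and inserting the 16-bit ASID into bits [63:48] (the two bfi instructions), then looped one page granule at a time. A sketch of that operand packing and loop; the addresses and ASID below are made up.

	#include <stdio.h>
	#include <stdint.h>

	/* Page number in bits [47:0], ASID in bits [63:48], as bfi does above. */
	static uint64_t tlbi_va_asid(uint64_t va, uint16_t asid)
	{
		return ((va >> 12) & ((1ULL << 48) - 1)) | ((uint64_t)asid << 48);
	}

	int main(void)
	{
		uint64_t start = 0x0000007fb0000000ULL;
		uint64_t end   = 0x0000007fb0003000ULL;  /* three 4KB pages */
		uint16_t asid  = 0x42;
		uint64_t va;

		for (va = start; va < end; va += 0x1000)
			printf("tlbi vae1is, 0x%016llx\n",
			       (unsigned long long)tlbi_va_asid(va, asid));
		return 0;
	}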