Diffstat (limited to 'arch/arm')
-rw-r--r-- arch/arm/Kconfig | 45
-rw-r--r-- arch/arm/Kconfig.debug | 41
-rw-r--r-- arch/arm/Makefile | 1
-rw-r--r-- arch/arm/boot/compressed/head.S | 9
-rw-r--r-- arch/arm/boot/dts/am335x-evm.dts | 2
-rw-r--r-- arch/arm/boot/dts/am335x-evmsk.dts | 10
-rw-r--r-- arch/arm/boot/dts/am33xx.dtsi | 6
-rw-r--r-- arch/arm/common/Makefile | 2
-rw-r--r-- arch/arm/common/bL_switcher.c | 822
-rw-r--r-- arch/arm/common/bL_switcher_dummy_if.c | 71
-rw-r--r-- arch/arm/common/mcpm_entry.c | 27
-rw-r--r-- arch/arm/common/mcpm_head.S | 18
-rw-r--r-- arch/arm/common/mcpm_platsmp.c | 27
-rw-r--r-- arch/arm/common/timer-sp.c | 2
-rw-r--r-- arch/arm/configs/h3600_defconfig | 22
-rw-r--r-- arch/arm/crypto/.gitignore | 1
-rw-r--r-- arch/arm/crypto/Makefile | 14
-rw-r--r-- arch/arm/crypto/aes_glue.c | 22
-rw-r--r-- arch/arm/crypto/aes_glue.h | 19
-rw-r--r-- arch/arm/crypto/aesbs-core.S_shipped | 2544
-rw-r--r-- arch/arm/crypto/aesbs-glue.c | 434
-rw-r--r-- arch/arm/crypto/bsaes-armv7.pl | 2467
-rw-r--r-- arch/arm/include/asm/Kbuild | 1
-rw-r--r-- arch/arm/include/asm/assembler.h | 7
-rw-r--r-- arch/arm/include/asm/atomic.h | 108
-rw-r--r-- arch/arm/include/asm/bL_switcher.h | 77
-rw-r--r-- arch/arm/include/asm/bug.h | 10
-rw-r--r-- arch/arm/include/asm/cacheflush.h | 46
-rw-r--r-- arch/arm/include/asm/cmpxchg.h | 58
-rw-r--r-- arch/arm/include/asm/cputype.h | 1
-rw-r--r-- arch/arm/include/asm/dma-mapping.h | 54
-rw-r--r-- arch/arm/include/asm/hardirq.h | 2
-rw-r--r-- arch/arm/include/asm/hardware/coresight.h | 8
-rw-r--r-- arch/arm/include/asm/io.h | 9
-rw-r--r-- arch/arm/include/asm/kgdb.h | 3
-rw-r--r-- arch/arm/include/asm/kvm_arm.h | 9
-rw-r--r-- arch/arm/include/asm/kvm_asm.h | 2
-rw-r--r-- arch/arm/include/asm/kvm_emulate.h | 51
-rw-r--r-- arch/arm/include/asm/kvm_host.h | 6
-rw-r--r-- arch/arm/include/asm/kvm_mmu.h | 17
-rw-r--r-- arch/arm/include/asm/mach/arch.h | 1
-rw-r--r-- arch/arm/include/asm/mcpm.h | 39
-rw-r--r-- arch/arm/include/asm/memory.h | 76
-rw-r--r-- arch/arm/include/asm/mmu.h | 2
-rw-r--r-- arch/arm/include/asm/pgalloc.h | 12
-rw-r--r-- arch/arm/include/asm/pgtable-2level.h | 7
-rw-r--r-- arch/arm/include/asm/pgtable-3level.h | 5
-rw-r--r-- arch/arm/include/asm/processor.h | 33
-rw-r--r-- arch/arm/include/asm/setup.h | 2
-rw-r--r-- arch/arm/include/asm/smp.h | 2
-rw-r--r-- arch/arm/include/asm/spinlock.h | 36
-rw-r--r-- arch/arm/include/asm/spinlock_types.h | 2
-rw-r--r-- arch/arm/include/asm/tlbflush.h | 48
-rw-r--r-- arch/arm/include/asm/unified.h | 4
-rw-r--r-- arch/arm/include/asm/xen/hypervisor.h | 2
-rw-r--r-- arch/arm/include/asm/xen/page-coherent.h | 50
-rw-r--r-- arch/arm/include/asm/xen/page.h | 44
-rw-r--r-- arch/arm/include/debug/efm32.S | 45
-rw-r--r-- arch/arm/include/debug/msm.S | 5
-rw-r--r-- arch/arm/include/debug/pl01x.S | 2
-rw-r--r-- arch/arm/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/arm/include/uapi/asm/kvm.h | 3
-rw-r--r-- arch/arm/include/uapi/asm/perf_regs.h | 23
-rw-r--r-- arch/arm/kernel/Makefile | 4
-rw-r--r-- arch/arm/kernel/armksyms.c | 1
-rw-r--r-- arch/arm/kernel/entry-armv.S | 6
-rw-r--r-- arch/arm/kernel/entry-common.S | 4
-rw-r--r-- arch/arm/kernel/head.S | 82
-rw-r--r-- arch/arm/kernel/hw_breakpoint.c | 14
-rw-r--r-- arch/arm/kernel/kprobes.c | 8
-rw-r--r-- arch/arm/kernel/module.c | 59
-rw-r--r-- arch/arm/kernel/perf_event.c | 3
-rw-r--r-- arch/arm/kernel/perf_event_cpu.c | 2
-rw-r--r-- arch/arm/kernel/perf_regs.c | 30
-rw-r--r-- arch/arm/kernel/setup.c | 28
-rw-r--r-- arch/arm/kernel/signal.c | 38
-rw-r--r-- arch/arm/kernel/sigreturn_codes.S | 80
-rw-r--r-- arch/arm/kernel/sleep.S | 27
-rw-r--r-- arch/arm/kernel/smp.c | 42
-rw-r--r-- arch/arm/kernel/smp_scu.c | 14
-rw-r--r-- arch/arm/kernel/smp_tlb.c | 36
-rw-r--r-- arch/arm/kernel/smp_twd.c | 24
-rw-r--r-- arch/arm/kernel/suspend.c | 8
-rw-r--r-- arch/arm/kernel/traps.c | 24
-rw-r--r-- arch/arm/kvm/Kconfig | 1
-rw-r--r-- arch/arm/kvm/Makefile | 2
-rw-r--r-- arch/arm/kvm/arm.c | 24
-rw-r--r-- arch/arm/kvm/coproc.c | 120
-rw-r--r-- arch/arm/kvm/coproc_a15.c | 117
-rw-r--r-- arch/arm/kvm/coproc_a7.c | 54
-rw-r--r-- arch/arm/kvm/emulate.c | 2
-rw-r--r-- arch/arm/kvm/guest.c | 24
-rw-r--r-- arch/arm/kvm/handle_exit.c | 20
-rw-r--r-- arch/arm/kvm/mmio.c | 86
-rw-r--r-- arch/arm/kvm/mmu.c | 223
-rw-r--r-- arch/arm/kvm/psci.c | 21
-rw-r--r-- arch/arm/kvm/reset.c | 15
-rw-r--r-- arch/arm/lib/bitops.h | 5
-rw-r--r-- arch/arm/lib/uaccess_with_memcpy.c | 41
-rw-r--r-- arch/arm/mach-at91/Makefile | 1
-rw-r--r-- arch/arm/mach-at91/at91rm9200.c | 2
-rw-r--r-- arch/arm/mach-at91/at91sam9260.c | 2
-rw-r--r-- arch/arm/mach-at91/at91sam9261.c | 2
-rw-r--r-- arch/arm/mach-at91/at91sam9261_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/at91sam9263.c | 2
-rw-r--r-- arch/arm/mach-at91/at91sam9263_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/at91sam9g45.c | 2
-rw-r--r-- arch/arm/mach-at91/at91sam9g45_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/at91sam9rl.c | 2
-rw-r--r-- arch/arm/mach-at91/at91sam9rl_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/board-sam9261ek.c | 10
-rw-r--r-- arch/arm/mach-at91/board-sam9263ek.c | 6
-rw-r--r-- arch/arm/mach-at91/board-sam9m10g45ek.c | 4
-rw-r--r-- arch/arm/mach-at91/board-sam9rlek.c | 6
-rw-r--r-- arch/arm/mach-at91/board.h | 4
-rw-r--r-- arch/arm/mach-at91/cpuidle.c | 68
-rw-r--r-- arch/arm/mach-at91/pm.c | 27
-rw-r--r-- arch/arm/mach-at91/pm.h | 59
-rw-r--r-- arch/arm/mach-at91/setup.c | 14
-rw-r--r-- arch/arm/mach-davinci/Kconfig | 1
-rw-r--r-- arch/arm/mach-davinci/sram.c | 9
-rw-r--r-- arch/arm/mach-exynos/common.c | 11
-rw-r--r-- arch/arm/mach-exynos/common.h | 1
-rw-r--r-- arch/arm/mach-exynos/cpuidle.c | 18
-rw-r--r-- arch/arm/mach-exynos/mach-exynos4-dt.c | 2
-rw-r--r-- arch/arm/mach-exynos/mach-exynos5-dt.c | 2
-rw-r--r-- arch/arm/mach-footbridge/netwinder-hw.c | 8
-rw-r--r-- arch/arm/mach-highbank/Kconfig | 3
-rw-r--r-- arch/arm/mach-imx/mach-imx6q.c | 4
-rw-r--r-- arch/arm/mach-ixp4xx/Kconfig | 4
-rw-r--r-- arch/arm/mach-mmp/ttc_dkb.c | 4
-rw-r--r-- arch/arm/mach-mvebu/Kconfig | 1
-rw-r--r-- arch/arm/mach-mvebu/coherency_ll.S | 3
-rw-r--r-- arch/arm/mach-mvebu/headsmp.S | 4
-rw-r--r-- arch/arm/mach-omap2/board-flash.c | 2
-rw-r--r-- arch/arm/mach-omap2/board-omap3beagle.c | 10
-rw-r--r-- arch/arm/mach-omap2/gpmc.c | 48
-rw-r--r-- arch/arm/mach-omap2/omap-pm.h | 2
-rw-r--r-- arch/arm/mach-omap2/opp.c | 6
-rw-r--r-- arch/arm/mach-omap2/pm.c | 8
-rw-r--r-- arch/arm/mach-pxa/Kconfig | 3
-rw-r--r-- arch/arm/mach-pxa/cm-x300.c | 1
-rw-r--r-- arch/arm/mach-pxa/colibri-pxa270-income.c | 1
-rw-r--r-- arch/arm/mach-pxa/ezx.c | 1
-rw-r--r-- arch/arm/mach-pxa/hx4700.c | 1
-rw-r--r-- arch/arm/mach-pxa/lpd270.c | 1
-rw-r--r-- arch/arm/mach-pxa/magician.c | 1
-rw-r--r-- arch/arm/mach-pxa/mainstone.c | 1
-rw-r--r-- arch/arm/mach-pxa/mioa701.c | 1
-rw-r--r-- arch/arm/mach-pxa/palm27x.c | 1
-rw-r--r-- arch/arm/mach-pxa/palmtc.c | 35
-rw-r--r-- arch/arm/mach-pxa/palmte2.c | 1
-rw-r--r-- arch/arm/mach-pxa/pcm990-baseboard.c | 1
-rw-r--r-- arch/arm/mach-pxa/raumfeld.c | 1
-rw-r--r-- arch/arm/mach-pxa/tavorevb.c | 2
-rw-r--r-- arch/arm/mach-pxa/viper.c | 1
-rw-r--r-- arch/arm/mach-pxa/z2.c | 2
-rw-r--r-- arch/arm/mach-pxa/zylonite.c | 1
-rw-r--r-- arch/arm/mach-s3c24xx/mach-h1940.c | 1
-rw-r--r-- arch/arm/mach-s3c24xx/mach-rx1950.c | 1
-rw-r--r-- arch/arm/mach-s3c64xx/mach-crag6410.c | 1
-rw-r--r-- arch/arm/mach-s3c64xx/mach-hmt.c | 1
-rw-r--r-- arch/arm/mach-s3c64xx/mach-smartq.c | 1
-rw-r--r-- arch/arm/mach-s3c64xx/mach-smdk6410.c | 1
-rw-r--r-- arch/arm/mach-s5p64x0/mach-smdk6440.c | 1
-rw-r--r-- arch/arm/mach-s5p64x0/mach-smdk6450.c | 1
-rw-r--r-- arch/arm/mach-s5pc100/mach-smdkc100.c | 1
-rw-r--r-- arch/arm/mach-s5pv210/mach-smdkv210.c | 1
-rw-r--r-- arch/arm/mach-sa1100/assabet.c | 3
-rw-r--r-- arch/arm/mach-sa1100/generic.c | 81
-rw-r--r-- arch/arm/mach-sa1100/generic.h | 7
-rw-r--r-- arch/arm/mach-sa1100/include/mach/gpio.h | 55
-rw-r--r-- arch/arm/mach-sa1100/include/mach/h3xxx.h | 2
-rw-r--r-- arch/arm/mach-sa1100/simpad.c | 1
-rw-r--r-- arch/arm/mach-shmobile/board-armadillo800eva.c | 1
-rw-r--r-- arch/arm/mach-sti/Kconfig | 4
-rw-r--r-- arch/arm/mach-tegra/Kconfig | 2
-rw-r--r-- arch/arm/mach-tegra/apbio.c | 2
-rw-r--r-- arch/arm/mach-ux500/Kconfig | 1
-rw-r--r-- arch/arm/mach-vexpress/Kconfig | 13
-rw-r--r-- arch/arm/mach-vexpress/Makefile | 3
-rw-r--r-- arch/arm/mach-vexpress/dcscb.c | 56
-rw-r--r-- arch/arm/mach-vexpress/spc.c | 366
-rw-r--r-- arch/arm/mach-vexpress/spc.h | 2
-rw-r--r-- arch/arm/mach-vexpress/tc2_pm.c | 55
-rw-r--r-- arch/arm/mach-zynq/common.c | 6
-rw-r--r-- arch/arm/mm/Kconfig | 6
-rw-r--r-- arch/arm/mm/abort-ev6.S | 5
-rw-r--r-- arch/arm/mm/alignment.c | 9
-rw-r--r-- arch/arm/mm/dma-mapping.c | 55
-rw-r--r-- arch/arm/mm/extable.c | 7
-rw-r--r-- arch/arm/mm/fault-armv.c | 6
-rw-r--r-- arch/arm/mm/idmap.c | 8
-rw-r--r-- arch/arm/mm/init.c | 12
-rw-r--r-- arch/arm/mm/mm.h | 2
-rw-r--r-- arch/arm/mm/mmap.c | 6
-rw-r--r-- arch/arm/mm/mmu.c | 82
-rw-r--r-- arch/arm/mm/nommu.c | 9
-rw-r--r-- arch/arm/mm/proc-v6.S | 4
-rw-r--r-- arch/arm/mm/proc-v7.S | 4
-rw-r--r-- arch/arm/net/bpf_jit_32.c | 6
-rw-r--r-- arch/arm/plat-samsung/dev-backlight.c | 5
-rw-r--r-- arch/arm/plat-versatile/headsmp.S | 2
-rw-r--r-- arch/arm/vfp/vfpmodule.c | 6
-rw-r--r-- arch/arm/xen/Makefile | 2
-rw-r--r-- arch/arm/xen/mm.c | 65
-rw-r--r-- arch/arm/xen/p2m.c | 208
207 files changed, 9350 insertions, 1079 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index acb80708accd..214b698cefea 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,8 @@ config ARM
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT if MMU
select CLONE_BACKWARDS
@@ -51,6 +53,8 @@ config ARM
select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
@@ -481,6 +485,7 @@ config ARCH_IXP4XX
bool "IXP4xx-based"
depends on MMU
select ARCH_HAS_DMA_SET_COHERENT_MASK
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select CPU_XSCALE
@@ -688,7 +693,6 @@ config ARCH_SA1100
select GENERIC_CLOCKEVENTS
select HAVE_IDE
select ISA
- select NEED_MACH_GPIO_H
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
help
@@ -1064,11 +1068,6 @@ config IWMMXT
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
-config XSCALE_PMU
- bool
- depends on CPU_XSCALE
- default y
-
config MULTI_IRQ_HANDLER
bool
help
@@ -1434,7 +1433,6 @@ config SMP
depends on GENERIC_CLOCKEVENTS
depends on HAVE_SMP
depends on MMU || ARM_MPU
- select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
@@ -1516,6 +1514,32 @@ config MCPM
for (multi-)cluster based systems, such as big.LITTLE based
systems.
+config BIG_LITTLE
+ bool "big.LITTLE support (Experimental)"
+ depends on CPU_V7 && SMP
+ select MCPM
+ help
+ This option enables support selections for the big.LITTLE
+ system architecture.
+
+config BL_SWITCHER
+ bool "big.LITTLE switcher support"
+ depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
+ select CPU_PM
+ select ARM_CPU_SUSPEND
+ help
+ The big.LITTLE "switcher" provides the core functionality to
+ transparently handle transition between a cluster of A15's
+ and a cluster of A7's in a big.LITTLE system.
+
+config BL_SWITCHER_DUMMY_IF
+ tristate "Simple big.LITTLE switcher user interface"
+ depends on BL_SWITCHER && DEBUG_KERNEL
+ help
+ This is a simple and dummy char dev interface to control
+ the big.LITTLE switcher core code. It is meant for
+ debugging purposes only.
+
choice
prompt "Memory split"
default VMSPLIT_3G
@@ -1839,6 +1863,12 @@ config CC_STACKPROTECTOR
neutralized via a kernel panic.
This feature requires gcc version 4.2 or above.
+config SWIOTLB
+ def_bool y
+
+config IOMMU_HELPER
+ def_bool SWIOTLB
+
config XEN_DOM0
def_bool y
depends on XEN
@@ -1849,6 +1879,7 @@ config XEN
depends on CPU_V7 && !CPU_V6
depends on !GENERIC_ATOMIC64
select ARM_PSCI
+ select SWIOTLB_XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index d597c6b8488b..5765abf5ce84 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -318,6 +318,7 @@ choice
config DEBUG_MSM_UART1
bool "Kernel low-level debugging messages via MSM UART1"
depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the first serial port on MSM devices.
@@ -325,6 +326,7 @@ choice
config DEBUG_MSM_UART2
bool "Kernel low-level debugging messages via MSM UART2"
depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the second serial port on MSM devices.
@@ -332,6 +334,7 @@ choice
config DEBUG_MSM_UART3
bool "Kernel low-level debugging messages via MSM UART3"
depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the third serial port on MSM devices.
@@ -340,6 +343,7 @@ choice
bool "Kernel low-level debugging messages via MSM 8660 UART"
depends on ARCH_MSM8X60
select MSM_HAS_DEBUG_UART_HS
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the serial port on MSM 8660 devices.
@@ -348,10 +352,20 @@ choice
bool "Kernel low-level debugging messages via MSM 8960 UART"
depends on ARCH_MSM8960
select MSM_HAS_DEBUG_UART_HS
+ select DEBUG_MSM_UART
help
Say Y here if you want the debug print routines to direct
their output to the serial port on MSM 8960 devices.
+ config DEBUG_MSM8974_UART
+ bool "Kernel low-level debugging messages via MSM 8974 UART"
+ depends on ARCH_MSM8974
+ select MSM_HAS_DEBUG_UART_HS
+ select DEBUG_MSM_UART
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the serial port on MSM 8974 devices.
+
config DEBUG_MVEBU_UART
bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)"
depends on ARCH_MVEBU
@@ -841,6 +855,20 @@ choice
options; the platform specific options are deprecated
and will be soon removed.
+ config DEBUG_LL_UART_EFM32
+ bool "Kernel low-level debugging via efm32 UART"
+ depends on ARCH_EFM32
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to an UART or USART port on efm32 based
+ machines. Use the following addresses for DEBUG_UART_PHYS:
+
+ 0x4000c000 | USART0
+ 0x4000c400 | USART1
+ 0x4000c800 | USART2
+ 0x4000e000 | UART0
+ 0x4000e400 | UART1
+
config DEBUG_LL_UART_PL01X
bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
help
@@ -887,11 +915,16 @@ config DEBUG_STI_UART
bool
depends on ARCH_STI
+config DEBUG_MSM_UART
+ bool
+ depends on ARCH_MSM
+
config DEBUG_LL_INCLUDE
string
default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
default "debug/exynos.S" if DEBUG_EXYNOS_UART
+ default "debug/efm32.S" if DEBUG_LL_UART_EFM32
default "debug/icedcc.S" if DEBUG_ICEDCC
default "debug/imx.S" if DEBUG_IMX1_UART || \
DEBUG_IMX25_UART || \
@@ -902,11 +935,7 @@ config DEBUG_LL_INCLUDE
DEBUG_IMX53_UART ||\
DEBUG_IMX6Q_UART || \
DEBUG_IMX6SL_UART
- default "debug/msm.S" if DEBUG_MSM_UART1 || \
- DEBUG_MSM_UART2 || \
- DEBUG_MSM_UART3 || \
- DEBUG_MSM8660_UART || \
- DEBUG_MSM8960_UART
+ default "debug/msm.S" if DEBUG_MSM_UART
default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1
default "debug/sti.S" if DEBUG_STI_UART
@@ -959,6 +988,7 @@ config DEBUG_UART_PHYS
default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
default 0x20201000 if DEBUG_BCM2835
+ default 0x4000e400 if DEBUG_LL_UART_EFM32
default 0x40090000 if ARCH_LPC32XX
default 0x40100000 if DEBUG_PXA_UART1
default 0x42000000 if ARCH_GEMINI
@@ -989,6 +1019,7 @@ config DEBUG_UART_PHYS
default 0xfff36000 if DEBUG_HIGHBANK_UART
default 0xfffff700 if ARCH_IOP33X
depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+ DEBUG_LL_UART_EFM32 || \
DEBUG_UART_8250 || DEBUG_UART_PL01X
config DEBUG_UART_VIRT
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 8b667132d7b4..c99b1086d83d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -16,6 +16,7 @@ LDFLAGS :=
LDFLAGS_vmlinux :=-p --no-undefined -X
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
+LDFLAGS_MODULE += --be8
endif
OBJCOPYFLAGS :=-O binary -R .comment -S
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 75189f13cf54..066b03480b63 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -135,6 +135,7 @@ start:
.word _edata @ zImage end address
THUMB( .thumb )
1:
+ ARM_BE8( setend be ) @ go BE8 if compiled for BE8
mrs r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install @ get into SVC mode, reversibly
@@ -699,9 +700,7 @@ __armv4_mmu_cache_on:
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x0030
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
@@ -728,9 +727,7 @@ __armv7_mmu_cache_on:
orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
@ (needed for ARM1176)
#ifdef CONFIG_MMU
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 987429436171..7e6c64ed966d 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -630,7 +630,7 @@
tsc {
ti,wires = <4>;
ti,x-plate-resistance = <200>;
- ti,coordiante-readouts = <5>;
+ ti,coordinate-readouts = <5>;
ti,wire-config = <0x00 0x11 0x22 0x33>;
};
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 03febf85fd2f..4718ec4a4dbf 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -485,3 +485,13 @@
tx-num-evt = <1>;
rx-num-evt = <1>;
};
+
+&tscadc {
+ status = "okay";
+ tsc {
+ ti,wires = <4>;
+ ti,x-plate-resistance = <200>;
+ ti,coordinate-readouts = <5>;
+ ti,wire-config = <0x00 0x11 0x22 0x33>;
+ };
+};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index fcb9c8e42ab7..f6d8ffe98d0b 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -670,6 +670,12 @@
/* Filled in by U-Boot */
mac-address = [ 00 00 00 00 00 00 ];
};
+
+ phy_sel: cpsw-phy-sel@44e10650 {
+ compatible = "ti,am3352-cpsw-phy-sel";
+ reg= <0x44e10650 0x4>;
+ reg-names = "gmii-sel";
+ };
};
ocmcram: ocmcram@40300000 {
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index eaa9cf4705a7..4bdc41622c36 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -16,3 +16,5 @@ obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
AFLAGS_mcpm_head.o := -march=armv7-a
AFLAGS_vlock.o := -march=armv7-a
obj-$(CONFIG_TI_PRIV_EDMA) += edma.o
+obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o
+obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
new file mode 100644
index 000000000000..5774b6ea7ad5
--- /dev/null
+++ b/arch/arm/common/bL_switcher.c
@@ -0,0 +1,822 @@
+/*
+ * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/notifier.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/moduleparam.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cputype.h>
+#include <asm/suspend.h>
+#include <asm/mcpm.h>
+#include <asm/bL_switcher.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power_cpu_migrate.h>
+
+
+/*
+ * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
+ * __attribute_const__ and we don't want the compiler to assume any
+ * constness here as the value _does_ change along some code paths.
+ */
+
+static int read_mpidr(void)
+{
+ unsigned int id;
+ asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
+ return id & MPIDR_HWID_BITMASK;
+}
+
+/*
+ * Get a global nanosecond time stamp for tracing.
+ */
+static s64 get_ns(void)
+{
+ struct timespec ts;
+ getnstimeofday(&ts);
+ return timespec_to_ns(&ts);
+}
+
+/*
+ * bL switcher core code.
+ */
+
+static void bL_do_switch(void *_arg)
+{
+ unsigned ib_mpidr, ib_cpu, ib_cluster;
+ long volatile handshake, **handshake_ptr = _arg;
+
+ pr_debug("%s\n", __func__);
+
+ ib_mpidr = cpu_logical_map(smp_processor_id());
+ ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+ ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+ /* Advertise our handshake location */
+ if (handshake_ptr) {
+ handshake = 0;
+ *handshake_ptr = &handshake;
+ } else
+ handshake = -1;
+
+ /*
+ * Our state has been saved at this point. Let's release our
+ * inbound CPU.
+ */
+ mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
+ sev();
+
+ /*
+ * From this point, we must assume that our counterpart CPU might
+ * have taken over in its parallel world already, as if execution
+ * just returned from cpu_suspend(). It is therefore important to
+ * be very careful not to make any change the other guy is not
+ * expecting. This is why we need stack isolation.
+ *
+ * Fancy under cover tasks could be performed here. For now
+ * we have none.
+ */
+
+ /*
+ * Let's wait until our inbound is alive.
+ */
+ while (!handshake) {
+ wfe();
+ smp_mb();
+ }
+
+ /* Let's put ourself down. */
+ mcpm_cpu_power_down();
+
+ /* should never get here */
+ BUG();
+}
+
+/*
+ * Stack isolation. To ensure 'current' remains valid, we just use another
+ * piece of our thread's stack space which should be fairly lightly used.
+ * The selected area starts just above the thread_info structure located
+ * at the very bottom of the stack, aligned to a cache line, and indexed
+ * with the cluster number.
+ */
+#define STACK_SIZE 512
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+static int bL_switchpoint(unsigned long _arg)
+{
+ unsigned int mpidr = read_mpidr();
+ unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ void *stack = current_thread_info() + 1;
+ stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
+ stack += clusterid * STACK_SIZE + STACK_SIZE;
+ call_with_stack(bL_do_switch, (void *)_arg, stack);
+ BUG();
+}
+
+/*
+ * Generic switcher interface
+ */
+
+static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
+static int bL_switcher_cpu_pairing[NR_CPUS];
+
+/*
+ * bL_switch_to - Switch to a specific cluster for the current CPU
+ * @new_cluster_id: the ID of the cluster to switch to.
+ *
+ * This function must be called on the CPU to be switched.
+ * Returns 0 on success, else a negative status code.
+ */
+static int bL_switch_to(unsigned int new_cluster_id)
+{
+ unsigned int mpidr, this_cpu, that_cpu;
+ unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
+ struct completion inbound_alive;
+ struct tick_device *tdev;
+ enum clock_event_mode tdev_mode;
+ long volatile *handshake_ptr;
+ int ipi_nr, ret;
+
+ this_cpu = smp_processor_id();
+ ob_mpidr = read_mpidr();
+ ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
+ ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
+ BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
+
+ if (new_cluster_id == ob_cluster)
+ return 0;
+
+ that_cpu = bL_switcher_cpu_pairing[this_cpu];
+ ib_mpidr = cpu_logical_map(that_cpu);
+ ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+ ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+ pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
+ this_cpu, ob_mpidr, ib_mpidr);
+
+ this_cpu = smp_processor_id();
+
+ /* Close the gate for our entry vectors */
+ mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
+ mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
+
+ /* Install our "inbound alive" notifier. */
+ init_completion(&inbound_alive);
+ ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
+ ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
+ mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
+
+ /*
+ * Let's wake up the inbound CPU now in case it requires some delay
+ * to come online, but leave it gated in our entry vector code.
+ */
+ ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
+ if (ret) {
+ pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Raise a SGI on the inbound CPU to make sure it doesn't stall
+ * in a possible WFI, such as in bL_power_down().
+ */
+ gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
+
+ /*
+ * Wait for the inbound to come up. This allows for other
+ * tasks to be scheduled in the mean time.
+ */
+ wait_for_completion(&inbound_alive);
+ mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
+
+ /*
+ * From this point we are entering the switch critical zone
+ * and can't take any interrupts anymore.
+ */
+ local_irq_disable();
+ local_fiq_disable();
+ trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+
+ /* redirect GIC's SGIs to our counterpart */
+ gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
+
+ tdev = tick_get_device(this_cpu);
+ if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
+ tdev = NULL;
+ if (tdev) {
+ tdev_mode = tdev->evtdev->mode;
+ clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+ }
+
+ ret = cpu_pm_enter();
+
+ /* we can not tolerate errors at this point */
+ if (ret)
+ panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);
+
+ /* Swap the physical CPUs in the logical map for this logical CPU. */
+ cpu_logical_map(this_cpu) = ib_mpidr;
+ cpu_logical_map(that_cpu) = ob_mpidr;
+
+ /* Let's do the actual CPU switch. */
+ ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
+ if (ret > 0)
+ panic("%s: cpu_suspend() returned %d\n", __func__, ret);
+
+ /* We are executing on the inbound CPU at this point */
+ mpidr = read_mpidr();
+ pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
+ BUG_ON(mpidr != ib_mpidr);
+
+ mcpm_cpu_powered_up();
+
+ ret = cpu_pm_exit();
+
+ if (tdev) {
+ clockevents_set_mode(tdev->evtdev, tdev_mode);
+ clockevents_program_event(tdev->evtdev,
+ tdev->evtdev->next_event, 1);
+ }
+
+ trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+ local_fiq_enable();
+ local_irq_enable();
+
+ *handshake_ptr = 1;
+ dsb_sev();
+
+ if (ret)
+ pr_err("%s exiting with error %d\n", __func__, ret);
+ return ret;
+}
+
+struct bL_thread {
+ spinlock_t lock;
+ struct task_struct *task;
+ wait_queue_head_t wq;
+ int wanted_cluster;
+ struct completion started;
+ bL_switch_completion_handler completer;
+ void *completer_cookie;
+};
+
+static struct bL_thread bL_threads[NR_CPUS];
+
+static int bL_switcher_thread(void *arg)
+{
+ struct bL_thread *t = arg;
+ struct sched_param param = { .sched_priority = 1 };
+ int cluster;
+ bL_switch_completion_handler completer;
+ void *completer_cookie;
+
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+ complete(&t->started);
+
+ do {
+ if (signal_pending(current))
+ flush_signals(current);
+ wait_event_interruptible(t->wq,
+ t->wanted_cluster != -1 ||
+ kthread_should_stop());
+
+ spin_lock(&t->lock);
+ cluster = t->wanted_cluster;
+ completer = t->completer;
+ completer_cookie = t->completer_cookie;
+ t->wanted_cluster = -1;
+ t->completer = NULL;
+ spin_unlock(&t->lock);
+
+ if (cluster != -1) {
+ bL_switch_to(cluster);
+
+ if (completer)
+ completer(completer_cookie);
+ }
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
+{
+ struct task_struct *task;
+
+ task = kthread_create_on_node(bL_switcher_thread, arg,
+ cpu_to_node(cpu), "kswitcher_%d", cpu);
+ if (!IS_ERR(task)) {
+ kthread_bind(task, cpu);
+ wake_up_process(task);
+ } else
+ pr_err("%s failed for CPU %d\n", __func__, cpu);
+ return task;
+}
+
+/*
+ * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
+ * with completion notification via a callback
+ *
+ * @cpu: the CPU to switch
+ * @new_cluster_id: the ID of the cluster to switch to.
+ * @completer: switch completion callback. if non-NULL,
+ * @completer(@completer_cookie) will be called on completion of
+ * the switch, in non-atomic context.
+ * @completer_cookie: opaque context argument for @completer.
+ *
+ * This function causes a cluster switch on the given CPU by waking up
+ * the appropriate switcher thread. This function may or may not return
+ * before the switch has occurred.
+ *
+ * If a @completer callback function is supplied, it will be called when
+ * the switch is complete. This can be used to determine asynchronously
+ * when the switch is complete, regardless of when bL_switch_request()
+ * returns. When @completer is supplied, no new switch request is permitted
+ * for the affected CPU until after the switch is complete, and @completer
+ * has returned.
+ */
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+ bL_switch_completion_handler completer,
+ void *completer_cookie)
+{
+ struct bL_thread *t;
+
+ if (cpu >= ARRAY_SIZE(bL_threads)) {
+ pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
+ return -EINVAL;
+ }
+
+ t = &bL_threads[cpu];
+
+ if (IS_ERR(t->task))
+ return PTR_ERR(t->task);
+ if (!t->task)
+ return -ESRCH;
+
+ spin_lock(&t->lock);
+ if (t->completer) {
+ spin_unlock(&t->lock);
+ return -EBUSY;
+ }
+ t->completer = completer;
+ t->completer_cookie = completer_cookie;
+ t->wanted_cluster = new_cluster_id;
+ spin_unlock(&t->lock);
+ wake_up(&t->wq);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bL_switch_request_cb);
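A minimal usage sketch (illustrative only, not part of this patch): a caller that needs to know when the requested switch has actually happened can pair bL_switch_request_cb() with a completion. The my_switch_done() and switch_cpu_to_cluster() names below are invented for the example.

	#include <linux/completion.h>
	#include <asm/bL_switcher.h>

	static void my_switch_done(void *cookie)
	{
		/* called once the switch is done, in non-atomic context */
		complete(cookie);
	}

	static int switch_cpu_to_cluster(unsigned int cpu, unsigned int cluster)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		int ret;

		ret = bL_switch_request_cb(cpu, cluster, my_switch_done, &done);
		if (ret)
			return ret;	/* e.g. -EBUSY while a previous completer is pending */
		wait_for_completion(&done);
		return 0;
	}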
+
+/*
+ * Activation and configuration code.
+ */
+
+static DEFINE_MUTEX(bL_switcher_activation_lock);
+static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
+static unsigned int bL_switcher_active;
+static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
+static cpumask_t bL_switcher_removed_logical_cpus;
+
+int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);
+
+int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
+
+static int bL_activation_notify(unsigned long val)
+{
+ int ret;
+
+ ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
+ if (ret & NOTIFY_STOP_MASK)
+ pr_err("%s: notifier chain failed with status 0x%x\n",
+ __func__, ret);
+ return notifier_to_errno(ret);
+}
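As a sketch of the consumer side (names invented for illustration): a driver that must coordinate with switcher activation registers a notifier and keys off the BL_NOTIFY_* values used here. BL_NOTIFY_PRE_ENABLE arrives before the CPUs are halved and BL_NOTIFY_POST_DISABLE once all logical CPUs are back online; returning NOTIFY_BAD from a PRE_* event vetoes the transition.

	static int bL_client_notify(struct notifier_block *nb,
				    unsigned long action, void *data)
	{
		switch (action) {
		case BL_NOTIFY_PRE_ENABLE:
			/* quiesce any per-physical-CPU state here */
			break;
		case BL_NOTIFY_POST_DISABLE:
			/* all logical CPUs are available again */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block bL_client_nb = {
		.notifier_call = bL_client_notify,
	};

	/* typically from the client's init path: */
	bL_switcher_register_notifier(&bL_client_nb);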
+
+static void bL_switcher_restore_cpus(void)
+{
+ int i;
+
+ for_each_cpu(i, &bL_switcher_removed_logical_cpus)
+ cpu_up(i);
+}
+
+static int bL_switcher_halve_cpus(void)
+{
+ int i, j, cluster_0, gic_id, ret;
+ unsigned int cpu, cluster, mask;
+ cpumask_t available_cpus;
+
+ /* First pass to validate what we have */
+ mask = 0;
+ for_each_online_cpu(i) {
+ cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+ if (cluster >= 2) {
+ pr_err("%s: only dual cluster systems are supported\n", __func__);
+ return -EINVAL;
+ }
+ if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
+ return -EINVAL;
+ mask |= (1 << cluster);
+ }
+ if (mask != 3) {
+ pr_err("%s: no CPU pairing possible\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Now let's do the pairing. We match each CPU with another CPU
+ * from a different cluster. To get a uniform scheduling behavior
+ * without fiddling with CPU topology and compute capacity data,
+ * we'll use logical CPUs initially belonging to the same cluster.
+ */
+ memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
+ cpumask_copy(&available_cpus, cpu_online_mask);
+ cluster_0 = -1;
+ for_each_cpu(i, &available_cpus) {
+ int match = -1;
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+ if (cluster_0 == -1)
+ cluster_0 = cluster;
+ if (cluster != cluster_0)
+ continue;
+ cpumask_clear_cpu(i, &available_cpus);
+ for_each_cpu(j, &available_cpus) {
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
+ /*
+ * Let's remember the last match to create "odd"
+ * pairings on purpose in order for other code not
+ * to assume any relation between physical and
+ * logical CPU numbers.
+ */
+ if (cluster != cluster_0)
+ match = j;
+ }
+ if (match != -1) {
+ bL_switcher_cpu_pairing[i] = match;
+ cpumask_clear_cpu(match, &available_cpus);
+ pr_info("CPU%d paired with CPU%d\n", i, match);
+ }
+ }
+
+ /*
+ * Now we disable the unwanted CPUs i.e. everything that has no
+ * pairing information (that includes the pairing counterparts).
+ */
+ cpumask_clear(&bL_switcher_removed_logical_cpus);
+ for_each_online_cpu(i) {
+ cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+
+ /* Let's take note of the GIC ID for this CPU */
+ gic_id = gic_get_cpu_id(i);
+ if (gic_id < 0) {
+ pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
+ bL_switcher_restore_cpus();
+ return -EINVAL;
+ }
+ bL_gic_id[cpu][cluster] = gic_id;
+ pr_info("GIC ID for CPU %u cluster %u is %u\n",
+ cpu, cluster, gic_id);
+
+ if (bL_switcher_cpu_pairing[i] != -1) {
+ bL_switcher_cpu_original_cluster[i] = cluster;
+ continue;
+ }
+
+ ret = cpu_down(i);
+ if (ret) {
+ bL_switcher_restore_cpus();
+ return ret;
+ }
+ cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
+ }
+
+ return 0;
+}
+
+/* Determine the logical CPU a given physical CPU is grouped on. */
+int bL_switcher_get_logical_index(u32 mpidr)
+{
+ int cpu;
+
+ if (!bL_switcher_active)
+ return -EUNATCH;
+
+ mpidr &= MPIDR_HWID_BITMASK;
+ for_each_online_cpu(cpu) {
+ int pairing = bL_switcher_cpu_pairing[cpu];
+ if (pairing == -1)
+ continue;
+ if ((mpidr == cpu_logical_map(cpu)) ||
+ (mpidr == cpu_logical_map(pairing)))
+ return cpu;
+ }
+ return -EINVAL;
+}
+
+static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
+{
+ trace_cpu_migrate_current(get_ns(), read_mpidr());
+}
+
+int bL_switcher_trace_trigger(void)
+{
+ int ret;
+
+ preempt_disable();
+
+ bL_switcher_trace_trigger_cpu(NULL);
+ ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
+
+ preempt_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
+
+static int bL_switcher_enable(void)
+{
+ int cpu, ret;
+
+ mutex_lock(&bL_switcher_activation_lock);
+ lock_device_hotplug();
+ if (bL_switcher_active) {
+ unlock_device_hotplug();
+ mutex_unlock(&bL_switcher_activation_lock);
+ return 0;
+ }
+
+ pr_info("big.LITTLE switcher initializing\n");
+
+ ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
+ if (ret)
+ goto error;
+
+ ret = bL_switcher_halve_cpus();
+ if (ret)
+ goto error;
+
+ bL_switcher_trace_trigger();
+
+ for_each_online_cpu(cpu) {
+ struct bL_thread *t = &bL_threads[cpu];
+ spin_lock_init(&t->lock);
+ init_waitqueue_head(&t->wq);
+ init_completion(&t->started);
+ t->wanted_cluster = -1;
+ t->task = bL_switcher_thread_create(cpu, t);
+ }
+
+ bL_switcher_active = 1;
+ bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+ pr_info("big.LITTLE switcher initialized\n");
+ goto out;
+
+error:
+ pr_warn("big.LITTLE switcher initialization failed\n");
+ bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+ unlock_device_hotplug();
+ mutex_unlock(&bL_switcher_activation_lock);
+ return ret;
+}
+
+#ifdef CONFIG_SYSFS
+
+static void bL_switcher_disable(void)
+{
+ unsigned int cpu, cluster;
+ struct bL_thread *t;
+ struct task_struct *task;
+
+ mutex_lock(&bL_switcher_activation_lock);
+ lock_device_hotplug();
+
+ if (!bL_switcher_active)
+ goto out;
+
+ if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
+ bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+ goto out;
+ }
+
+ bL_switcher_active = 0;
+
+ /*
+ * To deactivate the switcher, we must shut down the switcher
+ * threads to prevent any other requests from being accepted.
+ * Then, if the final cluster for given logical CPU is not the
+ * same as the original one, we'll recreate a switcher thread
+ * just for the purpose of switching the CPU back without any
+ * possibility for interference from external requests.
+ */
+ for_each_online_cpu(cpu) {
+ t = &bL_threads[cpu];
+ task = t->task;
+ t->task = NULL;
+ if (!task || IS_ERR(task))
+ continue;
+ kthread_stop(task);
+ /* no more switch may happen on this CPU at this point */
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ if (cluster == bL_switcher_cpu_original_cluster[cpu])
+ continue;
+ init_completion(&t->started);
+ t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
+ task = bL_switcher_thread_create(cpu, t);
+ if (!IS_ERR(task)) {
+ wait_for_completion(&t->started);
+ kthread_stop(task);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ if (cluster == bL_switcher_cpu_original_cluster[cpu])
+ continue;
+ }
+ /* If execution gets here, we're in trouble. */
+ pr_crit("%s: unable to restore original cluster for CPU %d\n",
+ __func__, cpu);
+ pr_crit("%s: CPU %d can't be restored\n",
+ __func__, bL_switcher_cpu_pairing[cpu]);
+ cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
+ &bL_switcher_removed_logical_cpus);
+ }
+
+ bL_switcher_restore_cpus();
+ bL_switcher_trace_trigger();
+
+ bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+ unlock_device_hotplug();
+ mutex_unlock(&bL_switcher_activation_lock);
+}
+
+static ssize_t bL_switcher_active_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", bL_switcher_active);
+}
+
+static ssize_t bL_switcher_active_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+
+ switch (buf[0]) {
+ case '0':
+ bL_switcher_disable();
+ ret = 0;
+ break;
+ case '1':
+ ret = bL_switcher_enable();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return (ret >= 0) ? count : ret;
+}
+
+static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = bL_switcher_trace_trigger();
+
+ return ret ? ret : count;
+}
+
+static struct kobj_attribute bL_switcher_active_attr =
+ __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
+
+static struct kobj_attribute bL_switcher_trace_trigger_attr =
+ __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
+
+static struct attribute *bL_switcher_attrs[] = {
+ &bL_switcher_active_attr.attr,
+ &bL_switcher_trace_trigger_attr.attr,
+ NULL,
+};
+
+static struct attribute_group bL_switcher_attr_group = {
+ .attrs = bL_switcher_attrs,
+};
+
+static struct kobject *bL_switcher_kobj;
+
+static int __init bL_switcher_sysfs_init(void)
+{
+ int ret;
+
+ bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
+ if (!bL_switcher_kobj)
+ return -ENOMEM;
+ ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
+ if (ret)
+ kobject_put(bL_switcher_kobj);
+ return ret;
+}
+
+#endif /* CONFIG_SYSFS */
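Because the kobject is created under kernel_kobj, these attributes should surface as /sys/kernel/bL_switcher/active and /sys/kernel/bL_switcher/trace_trigger. A rough userspace sketch (illustrative only) of disabling the switcher at run time:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/bL_switcher/active", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* "0" disables the switcher, "1" re-enables it */
		if (write(fd, "0", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}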
+
+bool bL_switcher_get_enabled(void)
+{
+ mutex_lock(&bL_switcher_activation_lock);
+
+ return bL_switcher_active;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);
+
+void bL_switcher_put_enabled(void)
+{
+ mutex_unlock(&bL_switcher_activation_lock);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
+
+/*
+ * Veto any CPU hotplug operation on those CPUs we've removed
+ * while the switcher is active.
+ * We're just not ready to deal with that given the trickery involved.
+ */
+static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if (bL_switcher_active) {
+ int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
+ switch (action & 0xf) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ if (pairing == -1)
+ return NOTIFY_BAD;
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static bool no_bL_switcher;
+core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
+
+static int __init bL_switcher_init(void)
+{
+ int ret;
+
+ if (MAX_NR_CLUSTERS != 2) {
+ pr_err("%s: only dual cluster systems are supported\n", __func__);
+ return -EINVAL;
+ }
+
+ cpu_notifier(bL_switcher_hotplug_callback, 0);
+
+ if (!no_bL_switcher) {
+ ret = bL_switcher_enable();
+ if (ret)
+ return ret;
+ }
+
+#ifdef CONFIG_SYSFS
+ ret = bL_switcher_sysfs_init();
+ if (ret)
+ pr_err("%s: unable to create sysfs entry\n", __func__);
+#endif
+
+ return 0;
+}
+
+late_initcall(bL_switcher_init);
diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
new file mode 100644
index 000000000000..3f47f1203c6b
--- /dev/null
+++ b/arch/arm/common/bL_switcher_dummy_if.c
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
+ *
+ * Created by: Nicolas Pitre, November 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * Dummy interface to user space for debugging purpose only.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/uaccess.h>
+#include <asm/bL_switcher.h>
+
+static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ unsigned char val[3];
+ unsigned int cpu, cluster;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ if (len < 3)
+ return -EINVAL;
+
+ if (copy_from_user(val, buf, 3))
+ return -EFAULT;
+
+ /* format: <cpu#>,<cluster#> */
+ if (val[0] < '0' || val[0] > '9' ||
+ val[1] != ',' ||
+ val[2] < '0' || val[2] > '1')
+ return -EINVAL;
+
+ cpu = val[0] - '0';
+ cluster = val[2] - '0';
+ ret = bL_switch_request(cpu, cluster);
+
+ return ret ? : len;
+}
+
+static const struct file_operations bL_switcher_fops = {
+ .write = bL_switcher_write,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice bL_switcher_device = {
+ MISC_DYNAMIC_MINOR,
+ "b.L_switcher",
+ &bL_switcher_fops
+};
+
+static int __init bL_switcher_dummy_if_init(void)
+{
+ return misc_register(&bL_switcher_device);
+}
+
+static void __exit bL_switcher_dummy_if_exit(void)
+{
+ misc_deregister(&bL_switcher_device);
+}
+
+module_init(bL_switcher_dummy_if_init);
+module_exit(bL_switcher_dummy_if_exit);
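
For reference, a rough userspace sketch of driving this debug interface (illustrative only, assuming udev exposes the misc device as /dev/b.L_switcher). The written string follows the "<cpu#>,<cluster#>" format parsed above; e.g. "0,1" asks logical CPU 0 to switch to cluster 1:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/b.L_switcher", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "0,1", 3) != 3)
			perror("write");
		close(fd);
		return 0;
	}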
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 990250965f2c..26020a03f659 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -27,6 +27,18 @@ void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
+extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];
+
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val)
+{
+ unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
+ poke[0] = poke_phys_addr;
+ poke[1] = poke_val;
+ __cpuc_flush_dcache_area((void *)poke, 8);
+ outer_clean_range(__pa(poke), __pa(poke + 2));
+}
+
static const struct mcpm_platform_ops *platform_ops;
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
@@ -90,6 +102,21 @@ void mcpm_cpu_power_down(void)
BUG();
}
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
+{
+ int ret;
+
+ if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
+ return -EUNATCH;
+
+ ret = platform_ops->power_down_finish(cpu, cluster);
+ if (ret)
+ pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
+ __func__, cpu, cluster, ret);
+
+ return ret;
+}
+
void mcpm_cpu_suspend(u64 expected_residency)
{
phys_reset_t phys_reset;
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index 39c96df3477a..e02db4b81a66 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -15,6 +15,7 @@
#include <linux/linkage.h>
#include <asm/mcpm.h>
+#include <asm/assembler.h>
#include "vlock.h"
@@ -47,6 +48,7 @@
ENTRY(mcpm_entry_point)
+ ARM_BE8(setend be)
THUMB( adr r12, BSYM(1f) )
THUMB( bx r12 )
THUMB( .thumb )
@@ -71,12 +73,19 @@ ENTRY(mcpm_entry_point)
* position independent way.
*/
adr r5, 3f
- ldmia r5, {r6, r7, r8, r11}
+ ldmia r5, {r0, r6, r7, r8, r11}
+ add r0, r5, r0 @ r0 = mcpm_entry_early_pokes
add r6, r5, r6 @ r6 = mcpm_entry_vectors
ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
add r8, r5, r8 @ r8 = mcpm_sync
add r11, r5, r11 @ r11 = first_man_locks
+ @ Perform an early poke, if any
+ add r0, r0, r4, lsl #3
+ ldmia r0, {r0, r1}
+ teq r0, #0
+ strne r1, [r0]
+
mov r0, #MCPM_SYNC_CLUSTER_SIZE
mla r8, r0, r10, r8 @ r8 = sync cluster base
@@ -195,7 +204,8 @@ mcpm_entry_gated:
.align 2
-3: .word mcpm_entry_vectors - .
+3: .word mcpm_entry_early_pokes - .
+ .word mcpm_entry_vectors - 3b
.word mcpm_power_up_setup_phys - 3b
.word mcpm_sync - 3b
.word first_man_locks - 3b
@@ -214,6 +224,10 @@ first_man_locks:
ENTRY(mcpm_entry_vectors)
.space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+ .type mcpm_entry_early_pokes, #object
+ENTRY(mcpm_entry_early_pokes)
+ .space 8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
.type mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
.space 4 @ set by mcpm_sync_init()
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 1bc34c7567fd..177251a4dd9a 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -19,14 +19,23 @@
#include <asm/smp.h>
#include <asm/smp_plat.h>
+static void cpu_to_pcpu(unsigned int cpu,
+ unsigned int *pcpu, unsigned int *pcluster)
+{
+ unsigned int mpidr;
+
+ mpidr = cpu_logical_map(cpu);
+ *pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ *pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+}
+
static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned int mpidr, pcpu, pcluster, ret;
+ unsigned int pcpu, pcluster, ret;
extern void secondary_startup(void);
- mpidr = cpu_logical_map(cpu);
- pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
__func__, cpu, pcpu, pcluster);
@@ -47,6 +56,15 @@ static void mcpm_secondary_init(unsigned int cpu)
#ifdef CONFIG_HOTPLUG_CPU
+static int mcpm_cpu_kill(unsigned int cpu)
+{
+ unsigned int pcpu, pcluster;
+
+ cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
+ return !mcpm_cpu_power_down_finish(pcpu, pcluster);
+}
+
static int mcpm_cpu_disable(unsigned int cpu)
{
/*
@@ -73,6 +91,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
.smp_boot_secondary = mcpm_boot_secondary,
.smp_secondary_init = mcpm_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_kill = mcpm_cpu_kill,
.cpu_disable = mcpm_cpu_disable,
.cpu_die = mcpm_cpu_die,
#endif
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index e901d0f3e0bb..ce922d0ea7aa 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -175,7 +175,7 @@ static struct clock_event_device sp804_clockevent = {
static struct irqaction sp804_timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = sp804_timer_interrupt,
.dev_id = &sp804_clockevent,
};
diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig
index 317960f12488..0142ec37e0be 100644
--- a/arch/arm/configs/h3600_defconfig
+++ b/arch/arm/configs/h3600_defconfig
@@ -1,5 +1,6 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
@@ -11,11 +12,11 @@ CONFIG_ARCH_SA1100=y
CONFIG_SA1100_H3600=y
CONFIG_PCCARD=y
CONFIG_PCMCIA_SA1100=y
+CONFIG_PREEMPT=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
# CONFIG_CPU_FREQ_STAT is not set
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_INET=y
@@ -24,13 +25,10 @@ CONFIG_IRDA=m
CONFIG_IRLAN=m
CONFIG_IRNET=m
CONFIG_IRCOMM=m
-CONFIG_SA1100_FIR=m
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -41,19 +39,15 @@ CONFIG_MTD_SA1100=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=y
CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
-CONFIG_NET_PCMCIA=y
CONFIG_PCMCIA_PCNET=y
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
+# CONFIG_WLAN is not set
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
@@ -64,8 +58,6 @@ CONFIG_SERIAL_SA1100_CONSOLE=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_SA1100=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_MSDOS_FS=m
@@ -74,6 +66,4 @@ CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=m
CONFIG_NFS_FS=y
CONFIG_NFSD=m
-CONFIG_SMB_FS=m
CONFIG_NLS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore
new file mode 100644
index 000000000000..6231d36b3635
--- /dev/null
+++ b/arch/arm/crypto/.gitignore
@@ -0,0 +1 @@
+aesbs-core.S
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index a2c83851bc90..81cda39860c5 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -3,7 +3,17 @@
#
obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
+obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
-aes-arm-y := aes-armv4.o aes_glue.o
-sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+aes-arm-y := aes-armv4.o aes_glue.o
+aes-arm-bs-y := aesbs-core.o aesbs-glue.o
+sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+
+quiet_cmd_perl = PERL $@
+ cmd_perl = $(PERL) $(<) > $(@)
+
+$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
+ $(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S
diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
index 59f7877ead6a..3003fa1f6fb4 100644
--- a/arch/arm/crypto/aes_glue.c
+++ b/arch/arm/crypto/aes_glue.c
@@ -6,22 +6,12 @@
#include <linux/crypto.h>
#include <crypto/aes.h>
-#define AES_MAXNR 14
+#include "aes_glue.h"
-typedef struct {
- unsigned int rd_key[4 *(AES_MAXNR + 1)];
- int rounds;
-} AES_KEY;
-
-struct AES_CTX {
- AES_KEY enc_key;
- AES_KEY dec_key;
-};
-
-asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
-asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
+EXPORT_SYMBOL(AES_encrypt);
+EXPORT_SYMBOL(AES_decrypt);
+EXPORT_SYMBOL(private_AES_set_encrypt_key);
+EXPORT_SYMBOL(private_AES_set_decrypt_key);
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
@@ -81,7 +71,7 @@ static struct crypto_alg aes_alg = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
+ .cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
diff --git a/arch/arm/crypto/aes_glue.h b/arch/arm/crypto/aes_glue.h
new file mode 100644
index 000000000000..cca3e51eb606
--- /dev/null
+++ b/arch/arm/crypto/aes_glue.h
@@ -0,0 +1,19 @@
+
+#define AES_MAXNR 14
+
+struct AES_KEY {
+ unsigned int rd_key[4 * (AES_MAXNR + 1)];
+ int rounds;
+};
+
+struct AES_CTX {
+ struct AES_KEY enc_key;
+ struct AES_KEY dec_key;
+};
+
+asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
+ const int bits, struct AES_KEY *key);
+asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
+ const int bits, struct AES_KEY *key);
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
new file mode 100644
index 000000000000..64205d453260
--- /dev/null
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -0,0 +1,2544 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+@ granted.
+@ ====================================================================
+
+@ Bit-sliced AES for ARM NEON
+@
+@ February 2012.
+@
+@ This implementation is direct adaptation of bsaes-x86_64 module for
+@ ARM NEON. Except that this module is endian-neutral [in sense that
+@ it can be compiled for either endianness] by courtesy of vld1.8's
+@ neutrality. Initial version doesn't implement interface to OpenSSL,
+@ only low-level primitives and unsupported entry points, just enough
+@ to collect performance results, which for Cortex-A8 core are:
+@
+@ encrypt 19.5 cycles per byte processed with 128-bit key
+@ decrypt 22.1 cycles per byte processed with 128-bit key
+@ key conv. 440 cycles per 128-bit key/0.18 of 8x block
+@
+@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
+@ which is [much] worse than anticipated (for further details see
+@ http://www.openssl.org/~appro/Snapdragon-S4.html).
+@
+@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+@ manages in 20.0 cycles].
+@
+@ When comparing to x86_64 results keep in mind that NEON unit is
+@ [mostly] single-issue and thus can't [fully] benefit from
+@ instruction-level parallelism. And when comparing to aes-armv4
+@ results keep in mind key schedule conversion overhead (see
+@ bsaes-x86_64.pl for further details)...
+@
+@ <appro@openssl.org>
+
+@ April-August 2013
+@
+@ Add CBC, CTR and XTS subroutines, adapt for kernel use.
+@
+@ <ard.biesheuvel@linaro.org>
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
+# define VFP_ABI_POP vldmia sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax unified @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code 32
+#endif
+
+.fpu neon
+
+.type _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+ adr r6,_bsaes_decrypt8
+ vldmia r4!, {q9} @ round 0 key
+ add r6,r6,#.LM0ISR-_bsaes_decrypt8
+
+ vldmia r6!, {q8} @ .LM0ISR
+ veor q10, q0, q9 @ xor with round0 key
+ veor q11, q1, q9
+ vtbl.8 d0, {q10}, d16
+ vtbl.8 d1, {q10}, d17
+ veor q12, q2, q9
+ vtbl.8 d2, {q11}, d16
+ vtbl.8 d3, {q11}, d17
+ veor q13, q3, q9
+ vtbl.8 d4, {q12}, d16
+ vtbl.8 d5, {q12}, d17
+ veor q14, q4, q9
+ vtbl.8 d6, {q13}, d16
+ vtbl.8 d7, {q13}, d17
+ veor q15, q5, q9
+ vtbl.8 d8, {q14}, d16
+ vtbl.8 d9, {q14}, d17
+ veor q10, q6, q9
+ vtbl.8 d10, {q15}, d16
+ vtbl.8 d11, {q15}, d17
+ veor q11, q7, q9
+ vtbl.8 d12, {q10}, d16
+ vtbl.8 d13, {q10}, d17
+ vtbl.8 d14, {q11}, d16
+ vtbl.8 d15, {q11}, d17
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q6, #1
+ vshr.u64 q11, q4, #1
+ veor q10, q10, q7
+ veor q11, q11, q5
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #1
+ veor q5, q5, q11
+ vshl.u64 q11, q11, #1
+ veor q6, q6, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q2, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q3
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q2, q2, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q5, #2
+ vshr.u64 q11, q4, #2
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #2
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #2
+ veor q5, q5, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q3
+ veor q11, q11, q2
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #2
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q3, #4
+ vshr.u64 q11, q2, #4
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #4
+ veor q3, q3, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q5
+ veor q11, q11, q4
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q4, q4, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ sub r5,r5,#1
+ b .Ldec_sbox
+.align 4
+.Ldec_loop:
+ vldmia r4!, {q8-q11}
+ veor q8, q8, q0
+ veor q9, q9, q1
+ vtbl.8 d0, {q8}, d24
+ vtbl.8 d1, {q8}, d25
+ vldmia r4!, {q8}
+ veor q10, q10, q2
+ vtbl.8 d2, {q9}, d24
+ vtbl.8 d3, {q9}, d25
+ vldmia r4!, {q9}
+ veor q11, q11, q3
+ vtbl.8 d4, {q10}, d24
+ vtbl.8 d5, {q10}, d25
+ vldmia r4!, {q10}
+ vtbl.8 d6, {q11}, d24
+ vtbl.8 d7, {q11}, d25
+ vldmia r4!, {q11}
+ veor q8, q8, q4
+ veor q9, q9, q5
+ vtbl.8 d8, {q8}, d24
+ vtbl.8 d9, {q8}, d25
+ veor q10, q10, q6
+ vtbl.8 d10, {q9}, d24
+ vtbl.8 d11, {q9}, d25
+ veor q11, q11, q7
+ vtbl.8 d12, {q10}, d24
+ vtbl.8 d13, {q10}, d25
+ vtbl.8 d14, {q11}, d24
+ vtbl.8 d15, {q11}, d25
+.Ldec_sbox:
+ veor q1, q1, q4
+ veor q3, q3, q4
+
+ veor q4, q4, q7
+ veor q1, q1, q6
+ veor q2, q2, q7
+ veor q6, q6, q4
+
+ veor q0, q0, q1
+ veor q2, q2, q5
+ veor q7, q7, q6
+ veor q3, q3, q0
+ veor q5, q5, q0
+ veor q1, q1, q3
+ veor q11, q3, q0
+ veor q10, q7, q4
+ veor q9, q1, q6
+ veor q13, q4, q0
+ vmov q8, q10
+ veor q12, q5, q2
+
+ vorr q10, q10, q9
+ veor q15, q11, q8
+ vand q14, q11, q12
+ vorr q11, q11, q12
+ veor q12, q12, q9
+ vand q8, q8, q9
+ veor q9, q6, q2
+ vand q15, q15, q12
+ vand q13, q13, q9
+ veor q9, q3, q7
+ veor q12, q1, q5
+ veor q11, q11, q13
+ veor q10, q10, q13
+ vand q13, q9, q12
+ vorr q9, q9, q12
+ veor q11, q11, q15
+ veor q8, q8, q13
+ veor q10, q10, q14
+ veor q9, q9, q15
+ veor q8, q8, q14
+ vand q12, q4, q6
+ veor q9, q9, q14
+ vand q13, q0, q2
+ vand q14, q7, q1
+ vorr q15, q3, q5
+ veor q11, q11, q12
+ veor q9, q9, q14
+ veor q8, q8, q15
+ veor q10, q10, q13
+
+ @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
+
+ @ new smaller inversion
+
+ vand q14, q11, q9
+ vmov q12, q8
+
+ veor q13, q10, q14
+ veor q15, q8, q14
+ veor q14, q8, q14 @ q14=q15
+
+ vbsl q13, q9, q8
+ vbsl q15, q11, q10
+ veor q11, q11, q10
+
+ vbsl q12, q13, q14
+ vbsl q8, q14, q13
+
+ vand q14, q12, q15
+ veor q9, q9, q8
+
+ veor q14, q14, q11
+ veor q12, q5, q2
+ veor q8, q1, q6
+ veor q10, q15, q14
+ vand q10, q10, q5
+ veor q5, q5, q1
+ vand q11, q1, q15
+ vand q5, q5, q14
+ veor q1, q11, q10
+ veor q5, q5, q11
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q2
+ veor q12, q12, q8
+ veor q2, q2, q6
+ vand q8, q8, q15
+ vand q6, q6, q13
+ vand q12, q12, q14
+ vand q2, q2, q9
+ veor q8, q8, q12
+ veor q2, q2, q6
+ veor q12, q12, q11
+ veor q6, q6, q10
+ veor q5, q5, q12
+ veor q2, q2, q12
+ veor q1, q1, q8
+ veor q6, q6, q8
+
+ veor q12, q3, q0
+ veor q8, q7, q4
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q0
+ veor q12, q12, q8
+ veor q0, q0, q4
+ vand q8, q8, q15
+ vand q4, q4, q13
+ vand q12, q12, q14
+ vand q0, q0, q9
+ veor q8, q8, q12
+ veor q0, q0, q4
+ veor q12, q12, q11
+ veor q4, q4, q10
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q10, q15, q14
+ vand q10, q10, q3
+ veor q3, q3, q7
+ vand q11, q7, q15
+ vand q3, q3, q14
+ veor q7, q11, q10
+ veor q3, q3, q11
+ veor q3, q3, q12
+ veor q0, q0, q12
+ veor q7, q7, q8
+ veor q4, q4, q8
+ veor q1, q1, q7
+ veor q6, q6, q5
+
+ veor q4, q4, q1
+ veor q2, q2, q7
+ veor q5, q5, q7
+ veor q4, q4, q2
+ veor q7, q7, q0
+ veor q4, q4, q5
+ veor q3, q3, q6
+ veor q6, q6, q1
+ veor q3, q3, q4
+
+ veor q4, q4, q0
+ veor q7, q7, q3
+ subs r5,r5,#1
+ bcc .Ldec_done
+ @ multiplication by 0x05-0x00-0x04-0x00
+ vext.8 q8, q0, q0, #8
+ vext.8 q14, q3, q3, #8
+ vext.8 q15, q5, q5, #8
+ veor q8, q8, q0
+ vext.8 q9, q1, q1, #8
+ veor q14, q14, q3
+ vext.8 q10, q6, q6, #8
+ veor q15, q15, q5
+ vext.8 q11, q4, q4, #8
+ veor q9, q9, q1
+ vext.8 q12, q2, q2, #8
+ veor q10, q10, q6
+ vext.8 q13, q7, q7, #8
+ veor q11, q11, q4
+ veor q12, q12, q2
+ veor q13, q13, q7
+
+ veor q0, q0, q14
+ veor q1, q1, q14
+ veor q6, q6, q8
+ veor q2, q2, q10
+ veor q4, q4, q9
+ veor q1, q1, q15
+ veor q6, q6, q15
+ veor q2, q2, q14
+ veor q7, q7, q11
+ veor q4, q4, q14
+ veor q3, q3, q12
+ veor q2, q2, q15
+ veor q7, q7, q15
+ veor q5, q5, q13
+ vext.8 q8, q0, q0, #12 @ x0 <<< 32
+ vext.8 q9, q1, q1, #12
+ veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
+ vext.8 q10, q6, q6, #12
+ veor q1, q1, q9
+ vext.8 q11, q4, q4, #12
+ veor q6, q6, q10
+ vext.8 q12, q2, q2, #12
+ veor q4, q4, q11
+ vext.8 q13, q7, q7, #12
+ veor q2, q2, q12
+ vext.8 q14, q3, q3, #12
+ veor q7, q7, q13
+ vext.8 q15, q5, q5, #12
+ veor q3, q3, q14
+
+ veor q9, q9, q0
+ veor q5, q5, q15
+ vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64
+ veor q10, q10, q1
+ veor q8, q8, q5
+ veor q9, q9, q5
+ vext.8 q1, q1, q1, #8
+ veor q13, q13, q2
+ veor q0, q0, q8
+ veor q14, q14, q7
+ veor q1, q1, q9
+ vext.8 q8, q2, q2, #8
+ veor q12, q12, q4
+ vext.8 q9, q7, q7, #8
+ veor q15, q15, q3
+ vext.8 q2, q4, q4, #8
+ veor q11, q11, q6
+ vext.8 q7, q5, q5, #8
+ veor q12, q12, q5
+ vext.8 q4, q3, q3, #8
+ veor q11, q11, q5
+ vext.8 q3, q6, q6, #8
+ veor q5, q9, q13
+ veor q11, q11, q2
+ veor q7, q7, q15
+ veor q6, q4, q14
+ veor q4, q8, q12
+ veor q2, q3, q10
+ vmov q3, q11
+ @ vmov q5, q9
+ vldmia r6, {q12} @ .LISR
+ ite eq @ Thumb2 thing, sanity check in ARM
+ addeq r6,r6,#0x10
+ bne .Ldec_loop
+ vldmia r6, {q12} @ .LISRM0
+ b .Ldec_loop
+.align 4
+.Ldec_done:
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q3, #1
+ vshr.u64 q11, q2, #1
+ veor q10, q10, q5
+ veor q11, q11, q7
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #1
+ veor q7, q7, q11
+ vshl.u64 q11, q11, #1
+ veor q3, q3, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q6, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q4
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q4, q4, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q6, q6, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q7, #2
+ vshr.u64 q11, q2, #2
+ veor q10, q10, q5
+ veor q11, q11, q3
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #2
+ veor q3, q3, q11
+ vshl.u64 q11, q11, #2
+ veor q7, q7, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q4
+ veor q11, q11, q6
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q4, q4, q10
+ vshl.u64 q10, q10, #2
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q4, #4
+ vshr.u64 q11, q6, #4
+ veor q10, q10, q5
+ veor q11, q11, q3
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q3, q3, q11
+ vshl.u64 q11, q11, #4
+ veor q4, q4, q10
+ veor q6, q6, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q7
+ veor q11, q11, q2
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vldmia r4, {q8} @ last round key
+ veor q6, q6, q8
+ veor q4, q4, q8
+ veor q2, q2, q8
+ veor q7, q7, q8
+ veor q3, q3, q8
+ veor q5, q5, q8
+ veor q0, q0, q8
+ veor q1, q1, q8
+ bx lr
+.size _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR: @ InvShiftRows constants
+ .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+ .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+ .quad 0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR: @ ShiftRows constants
+ .quad 0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+ .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+ .quad 0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+ .quad 0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+ .quad 0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 6
+.size _bsaes_const,.-_bsaes_const
+
+.type _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+ adr r6,_bsaes_encrypt8
+ vldmia r4!, {q9} @ round 0 key
+ sub r6,r6,#_bsaes_encrypt8-.LM0SR
+
+ vldmia r6!, {q8} @ .LM0SR
+_bsaes_encrypt8_alt:
+ veor q10, q0, q9 @ xor with round0 key
+ veor q11, q1, q9
+ vtbl.8 d0, {q10}, d16
+ vtbl.8 d1, {q10}, d17
+ veor q12, q2, q9
+ vtbl.8 d2, {q11}, d16
+ vtbl.8 d3, {q11}, d17
+ veor q13, q3, q9
+ vtbl.8 d4, {q12}, d16
+ vtbl.8 d5, {q12}, d17
+ veor q14, q4, q9
+ vtbl.8 d6, {q13}, d16
+ vtbl.8 d7, {q13}, d17
+ veor q15, q5, q9
+ vtbl.8 d8, {q14}, d16
+ vtbl.8 d9, {q14}, d17
+ veor q10, q6, q9
+ vtbl.8 d10, {q15}, d16
+ vtbl.8 d11, {q15}, d17
+ veor q11, q7, q9
+ vtbl.8 d12, {q10}, d16
+ vtbl.8 d13, {q10}, d17
+ vtbl.8 d14, {q11}, d16
+ vtbl.8 d15, {q11}, d17
+_bsaes_encrypt8_bitslice:
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q6, #1
+ vshr.u64 q11, q4, #1
+ veor q10, q10, q7
+ veor q11, q11, q5
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #1
+ veor q5, q5, q11
+ vshl.u64 q11, q11, #1
+ veor q6, q6, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q2, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q3
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q2, q2, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q5, #2
+ vshr.u64 q11, q4, #2
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #2
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #2
+ veor q5, q5, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q3
+ veor q11, q11, q2
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #2
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q3, #4
+ vshr.u64 q11, q2, #4
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #4
+ veor q3, q3, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q5
+ veor q11, q11, q4
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q4, q4, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ sub r5,r5,#1
+ b .Lenc_sbox
+.align 4
+.Lenc_loop:
+ vldmia r4!, {q8-q11}
+ veor q8, q8, q0
+ veor q9, q9, q1
+ vtbl.8 d0, {q8}, d24
+ vtbl.8 d1, {q8}, d25
+ vldmia r4!, {q8}
+ veor q10, q10, q2
+ vtbl.8 d2, {q9}, d24
+ vtbl.8 d3, {q9}, d25
+ vldmia r4!, {q9}
+ veor q11, q11, q3
+ vtbl.8 d4, {q10}, d24
+ vtbl.8 d5, {q10}, d25
+ vldmia r4!, {q10}
+ vtbl.8 d6, {q11}, d24
+ vtbl.8 d7, {q11}, d25
+ vldmia r4!, {q11}
+ veor q8, q8, q4
+ veor q9, q9, q5
+ vtbl.8 d8, {q8}, d24
+ vtbl.8 d9, {q8}, d25
+ veor q10, q10, q6
+ vtbl.8 d10, {q9}, d24
+ vtbl.8 d11, {q9}, d25
+ veor q11, q11, q7
+ vtbl.8 d12, {q10}, d24
+ vtbl.8 d13, {q10}, d25
+ vtbl.8 d14, {q11}, d24
+ vtbl.8 d15, {q11}, d25
+.Lenc_sbox:
+ veor q2, q2, q1
+ veor q5, q5, q6
+ veor q3, q3, q0
+ veor q6, q6, q2
+ veor q5, q5, q0
+
+ veor q6, q6, q3
+ veor q3, q3, q7
+ veor q7, q7, q5
+ veor q3, q3, q4
+ veor q4, q4, q5
+
+ veor q2, q2, q7
+ veor q3, q3, q1
+ veor q1, q1, q5
+ veor q11, q7, q4
+ veor q10, q1, q2
+ veor q9, q5, q3
+ veor q13, q2, q4
+ vmov q8, q10
+ veor q12, q6, q0
+
+ vorr q10, q10, q9
+ veor q15, q11, q8
+ vand q14, q11, q12
+ vorr q11, q11, q12
+ veor q12, q12, q9
+ vand q8, q8, q9
+ veor q9, q3, q0
+ vand q15, q15, q12
+ vand q13, q13, q9
+ veor q9, q7, q1
+ veor q12, q5, q6
+ veor q11, q11, q13
+ veor q10, q10, q13
+ vand q13, q9, q12
+ vorr q9, q9, q12
+ veor q11, q11, q15
+ veor q8, q8, q13
+ veor q10, q10, q14
+ veor q9, q9, q15
+ veor q8, q8, q14
+ vand q12, q2, q3
+ veor q9, q9, q14
+ vand q13, q4, q0
+ vand q14, q1, q5
+ vorr q15, q7, q6
+ veor q11, q11, q12
+ veor q9, q9, q14
+ veor q8, q8, q15
+ veor q10, q10, q13
+
+ @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
+
+ @ new smaller inversion
+
+ vand q14, q11, q9
+ vmov q12, q8
+
+ veor q13, q10, q14
+ veor q15, q8, q14
+ veor q14, q8, q14 @ q14=q15
+
+ vbsl q13, q9, q8
+ vbsl q15, q11, q10
+ veor q11, q11, q10
+
+ vbsl q12, q13, q14
+ vbsl q8, q14, q13
+
+ vand q14, q12, q15
+ veor q9, q9, q8
+
+ veor q14, q14, q11
+ veor q12, q6, q0
+ veor q8, q5, q3
+ veor q10, q15, q14
+ vand q10, q10, q6
+ veor q6, q6, q5
+ vand q11, q5, q15
+ vand q6, q6, q14
+ veor q5, q11, q10
+ veor q6, q6, q11
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q0
+ veor q12, q12, q8
+ veor q0, q0, q3
+ vand q8, q8, q15
+ vand q3, q3, q13
+ vand q12, q12, q14
+ vand q0, q0, q9
+ veor q8, q8, q12
+ veor q0, q0, q3
+ veor q12, q12, q11
+ veor q3, q3, q10
+ veor q6, q6, q12
+ veor q0, q0, q12
+ veor q5, q5, q8
+ veor q3, q3, q8
+
+ veor q12, q7, q4
+ veor q8, q1, q2
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q4
+ veor q12, q12, q8
+ veor q4, q4, q2
+ vand q8, q8, q15
+ vand q2, q2, q13
+ vand q12, q12, q14
+ vand q4, q4, q9
+ veor q8, q8, q12
+ veor q4, q4, q2
+ veor q12, q12, q11
+ veor q2, q2, q10
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q10, q15, q14
+ vand q10, q10, q7
+ veor q7, q7, q1
+ vand q11, q1, q15
+ vand q7, q7, q14
+ veor q1, q11, q10
+ veor q7, q7, q11
+ veor q7, q7, q12
+ veor q4, q4, q12
+ veor q1, q1, q8
+ veor q2, q2, q8
+ veor q7, q7, q0
+ veor q1, q1, q6
+ veor q6, q6, q0
+ veor q4, q4, q7
+ veor q0, q0, q1
+
+ veor q1, q1, q5
+ veor q5, q5, q2
+ veor q2, q2, q3
+ veor q3, q3, q5
+ veor q4, q4, q5
+
+ veor q6, q6, q3
+ subs r5,r5,#1
+ bcc .Lenc_done
+ vext.8 q8, q0, q0, #12 @ x0 <<< 32
+ vext.8 q9, q1, q1, #12
+ veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
+ vext.8 q10, q4, q4, #12
+ veor q1, q1, q9
+ vext.8 q11, q6, q6, #12
+ veor q4, q4, q10
+ vext.8 q12, q3, q3, #12
+ veor q6, q6, q11
+ vext.8 q13, q7, q7, #12
+ veor q3, q3, q12
+ vext.8 q14, q2, q2, #12
+ veor q7, q7, q13
+ vext.8 q15, q5, q5, #12
+ veor q2, q2, q14
+
+ veor q9, q9, q0
+ veor q5, q5, q15
+ vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64
+ veor q10, q10, q1
+ veor q8, q8, q5
+ veor q9, q9, q5
+ vext.8 q1, q1, q1, #8
+ veor q13, q13, q3
+ veor q0, q0, q8
+ veor q14, q14, q7
+ veor q1, q1, q9
+ vext.8 q8, q3, q3, #8
+ veor q12, q12, q6
+ vext.8 q9, q7, q7, #8
+ veor q15, q15, q2
+ vext.8 q3, q6, q6, #8
+ veor q11, q11, q4
+ vext.8 q7, q5, q5, #8
+ veor q12, q12, q5
+ vext.8 q6, q2, q2, #8
+ veor q11, q11, q5
+ vext.8 q2, q4, q4, #8
+ veor q5, q9, q13
+ veor q4, q8, q12
+ veor q3, q3, q11
+ veor q7, q7, q15
+ veor q6, q6, q14
+ @ vmov q4, q8
+ veor q2, q2, q10
+ @ vmov q5, q9
+ vldmia r6, {q12} @ .LSR
+ ite eq @ Thumb2 thing, sanity check in ARM
+ addeq r6,r6,#0x10
+ bne .Lenc_loop
+ vldmia r6, {q12} @ .LSRM0
+ b .Lenc_loop
+.align 4
+.Lenc_done:
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q2, #1
+ vshr.u64 q11, q3, #1
+ veor q10, q10, q5
+ veor q11, q11, q7
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #1
+ veor q7, q7, q11
+ vshl.u64 q11, q11, #1
+ veor q2, q2, q10
+ veor q3, q3, q11
+ vshr.u64 q10, q4, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q6
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q6, q6, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q4, q4, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q7, #2
+ vshr.u64 q11, q3, #2
+ veor q10, q10, q5
+ veor q11, q11, q2
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #2
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #2
+ veor q7, q7, q10
+ veor q3, q3, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q6
+ veor q11, q11, q4
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q6, q6, q10
+ vshl.u64 q10, q10, #2
+ veor q4, q4, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q6, #4
+ vshr.u64 q11, q4, #4
+ veor q10, q10, q5
+ veor q11, q11, q2
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #4
+ veor q6, q6, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q7
+ veor q11, q11, q3
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q3, q3, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vldmia r4, {q8} @ last round key
+ veor q4, q4, q8
+ veor q6, q6, q8
+ veor q3, q3, q8
+ veor q7, q7, q8
+ veor q2, q2, q8
+ veor q5, q5, q8
+ veor q0, q0, q8
+ veor q1, q1, q8
+ bx lr
+.size _bsaes_encrypt8,.-_bsaes_encrypt8
+.type _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+ adr r6,_bsaes_key_convert
+ vld1.8 {q7}, [r4]! @ load round 0 key
+ sub r6,r6,#_bsaes_key_convert-.LM0
+ vld1.8 {q15}, [r4]! @ load round 1 key
+
+ vmov.i8 q8, #0x01 @ bit masks
+ vmov.i8 q9, #0x02
+ vmov.i8 q10, #0x04
+ vmov.i8 q11, #0x08
+ vmov.i8 q12, #0x10
+ vmov.i8 q13, #0x20
+ vldmia r6, {q14} @ .LM0
+
+#ifdef __ARMEL__
+ vrev32.8 q7, q7
+ vrev32.8 q15, q15
+#endif
+ sub r5,r5,#1
+ vstmia r12!, {q7} @ save round 0 key
+ b .Lkey_loop
+
+.align 4
+.Lkey_loop:
+ vtbl.8 d14,{q15},d28
+ vtbl.8 d15,{q15},d29
+ vmov.i8 q6, #0x40
+ vmov.i8 q15, #0x80
+
+ vtst.8 q0, q7, q8
+ vtst.8 q1, q7, q9
+ vtst.8 q2, q7, q10
+ vtst.8 q3, q7, q11
+ vtst.8 q4, q7, q12
+ vtst.8 q5, q7, q13
+ vtst.8 q6, q7, q6
+ vtst.8 q7, q7, q15
+ vld1.8 {q15}, [r4]! @ load next round key
+ vmvn q0, q0 @ "pnot"
+ vmvn q1, q1
+ vmvn q5, q5
+ vmvn q6, q6
+#ifdef __ARMEL__
+ vrev32.8 q15, q15
+#endif
+ subs r5,r5,#1
+ vstmia r12!,{q0-q7} @ write bit-sliced round key
+ bne .Lkey_loop
+
+ vmov.i8 q7,#0x63 @ compose .L63
+ @ don't save last round key
+ bx lr
+.size _bsaes_key_convert,.-_bsaes_key_convert
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global bsaes_cbc_encrypt
+.type bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef __KERNEL__
+ cmp r2, #128
+#ifndef __thumb__
+ blo AES_cbc_encrypt
+#else
+ bhs 1f
+ b AES_cbc_encrypt
+1:
+#endif
+#endif
+
+ @ it is up to the caller to make sure we are called with enc == 0
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr r8, [ip] @ IV is 1st arg on the stack
+ mov r2, r2, lsr#4 @ len in 16 byte blocks
+ sub sp, #0x10 @ scratch space to carry over the IV
+ mov r9, sp @ save sp
+
+ ldr r10, [r3, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
+ add r12, #96 @ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ mov sp, r12 @ sp is sp
+ bl _bsaes_key_convert
+ vldmia sp, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia sp, {q7}
+#else
+ ldr r12, [r3, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [r3, #244]
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ add r12, r3, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, r3, #248
+ vldmia r4, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia r4, {q7}
+
+.align 2
+0:
+#endif
+
+ vld1.8 {q15}, [r8] @ load IV
+ b .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+ subs r2, r2, #0x8
+ bmi .Lcbc_dec_loop_finish
+
+ vld1.8 {q0-q1}, [r0]! @ load input
+ vld1.8 {q2-q3}, [r0]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, sp @ pass the key
+#else
+ add r4, r3, #248
+#endif
+ vld1.8 {q4-q5}, [r0]!
+ mov r5, r10
+ vld1.8 {q6-q7}, [r0]
+ sub r0, r0, #0x60
+ vstmia r9, {q15} @ put aside IV
+
+ bl _bsaes_decrypt8
+
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q12-q13}, [r0]!
+ veor q4, q4, q10
+ veor q2, q2, q11
+ vld1.8 {q14-q15}, [r0]!
+ veor q7, q7, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q3, q3, q13
+ vst1.8 {q6}, [r1]!
+ veor q5, q5, q14
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ vst1.8 {q7}, [r1]!
+ vst1.8 {q3}, [r1]!
+ vst1.8 {q5}, [r1]!
+
+ b .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+ adds r2, r2, #8
+ beq .Lcbc_dec_done
+
+ vld1.8 {q0}, [r0]! @ load input
+ cmp r2, #2
+ blo .Lcbc_dec_one
+ vld1.8 {q1}, [r0]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, sp @ pass the key
+#else
+ add r4, r3, #248
+#endif
+ mov r5, r10
+ vstmia r9, {q15} @ put aside IV
+ beq .Lcbc_dec_two
+ vld1.8 {q2}, [r0]!
+ cmp r2, #4
+ blo .Lcbc_dec_three
+ vld1.8 {q3}, [r0]!
+ beq .Lcbc_dec_four
+ vld1.8 {q4}, [r0]!
+ cmp r2, #6
+ blo .Lcbc_dec_five
+ vld1.8 {q5}, [r0]!
+ beq .Lcbc_dec_six
+ vld1.8 {q6}, [r0]!
+ sub r0, r0, #0x70
+
+ bl _bsaes_decrypt8
+
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q12-q13}, [r0]!
+ veor q4, q4, q10
+ veor q2, q2, q11
+ vld1.8 {q15}, [r0]!
+ veor q7, q7, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q3, q3, q13
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ vst1.8 {q7}, [r1]!
+ vst1.8 {q3}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+ sub r0, r0, #0x60
+ bl _bsaes_decrypt8
+ vldmia r9,{q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q12}, [r0]!
+ veor q4, q4, q10
+ veor q2, q2, q11
+ vld1.8 {q15}, [r0]!
+ veor q7, q7, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ vst1.8 {q7}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+ sub r0, r0, #0x50
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q15}, [r0]!
+ veor q4, q4, q10
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q2, q2, q11
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+ sub r0, r0, #0x40
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q15}, [r0]!
+ veor q4, q4, q10
+ vst1.8 {q0-q1}, [r1]! @ write output
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+ sub r0, r0, #0x30
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q15}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vst1.8 {q0-q1}, [r1]! @ write output
+ vst1.8 {q6}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+ sub r0, r0, #0x20
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q15}, [r0]! @ reload input
+ veor q1, q1, q8
+ vst1.8 {q0-q1}, [r1]! @ write output
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+ sub r0, r0, #0x10
+ mov r10, r1 @ save original out pointer
+ mov r1, r9 @ use the iv scratch space as out buffer
+ mov r2, r3
+ vmov q4,q15 @ just in case ensure that IV
+ vmov q5,q0 @ and input are preserved
+ bl AES_decrypt
+ vld1.8 {q0}, [r9,:64] @ load result
+ veor q0, q0, q4 @ ^= IV
+ vmov q15, q5 @ q5 holds input
+ vst1.8 {q0}, [r10] @ write output
+
+.Lcbc_dec_done:
+#ifndef BSAES_ASM_EXTENDED_KEY
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+.Lcbc_dec_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r9
+ bne .Lcbc_dec_bzero
+#endif
+
+ mov sp, r9
+ add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
+ vst1.8 {q15}, [r8] @ return IV
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc}
+.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+.extern AES_encrypt
+.global bsaes_ctr32_encrypt_blocks
+.type bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+ cmp r2, #8 @ use plain AES for
+ blo .Lctr_enc_short @ small sizes
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr r8, [ip] @ ctr is 1st arg on the stack
+ sub sp, sp, #0x10 @ scratch space to carry over the ctr
+ mov r9, sp @ save sp
+
+ ldr r10, [r3, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
+ add r12, #96 @ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ mov sp, r12 @ sp is sp
+ bl _bsaes_key_convert
+ veor q7,q7,q15 @ fix up last round key
+ vstmia r12, {q7} @ save last round key
+
+ vld1.8 {q0}, [r8] @ load counter
+ add r8, r6, #.LREVM0SR-.LM0 @ borrow r8
+ vldmia sp, {q4} @ load round0 key
+#else
+ ldr r12, [r3, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [r3, #244]
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ add r12, r3, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor q7,q7,q15 @ fix up last round key
+ vstmia r12, {q7} @ save last round key
+
+.align 2
+0: add r12, r3, #248
+ vld1.8 {q0}, [r8] @ load counter
+ adrl r8, .LREVM0SR @ borrow r8
+ vldmia r12, {q4} @ load round0 key
+ sub sp, #0x10 @ place for adjusted round0 key
+#endif
+
+ vmov.i32 q8,#1 @ compose 1<<96
+ veor q9,q9,q9
+ vrev32.8 q0,q0
+ vext.8 q8,q9,q8,#4
+ vrev32.8 q4,q4
+ vadd.u32 q9,q8,q8 @ compose 2<<96
+ vstmia sp, {q4} @ save adjusted round0 key
+ b .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+ vadd.u32 q10, q8, q9 @ compose 3<<96
+ vadd.u32 q1, q0, q8 @ +1
+ vadd.u32 q2, q0, q9 @ +2
+ vadd.u32 q3, q0, q10 @ +3
+ vadd.u32 q4, q1, q10
+ vadd.u32 q5, q2, q10
+ vadd.u32 q6, q3, q10
+ vadd.u32 q7, q4, q10
+ vadd.u32 q10, q5, q10 @ next counter
+
+ @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+ @ to flip byte order in 32-bit counter
+
+ vldmia sp, {q9} @ load round0 key
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x10 @ pass next round key
+#else
+ add r4, r3, #264
+#endif
+ vldmia r8, {q8} @ .LREVM0SR
+ mov r5, r10 @ pass rounds
+ vstmia r9, {q10} @ save next counter
+ sub r6, r8, #.LREVM0SR-.LSR @ pass constants
+
+ bl _bsaes_encrypt8_alt
+
+ subs r2, r2, #8
+ blo .Lctr_enc_loop_done
+
+ vld1.8 {q8-q9}, [r0]! @ load input
+ vld1.8 {q10-q11}, [r0]!
+ veor q0, q8
+ veor q1, q9
+ vld1.8 {q12-q13}, [r0]!
+ veor q4, q10
+ veor q6, q11
+ vld1.8 {q14-q15}, [r0]!
+ veor q3, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q7, q13
+ veor q2, q14
+ vst1.8 {q4}, [r1]!
+ veor q5, q15
+ vst1.8 {q6}, [r1]!
+ vmov.i32 q8, #1 @ compose 1<<96
+ vst1.8 {q3}, [r1]!
+ veor q9, q9, q9
+ vst1.8 {q7}, [r1]!
+ vext.8 q8, q9, q8, #4
+ vst1.8 {q2}, [r1]!
+ vadd.u32 q9,q8,q8 @ compose 2<<96
+ vst1.8 {q5}, [r1]!
+ vldmia r9, {q0} @ load counter
+
+ bne .Lctr_enc_loop
+ b .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+ add r2, r2, #8
+ vld1.8 {q8}, [r0]! @ load input
+ veor q0, q8
+ vst1.8 {q0}, [r1]! @ write output
+ cmp r2, #2
+ blo .Lctr_enc_done
+ vld1.8 {q9}, [r0]!
+ veor q1, q9
+ vst1.8 {q1}, [r1]!
+ beq .Lctr_enc_done
+ vld1.8 {q10}, [r0]!
+ veor q4, q10
+ vst1.8 {q4}, [r1]!
+ cmp r2, #4
+ blo .Lctr_enc_done
+ vld1.8 {q11}, [r0]!
+ veor q6, q11
+ vst1.8 {q6}, [r1]!
+ beq .Lctr_enc_done
+ vld1.8 {q12}, [r0]!
+ veor q3, q12
+ vst1.8 {q3}, [r1]!
+ cmp r2, #6
+ blo .Lctr_enc_done
+ vld1.8 {q13}, [r0]!
+ veor q7, q13
+ vst1.8 {q7}, [r1]!
+ beq .Lctr_enc_done
+ vld1.8 {q14}, [r0]
+ veor q2, q14
+ vst1.8 {q2}, [r1]!
+
+.Lctr_enc_done:
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifndef BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r9
+ bne .Lctr_enc_bzero
+#else
+ vstmia sp, {q0-q1}
+#endif
+
+ mov sp, r9
+ add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.align 4
+.Lctr_enc_short:
+ ldr ip, [sp] @ ctr pointer is passed on stack
+ stmdb sp!, {r4-r8, lr}
+
+ mov r4, r0 @ copy arguments
+ mov r5, r1
+ mov r6, r2
+ mov r7, r3
+ ldr r8, [ip, #12] @ load counter LSW
+ vld1.8 {q1}, [ip] @ load whole counter value
+#ifdef __ARMEL__
+ rev r8, r8
+#endif
+ sub sp, sp, #0x10
+ vst1.8 {q1}, [sp,:64] @ copy counter value
+ sub sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+ add r0, sp, #0x10 @ input counter value
+ mov r1, sp @ output on the stack
+ mov r2, r7 @ key
+
+ bl AES_encrypt
+
+ vld1.8 {q0}, [r4]! @ load input
+ vld1.8 {q1}, [sp,:64] @ load encrypted counter
+ add r8, r8, #1
+#ifdef __ARMEL__
+ rev r0, r8
+ str r0, [sp, #0x1c] @ next counter value
+#else
+ str r8, [sp, #0x1c] @ next counter value
+#endif
+ veor q0,q0,q1
+ vst1.8 {q0}, [r5]! @ store output
+ subs r6, r6, #1
+ bne .Lctr_enc_short_loop
+
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+ vstmia sp!, {q0-q1}
+
+ ldmia sp!, {r4-r8, pc}
+.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+.globl bsaes_xts_encrypt
+.type bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future r3
+
+ mov r7, r0
+ mov r8, r1
+ mov r9, r2
+ mov r10, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0,sp @ pointer to initial tweak
+#endif
+
+ ldr r1, [r10, #240] @ get # of rounds
+ mov r3, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r1, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #96 @ size of bit-sliced key schedule
+ sub r12, #48 @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ veor q7, q7, q15 @ fix up last round key
+ vstmia r12, {q7} @ save last round key
+#else
+ ldr r12, [r10, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [r10, #244]
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ add r12, r10, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor q7, q7, q15 @ fix up last round key
+ vstmia r12, {q7}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+
+ vld1.8 {q8}, [r0] @ initial tweak
+ adr r2, .Lxts_magic
+
+ subs r9, #0x80
+ blo .Lxts_enc_short
+ b .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q6, q8, #63
+ mov r0, sp
+ vand q6, q6, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q9, #63
+ veor q9, q9, q6
+ vand q7, q7, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q10, #63
+ veor q10, q10, q7
+ vand q6, q6, q5
+ vld1.8 {q0}, [r7]!
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q11, #63
+ veor q11, q11, q6
+ vand q7, q7, q5
+ vld1.8 {q1}, [r7]!
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q12, #63
+ veor q12, q12, q7
+ vand q6, q6, q5
+ vld1.8 {q2}, [r7]!
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q13, #63
+ veor q13, q13, q6
+ vand q7, q7, q5
+ vld1.8 {q3}, [r7]!
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q14, #63
+ veor q14, q14, q7
+ vand q6, q6, q5
+ vld1.8 {q4}, [r7]!
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q15, #63
+ veor q15, q15, q6
+ vand q7, q7, q5
+ vld1.8 {q5}, [r7]!
+ veor q4, q4, q12
+ vadd.u64 q8, q15, q15
+ vst1.64 {q15}, [r0,:128]!
+ vswp d15,d14
+ veor q8, q8, q7
+ vst1.64 {q8}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6-q7}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ veor q7, q7, q15
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ vld1.64 {q14-q15}, [r0,:128]!
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q2, q14
+ vst1.8 {q10-q11}, [r8]!
+ veor q13, q5, q15
+ vst1.8 {q12-q13}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+
+ subs r9, #0x80
+ bpl .Lxts_enc_loop
+
+.Lxts_enc_short:
+ adds r9, #0x70
+ bmi .Lxts_enc_done
+
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q7, q8, #63
+ mov r0, sp
+ vand q7, q7, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q9, #63
+ veor q9, q9, q7
+ vand q6, q6, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q10, #63
+ veor q10, q10, q6
+ vand q7, q7, q5
+ vld1.8 {q0}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_1
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q11, #63
+ veor q11, q11, q7
+ vand q6, q6, q5
+ vld1.8 {q1}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_2
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q12, #63
+ veor q12, q12, q6
+ vand q7, q7, q5
+ vld1.8 {q2}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_3
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q13, #63
+ veor q13, q13, q7
+ vand q6, q6, q5
+ vld1.8 {q3}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_4
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q14, #63
+ veor q14, q14, q6
+ vand q7, q7, q5
+ vld1.8 {q4}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_5
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q15, #63
+ veor q15, q15, q7
+ vand q6, q6, q5
+ vld1.8 {q5}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_6
+ veor q4, q4, q12
+ sub r9, #0x10
+ vst1.64 {q15}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ vld1.64 {q14}, [r0,:128]!
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q2, q14
+ vst1.8 {q10-q11}, [r8]!
+ vst1.8 {q12}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+ vst1.64 {q14}, [r0,:128] @ next round tweak
+
+ veor q4, q4, q12
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q5, q5, q13
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ vst1.8 {q10-q11}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+ .quad 1, 0x87
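+
+@ The tweak update in the loops above and below multiplies the 128-bit
+@ tweak by x in GF(2^128) (the standard XTS tweak step): vadd.u64 doubles
+@ each 64-bit half, while the vshr.s64 #63 / vand / vswp / veor sequence
+@ propagates the carry out of the low half and folds the carry out of the
+@ high half back in as the reduction constant 0x87 taken from .Lxts_magic.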
+
+.align 5
+.Lxts_enc_5:
+ vst1.64 {q13}, [r0,:128] @ next round tweak
+
+ veor q3, q3, q11
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q4, q4, q12
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ vst1.8 {q10}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+ vst1.64 {q12}, [r0,:128] @ next round tweak
+
+ veor q2, q2, q10
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q3, q3, q11
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ vst1.8 {q8-q9}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+ vst1.64 {q11}, [r0,:128] @ next round tweak
+
+ veor q1, q1, q9
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q2, q2, q10
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ vst1.8 {q8}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+ vst1.64 {q10}, [r0,:128] @ next round tweak
+
+ veor q0, q0, q8
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q1, q1, q9
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ vst1.8 {q0-q1}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+ mov r0, sp
+ veor q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r8]!
+ mov r3, r4
+
+ vmov q8, q9 @ next round tweak
+
+.Lxts_enc_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds r9, #0x10
+ beq .Lxts_enc_ret
+ sub r6, r8, #0x10
+
+.Lxts_enc_steal:
+ ldrb r0, [r7], #1
+ ldrb r1, [r8, #-0x10]
+ strb r0, [r8, #-0x10]
+ strb r1, [r8], #1
+
+ subs r9, #1
+ bhi .Lxts_enc_steal
+
+ vld1.8 {q0}, [r6]
+ mov r0, sp
+ veor q0, q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r6]
+ mov r3, r4
+#endif
+
+.Lxts_enc_ret:
+ bic r0, r3, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_enc_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_enc_bzero
+
+ mov sp, r3
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {q8}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl bsaes_xts_decrypt
+.type bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future r3
+
+ mov r7, r0
+ mov r8, r1
+ mov r9, r2
+ mov r10, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0, sp @ pointer to initial tweak
+#endif
+
+ ldr r1, [r10, #240] @ get # of rounds
+ mov r3, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r1, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #96 @ size of bit-sliced key schedule
+ sub r12, #48 @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, sp, #0x90
+ vldmia r4, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia r4, {q7}
+#else
+ ldr r12, [r10, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [r10, #244]
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ add r12, r10, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, r10, #248
+ vldmia r4, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia r4, {q7}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+ vld1.8 {q8}, [r0] @ initial tweak
+ adr r2, .Lxts_magic
+
+ tst r9, #0xf @ if not multiple of 16
+ it ne @ Thumb2 thing, sanity check in ARM
+ subne r9, #0x10 @ subtract another 16 bytes
+ subs r9, #0x80
+
+ blo .Lxts_dec_short
+ b .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q6, q8, #63
+ mov r0, sp
+ vand q6, q6, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q9, #63
+ veor q9, q9, q6
+ vand q7, q7, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q10, #63
+ veor q10, q10, q7
+ vand q6, q6, q5
+ vld1.8 {q0}, [r7]!
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q11, #63
+ veor q11, q11, q6
+ vand q7, q7, q5
+ vld1.8 {q1}, [r7]!
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q12, #63
+ veor q12, q12, q7
+ vand q6, q6, q5
+ vld1.8 {q2}, [r7]!
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q13, #63
+ veor q13, q13, q6
+ vand q7, q7, q5
+ vld1.8 {q3}, [r7]!
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q14, #63
+ veor q14, q14, q7
+ vand q6, q6, q5
+ vld1.8 {q4}, [r7]!
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q15, #63
+ veor q15, q15, q6
+ vand q7, q7, q5
+ vld1.8 {q5}, [r7]!
+ veor q4, q4, q12
+ vadd.u64 q8, q15, q15
+ vst1.64 {q15}, [r0,:128]!
+ vswp d15,d14
+ veor q8, q8, q7
+ vst1.64 {q8}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6-q7}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ veor q7, q7, q15
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ vld1.64 {q14-q15}, [r0,:128]!
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q3, q14
+ vst1.8 {q10-q11}, [r8]!
+ veor q13, q5, q15
+ vst1.8 {q12-q13}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+
+ subs r9, #0x80
+ bpl .Lxts_dec_loop
+
+.Lxts_dec_short:
+ adds r9, #0x70
+ bmi .Lxts_dec_done
+
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q7, q8, #63
+ mov r0, sp
+ vand q7, q7, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q9, #63
+ veor q9, q9, q7
+ vand q6, q6, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q10, #63
+ veor q10, q10, q6
+ vand q7, q7, q5
+ vld1.8 {q0}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_1
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q11, #63
+ veor q11, q11, q7
+ vand q6, q6, q5
+ vld1.8 {q1}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_2
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q12, #63
+ veor q12, q12, q6
+ vand q7, q7, q5
+ vld1.8 {q2}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_3
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q13, #63
+ veor q13, q13, q7
+ vand q6, q6, q5
+ vld1.8 {q3}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_4
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q14, #63
+ veor q14, q14, q6
+ vand q7, q7, q5
+ vld1.8 {q4}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_5
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q15, #63
+ veor q15, q15, q7
+ vand q6, q6, q5
+ vld1.8 {q5}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_6
+ veor q4, q4, q12
+ sub r9, #0x10
+ vst1.64 {q15}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ vld1.64 {q14}, [r0,:128]!
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q3, q14
+ vst1.8 {q10-q11}, [r8]!
+ vst1.8 {q12}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+ vst1.64 {q14}, [r0,:128] @ next round tweak
+
+ veor q4, q4, q12
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q5, q5, q13
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ vst1.8 {q10-q11}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+ vst1.64 {q13}, [r0,:128] @ next round tweak
+
+ veor q3, q3, q11
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q4, q4, q12
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ vst1.8 {q10}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+ vst1.64 {q12}, [r0,:128] @ next round tweak
+
+ veor q2, q2, q10
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q3, q3, q11
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ vst1.8 {q8-q9}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+ vst1.64 {q11}, [r0,:128] @ next round tweak
+
+ veor q1, q1, q9
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q2, q2, q10
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ vst1.8 {q8}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+ vst1.64 {q10}, [r0,:128] @ next round tweak
+
+ veor q0, q0, q8
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q1, q1, q9
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ vst1.8 {q0-q1}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+ mov r0, sp
+ veor q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+ mov r5, r2 @ preserve magic
+
+ bl AES_decrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r8]!
+ mov r3, r4
+ mov r2, r5
+
+ vmov q8, q9 @ next round tweak
+
+.Lxts_dec_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds r9, #0x10
+ beq .Lxts_dec_ret
+
+ @ calculate one round of extra tweak for the stolen ciphertext
+ vldmia r2, {q5}
+ vshr.s64 q6, q8, #63
+ vand q6, q6, q5
+ vadd.u64 q9, q8, q8
+ vswp d13,d12
+ veor q9, q9, q6
+
+ @ perform the final decryption with the last tweak value
+ vld1.8 {q0}, [r7]!
+ mov r0, sp
+ veor q0, q0, q9
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+
+ bl AES_decrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q9
+ vst1.8 {q0}, [r8]
+
+ mov r6, r8
+.Lxts_dec_steal:
+ ldrb r1, [r8]
+ ldrb r0, [r7], #1
+ strb r1, [r8, #0x10]
+ strb r0, [r8], #1
+
+ subs r9, #1
+ bhi .Lxts_dec_steal
+
+ vld1.8 {q0}, [r6]
+ mov r0, sp
+ veor q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+
+ bl AES_decrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r6]
+ mov r3, r4
+#endif
+
+.Lxts_dec_ret:
+ bic r0, r3, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_dec_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_dec_bzero
+
+ mov sp, r3
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {q8}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
+#endif
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
new file mode 100644
index 000000000000..4522366da759
--- /dev/null
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -0,0 +1,434 @@
+/*
+ * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+#include "aes_glue.h"
+
+#define BIT_SLICED_KEY_MAXSIZE (128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)
+
+struct BS_KEY {
+ struct AES_KEY rk;
+ int converted;
+ u8 __aligned(8) bs[BIT_SLICED_KEY_MAXSIZE];
+} __aligned(8);
+
+asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
+asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);
+
+asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
+ struct BS_KEY *key, u8 iv[]);
+
+asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
+ struct BS_KEY *key, u8 const iv[]);
+
+asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
+ struct BS_KEY *key, u8 tweak[]);
+
+asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
+ struct BS_KEY *key, u8 tweak[]);
+
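+/*
+ * The bsaes_* routines above are implemented in aesbs-core.S_shipped
+ * (generated from bsaes-armv7.pl) and clobber NEON registers, so every
+ * call into them below is bracketed by kernel_neon_begin() and
+ * kernel_neon_end().
+ */
+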
+struct aesbs_cbc_ctx {
+ struct AES_KEY enc;
+ struct BS_KEY dec;
+};
+
+struct aesbs_ctr_ctx {
+ struct BS_KEY enc;
+};
+
+struct aesbs_xts_ctx {
+ struct BS_KEY enc;
+ struct BS_KEY dec;
+ struct AES_KEY twkey;
+};
+
+static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ int bits = key_len * 8;
+
+ if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ ctx->dec.rk = ctx->enc;
+ private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+ ctx->dec.converted = 0;
+ return 0;
+}
+
+static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ int bits = key_len * 8;
+
+ if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ ctx->enc.converted = 0;
+ return 0;
+}
+
+static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
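+ /* the XTS key holds two AES keys, so each half is key_len * 8 / 2 bits */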
+ int bits = key_len * 4;
+
+ if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ ctx->dec.rk = ctx->enc.rk;
+ private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+ private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
+ ctx->enc.converted = ctx->dec.converted = 0;
+ return 0;
+}
+
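+/*
+ * CBC encryption is inherently serial (each block depends on the previous
+ * ciphertext block), so the 8-way bit-sliced NEON code cannot help here:
+ * encryption uses the scalar AES_encrypt() per block, while CBC decryption
+ * below hands multiples of eight blocks to the NEON bsaes_cbc_encrypt().
+ */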
+static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while (walk.nbytes) {
+ u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+ u8 *src = walk.src.virt.addr;
+
+ if (walk.dst.virt.addr == walk.src.virt.addr) {
+ u8 *iv = walk.iv;
+
+ do {
+ crypto_xor(src, iv, AES_BLOCK_SIZE);
+ AES_encrypt(src, src, &ctx->enc);
+ iv = src;
+ src += AES_BLOCK_SIZE;
+ } while (--blocks);
+ memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+ } else {
+ u8 *dst = walk.dst.virt.addr;
+
+ do {
+ crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
+ AES_encrypt(walk.iv, dst, &ctx->enc);
+ memcpy(walk.iv, dst, AES_BLOCK_SIZE);
+ src += AES_BLOCK_SIZE;
+ dst += AES_BLOCK_SIZE;
+ } while (--blocks);
+ }
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
+ kernel_neon_begin();
+ bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, &ctx->dec, walk.iv);
+ kernel_neon_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ while (walk.nbytes) {
+ u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *src = walk.src.virt.addr;
+ u8 bk[2][AES_BLOCK_SIZE];
+ u8 *iv = walk.iv;
+
+ do {
+ if (walk.dst.virt.addr == walk.src.virt.addr)
+ memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
+
+ AES_decrypt(src, dst, &ctx->dec.rk);
+ crypto_xor(dst, iv, AES_BLOCK_SIZE);
+
+ if (walk.dst.virt.addr == walk.src.virt.addr)
+ iv = bk[blocks & 1];
+ else
+ iv = src;
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ } while (--blocks);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
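+/*
+ * Add 'addend' to the 128-bit big-endian counter with carry propagation:
+ * e.g. with addend == 8 and the least significant word at 0xfffffffc, that
+ * word wraps to 0x00000004 (n < addend), so a carry of one is added to the
+ * next more significant word.
+ */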
+static void inc_be128_ctr(__be32 ctr[], u32 addend)
+{
+ int i;
+
+ for (i = 3; i >= 0; i--, addend = 1) {
+ u32 n = be32_to_cpu(ctr[i]) + addend;
+
+ ctr[i] = cpu_to_be32(n);
+ if (n >= addend)
+ break;
+ }
+}
+
+static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ u32 blocks;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+ __be32 *ctr = (__be32 *)walk.iv;
+ u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);
+
+ /* avoid 32 bit counter overflow in the NEON code */
+ if (unlikely(headroom < blocks)) {
+ blocks = headroom + 1;
+ tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
+ }
+ kernel_neon_begin();
+ bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
+ walk.dst.virt.addr, blocks,
+ &ctx->enc, walk.iv);
+ kernel_neon_end();
+ inc_be128_ctr(ctr, blocks);
+
+ nbytes -= blocks * AES_BLOCK_SIZE;
+ if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
+ break;
+
+ err = blkcipher_walk_done(desc, &walk, tail);
+ }
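+ /*
+ * Handle a final partial block, if any, by encrypting the counter
+ * with the scalar AES_encrypt() and XORing the resulting keystream
+ * into the remaining bytes.
+ */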
+ if (walk.nbytes) {
+ u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 ks[AES_BLOCK_SIZE];
+
+ AES_encrypt(walk.iv, ks, &ctx->enc.rk);
+ if (tdst != tsrc)
+ memcpy(tdst, tsrc, nbytes);
+ crypto_xor(tdst, ks, nbytes);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ /* generate the initial tweak */
+ AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+ while (walk.nbytes) {
+ kernel_neon_begin();
+ bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, &ctx->enc, walk.iv);
+ kernel_neon_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ /* generate the initial tweak */
+ AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+ while (walk.nbytes) {
+ kernel_neon_begin();
+ bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, &ctx->dec, walk.iv);
+ kernel_neon_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
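+/*
+ * The first three entries are the internal synchronous implementations
+ * (priority 0, "__"-prefixed names); the last three are the async wrappers
+ * that requests for cbc(aes), ctr(aes) and xts(aes) actually resolve to.
+ * Note that cbc(aes) encryption points at __ablk_encrypt: the CBC encrypt
+ * path above is purely scalar and never touches NEON.
+ */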
+static struct crypto_alg aesbs_algs[] = { {
+ .cra_name = "__cbc-aes-neonbs",
+ .cra_driver_name = "__driver-cbc-aes-neonbs",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_cbc_set_key,
+ .encrypt = aesbs_cbc_encrypt,
+ .decrypt = aesbs_cbc_decrypt,
+ },
+}, {
+ .cra_name = "__ctr-aes-neonbs",
+ .cra_driver_name = "__driver-ctr-aes-neonbs",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_ctr_set_key,
+ .encrypt = aesbs_ctr_encrypt,
+ .decrypt = aesbs_ctr_encrypt,
+ },
+}, {
+ .cra_name = "__xts-aes-neonbs",
+ .cra_driver_name = "__driver-xts-aes-neonbs",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aesbs_xts_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_xts_set_key,
+ .encrypt = aesbs_xts_encrypt,
+ .decrypt = aesbs_xts_decrypt,
+ },
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = __ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+} };
+
+static int __init aesbs_mod_init(void)
+{
+ if (!cpu_has_neon())
+ return -ENODEV;
+
+ return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+static void __exit aesbs_mod_exit(void)
+{
+ crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+module_init(aesbs_mod_init);
+module_exit(aesbs_mod_exit);
+
+MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
new file mode 100644
index 000000000000..f3d96d932573
--- /dev/null
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -0,0 +1,2467 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+# granted.
+# ====================================================================
+
+# Bit-sliced AES for ARM NEON
+#
+# February 2012.
+#
+# This implementation is a direct adaptation of the bsaes-x86_64 module
+# for ARM NEON, except that this module is endian-neutral [in the sense
+# that it can be compiled for either endianness] courtesy of vld1.8's
+# neutrality. The initial version doesn't implement an interface to
+# OpenSSL, only low-level primitives and unsupported entry points, just
+# enough to collect performance results, which for the Cortex-A8 core are:
+#
+# encrypt 19.5 cycles per byte processed with 128-bit key
+# decrypt 22.1 cycles per byte processed with 128-bit key
+# key conv. 440 cycles per 128-bit key/0.18 of 8x block
+#
+# Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts one in 19.7,
+# which is [much] worse than anticipated (for further details see
+# http://www.openssl.org/~appro/Snapdragon-S4.html).
+#
+# Cortex-A15 manages 14.2/16.1 cycles [where the integer-only code
+# manages 20.0 cycles].
+#
+# When comparing to x86_64 results keep in mind that the NEON unit is
+# [mostly] single-issue and thus can't [fully] benefit from
+# instruction-level parallelism. And when comparing to aes-armv4
+# results keep in mind key schedule conversion overhead (see
+# bsaes-x86_64.pl for further details)...
+#
+# <appro@openssl.org>
+
+# April-August 2013
+#
+# Add CBC, CTR and XTS subroutines, adapt for kernel use.
+#
+# <ard.biesheuvel@linaro.org>
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
+my @XMM=map("q$_",(0..15));
+
+{
+my ($key,$rounds,$const)=("r4","r5","r6");
+
+sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
+sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
+
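+# The bit-sliced Sbox is computed as a change into a tower-field basis
+# (InBasisChange), a GF(2^8) inversion built from GF(2^4)/GF(2^2) arithmetic
+# (Inv_GF256), and a change back to the AES basis (OutBasisChange); the bit
+# orderings noted in each subroutine track how the circuit permutes its
+# outputs.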
+sub Sbox {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+ &InBasisChange (@b);
+ &Inv_GF256 (@b[6,5,0,3,7,1,4,2],@t,@s);
+ &OutBasisChange (@b[7,1,4,2,6,5,0,3]);
+}
+
+sub InBasisChange {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb
+my @b=@_[0..7];
+$code.=<<___;
+ veor @b[2], @b[2], @b[1]
+ veor @b[5], @b[5], @b[6]
+ veor @b[3], @b[3], @b[0]
+ veor @b[6], @b[6], @b[2]
+ veor @b[5], @b[5], @b[0]
+
+ veor @b[6], @b[6], @b[3]
+ veor @b[3], @b[3], @b[7]
+ veor @b[7], @b[7], @b[5]
+ veor @b[3], @b[3], @b[4]
+ veor @b[4], @b[4], @b[5]
+
+ veor @b[2], @b[2], @b[7]
+ veor @b[3], @b[3], @b[1]
+ veor @b[1], @b[1], @b[5]
+___
+}
+
+sub OutBasisChange {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+ veor @b[0], @b[0], @b[6]
+ veor @b[1], @b[1], @b[4]
+ veor @b[4], @b[4], @b[6]
+ veor @b[2], @b[2], @b[0]
+ veor @b[6], @b[6], @b[1]
+
+ veor @b[1], @b[1], @b[5]
+ veor @b[5], @b[5], @b[3]
+ veor @b[3], @b[3], @b[7]
+ veor @b[7], @b[7], @b[5]
+ veor @b[2], @b[2], @b[5]
+
+ veor @b[4], @b[4], @b[7]
+___
+}
+
+sub InvSbox {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+ &InvInBasisChange (@b);
+ &Inv_GF256 (@b[5,1,2,6,3,7,0,4],@t,@s);
+ &InvOutBasisChange (@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange { # OutBasisChange in reverse (with twist)
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___;
+ veor @b[1], @b[1], @b[7]
+ veor @b[4], @b[4], @b[7]
+
+ veor @b[7], @b[7], @b[5]
+ veor @b[1], @b[1], @b[3]
+ veor @b[2], @b[2], @b[5]
+ veor @b[3], @b[3], @b[7]
+
+ veor @b[6], @b[6], @b[1]
+ veor @b[2], @b[2], @b[0]
+ veor @b[5], @b[5], @b[3]
+ veor @b[4], @b[4], @b[6]
+ veor @b[0], @b[0], @b[6]
+ veor @b[1], @b[1], @b[4]
+___
+}
+
+sub InvOutBasisChange { # InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+ veor @b[1], @b[1], @b[5]
+ veor @b[2], @b[2], @b[7]
+
+ veor @b[3], @b[3], @b[1]
+ veor @b[4], @b[4], @b[5]
+ veor @b[7], @b[7], @b[5]
+ veor @b[3], @b[3], @b[4]
+ veor @b[5], @b[5], @b[0]
+ veor @b[3], @b[3], @b[7]
+ veor @b[6], @b[6], @b[2]
+ veor @b[2], @b[2], @b[1]
+ veor @b[6], @b[6], @b[3]
+
+ veor @b[3], @b[3], @b[0]
+ veor @b[5], @b[5], @b[6]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
+$code.=<<___;
+ veor $t0, $y0, $y1
+ vand $t0, $t0, $x0
+ veor $x0, $x0, $x1
+ vand $t1, $x1, $y0
+ vand $x0, $x0, $y1
+ veor $x1, $t1, $t0
+ veor $x0, $x0, $t1
+___
+}
+
+sub Mul_GF4_N { # not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+ veor $t0, $y0, $y1
+ vand $t0, $t0, $x0
+ veor $x0, $x0, $x1
+ vand $x1, $x1, $y0
+ vand $x0, $x0, $y1
+ veor $x1, $x1, $x0
+ veor $x0, $x0, $t0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+ $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+ veor $t0, $y0, $y1
+ veor $t1, $y2, $y3
+ vand $t0, $t0, $x0
+ vand $t1, $t1, $x2
+ veor $x0, $x0, $x1
+ veor $x2, $x2, $x3
+ vand $x1, $x1, $y0
+ vand $x3, $x3, $y2
+ vand $x0, $x0, $y1
+ vand $x2, $x2, $y3
+ veor $x1, $x1, $x0
+ veor $x2, $x2, $x3
+ veor $x0, $x0, $t0
+ veor $x3, $x3, $t1
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+ veor @t[0], @x[0], @x[2]
+ veor @t[1], @x[1], @x[3]
+___
+ &Mul_GF4 (@x[0], @x[1], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+ veor @y[0], @y[0], @y[2]
+ veor @y[1], @y[1], @y[3]
+___
+ &Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
+ @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+ veor @x[0], @x[0], @t[0]
+ veor @x[2], @x[2], @t[0]
+ veor @x[1], @x[1], @t[1]
+ veor @x[3], @x[3], @t[1]
+
+ veor @t[0], @x[4], @x[6]
+ veor @t[1], @x[5], @x[7]
+___
+ &Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
+ @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+ veor @y[0], @y[0], @y[2]
+ veor @y[1], @y[1], @y[3]
+___
+ &Mul_GF4 (@x[4], @x[5], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+ veor @x[4], @x[4], @t[0]
+ veor @x[6], @x[6], @t[0]
+ veor @x[5], @x[5], @t[1]
+ veor @x[7], @x[7], @t[1]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144) *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+ veor @t[3], @x[4], @x[6]
+ veor @t[2], @x[5], @x[7]
+ veor @t[1], @x[1], @x[3]
+ veor @s[1], @x[7], @x[6]
+ vmov @t[0], @t[2]
+ veor @s[0], @x[0], @x[2]
+
+ vorr @t[2], @t[2], @t[1]
+ veor @s[3], @t[3], @t[0]
+ vand @s[2], @t[3], @s[0]
+ vorr @t[3], @t[3], @s[0]
+ veor @s[0], @s[0], @t[1]
+ vand @t[0], @t[0], @t[1]
+ veor @t[1], @x[3], @x[2]
+ vand @s[3], @s[3], @s[0]
+ vand @s[1], @s[1], @t[1]
+ veor @t[1], @x[4], @x[5]
+ veor @s[0], @x[1], @x[0]
+ veor @t[3], @t[3], @s[1]
+ veor @t[2], @t[2], @s[1]
+ vand @s[1], @t[1], @s[0]
+ vorr @t[1], @t[1], @s[0]
+ veor @t[3], @t[3], @s[3]
+ veor @t[0], @t[0], @s[1]
+ veor @t[2], @t[2], @s[2]
+ veor @t[1], @t[1], @s[3]
+ veor @t[0], @t[0], @s[2]
+ vand @s[0], @x[7], @x[3]
+ veor @t[1], @t[1], @s[2]
+ vand @s[1], @x[6], @x[2]
+ vand @s[2], @x[5], @x[1]
+ vorr @s[3], @x[4], @x[0]
+ veor @t[3], @t[3], @s[0]
+ veor @t[1], @t[1], @s[2]
+ veor @t[0], @t[0], @s[3]
+ veor @t[2], @t[2], @s[1]
+
+ @ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+ @ new smaller inversion
+
+ vand @s[2], @t[3], @t[1]
+ vmov @s[0], @t[0]
+
+ veor @s[1], @t[2], @s[2]
+ veor @s[3], @t[0], @s[2]
+ veor @s[2], @t[0], @s[2] @ @s[2]=@s[3]
+
+ vbsl @s[1], @t[1], @t[0]
+ vbsl @s[3], @t[3], @t[2]
+ veor @t[3], @t[3], @t[2]
+
+ vbsl @s[0], @s[1], @s[2]
+ vbsl @t[0], @s[2], @s[1]
+
+ vand @s[2], @s[0], @s[3]
+ veor @t[1], @t[1], @t[0]
+
+ veor @s[2], @s[2], @t[3]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+ &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my @t=@_[8..11];
+my $mask=pop;
+$code.=<<___;
+ vldmia $key!, {@t[0]-@t[3]}
+ veor @t[0], @t[0], @x[0]
+ veor @t[1], @t[1], @x[1]
+ vtbl.8 `&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
+ vldmia $key!, {@t[0]}
+ veor @t[2], @t[2], @x[2]
+ vtbl.8 `&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
+ vldmia $key!, {@t[1]}
+ veor @t[3], @t[3], @x[3]
+ vtbl.8 `&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
+ vldmia $key!, {@t[2]}
+ vtbl.8 `&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
+ vldmia $key!, {@t[3]}
+ veor @t[0], @t[0], @x[4]
+ veor @t[1], @t[1], @x[5]
+ vtbl.8 `&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
+ veor @t[2], @t[2], @x[6]
+ vtbl.8 `&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
+ veor @t[3], @t[3], @x[7]
+ vtbl.8 `&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
+ vtbl.8 `&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
+___
+}
+
+sub MixColumns {
+# modified to emit output in order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16]; # optional
+$code.=<<___;
+ vext.8 @t[0], @x[0], @x[0], #12 @ x0 <<< 32
+ vext.8 @t[1], @x[1], @x[1], #12
+ veor @x[0], @x[0], @t[0] @ x0 ^ (x0 <<< 32)
+ vext.8 @t[2], @x[2], @x[2], #12
+ veor @x[1], @x[1], @t[1]
+ vext.8 @t[3], @x[3], @x[3], #12
+ veor @x[2], @x[2], @t[2]
+ vext.8 @t[4], @x[4], @x[4], #12
+ veor @x[3], @x[3], @t[3]
+ vext.8 @t[5], @x[5], @x[5], #12
+ veor @x[4], @x[4], @t[4]
+ vext.8 @t[6], @x[6], @x[6], #12
+ veor @x[5], @x[5], @t[5]
+ vext.8 @t[7], @x[7], @x[7], #12
+ veor @x[6], @x[6], @t[6]
+
+ veor @t[1], @t[1], @x[0]
+ veor @x[7], @x[7], @t[7]
+ vext.8 @x[0], @x[0], @x[0], #8 @ (x0 ^ (x0 <<< 32)) <<< 64
+ veor @t[2], @t[2], @x[1]
+ veor @t[0], @t[0], @x[7]
+ veor @t[1], @t[1], @x[7]
+ vext.8 @x[1], @x[1], @x[1], #8
+ veor @t[5], @t[5], @x[4]
+ veor @x[0], @x[0], @t[0]
+ veor @t[6], @t[6], @x[5]
+ veor @x[1], @x[1], @t[1]
+ vext.8 @t[0], @x[4], @x[4], #8
+ veor @t[4], @t[4], @x[3]
+ vext.8 @t[1], @x[5], @x[5], #8
+ veor @t[7], @t[7], @x[6]
+ vext.8 @x[4], @x[3], @x[3], #8
+ veor @t[3], @t[3], @x[2]
+ vext.8 @x[5], @x[7], @x[7], #8
+ veor @t[4], @t[4], @x[7]
+ vext.8 @x[3], @x[6], @x[6], #8
+ veor @t[3], @t[3], @x[7]
+ vext.8 @x[6], @x[2], @x[2], #8
+ veor @x[7], @t[1], @t[5]
+___
+$code.=<<___ if (!$inv);
+ veor @x[2], @t[0], @t[4]
+ veor @x[4], @x[4], @t[3]
+ veor @x[5], @x[5], @t[7]
+ veor @x[3], @x[3], @t[6]
+ @ vmov @x[2], @t[0]
+ veor @x[6], @x[6], @t[2]
+ @ vmov @x[7], @t[1]
+___
+$code.=<<___ if ($inv);
+ veor @t[3], @t[3], @x[4]
+ veor @x[5], @x[5], @t[7]
+ veor @x[2], @x[3], @t[6]
+ veor @x[3], @t[0], @t[4]
+ veor @x[4], @x[6], @t[2]
+ vmov @x[6], @t[3]
+ @ vmov @x[7], @t[1]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+ @ multiplication by 0x0e
+ vext.8 @t[7], @x[7], @x[7], #12
+ vmov @t[2], @x[2]
+ veor @x[2], @x[2], @x[5] @ 2 5
+ veor @x[7], @x[7], @x[5] @ 7 5
+ vext.8 @t[0], @x[0], @x[0], #12
+ vmov @t[5], @x[5]
+ veor @x[5], @x[5], @x[0] @ 5 0 [1]
+ veor @x[0], @x[0], @x[1] @ 0 1
+ vext.8 @t[1], @x[1], @x[1], #12
+ veor @x[1], @x[1], @x[2] @ 1 25
+ veor @x[0], @x[0], @x[6] @ 01 6 [2]
+ vext.8 @t[3], @x[3], @x[3], #12
+ veor @x[1], @x[1], @x[3] @ 125 3 [4]
+ veor @x[2], @x[2], @x[0] @ 25 016 [3]
+ veor @x[3], @x[3], @x[7] @ 3 75
+ veor @x[7], @x[7], @x[6] @ 75 6 [0]
+ vext.8 @t[6], @x[6], @x[6], #12
+ vmov @t[4], @x[4]
+ veor @x[6], @x[6], @x[4] @ 6 4
+ veor @x[4], @x[4], @x[3] @ 4 375 [6]
+ veor @x[3], @x[3], @x[7] @ 375 756=36
+ veor @x[6], @x[6], @t[5] @ 64 5 [7]
+ veor @x[3], @x[3], @t[2] @ 36 2
+ vext.8 @t[5], @t[5], @t[5], #12
+ veor @x[3], @x[3], @t[4] @ 362 4 [5]
+___
+ my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+ @ multiplication by 0x0b
+ veor @y[1], @y[1], @y[0]
+ veor @y[0], @y[0], @t[0]
+ vext.8 @t[2], @t[2], @t[2], #12
+ veor @y[1], @y[1], @t[1]
+ veor @y[0], @y[0], @t[5]
+ vext.8 @t[4], @t[4], @t[4], #12
+ veor @y[1], @y[1], @t[6]
+ veor @y[0], @y[0], @t[7]
+ veor @t[7], @t[7], @t[6] @ clobber t[7]
+
+ veor @y[3], @y[3], @t[0]
+ veor @y[1], @y[1], @y[0]
+ vext.8 @t[0], @t[0], @t[0], #12
+ veor @y[2], @y[2], @t[1]
+ veor @y[4], @y[4], @t[1]
+ vext.8 @t[1], @t[1], @t[1], #12
+ veor @y[2], @y[2], @t[2]
+ veor @y[3], @y[3], @t[2]
+ veor @y[5], @y[5], @t[2]
+ veor @y[2], @y[2], @t[7]
+ vext.8 @t[2], @t[2], @t[2], #12
+ veor @y[3], @y[3], @t[3]
+ veor @y[6], @y[6], @t[3]
+ veor @y[4], @y[4], @t[3]
+ veor @y[7], @y[7], @t[4]
+ vext.8 @t[3], @t[3], @t[3], #12
+ veor @y[5], @y[5], @t[4]
+ veor @y[7], @y[7], @t[7]
+ veor @t[7], @t[7], @t[5] @ clobber t[7] even more
+ veor @y[3], @y[3], @t[5]
+ veor @y[4], @y[4], @t[4]
+
+ veor @y[5], @y[5], @t[7]
+ vext.8 @t[4], @t[4], @t[4], #12
+ veor @y[6], @y[6], @t[7]
+ veor @y[4], @y[4], @t[7]
+
+ veor @t[7], @t[7], @t[5]
+ vext.8 @t[5], @t[5], @t[5], #12
+
+ @ multiplication by 0x0d
+ veor @y[4], @y[4], @y[7]
+ veor @t[7], @t[7], @t[6] @ restore t[7]
+ veor @y[7], @y[7], @t[4]
+ vext.8 @t[6], @t[6], @t[6], #12
+ veor @y[2], @y[2], @t[0]
+ veor @y[7], @y[7], @t[5]
+ vext.8 @t[7], @t[7], @t[7], #12
+ veor @y[2], @y[2], @t[2]
+
+ veor @y[3], @y[3], @y[1]
+ veor @y[1], @y[1], @t[1]
+ veor @y[0], @y[0], @t[0]
+ veor @y[3], @y[3], @t[0]
+ veor @y[1], @y[1], @t[5]
+ veor @y[0], @y[0], @t[5]
+ vext.8 @t[0], @t[0], @t[0], #12
+ veor @y[1], @y[1], @t[7]
+ veor @y[0], @y[0], @t[6]
+ veor @y[3], @y[3], @y[1]
+ veor @y[4], @y[4], @t[1]
+ vext.8 @t[1], @t[1], @t[1], #12
+
+ veor @y[7], @y[7], @t[7]
+ veor @y[4], @y[4], @t[2]
+ veor @y[5], @y[5], @t[2]
+ veor @y[2], @y[2], @t[6]
+ veor @t[6], @t[6], @t[3] @ clobber t[6]
+ vext.8 @t[2], @t[2], @t[2], #12
+ veor @y[4], @y[4], @y[7]
+ veor @y[3], @y[3], @t[6]
+
+ veor @y[6], @y[6], @t[6]
+ veor @y[5], @y[5], @t[5]
+ vext.8 @t[5], @t[5], @t[5], #12
+ veor @y[6], @y[6], @t[4]
+ vext.8 @t[4], @t[4], @t[4], #12
+ veor @y[5], @y[5], @t[6]
+ veor @y[6], @y[6], @t[7]
+ vext.8 @t[7], @t[7], @t[7], #12
+ veor @t[6], @t[6], @t[3] @ restore t[6]
+ vext.8 @t[3], @t[3], @t[3], #12
+
+ @ multiplication by 0x09
+ veor @y[4], @y[4], @y[1]
+ veor @t[1], @t[1], @y[1] @ t[1]=y[1]
+ veor @t[0], @t[0], @t[5] @ clobber t[0]
+ vext.8 @t[6], @t[6], @t[6], #12
+ veor @t[1], @t[1], @t[5]
+ veor @y[3], @y[3], @t[0]
+ veor @t[0], @t[0], @y[0] @ t[0]=y[0]
+ veor @t[1], @t[1], @t[6]
+ veor @t[6], @t[6], @t[7] @ clobber t[6]
+ veor @y[4], @y[4], @t[1]
+ veor @y[7], @y[7], @t[4]
+ veor @y[6], @y[6], @t[3]
+ veor @y[5], @y[5], @t[2]
+ veor @t[4], @t[4], @y[4] @ t[4]=y[4]
+ veor @t[3], @t[3], @y[3] @ t[3]=y[3]
+ veor @t[5], @t[5], @y[5] @ t[5]=y[5]
+ veor @t[2], @t[2], @y[2] @ t[2]=y[2]
+ veor @t[3], @t[3], @t[7]
+ veor @XMM[5], @t[5], @t[6]
+ veor @XMM[6], @t[6], @y[6] @ t[6]=y[6]
+ veor @XMM[2], @t[2], @t[6]
+ veor @XMM[7], @t[7], @y[7] @ t[7]=y[7]
+
+ vmov @XMM[0], @t[0]
+ vmov @XMM[1], @t[1]
+ @ vmov @XMM[2], @t[2]
+ vmov @XMM[3], @t[3]
+ vmov @XMM[4], @t[4]
+ @ vmov @XMM[5], @t[5]
+ @ vmov @XMM[6], @t[6]
+ @ vmov @XMM[7], @t[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing pointer to
+#
+# | 0e 0b 0d 09 | | 02 03 01 01 | | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b | | 01 01 02 03 | | 04 00 05 00 |
+# | 0b 0d 09 0e | | 03 01 01 02 | | 00 04 00 05 |
+
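+# Checking entry (0,0) of the product in GF(2^8), where addition is XOR:
+# 02*05 + 03*00 + 01*04 + 01*00 = 0x0a ^ 0x04 = 0x0e, matching the
+# InvMixColumns matrix; likewise entry (0,1) is 03*05 ^ 01*04 = 0x0f ^ 0x04
+# = 0x0b.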
+$code.=<<___;
+ @ multiplication by 0x05-0x00-0x04-0x00
+ vext.8 @t[0], @x[0], @x[0], #8
+ vext.8 @t[6], @x[6], @x[6], #8
+ vext.8 @t[7], @x[7], @x[7], #8
+ veor @t[0], @t[0], @x[0]
+ vext.8 @t[1], @x[1], @x[1], #8
+ veor @t[6], @t[6], @x[6]
+ vext.8 @t[2], @x[2], @x[2], #8
+ veor @t[7], @t[7], @x[7]
+ vext.8 @t[3], @x[3], @x[3], #8
+ veor @t[1], @t[1], @x[1]
+ vext.8 @t[4], @x[4], @x[4], #8
+ veor @t[2], @t[2], @x[2]
+ vext.8 @t[5], @x[5], @x[5], #8
+ veor @t[3], @t[3], @x[3]
+ veor @t[4], @t[4], @x[4]
+ veor @t[5], @t[5], @x[5]
+
+ veor @x[0], @x[0], @t[6]
+ veor @x[1], @x[1], @t[6]
+ veor @x[2], @x[2], @t[0]
+ veor @x[4], @x[4], @t[2]
+ veor @x[3], @x[3], @t[1]
+ veor @x[1], @x[1], @t[7]
+ veor @x[2], @x[2], @t[7]
+ veor @x[4], @x[4], @t[6]
+ veor @x[5], @x[5], @t[3]
+ veor @x[3], @x[3], @t[6]
+ veor @x[6], @x[6], @t[4]
+ veor @x[4], @x[4], @t[7]
+ veor @x[5], @x[5], @t[7]
+ veor @x[7], @x[7], @t[5]
+___
+ &MixColumns (@x,@t,1); # flipped 2<->3 and 4<->6
+}
+
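+# swapmove exchanges the bits of $a and $b selected by $mask at distance $n:
+# t = ((b >> n) ^ a) & mask; a ^= t; b ^= t << n.  Applied with the masks
+# 0x55/0x33/0x0f (n = 1, 2, 4) in bitslice() below, this transposes the state
+# as an 8x8 bit matrix, converting between byte-sliced and bit-sliced form.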
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+ vshr.u64 $t, $b, #$n
+ veor $t, $t, $a
+ vand $t, $t, $mask
+ veor $a, $a, $t
+ vshl.u64 $t, $t, #$n
+ veor $b, $b, $t
+___
+}
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+ vshr.u64 $t0, $b0, #$n
+ vshr.u64 $t1, $b1, #$n
+ veor $t0, $t0, $a0
+ veor $t1, $t1, $a1
+ vand $t0, $t0, $mask
+ vand $t1, $t1, $mask
+ veor $a0, $a0, $t0
+ vshl.u64 $t0, $t0, #$n
+ veor $a1, $a1, $t1
+ vshl.u64 $t1, $t1, #$n
+ veor $b0, $b0, $t0
+ veor $b1, $b1, $t1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+ vmov.i8 $t0,#0x55 @ compose .LBS0
+ vmov.i8 $t1,#0x33 @ compose .LBS1
+___
+ &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+ &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+ vmov.i8 $t0,#0x0f @ compose .LBS2
+___
+ &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+ &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+ &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
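+# In-kernel builds stub out the VFP ABI push/pop (the C glue brackets every
+# call with kernel_neon_begin/kernel_neon_end), keep the bit-sliced key
+# schedule alongside the caller's key structure (BSAES_ASM_EXTENDED_KEY, the
+# #248/#244 offsets below) instead of rebuilding it on the stack, and chain
+# the XTS tweak through memory (XTS_CHAIN_TWEAK) so one request can be
+# processed in several calls.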
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
+# define VFP_ABI_POP vldmia sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax unified @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code 32
+#endif
+
+.fpu neon
+
+.type _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+ adr $const,_bsaes_decrypt8
+ vldmia $key!, {@XMM[9]} @ round 0 key
+ add $const,$const,#.LM0ISR-_bsaes_decrypt8
+
+ vldmia $const!, {@XMM[8]} @ .LM0ISR
+ veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key
+ veor @XMM[11], @XMM[1], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ veor @XMM[12], @XMM[2], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+ veor @XMM[13], @XMM[3], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+ veor @XMM[14], @XMM[4], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+ veor @XMM[15], @XMM[5], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+ veor @XMM[10], @XMM[6], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+ veor @XMM[11], @XMM[7], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+___
+ &bitslice (@XMM[0..7, 8..11]);
+$code.=<<___;
+ sub $rounds,$rounds,#1
+ b .Ldec_sbox
+.align 4
+.Ldec_loop:
+___
+ &ShiftRows (@XMM[0..7, 8..12]);
+$code.=".Ldec_sbox:\n";
+ &InvSbox (@XMM[0..7, 8..15]);
+$code.=<<___;
+ subs $rounds,$rounds,#1
+ bcc .Ldec_done
+___
+ &InvMixColumns (@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+ vldmia $const, {@XMM[12]} @ .LISR
+ ite eq @ Thumb2 thing, sanity check in ARM
+ addeq $const,$const,#0x10
+ bne .Ldec_loop
+ vldmia $const, {@XMM[12]} @ .LISRM0
+ b .Ldec_loop
+.align 4
+.Ldec_done:
+___
+ &bitslice (@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+ vldmia $key, {@XMM[8]} @ last round key
+ veor @XMM[6], @XMM[6], @XMM[8]
+ veor @XMM[4], @XMM[4], @XMM[8]
+ veor @XMM[2], @XMM[2], @XMM[8]
+ veor @XMM[7], @XMM[7], @XMM[8]
+ veor @XMM[3], @XMM[3], @XMM[8]
+ veor @XMM[5], @XMM[5], @XMM[8]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ veor @XMM[1], @XMM[1], @XMM[8]
+ bx lr
+.size _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR: @ InvShiftRows constants
+ .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+ .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+ .quad 0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR: @ ShiftRows constants
+ .quad 0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+ .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+ .quad 0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+ .quad 0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+ .quad 0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 6
+.size _bsaes_const,.-_bsaes_const
+
+.type _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+ adr $const,_bsaes_encrypt8
+ vldmia $key!, {@XMM[9]} @ round 0 key
+ sub $const,$const,#_bsaes_encrypt8-.LM0SR
+
+ vldmia $const!, {@XMM[8]} @ .LM0SR
+_bsaes_encrypt8_alt:
+ veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key
+ veor @XMM[11], @XMM[1], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ veor @XMM[12], @XMM[2], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+ veor @XMM[13], @XMM[3], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+ veor @XMM[14], @XMM[4], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+ veor @XMM[15], @XMM[5], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+ veor @XMM[10], @XMM[6], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+ veor @XMM[11], @XMM[7], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+_bsaes_encrypt8_bitslice:
+___
+ &bitslice (@XMM[0..7, 8..11]);
+$code.=<<___;
+ sub $rounds,$rounds,#1
+ b .Lenc_sbox
+.align 4
+.Lenc_loop:
+___
+ &ShiftRows (@XMM[0..7, 8..12]);
+$code.=".Lenc_sbox:\n";
+ &Sbox (@XMM[0..7, 8..15]);
+$code.=<<___;
+ subs $rounds,$rounds,#1
+ bcc .Lenc_done
+___
+ &MixColumns (@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+ vldmia $const, {@XMM[12]} @ .LSR
+ ite eq @ Thumb2 thing, sanity check in ARM
+ addeq $const,$const,#0x10
+ bne .Lenc_loop
+ vldmia $const, {@XMM[12]} @ .LSRM0
+ b .Lenc_loop
+.align 4
+.Lenc_done:
+___
+ # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+ &bitslice (@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+ vldmia $key, {@XMM[8]} @ last round key
+ veor @XMM[4], @XMM[4], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[8]
+ veor @XMM[3], @XMM[3], @XMM[8]
+ veor @XMM[7], @XMM[7], @XMM[8]
+ veor @XMM[2], @XMM[2], @XMM[8]
+ veor @XMM[5], @XMM[5], @XMM[8]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ veor @XMM[1], @XMM[1], @XMM[8]
+ bx lr
+.size _bsaes_encrypt8,.-_bsaes_encrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+ &swapmove (@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+ @ &swapmove(@x[2,3],1,$t0,$t2,$t3);
+ vmov @x[2], @x[0]
+ vmov @x[3], @x[1]
+___
+ #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+ &swapmove2x (@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+ @ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+ vmov @x[4], @x[0]
+ vmov @x[6], @x[2]
+ vmov @x[5], @x[1]
+ vmov @x[7], @x[3]
+___
+ &swapmove2x (@x[0,4,1,5],4,$bs2,$t2,$t3);
+ &swapmove2x (@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
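+# _bsaes_key_convert turns the scalar key schedule into bit-sliced form: each
+# round key is byte-permuted through .LM0 and tested against the masks
+# 0x01..0x80 (vtst), so that bit i of every key byte is replicated across
+# vector i.  The vectors for bits 0, 1, 5 and 6 (the set bits of 0x63, the
+# AES Sbox affine constant) are inverted to fold that constant into the key
+# schedule.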
+$code.=<<___;
+.type _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+ adr $const,_bsaes_key_convert
+ vld1.8 {@XMM[7]}, [$inp]! @ load round 0 key
+ sub $const,$const,#_bsaes_key_convert-.LM0
+ vld1.8 {@XMM[15]}, [$inp]! @ load round 1 key
+
+ vmov.i8 @XMM[8], #0x01 @ bit masks
+ vmov.i8 @XMM[9], #0x02
+ vmov.i8 @XMM[10], #0x04
+ vmov.i8 @XMM[11], #0x08
+ vmov.i8 @XMM[12], #0x10
+ vmov.i8 @XMM[13], #0x20
+ vldmia $const, {@XMM[14]} @ .LM0
+
+#ifdef __ARMEL__
+ vrev32.8 @XMM[7], @XMM[7]
+ vrev32.8 @XMM[15], @XMM[15]
+#endif
+ sub $rounds,$rounds,#1
+ vstmia $out!, {@XMM[7]} @ save round 0 key
+ b .Lkey_loop
+
+.align 4
+.Lkey_loop:
+ vtbl.8 `&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
+ vtbl.8 `&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
+ vmov.i8 @XMM[6], #0x40
+ vmov.i8 @XMM[15], #0x80
+
+ vtst.8 @XMM[0], @XMM[7], @XMM[8]
+ vtst.8 @XMM[1], @XMM[7], @XMM[9]
+ vtst.8 @XMM[2], @XMM[7], @XMM[10]
+ vtst.8 @XMM[3], @XMM[7], @XMM[11]
+ vtst.8 @XMM[4], @XMM[7], @XMM[12]
+ vtst.8 @XMM[5], @XMM[7], @XMM[13]
+ vtst.8 @XMM[6], @XMM[7], @XMM[6]
+ vtst.8 @XMM[7], @XMM[7], @XMM[15]
+ vld1.8 {@XMM[15]}, [$inp]! @ load next round key
+ vmvn @XMM[0], @XMM[0] @ "pnot"
+ vmvn @XMM[1], @XMM[1]
+ vmvn @XMM[5], @XMM[5]
+ vmvn @XMM[6], @XMM[6]
+#ifdef __ARMEL__
+ vrev32.8 @XMM[15], @XMM[15]
+#endif
+ subs $rounds,$rounds,#1
+ vstmia $out!,{@XMM[0]-@XMM[7]} @ write bit-sliced round key
+ bne .Lkey_loop
+
+ vmov.i8 @XMM[7],#0x63 @ compose .L63
+ @ don't save last round key
+ bx lr
+.size _bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0) { # the following four functions are an unsupported interface
+ # used for benchmarking...
+$code.=<<___;
+.globl bsaes_enc_key_convert
+.type bsaes_enc_key_convert,%function
+.align 4
+bsaes_enc_key_convert:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ ldr r5,[$inp,#240] @ pass rounds
+ mov r4,$inp @ pass key
+ mov r12,$out @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl bsaes_encrypt_128
+.type bsaes_encrypt_128,%function
+.align 4
+bsaes_encrypt_128:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+.Lenc128_loop:
+ vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
+ vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
+ mov r4,$key @ pass the key
+ vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
+ mov r5,#10 @ pass rounds
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+
+ bl _bsaes_encrypt8
+
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ subs $len,$len,#0x80
+ vst1.8 {@XMM[5]}, [$out]!
+ bhi .Lenc128_loop
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl bsaes_dec_key_convert
+.type bsaes_dec_key_convert,%function
+.align 4
+bsaes_dec_key_convert:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ ldr r5,[$inp,#240] @ pass rounds
+ mov r4,$inp @ pass key
+ mov r12,$out @ pass key schedule
+ bl _bsaes_key_convert
+ vldmia $out, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia $out, {@XMM[7]}
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl bsaes_decrypt_128
+.type bsaes_decrypt_128,%function
+.align 4
+bsaes_decrypt_128:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+.Ldec128_loop:
+ vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
+ vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
+ mov r4,$key @ pass the key
+ vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
+ mov r5,#10 @ pass rounds
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+
+ bl _bsaes_decrypt8
+
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ subs $len,$len,#0x80
+ vst1.8 {@XMM[5]}, [$out]!
+ bhi .Ldec128_loop
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
+my ($keysched)=("sp");
+
+$code.=<<___;
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global bsaes_cbc_encrypt
+.type bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef __KERNEL__
+ cmp $len, #128
+#ifndef __thumb__
+ blo AES_cbc_encrypt
+#else
+ bhs 1f
+ b AES_cbc_encrypt
+1:
+#endif
+#endif
+
+ @ it is up to the caller to make sure we are called with enc == 0
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr $ivp, [ip] @ IV is 1st arg on the stack
+ mov $len, $len, lsr#4 @ len in 16 byte blocks
+ sub sp, #0x10 @ scratch space to carry over the IV
+ mov $fp, sp @ save sp
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+ add r12, #`128-32` @ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12 @ sp is $keysched
+ bl _bsaes_key_convert
+ vldmia $keysched, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia $keysched, {@XMM[7]}
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, $key, #248
+ vldmia r4, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia r4, {@XMM[7]}
+
+.align 2
+0:
+#endif
+
+ vld1.8 {@XMM[15]}, [$ivp] @ load IV
+ b .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+ subs $len, $len, #0x8
+ bmi .Lcbc_dec_loop_finish
+
+ vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
+ vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, $keysched @ pass the key
+#else
+ add r4, $key, #248
+#endif
+ vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
+ mov r5, $rounds
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]
+ sub $inp, $inp, #0x60
+ vstmia $fp, {@XMM[15]} @ put aside IV
+
+ bl _bsaes_decrypt8
+
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vld1.8 {@XMM[14]-@XMM[15]}, [$inp]!
+ veor @XMM[7], @XMM[7], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[3], @XMM[3], @XMM[13]
+ vst1.8 {@XMM[6]}, [$out]!
+ veor @XMM[5], @XMM[5], @XMM[14]
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ vst1.8 {@XMM[5]}, [$out]!
+
+ b .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+ adds $len, $len, #8
+ beq .Lcbc_dec_done
+
+ vld1.8 {@XMM[0]}, [$inp]! @ load input
+ cmp $len, #2
+ blo .Lcbc_dec_one
+ vld1.8 {@XMM[1]}, [$inp]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, $keysched @ pass the key
+#else
+ add r4, $key, #248
+#endif
+ mov r5, $rounds
+ vstmia $fp, {@XMM[15]} @ put aside IV
+ beq .Lcbc_dec_two
+ vld1.8 {@XMM[2]}, [$inp]!
+ cmp $len, #4
+ blo .Lcbc_dec_three
+ vld1.8 {@XMM[3]}, [$inp]!
+ beq .Lcbc_dec_four
+ vld1.8 {@XMM[4]}, [$inp]!
+ cmp $len, #6
+ blo .Lcbc_dec_five
+ vld1.8 {@XMM[5]}, [$inp]!
+ beq .Lcbc_dec_six
+ vld1.8 {@XMM[6]}, [$inp]!
+ sub $inp, $inp, #0x70
+
+ bl _bsaes_decrypt8
+
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[7], @XMM[7], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[3], @XMM[3], @XMM[13]
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+ sub $inp, $inp, #0x60
+ bl _bsaes_decrypt8
+ vldmia $fp,{@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[12]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[7], @XMM[7], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+ sub $inp, $inp, #0x50
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+ sub $inp, $inp, #0x40
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+ sub $inp, $inp, #0x30
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+ sub $inp, $inp, #0x20
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[15]}, [$inp]! @ reload input
+ veor @XMM[1], @XMM[1], @XMM[8]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+ sub $inp, $inp, #0x10
+ mov $rounds, $out @ save original out pointer
+ mov $out, $fp @ use the iv scratch space as out buffer
+ mov r2, $key
+ vmov @XMM[4],@XMM[15] @ just in case ensure that IV
+ vmov @XMM[5],@XMM[0] @ and input are preserved
+ bl AES_decrypt
+ vld1.8 {@XMM[0]}, [$fp,:64] @ load result
+ veor @XMM[0], @XMM[0], @XMM[4] @ ^= IV
+ vmov @XMM[15], @XMM[5] @ @XMM[5] holds input
+ vst1.8 {@XMM[0]}, [$rounds] @ write output
+
+.Lcbc_dec_done:
+#ifndef BSAES_ASM_EXTENDED_KEY
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+.Lcbc_dec_bzero: @ wipe key schedule [if any]
+ vstmia $keysched!, {q0-q1}
+ cmp $keysched, $fp
+ bne .Lcbc_dec_bzero
+#endif
+
+ mov sp, $fp
+ add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb
+ vst1.8 {@XMM[15]}, [$ivp] @ return IV
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc}
+.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+___
+}
+{
+my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
+my $const = "r6"; # shared with _bsaes_encrypt8_alt
+my $keysched = "sp";
+
+$code.=<<___;
+.extern AES_encrypt
+.global bsaes_ctr32_encrypt_blocks
+.type bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+ cmp $len, #8 @ use plain AES for
+ blo .Lctr_enc_short @ small sizes
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr $ctr, [ip] @ ctr is 1st arg on the stack
+ sub sp, sp, #0x10 @ scratch space to carry over the ctr
+ mov $fp, sp @ save sp
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+ add r12, #`128-32` @ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12 @ sp is $keysched
+ bl _bsaes_key_convert
+ veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+
+ vld1.8 {@XMM[0]}, [$ctr] @ load counter
+ add $ctr, $const, #.LREVM0SR-.LM0 @ borrow $ctr
+ vldmia $keysched, {@XMM[4]} @ load round0 key
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+
+.align 2
+0: add r12, $key, #248
+ vld1.8 {@XMM[0]}, [$ctr] @ load counter
+ adrl $ctr, .LREVM0SR @ borrow $ctr
+ vldmia r12, {@XMM[4]} @ load round0 key
+ sub sp, #0x10 @ place for adjusted round0 key
+#endif
+
+ vmov.i32 @XMM[8],#1 @ compose 1<<96
+ veor @XMM[9],@XMM[9],@XMM[9]
+ vrev32.8 @XMM[0],@XMM[0]
+ vext.8 @XMM[8],@XMM[9],@XMM[8],#4
+ vrev32.8 @XMM[4],@XMM[4]
+ vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
+ vstmia $keysched, {@XMM[4]} @ save adjusted round0 key
+ b .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+ vadd.u32 @XMM[10], @XMM[8], @XMM[9] @ compose 3<<96
+ vadd.u32 @XMM[1], @XMM[0], @XMM[8] @ +1
+ vadd.u32 @XMM[2], @XMM[0], @XMM[9] @ +2
+ vadd.u32 @XMM[3], @XMM[0], @XMM[10] @ +3
+ vadd.u32 @XMM[4], @XMM[1], @XMM[10]
+ vadd.u32 @XMM[5], @XMM[2], @XMM[10]
+ vadd.u32 @XMM[6], @XMM[3], @XMM[10]
+ vadd.u32 @XMM[7], @XMM[4], @XMM[10]
+ vadd.u32 @XMM[10], @XMM[5], @XMM[10] @ next counter
+
+ @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+ @ to flip byte order in 32-bit counter
+
+ vldmia $keysched, {@XMM[9]} @ load round0 key
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, $keysched, #0x10 @ pass next round key
+#else
+ add r4, $key, #`248+16`
+#endif
+ vldmia $ctr, {@XMM[8]} @ .LREVM0SR
+ mov r5, $rounds @ pass rounds
+ vstmia $fp, {@XMM[10]} @ save next counter
+ sub $const, $ctr, #.LREVM0SR-.LSR @ pass constants
+
+ bl _bsaes_encrypt8_alt
+
+ subs $len, $len, #8
+ blo .Lctr_enc_loop_done
+
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ load input
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[0], @XMM[8]
+ veor @XMM[1], @XMM[9]
+ vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
+ veor @XMM[4], @XMM[10]
+ veor @XMM[6], @XMM[11]
+ vld1.8 {@XMM[14]-@XMM[15]}, [$inp]!
+ veor @XMM[3], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[7], @XMM[13]
+ veor @XMM[2], @XMM[14]
+ vst1.8 {@XMM[4]}, [$out]!
+ veor @XMM[5], @XMM[15]
+ vst1.8 {@XMM[6]}, [$out]!
+ vmov.i32 @XMM[8], #1 @ compose 1<<96
+ vst1.8 {@XMM[3]}, [$out]!
+ veor @XMM[9], @XMM[9], @XMM[9]
+ vst1.8 {@XMM[7]}, [$out]!
+ vext.8 @XMM[8], @XMM[9], @XMM[8], #4
+ vst1.8 {@XMM[2]}, [$out]!
+ vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
+ vst1.8 {@XMM[5]}, [$out]!
+ vldmia $fp, {@XMM[0]} @ load counter
+
+ bne .Lctr_enc_loop
+ b .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+ add $len, $len, #8
+ vld1.8 {@XMM[8]}, [$inp]! @ load input
+ veor @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [$out]! @ write output
+ cmp $len, #2
+ blo .Lctr_enc_done
+ vld1.8 {@XMM[9]}, [$inp]!
+ veor @XMM[1], @XMM[9]
+ vst1.8 {@XMM[1]}, [$out]!
+ beq .Lctr_enc_done
+ vld1.8 {@XMM[10]}, [$inp]!
+ veor @XMM[4], @XMM[10]
+ vst1.8 {@XMM[4]}, [$out]!
+ cmp $len, #4
+ blo .Lctr_enc_done
+ vld1.8 {@XMM[11]}, [$inp]!
+ veor @XMM[6], @XMM[11]
+ vst1.8 {@XMM[6]}, [$out]!
+ beq .Lctr_enc_done
+ vld1.8 {@XMM[12]}, [$inp]!
+ veor @XMM[3], @XMM[12]
+ vst1.8 {@XMM[3]}, [$out]!
+ cmp $len, #6
+ blo .Lctr_enc_done
+ vld1.8 {@XMM[13]}, [$inp]!
+ veor @XMM[7], @XMM[13]
+ vst1.8 {@XMM[7]}, [$out]!
+ beq .Lctr_enc_done
+ vld1.8 {@XMM[14]}, [$inp]
+ veor @XMM[2], @XMM[14]
+ vst1.8 {@XMM[2]}, [$out]!
+
+.Lctr_enc_done:
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifndef BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero: @ wipe key schedule [if any]
+ vstmia $keysched!, {q0-q1}
+ cmp $keysched, $fp
+ bne .Lctr_enc_bzero
+#else
+ vstmia $keysched, {q0-q1}
+#endif
+
+ mov sp, $fp
+ add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.align 4
+.Lctr_enc_short:
+ ldr ip, [sp] @ ctr pointer is passed on stack
+ stmdb sp!, {r4-r8, lr}
+
+ mov r4, $inp @ copy arguments
+ mov r5, $out
+ mov r6, $len
+ mov r7, $key
+ ldr r8, [ip, #12] @ load counter LSW
+ vld1.8 {@XMM[1]}, [ip] @ load whole counter value
+#ifdef __ARMEL__
+ rev r8, r8
+#endif
+ sub sp, sp, #0x10
+ vst1.8 {@XMM[1]}, [sp,:64] @ copy counter value
+ sub sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+ add r0, sp, #0x10 @ input counter value
+ mov r1, sp @ output on the stack
+ mov r2, r7 @ key
+
+ bl AES_encrypt
+
+ vld1.8 {@XMM[0]}, [r4]! @ load input
+ vld1.8 {@XMM[1]}, [sp,:64] @ load encrypted counter
+ add r8, r8, #1
+#ifdef __ARMEL__
+ rev r0, r8
+ str r0, [sp, #0x1c] @ next counter value
+#else
+ str r8, [sp, #0x1c] @ next counter value
+#endif
+ veor @XMM[0],@XMM[0],@XMM[1]
+ vst1.8 {@XMM[0]}, [r5]! @ store output
+ subs r6, r6, #1
+ bne .Lctr_enc_short_loop
+
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+ vstmia sp!, {q0-q1}
+
+ ldmia sp!, {r4-r8, pc}
+.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+}
+{
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+# const AES_KEY *key1, const AES_KEY *key2,
+# const unsigned char iv[16]);
+#
+my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
+my $const="r6"; # returned by _bsaes_key_convert
+my $twmask=@XMM[5];
+my @T=@XMM[6..7];
+
+$code.=<<___;
+.globl bsaes_xts_encrypt
+.type bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future $fp
+
+ mov $inp, r0
+ mov $out, r1
+ mov $len, r2
+ mov $key, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0,sp @ pointer to initial tweak
+#endif
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+ mov $fp, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #`128-32` @ size of bit-sliced key schedule
+ sub r12, #`32+16` @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+
+ vld1.8 {@XMM[8]}, [r0] @ initial tweak
+ adr $magic, .Lxts_magic
+
+ subs $len, #0x80
+ blo .Lxts_enc_short
+ b .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
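+# Each pass of this loop doubles the previous tweak in GF(2^128): the 64-bit
+# halves are shifted left by one (vadd.u64), the carry out of the low half is
+# propagated into bit 64, and a carry out of bit 127 is folded back into the
+# low byte as 0x87 (the .Lxts_magic constant), while the corresponding input
+# block is loaded and XORed with its tweak.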
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ vadd.u64 @XMM[8], @XMM[15], @XMM[15]
+ vst1.64 {@XMM[15]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ veor @XMM[8], @XMM[8], @T[0]
+ vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ veor @XMM[7], @XMM[7], @XMM[15]
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]!
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[2], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ veor @XMM[13], @XMM[5], @XMM[15]
+ vst1.8 {@XMM[12]-@XMM[13]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ subs $len, #0x80
+ bpl .Lxts_enc_loop
+
+.Lxts_enc_short:
+ adds $len, #0x70
+ bmi .Lxts_enc_done
+
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+ subs $len, #0x10
+ bmi .Lxts_enc_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ sub $len, #0x10
+ vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ vld1.64 {@XMM[14]}, [r0,:128]!
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[2], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ vst1.8 {@XMM[12]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+ vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak
+
+ veor @XMM[4], @XMM[4], @XMM[12]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[5], @XMM[5], @XMM[13]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+ .quad 1, 0x87
+
+.align 5
+.Lxts_enc_5:
+ vst1.64 {@XMM[13]}, [r0,:128] @ next round tweak
+
+ veor @XMM[3], @XMM[3], @XMM[11]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[4], @XMM[4], @XMM[12]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ vst1.8 {@XMM[10]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+ vst1.64 {@XMM[12]}, [r0,:128] @ next round tweak
+
+ veor @XMM[2], @XMM[2], @XMM[10]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[3], @XMM[3], @XMM[11]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+ vst1.64 {@XMM[11]}, [r0,:128] @ next round tweak
+
+ veor @XMM[1], @XMM[1], @XMM[9]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[2], @XMM[2], @XMM[10]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ vld1.64 {@XMM[10]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ vst1.8 {@XMM[8]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+ vst1.64 {@XMM[10]}, [r0,:128] @ next round tweak
+
+ veor @XMM[0], @XMM[0], @XMM[8]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[1], @XMM[1], @XMM[9]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+ mov r0, sp
+ veor @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [$out]!
+ mov $fp, r4
+
+ vmov @XMM[8], @XMM[9] @ next round tweak
+
+.Lxts_enc_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds $len, #0x10
+ beq .Lxts_enc_ret
+ sub r6, $out, #0x10
+
+.Lxts_enc_steal:
+ ldrb r0, [$inp], #1
+ ldrb r1, [$out, #-0x10]
+ strb r0, [$out, #-0x10]
+ strb r1, [$out], #1
+
+ subs $len, #1
+ bhi .Lxts_enc_steal
+
+ vld1.8 {@XMM[0]}, [r6]
+ mov r0, sp
+ veor @XMM[0], @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [r6]
+ mov $fp, r4
+#endif
+
+.Lxts_enc_ret:
+ bic r0, $fp, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_enc_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_enc_bzero
+
+ mov sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {@XMM[8]}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl bsaes_xts_decrypt
+.type bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future $fp
+
+ mov $inp, r0
+ mov $out, r1
+ mov $len, r2
+ mov $key, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0, sp @ pointer to initial tweak
+#endif
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+ mov $fp, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #`128-32` @ size of bit-sliced key schedule
+ sub r12, #`32+16` @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, sp, #0x90
+ vldmia r4, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia r4, {@XMM[7]}
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, $key, #248
+ vldmia r4, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia r4, {@XMM[7]}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+ vld1.8 {@XMM[8]}, [r0] @ initial tweak
+ adr $magic, .Lxts_magic
+
+ tst $len, #0xf @ if not multiple of 16
+ it ne @ Thumb2 thing, sanity check in ARM
+ subne $len, #0x10 @ subtract another 16 bytes
+ subs $len, #0x80
+
+ blo .Lxts_dec_short
+ b .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ vadd.u64 @XMM[8], @XMM[15], @XMM[15]
+ vst1.64 {@XMM[15]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ veor @XMM[8], @XMM[8], @T[0]
+ vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ veor @XMM[7], @XMM[7], @XMM[15]
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]!
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[3], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ veor @XMM[13], @XMM[5], @XMM[15]
+ vst1.8 {@XMM[12]-@XMM[13]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ subs $len, #0x80
+ bpl .Lxts_dec_loop
+
+.Lxts_dec_short:
+ adds $len, #0x70
+ bmi .Lxts_dec_done
+
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+ subs $len, #0x10
+ bmi .Lxts_dec_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ sub $len, #0x10
+ vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ vld1.64 {@XMM[14]}, [r0,:128]!
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[3], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ vst1.8 {@XMM[12]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+ vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak
+
+ veor @XMM[4], @XMM[4], @XMM[12]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[5], @XMM[5], @XMM[13]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+ vst1.64 {@XMM[13]}, [r0,:128] @ next round tweak
+
+ veor @XMM[3], @XMM[3], @XMM[11]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[4], @XMM[4], @XMM[12]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ vst1.8 {@XMM[10]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+ vst1.64 {@XMM[12]}, [r0,:128] @ next round tweak
+
+ veor @XMM[2], @XMM[2], @XMM[10]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[3], @XMM[3], @XMM[11]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+ vst1.64 {@XMM[11]}, [r0,:128] @ next round tweak
+
+ veor @XMM[1], @XMM[1], @XMM[9]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[2], @XMM[2], @XMM[10]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ vld1.64 {@XMM[10]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ vst1.8 {@XMM[8]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+ vst1.64 {@XMM[10]}, [r0,:128] @ next round tweak
+
+ veor @XMM[0], @XMM[0], @XMM[8]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[1], @XMM[1], @XMM[9]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+ mov r0, sp
+ veor @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+ mov r5, $magic @ preserve magic
+
+ bl AES_decrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [$out]!
+ mov $fp, r4
+ mov $magic, r5
+
+ vmov @XMM[8], @XMM[9] @ next round tweak
+
+.Lxts_dec_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds $len, #0x10
+ beq .Lxts_dec_ret
+
+ @ calculate one round of extra tweak for the stolen ciphertext
+ vldmia $magic, {$twmask}
+ vshr.s64 @XMM[6], @XMM[8], #63
+ vand @XMM[6], @XMM[6], $twmask
+ vadd.u64 @XMM[9], @XMM[8], @XMM[8]
+ vswp `&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
+ veor @XMM[9], @XMM[9], @XMM[6]
+
+ @ perform the final decryption with the last tweak value
+ vld1.8 {@XMM[0]}, [$inp]!
+ mov r0, sp
+ veor @XMM[0], @XMM[0], @XMM[9]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+
+ bl AES_decrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[9]
+ vst1.8 {@XMM[0]}, [$out]
+
+ mov r6, $out
+.Lxts_dec_steal:
+ ldrb r1, [$out]
+ ldrb r0, [$inp], #1
+ strb r1, [$out, #0x10]
+ strb r0, [$out], #1
+
+ subs $len, #1
+ bhi .Lxts_dec_steal
+
+ vld1.8 {@XMM[0]}, [r6]
+ mov r0, sp
+ veor @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+
+ bl AES_decrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [r6]
+ mov $fp, r4
+#endif
+
+.Lxts_dec_ret:
+ bic r0, $fp, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_dec_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_dec_bzero
+
+ mov sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {@XMM[8]}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+open SELF,$0;
+while(<SELF>) {
+ next if (/^#!/);
+ last if (!s/^#/@/ and !/^$/);
+ print;
+}
+close SELF;
+
+print $code;
+
+close STDOUT;
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 1a7024b41351..c38b58c80202 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -24,6 +24,7 @@ generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += siginfo.h
+generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index fcc1b5bf6979..5c2285160575 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -53,6 +53,13 @@
#define put_byte_3 lsl #0
#endif
+/* Select code for any configuration running in BE8 mode */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ARM_BE8(code...) code
+#else
+#define ARM_BE8(code...)
+#endif
+
/*
* Data preload for architectures that support it
*/
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..62d2cb53b069 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -12,6 +12,7 @@
#define __ASM_ARM_ATOMIC_H
#include <linux/compiler.h>
+#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
@@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)
unsigned long tmp;
int result;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
@@ -79,6 +81,7 @@ static inline void atomic_sub(int i, atomic_t *v)
unsigned long tmp;
int result;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
@@ -114,7 +117,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
- unsigned long oldval, res;
+ int oldval;
+ unsigned long res;
smp_mb();
@@ -134,21 +138,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long tmp, tmp2;
-
- __asm__ __volatile__("@ atomic_clear_mask\n"
-"1: ldrex %0, [%3]\n"
-" bic %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
- : "r" (addr), "Ir" (mask)
- : "cc");
-}
-
#else /* ARM_ARCH_6 */
#ifdef CONFIG_SMP
@@ -197,15 +186,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags);
- *addr &= ~mask;
- raw_local_irq_restore(flags);
-}
-
#endif /* __LINUX_ARM_ARCH__ */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -238,15 +218,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
- u64 __aligned(8) counter;
+ long long counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
{
- u64 result;
+ long long result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
@@ -257,7 +237,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
@@ -266,9 +246,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
);
}
#else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
{
- u64 result;
+ long long result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
@@ -279,10 +259,11 @@ static inline u64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
{
- u64 tmp;
+ long long tmp;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
@@ -294,15 +275,16 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
}
#endif
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add\n"
"1: ldrexd %0, %H0, [%3]\n"
-" adds %0, %0, %4\n"
-" adc %H0, %H0, %H4\n"
+" adds %Q0, %Q0, %Q4\n"
+" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -311,17 +293,17 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
: "cc");
}
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
-" adds %0, %0, %4\n"
-" adc %H0, %H0, %H4\n"
+" adds %Q0, %Q0, %Q4\n"
+" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -334,15 +316,16 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
return result;
}
-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+" subs %Q0, %Q0, %Q4\n"
+" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -351,17 +334,17 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
: "cc");
}
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+" subs %Q0, %Q0, %Q4\n"
+" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -374,9 +357,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
return result;
}
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+ long long new)
{
- u64 oldval;
+ long long oldval;
unsigned long res;
smp_mb();
@@ -398,9 +382,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
return oldval;
}
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
@@ -419,18 +403,18 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
return result;
}
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
- u64 result;
+ long long result;
unsigned long tmp;
smp_mb();
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, #1\n"
-" sbc %H0, %H0, #0\n"
-" teq %H0, #0\n"
+" subs %Q0, %Q0, #1\n"
+" sbc %R0, %R0, #0\n"
+" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
@@ -445,9 +429,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
return result;
}
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
- u64 val;
+ long long val;
unsigned long tmp;
int ret = 1;
@@ -459,8 +443,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
" beq 2f\n"
-" adds %0, %0, %6\n"
-" adc %H0, %H0, %H6\n"
+" adds %Q0, %Q0, %Q6\n"
+" adc %R0, %R0, %R6\n"
" strexd %2, %0, %H0, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 000000000000..1714800fa113
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+ bL_switch_completion_handler completer,
+ void *completer_cookie);
+static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+ return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled. Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled(). These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
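
A rough usage sketch for the notifier and get/put interfaces declared above; the client name and callback body are hypothetical, and only the bL_switcher_* and BL_NOTIFY_* identifiers come from this header:

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <asm/bL_switcher.h>

    /* Hypothetical client pausing its per-CPU work around switcher transitions. */
    static int my_client_bl_notify(struct notifier_block *nb,
                                   unsigned long action, void *data)
    {
            switch (action) {
            case BL_NOTIFY_PRE_ENABLE:
            case BL_NOTIFY_PRE_DISABLE:
                    /* quiesce per-CPU activity here */
                    break;
            case BL_NOTIFY_POST_ENABLE:
            case BL_NOTIFY_POST_DISABLE:
                    /* resume */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_client_bl_nb = {
            .notifier_call = my_client_bl_notify,
    };

    static int __init my_client_init(void)
    {
            int ret = bL_switcher_register_notifier(&my_client_bl_nb);

            if (ret)
                    return ret;

            /* Pin the current state while we look at it; must always be paired. */
            if (bL_switcher_get_enabled())
                    pr_info("big.LITTLE switcher is enabled\n");
            bL_switcher_put_enabled();

            return 0;
    }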
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7af5c6c3653a..b274bde24905 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -2,6 +2,8 @@
#define _ASMARM_BUG_H
#include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/opcodes.h>
#ifdef CONFIG_BUG
@@ -12,10 +14,10 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
#define BUG_INSTR_VALUE 0xde02
-#define BUG_INSTR_TYPE ".hword "
+#define BUG_INSTR(__value) __inst_thumb16(__value)
#else
#define BUG_INSTR_VALUE 0xe7f001f2
-#define BUG_INSTR_TYPE ".word "
+#define BUG_INSTR(__value) __inst_arm(__value)
#endif
@@ -33,7 +35,7 @@
#define __BUG(__file, __line, __value) \
do { \
- asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
+ asm volatile("1:\t" BUG_INSTR(__value) "\n" \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
@@ -48,7 +50,7 @@ do { \
#define __BUG(__file, __line, __value) \
do { \
- asm volatile(BUG_INSTR_TYPE #__value); \
+ asm volatile(BUG_INSTR(__value) "\n"); \
unreachable(); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 15f2d5bf8875..ee753f1749cd 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -435,4 +435,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and do so without any intervening memory access between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ * however some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ * fp is preserved to the stack explicitly prior to disabling the cache,
+ * since adding it to the clobber list is incompatible with having
+ * CONFIG_FRAME_POINTER=y. ip is saved as well in case r12-clobbering
+ * trampolines are inserted by the linker, and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+ asm volatile( \
+ "stmfd sp!, {fp, ip} \n\t" \
+ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
+ "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
+ "isb \n\t" \
+ "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+ "clrex \n\t" \
+ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+ "isb \n\t" \
+ "dsb \n\t" \
+ "ldmfd sp!, {fp, ip}" \
+ : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+ "r9","r10","lr","memory" )
+
#endif
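
A hedged sketch of the intended call site for the macro above, typically the tail of a platform CPU power-down path. The surrounding function and the power-controller helper are hypothetical; "louis" and "all" are the existing v7_flush_dcache_* suffixes the macro expects:

    /* Hypothetical CPU-down path on an ARMv7 SMP platform. */
    static void my_platform_cpu_die(unsigned int cpu)
    {
            /* ... platform bookkeeping, interrupts already disabled ... */

            /* Turn off the D-cache, flush L1, and leave SMP coherency. */
            v7_exit_coherency_flush(louis);

            /* From here on: no ldrex/strex, CPU is non-coherent; power it off. */
            my_power_controller_cpu_off(cpu);       /* hypothetical */
    }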
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c10540d..df2fbba7efc8 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long oldval;
+ unsigned long res;
+
+ __asm__ __volatile__(
+"1: ldrexd %1, %H1, [%3]\n"
+" teq %1, %4\n"
+" teqeq %H1, %H4\n"
+" bne 2f\n"
+" strexd %0, %5, %H5, [%3]\n"
+" teq %0, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+ : "r" (ptr), "r" (old), "r" (new)
+ : "cc");
+
+ return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long ret;
+
+ smp_mb();
+ ret = __cmpxchg64(ptr, old, new);
+ smp_mb();
+
+ return ret;
+}
+
#define cmpxchg_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), \
@@ -230,18 +266,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n) \
- ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
- atomic64_t, \
- counter), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n) \
- ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
- local64_t, \
- a), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
+ ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_relaxed(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
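
A minimal, hypothetical illustration of the new 64-bit primitives: cmpxchg64() wraps the ldrexd/strexd loop in barriers, while cmpxchg64_relaxed()/cmpxchg64_local() use the raw form.

    /* Hypothetical: advance a 64-bit counter without taking a lock. */
    static u64 seq_advance(u64 *seq)
    {
            u64 old, new;

            do {
                    old = *seq;             /* a torn read only causes a retry */
                    new = old + 1;
            } while (cmpxchg64(seq, old, new) != old);

            return new;
    }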
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 9672e978d50d..acdde76b39bb 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -10,6 +10,7 @@
#define CPUID_TLBTYPE 3
#define CPUID_MPUIR 4
#define CPUID_MPIDR 5
+#define CPUID_REVIDR 6
#ifdef CONFIG_CPU_V7M
#define CPUID_EXT_PFR0 0x40
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 5b579b951503..e701a4d9aa59 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,17 +11,28 @@
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
#define DMA_ERROR_CODE (~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &arm_dma_ops;
}
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (xen_initial_domain())
+ return xen_dma_ops;
+ else
+ return __generic_dma_ops(dev);
+}
+
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
BUG_ON(!dev);
@@ -64,6 +75,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
+
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
@@ -86,6 +98,46 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
}
#endif
+/* The ARM override for dma_max_pfn() */
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+ return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+}
+#define dma_max_pfn(dev) dma_max_pfn(dev)
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ unsigned int offset = paddr & ~PAGE_MASK;
+ return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+ unsigned int offset = dev_addr & ~PAGE_MASK;
+ return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ u64 limit, mask;
+
+ if (!dev->dma_mask)
+ return 0;
+
+ mask = *dev->dma_mask;
+
+ limit = (mask + 1) & ~mask;
+ if (limit && size > limit)
+ return 0;
+
+ if ((addr | (addr + size - 1)) & ~mask)
+ return 0;
+
+ return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) { }
+
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df63..fe3ea776dc34 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 8
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 0cf7a6b842ff..ad774f37c47c 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -24,8 +24,8 @@
#define TRACER_TIMEOUT 10000
#define etm_writel(t, v, x) \
- (__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+ (writel_relaxed((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
/* CoreSight Management Registers */
#define CSMR_LOCKACCESS 0xfb0
@@ -142,8 +142,8 @@
#define ETBFF_TRIGFL BIT(10)
#define etb_writel(t, v, x) \
- (__raw_writel((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
+ (writel_relaxed((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etm_unlock(t) \
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d070741b2b37..3c597c222ef2 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -24,9 +24,11 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
+#include <xen/xen.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -372,6 +374,13 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
#define BIOVEC_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+struct bio_vec;
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
+ (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 48066ce9ea34..0a9d5dd93294 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -11,6 +11,7 @@
#define __ARM_KGDB_H__
#include <linux/ptrace.h>
+#include <asm/opcodes.h>
/*
* GDB assumes that we're a user process being debugged, so
@@ -41,7 +42,7 @@
static inline void arch_kgdb_breakpoint(void)
{
- asm(".word 0xe7ffdeff");
+ asm(__inst_arm(0xe7ffdeff));
}
extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 64e96960de29..1d3153c7eb41 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -57,6 +57,7 @@
* TSC: Trap SMC
* TSW: Trap cache operations by set/way
* TWI: Trap WFI
+ * TWE: Trap WFE
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
 * FB: Force broadcast of all maintenance operations
@@ -67,7 +68,7 @@
*/
#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_SWIO | HCR_TIDCP)
+ HCR_TWE | HCR_SWIO | HCR_TIDCP)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
/* System Control Register (SCTLR) bits */
@@ -95,12 +96,12 @@
#define TTBCR_IRGN1 (3 << 24)
#define TTBCR_EPD1 (1 << 23)
#define TTBCR_A1 (1 << 22)
-#define TTBCR_T1SZ (3 << 16)
+#define TTBCR_T1SZ (7 << 16)
#define TTBCR_SH0 (3 << 12)
#define TTBCR_ORGN0 (3 << 10)
#define TTBCR_IRGN0 (3 << 8)
#define TTBCR_EPD0 (1 << 7)
-#define TTBCR_T0SZ 3
+#define TTBCR_T0SZ (7 << 0)
#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
/* Hyp System Trap Register */
@@ -208,6 +209,8 @@
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
+#define HSR_WFI_IS_WFE (1U << 0)
+
#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
#define HSR_DABT_S1PTW (1U << 7)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index a2f43ddcc300..661da11f76f4 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -39,7 +39,7 @@
#define c6_IFAR 17 /* Instruction Fault Address Register */
#define c7_PAR 18 /* Physical Address Register */
#define c7_PAR_high 19 /* PAR top 32 bits */
-#define c9_L2CTLR 20 /* Cortex A15 L2 Control Register */
+#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */
#define c10_PRRR 21 /* Primary Region Remap Register */
#define c10_NMRR 22 /* Normal Memory Remap Register */
#define c12_VBAR 23 /* Vector Base Address Register */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a464e8d7b6c5..0fa90c962ac8 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -157,4 +157,55 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.cp15[c0_MPIDR];
+}
+
+static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+{
+ *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+}
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+ return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
+}
+
+static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+ unsigned long data,
+ unsigned int len)
+{
+ if (kvm_vcpu_is_be(vcpu)) {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return be16_to_cpu(data & 0xffff);
+ default:
+ return be32_to_cpu(data);
+ }
+ }
+
+ return data; /* Leave LE untouched */
+}
+
+static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+ unsigned long data,
+ unsigned int len)
+{
+ if (kvm_vcpu_is_be(vcpu)) {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return cpu_to_be16(data & 0xffff);
+ default:
+ return cpu_to_be32(data);
+ }
+ }
+
+ return data; /* Leave LE untouched */
+}
+
#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 7d22517d8071..8a6f6db14ee4 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -38,11 +38,6 @@
#define KVM_VCPU_MAX_FEATURES 1
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x) 0
-#define KVM_NR_PAGE_SIZES 1
-#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
-
#include <kvm/arm_vgic.h>
struct kvm_vcpu;
@@ -154,6 +149,7 @@ struct kvm_vcpu_stat {
struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
struct kvm_one_reg;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 9b28c41f4ba9..77de4a41cc50 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -62,6 +62,12 @@ phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
+static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
+{
+ *pmd = new_pmd;
+ flush_pmd_entry(pmd);
+}
+
static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
*pte = new_pte;
@@ -103,9 +109,15 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
pte_val(*pte) |= L_PTE_S2_RDWR;
}
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+ pmd_val(*pmd) |= L_PMD_S2_RDWR;
+}
+
struct kvm;
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
+ unsigned long size)
{
/*
* If we are going to insert an instruction page and the icache is
@@ -120,8 +132,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
*/
if (icache_is_pipt()) {
- unsigned long hva = gfn_to_hva(kvm, gfn);
- __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+ __cpuc_coherent_user_range(hva, hva + size);
} else if (!icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 402a2bc6aa68..17a3fa2979e8 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -49,6 +49,7 @@ struct machine_desc {
bool (*smp_init)(void);
void (*fixup)(struct tag *, char **,
struct meminfo *);
+ void (*init_meminfo)(void);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
void (*init_early)(void);
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index fc82a88f5b69..608516ebabfe 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -42,6 +42,14 @@ extern void mcpm_entry_point(void);
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
/*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated. The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
* CPU/cluster power operations API for higher subsystems to use.
*/
@@ -81,10 +89,40 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
*
* This will return if mcpm_platform_register() has not been called
* previously in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * mcpm_cpu_power_down_finish() subsequently returns zero for the
+ * specified cpu. Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
*/
void mcpm_cpu_power_down(void);
/**
+ * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
+ * make sure it is powered off
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ * - zero if the CPU is in a safely parked state
+ * - nonzero otherwise (e.g., timeout)
+ */
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
+
+/**
* mcpm_cpu_suspend - bring the calling CPU in a suspended state
*
* @expected_residency: duration in microseconds the CPU is expected
@@ -126,6 +164,7 @@ int mcpm_cpu_powered_up(void);
struct mcpm_platform_ops {
int (*power_up)(unsigned int cpu, unsigned int cluster);
void (*power_down)(void);
+ int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
void (*suspend)(u64);
void (*powered_up)(void);
};
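
A hedged sketch of the power_down/power_down_finish pairing described in the kerneldoc above, as it might appear in a hot-unplug path. Only the mcpm_* calls come from this header; cpu_logical_map() and MPIDR_AFFINITY_LEVEL() live elsewhere in the tree, and the hook names are illustrative:

    /* Hypothetical smp_operations hooks for an MCPM-based platform. */
    static void my_cpu_die(unsigned int cpu)
    {
            /* Runs on the dying CPU; does not return on success. */
            mcpm_cpu_power_down();
    }

    static int my_cpu_kill(unsigned int cpu)
    {
            unsigned int mpidr = cpu_logical_map(cpu);
            unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
            unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

            /* Runs on a surviving CPU: wait until the target is safely parked. */
            return !mcpm_cpu_power_down_finish(pcpu, pcluster);
    }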
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e750a938fd3c..4dd21457ef9d 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -172,8 +172,13 @@
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
+#define __PV_BITS_7_0 0x81
+
+extern u64 __pv_phys_offset;
+extern u64 __pv_offset;
+extern void fixup_pv_table(const void *, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
-extern unsigned long __pv_phys_offset;
#define PHYS_OFFSET __pv_phys_offset
#define __pv_stub(from,to,instr,type) \
@@ -185,22 +190,58 @@ extern unsigned long __pv_phys_offset;
: "=r" (to) \
: "r" (from), "I" (type))
-static inline unsigned long __virt_to_phys(unsigned long x)
+#define __pv_stub_mov_hi(t) \
+ __asm__ volatile("@ __pv_stub_mov\n" \
+ "1: mov %R0, %1\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 1b\n" \
+ " .popsection\n" \
+ : "=r" (t) \
+ : "I" (__PV_BITS_7_0))
+
+#define __pv_add_carry_stub(x, y) \
+ __asm__ volatile("@ __pv_add_carry_stub\n" \
+ "1: adds %Q0, %1, %2\n" \
+ " adc %R0, %R0, #0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 1b\n" \
+ " .popsection\n" \
+ : "+r" (y) \
+ : "r" (x), "I" (__PV_BITS_31_24) \
+ : "cc")
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
{
- unsigned long t;
- __pv_stub(x, t, "add", __PV_BITS_31_24);
+ phys_addr_t t;
+
+ if (sizeof(phys_addr_t) == 4) {
+ __pv_stub(x, t, "add", __PV_BITS_31_24);
+ } else {
+ __pv_stub_mov_hi(t);
+ __pv_add_carry_stub(x, t);
+ }
return t;
}
-static inline unsigned long __phys_to_virt(unsigned long x)
+static inline unsigned long __phys_to_virt(phys_addr_t x)
{
unsigned long t;
__pv_stub(x, t, "sub", __PV_BITS_31_24);
return t;
}
+
#else
-#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+ return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+ return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
#endif
#endif
#endif /* __ASSEMBLY__ */
@@ -238,16 +279,33 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
static inline void *phys_to_virt(phys_addr_t x)
{
- return (void *)(__phys_to_virt((unsigned long)(x)));
+ return (void *)__phys_to_virt(x);
}
/*
* Drivers should NOT use these either.
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
-#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
+
+/*
+ * These are for systems that have a hardware interconnect supported alias of
+ * physical memory for idmap purposes. Most cases should leave these
+ * untouched.
+ */
+static inline phys_addr_t __virt_to_idmap(unsigned long x)
+{
+ if (arch_virt_to_idmap)
+ return arch_virt_to_idmap(x);
+ else
+ return __virt_to_phys(x);
+}
+
+#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
+
/*
* Virtual <-> DMA view memory address translations
* Again, these are *only* valid on the kernel direct mapped RAM
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 6f18da09668b..64fd15159b7d 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -16,7 +16,7 @@ typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
-#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
+#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
#else
#define ASID(mm) (0)
#endif
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 943504f53f57..78a779361682 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -102,12 +102,14 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
#else
pte = alloc_pages(PGALLOC_GFP, 0);
#endif
- if (pte) {
- if (!PageHighMem(pte))
- clean_pte_table(page_address(pte));
- pgtable_page_ctor(pte);
+ if (!pte)
+ return NULL;
+ if (!PageHighMem(pte))
+ clean_pte_table(page_address(pte));
+ if (!pgtable_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
}
-
return pte;
}
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386ee..86a659a19526 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -181,6 +181,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+/*
+ * We don't have huge page support for short descriptors, for the moment
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd) (0)
+#define pmd_thp_or_huge(pmd) (0)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5689c18c85f5..4f9503908dca 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -126,6 +126,8 @@
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+
/*
* Hyp-mode PL2 PTE definitions for LPAE.
*/
@@ -206,6 +208,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 413f3876341c..c3d5fc124a05 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -22,6 +22,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
+#include <asm/unified.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -87,6 +88,17 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
+#ifdef CONFIG_SMP
+#define __ALT_SMP_ASM(smp, up) \
+ "9998: " smp "\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .long 9998b\n" \
+ " " up "\n" \
+ " .popsection\n"
+#else
+#define __ALT_SMP_ASM(smp, up) up
+#endif
+
/*
* Prefetching support - only ARMv5.
*/
@@ -97,17 +109,22 @@ static inline void prefetch(const void *ptr)
{
__asm__ __volatile__(
"pld\t%a0"
- :
- : "p" (ptr)
- : "cc");
+ :: "p" (ptr));
}
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
-#define prefetchw(ptr) prefetch(ptr)
-
-#define ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) do { } while (0)
-
+static inline void prefetchw(const void *ptr)
+{
+ __asm__ __volatile__(
+ ".arch_extension mp\n"
+ __ALT_SMP_ASM(
+ WASM(pldw) "\t%a0",
+ WASM(pld) "\t%a0"
+ )
+ :: "p" (ptr));
+}
+#endif
#endif
#define HAVE_ARCH_PICK_MMAP_LAYOUT
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index c50f05609501..8d6a089dfb76 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -49,7 +49,7 @@ extern struct meminfo meminfo;
#define bank_phys_end(bank) ((bank)->start + (bank)->size)
#define bank_phys_size(bank) (bank)->size
-extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
+extern int arm_add_memory(u64 start, u64 size);
extern void early_print(const char *str, ...);
extern void dump_machine_table(void);
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a8cae71caceb..22a3b9b5d4a1 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -84,6 +84,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
struct smp_operations {
#ifdef CONFIG_SMP
/*
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c28060c9a..ef3c6072aa45 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,21 +5,13 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
-#include <asm/processor.h>
+#include <linux/prefetch.h>
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
-#define ALT_SMP(smp, up) \
- "9998: " smp "\n" \
- " .pushsection \".alt.smp.init\", \"a\"\n" \
- " .long 9998b\n" \
- " " up "\n" \
- " .popsection\n"
-
#ifdef CONFIG_THUMB2_KERNEL
-#define SEV ALT_SMP("sev.w", "nop.w")
/*
* For Thumb-2, special care is needed to ensure that the conditional WFE
* instruction really does assemble to exactly 4 bytes (as required by
@@ -31,17 +23,18 @@
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
-#define WFE(cond) ALT_SMP( \
+#define WFE(cond) __ALT_SMP_ASM( \
"it " cond "\n\t" \
"wfe" cond ".n", \
\
"nop.w" \
)
#else
-#define SEV ALT_SMP("sev", "nop")
-#define WFE(cond) ALT_SMP("wfe" cond, "nop")
+#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
#endif
+#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
+
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
@@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
u32 newval;
arch_spinlock_t lockval;
+ prefetchw(&lock->slock);
__asm__ __volatile__(
"1: ldrex %0, [%3]\n"
" add %1, %0, %4\n"
@@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
unsigned long contended, res;
u32 slock;
+ prefetchw(&lock->slock);
do {
__asm__ __volatile__(
" ldrex %0, [%3]\n"
@@ -127,10 +122,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
dsb_sev();
}
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.owner == lock.tickets.next;
+}
+
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
- return tickets.owner != tickets.next;
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -152,6 +151,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -170,6 +170,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long contended, res;
+ prefetchw(&rw->lock);
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
@@ -203,7 +204,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
}
/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
/*
* Read locks are a bit more hairy:
@@ -221,6 +222,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
@@ -241,6 +243,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
smp_mb();
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
@@ -259,6 +262,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long contended, res;
+ prefetchw(&rw->lock);
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
@@ -280,7 +284,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
}
/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index b262d2f8b478..47663fcb10ad 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -25,7 +25,7 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
typedef struct {
- volatile unsigned int lock;
+ u32 lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 38960264040c..def9e570199f 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -560,37 +560,6 @@ static inline void __flush_bp_all(void)
asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
}
-#include <asm/cputype.h>
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline int erratum_a15_798181(void)
-{
- unsigned int midr = read_cpuid_id();
-
- /* Cortex-A15 r0p0..r3p2 affected */
- if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
- return 0;
- return 1;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
- /*
- * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
- */
- asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
- dsb(ish);
-}
-#else
-static inline int erratum_a15_798181(void)
-{
- return 0;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-}
-#endif
-
/*
* flush_pmd_entry
*
@@ -697,4 +666,21 @@ extern void flush_bp_all(void);
#endif
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+ if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+ erratum_a15_798181_handler))
+ return erratum_a15_798181_handler();
+ return false;
+}
+#endif
+
#endif
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index f5989f46b4d2..b88beaba6b4a 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -38,6 +38,8 @@
#ifdef __ASSEMBLY__
#define W(instr) instr.w
#define BSYM(sym) sym + 1
+#else
+#define WASM(instr) #instr ".w"
#endif
#else /* !CONFIG_THUMB2_KERNEL */
@@ -50,6 +52,8 @@
#ifdef __ASSEMBLY__
#define W(instr) instr
#define BSYM(sym) sym
+#else
+#define WASM(instr) #instr
#endif
#endif /* CONFIG_THUMB2_KERNEL */
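WASM() is the C-side counterpart of W(): it stringifies a mnemonic and, on Thumb-2 kernels, appends the .w width suffix so inline assembly picks the 32-bit encoding. A sketch of the expansion outside the kernel (both variants given distinct names here purely for illustration):

#include <stdio.h>

#define WASM_THUMB2(instr)	#instr ".w"	/* CONFIG_THUMB2_KERNEL case */
#define WASM_ARM(instr)		#instr		/* plain ARM case */

int main(void)
{
	printf("thumb2 mnemonic: %s\n", WASM_THUMB2(ldrex));	/* "ldrex.w" */
	printf("arm mnemonic:    %s\n", WASM_ARM(ldrex));	/* "ldrex"   */
	return 0;
}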
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index d7ab99a0c9eb..1317ee40f4df 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -16,4 +16,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return PARAVIRT_LAZY_NONE;
}
+extern struct dma_map_ops *xen_dma_ops;
+
#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..1109017499e5
--- /dev/null
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (__generic_dma_ops(hwdev)->unmap_page)
+ __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+ __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (__generic_dma_ops(hwdev)->sync_single_for_device)
+ __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 359a7b50b158..75579a9d6f76 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
#include <linux/pfn.h>
#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <xen/xen.h>
#include <xen/interface/grant_table.h>
-#define pfn_to_mfn(pfn) (pfn)
#define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_pfn(mfn) (mfn)
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
#define pte_mfn pte_pfn
@@ -32,6 +32,38 @@ typedef struct xpaddr {
#define INVALID_P2M_ENTRY (~0UL)
+unsigned long __pfn_to_mfn(unsigned long pfn);
+unsigned long __mfn_to_pfn(unsigned long mfn);
+extern struct rb_root phys_to_mach;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+ unsigned long mfn;
+
+ if (phys_to_mach.rb_node != NULL) {
+ mfn = __pfn_to_mfn(pfn);
+ if (mfn != INVALID_P2M_ENTRY)
+ return mfn;
+ }
+
+ return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+ unsigned long pfn;
+
+ if (phys_to_mach.rb_node != NULL) {
+ pfn = __mfn_to_pfn(mfn);
+ if (pfn != INVALID_P2M_ENTRY)
+ return pfn;
+ }
+
+ return mfn;
+}
+
+#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -76,11 +108,9 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte)
return 0;
}
-static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return true;
-}
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+ unsigned long nr_pages);
static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
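pfn_to_mfn()/mfn_to_pfn() stop being identity macros: if the rbtree rooted at phys_to_mach records a translation it is used, otherwise the frame number is returned unchanged. A userspace sketch of that 1:1-with-exceptions lookup, with a small array standing in for the rbtree:

#include <stdio.h>

#define INVALID_P2M_ENTRY	(~0UL)
#define P2M_SLOTS		16

static unsigned long p2m[P2M_SLOTS] = {
	[0 ... P2M_SLOTS - 1] = INVALID_P2M_ENTRY,
};

static unsigned long pfn_to_mfn_model(unsigned long pfn)
{
	if (pfn < P2M_SLOTS && p2m[pfn] != INVALID_P2M_ENTRY)
		return p2m[pfn];
	return pfn;			/* identity fallback */
}

int main(void)
{
	p2m[3] = 42;			/* one recorded foreign mapping */
	printf("pfn 3 -> mfn %lu\n", pfn_to_mfn_model(3));
	printf("pfn 5 -> mfn %lu\n", pfn_to_mfn_model(5));
	return 0;
}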
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
new file mode 100644
index 000000000000..2265a199280c
--- /dev/null
+++ b/arch/arm/include/debug/efm32.S
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define UARTn_CMD 0x000c
+#define UARTn_CMD_TXEN 0x0004
+
+#define UARTn_STATUS 0x0010
+#define UARTn_STATUS_TXC 0x0020
+#define UARTn_STATUS_TXBL 0x0040
+
+#define UARTn_TXDATA 0x0034
+
+ .macro addruart, rx, tmp
+ ldr \rx, =(CONFIG_DEBUG_UART_PHYS)
+
+ /*
+ * enable TX. The driver might disable it to save energy. We
+ * don't care about disabling at the end as during debug power
+ * consumption isn't that important.
+ */
+ ldr \tmp, =(UARTn_CMD_TXEN)
+ str \tmp, [\rx, #UARTn_CMD]
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx, #UARTn_TXDATA]
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #UARTn_STATUS]
+ tst \rd, #UARTn_STATUS_TXBL
+ beq 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #UARTn_STATUS]
+ tst \rd, #UARTn_STATUS_TXC
+ bne 1001b
+ .endm
diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S
index 9166e1bc470e..9d653d475903 100644
--- a/arch/arm/include/debug/msm.S
+++ b/arch/arm/include/debug/msm.S
@@ -46,6 +46,11 @@
#define MSM_DEBUG_UART_PHYS 0x16440000
#endif
+#ifdef CONFIG_DEBUG_MSM8974_UART
+#define MSM_DEBUG_UART_BASE 0xFA71E000
+#define MSM_DEBUG_UART_PHYS 0xF991E000
+#endif
+
.macro addruart, rp, rv, tmp
#ifdef MSM_DEBUG_UART_PHYS
ldr \rp, =MSM_DEBUG_UART_PHYS
diff --git a/arch/arm/include/debug/pl01x.S b/arch/arm/include/debug/pl01x.S
index 37c6895b87e6..92ef808a2337 100644
--- a/arch/arm/include/debug/pl01x.S
+++ b/arch/arm/include/debug/pl01x.S
@@ -25,12 +25,14 @@
.macro waituart,rd,rx
1001: ldr \rd, [\rx, #UART01x_FR]
+ ARM_BE8( rev \rd, \rd )
tst \rd, #UART01x_FR_TXFF
bne 1001b
.endm
.macro busyuart,rd,rx
1001: ldr \rd, [\rx, #UART01x_FR]
+ ARM_BE8( rev \rd, \rd )
tst \rd, #UART01x_FR_BUSY
bne 1001b
.endm
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 18d76fd5a2af..70a1c9da30ca 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -7,6 +7,7 @@ header-y += hwcap.h
header-y += ioctls.h
header-y += kvm_para.h
header-y += mman.h
+header-y += perf_regs.h
header-y += posix_types.h
header-y += ptrace.h
header-y += setup.h
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index c1ee007523d7..c498b60c0505 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -63,7 +63,8 @@ struct kvm_regs {
/* Supported Processor Types */
#define KVM_ARM_TARGET_CORTEX_A15 0
-#define KVM_ARM_NUM_TARGETS 1
+#define KVM_ARM_TARGET_CORTEX_A7 1
+#define KVM_ARM_NUM_TARGETS 2
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
diff --git a/arch/arm/include/uapi/asm/perf_regs.h b/arch/arm/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..ce59448458b2
--- /dev/null
+++ b/arch/arm/include/uapi/asm/perf_regs.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_ARM_PERF_REGS_H
+#define _ASM_ARM_PERF_REGS_H
+
+enum perf_event_arm_regs {
+ PERF_REG_ARM_R0,
+ PERF_REG_ARM_R1,
+ PERF_REG_ARM_R2,
+ PERF_REG_ARM_R3,
+ PERF_REG_ARM_R4,
+ PERF_REG_ARM_R5,
+ PERF_REG_ARM_R6,
+ PERF_REG_ARM_R7,
+ PERF_REG_ARM_R8,
+ PERF_REG_ARM_R9,
+ PERF_REG_ARM_R10,
+ PERF_REG_ARM_FP,
+ PERF_REG_ARM_IP,
+ PERF_REG_ARM_SP,
+ PERF_REG_ARM_LR,
+ PERF_REG_ARM_PC,
+ PERF_REG_ARM_MAX,
+};
+#endif /* _ASM_ARM_PERF_REGS_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5140df5f23aa..a30fc9be9e9e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,7 +17,8 @@ CFLAGS_REMOVE_return_address.o = -pg
obj-y := elf.o entry-common.o irq.o opcodes.o \
process.o ptrace.o return_address.o \
- setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
+ setup.o signal.o sigreturn_codes.o \
+ stacktrace.o sys_arm.o time.o traps.o
obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
+obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 60d3b738d420..1f031ddd0667 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -155,4 +155,5 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_offset);
+EXPORT_SYMBOL(__pv_offset);
#endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 9cbe70c8b0ef..b3fb8c9e1ff2 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -192,6 +192,7 @@ __dabt_svc:
svc_entry
mov r2, sp
dabt_helper
+ THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__dabt_svc)
@@ -416,9 +417,8 @@ __und_usr:
bne __und_usr_thumb
sub r4, r2, #4 @ ARM instr at LR - 4
1: ldrt r0, [r4]
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r0, r0 @ little endian instruction
-#endif
+ ARM_BE8(rev r0, r0) @ little endian instruction
+
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc6bd9683ba4..a2dcafdf1bc8 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -393,9 +393,7 @@ ENTRY(vector_swi)
#else
USER( ldr r10, [lr, #-4] ) @ get SWI instruction
#endif
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r10, r10 @ little endian instruction
-#endif
+ ARM_BE8(rev r10, r10) @ little endian instruction
#elif defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 476de57dcef2..7801866e626a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -77,6 +77,7 @@
__HEAD
ENTRY(stext)
+ ARM_BE8(setend be ) @ ensure we are in BE8 mode
THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
@@ -352,6 +353,9 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
+
+ ARM_BE8(setend be) @ ensure we are in BE8 mode
+
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
@@ -555,6 +559,14 @@ ENTRY(fixup_smp)
ldmfd sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
+#ifdef __ARMEB__
+#define LOW_OFFSET 0x4
+#define HIGH_OFFSET 0x0
+#else
+#define LOW_OFFSET 0x0
+#define HIGH_OFFSET 0x4
+#endif
+
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
/* __fixup_pv_table - patch the stub instructions with the delta between
@@ -565,17 +577,20 @@ ENDPROC(fixup_smp)
__HEAD
__fixup_pv_table:
adr r0, 1f
- ldmia r0, {r3-r5, r7}
- sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
+ ldmia r0, {r3-r7}
+ mvn ip, #0
+ subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
add r4, r4, r3 @ adjust table start address
add r5, r5, r3 @ adjust table end address
- add r7, r7, r3 @ adjust __pv_phys_offset address
- str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset
+ add r6, r6, r3 @ adjust __pv_phys_offset address
+ add r7, r7, r3 @ adjust __pv_offset address
+ str r8, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_offset
+ strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
THUMB( it ne @ cross section branch )
bne __error
- str r6, [r7, #4] @ save to __pv_offset
+ str r3, [r7, #LOW_OFFSET] @ save to __pv_offset low bits
b __fixup_a_pv_table
ENDPROC(__fixup_pv_table)
@@ -584,10 +599,19 @@ ENDPROC(__fixup_pv_table)
.long __pv_table_begin
.long __pv_table_end
2: .long __pv_phys_offset
+ .long __pv_offset
.text
__fixup_a_pv_table:
+ adr r0, 3f
+ ldr r6, [r0]
+ add r6, r6, r3
+ ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word
+ ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word
+ mov r6, r6, lsr #24
+ cmn r0, #1
#ifdef CONFIG_THUMB2_KERNEL
+ moveq r0, #0x200000 @ set bit 21, mov to mvn instruction
lsls r6, #24
beq 2f
clz r7, r6
@@ -601,18 +625,42 @@ __fixup_a_pv_table:
b 2f
1: add r7, r3
ldrh ip, [r7, #2]
- and ip, 0x8f00
- orr ip, r6 @ mask in offset bits 31-24
+ARM_BE8(rev16 ip, ip)
+ tst ip, #0x4000
+ and ip, #0x8f00
+ orrne ip, r6 @ mask in offset bits 31-24
+ orreq ip, r0 @ mask in offset bits 7-0
+ARM_BE8(rev16 ip, ip)
strh ip, [r7, #2]
+ bne 2f
+ ldrh ip, [r7]
+ARM_BE8(rev16 ip, ip)
+ bic ip, #0x20
+ orr ip, ip, r0, lsr #16
+ARM_BE8(rev16 ip, ip)
+ strh ip, [r7]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
bcc 1b
bx lr
#else
+ moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
b 2f
1: ldr ip, [r7, r3]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ @ in BE8, we load data in BE, but instructions still in LE
+ bic ip, ip, #0xff000000
+ tst ip, #0x000f0000 @ check the rotation field
+ orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
+ biceq ip, ip, #0x00004000 @ clear bit 22
+ orreq ip, ip, r0, lsl #24 @ mask in offset bits 7-0
+#else
bic ip, ip, #0x000000ff
- orr ip, ip, r6 @ mask in offset bits 31-24
+ tst ip, #0xf00 @ check the rotation field
+ orrne ip, ip, r6 @ mask in offset bits 31-24
+ biceq ip, ip, #0x400000 @ clear bit 22
+ orreq ip, ip, r0 @ mask in offset bits 7-0
+#endif
str ip, [r7, r3]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
@@ -621,28 +669,30 @@ __fixup_a_pv_table:
#endif
ENDPROC(__fixup_a_pv_table)
+ .align
+3: .long __pv_offset
+
ENTRY(fixup_pv_table)
stmfd sp!, {r4 - r7, lr}
- ldr r2, 2f @ get address of __pv_phys_offset
mov r3, #0 @ no offset
mov r4, r0 @ r0 = table start
add r5, r0, r1 @ r1 = table size
- ldr r6, [r2, #4] @ get __pv_offset
bl __fixup_a_pv_table
ldmfd sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)
- .align
-2: .long __pv_phys_offset
-
.data
.globl __pv_phys_offset
.type __pv_phys_offset, %object
__pv_phys_offset:
- .long 0
- .size __pv_phys_offset, . - __pv_phys_offset
+ .quad 0
+ .size __pv_phys_offset, . -__pv_phys_offset
+
+ .globl __pv_offset
+ .type __pv_offset, %object
__pv_offset:
- .long 0
+ .quad 0
+ .size __pv_offset, . -__pv_offset
#endif
#include "head-common.S"
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7b95de601357..3d446605cbf8 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -344,13 +344,13 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
/* Breakpoint */
ctrl_base = ARM_BASE_BCR;
val_base = ARM_BASE_BVR;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
} else {
/* Watchpoint */
ctrl_base = ARM_BASE_WCR;
val_base = ARM_BASE_WVR;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -396,12 +396,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
/* Breakpoint */
base = ARM_BASE_BCR;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
} else {
/* Watchpoint */
base = ARM_BASE_WCR;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -697,7 +697,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < core_num_wrps; ++i) {
rcu_read_lock();
@@ -768,7 +768,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
struct perf_event *wp, **slots;
struct arch_hw_breakpoint *info;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < core_num_wrps; ++i) {
rcu_read_lock();
@@ -802,7 +802,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
/* The exception entry code places the amended lr in the PC. */
addr = regs->ARM_pc;
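The __get_cpu_var() casts are replaced by this_cpu_ptr(), which already yields a pointer to the current CPU's copy of the per-CPU array. A userspace model of what both spellings compute, with an explicit array standing in for the per-CPU area (names illustrative):

#include <stdio.h>

#define NR_CPUS 4

static int bp_on_reg[NR_CPUS][16];	/* stand-in for the per-CPU slot arrays */
static int current_cpu;			/* stand-in for smp_processor_id() */

static int *this_cpu_slots(void)	/* models this_cpu_ptr(bp_on_reg) */
{
	return bp_on_reg[current_cpu];
}

int main(void)
{
	int *slots;

	current_cpu = 2;
	slots = this_cpu_slots();
	slots[0] = 1;			/* "install" a breakpoint in slot 0 */
	printf("cpu 2, slot 0: %d\n", bp_on_reg[2][0]);
	return 0;
}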
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 170e9f34003f..a7b621ece23d 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -171,13 +171,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
}
static void __kprobes
@@ -421,10 +421,10 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
continue;
if (ri->rp && ri->rp->handler) {
- __get_cpu_var(current_kprobe) = &ri->rp->kp;
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
- __get_cpu_var(current_kprobe) = NULL;
+ __this_cpu_write(current_kprobe, NULL);
}
orig_ret_address = (unsigned long)ri->ret_addr;
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 084dc8896986..45e478157278 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -24,6 +24,7 @@
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
+#include <asm/opcodes.h>
#ifdef CONFIG_XIP_KERNEL
/*
@@ -40,7 +41,7 @@
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
@@ -60,6 +61,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
Elf32_Sym *sym;
const char *symname;
s32 offset;
+ u32 tmp;
#ifdef CONFIG_THUMB2_KERNEL
u32 upper, lower, sign, j1, j2;
#endif
@@ -95,7 +97,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
- offset = (*(u32 *)loc & 0x00ffffff) << 2;
+ offset = __mem_to_opcode_arm(*(u32 *)loc);
+ offset = (offset & 0x00ffffff) << 2;
if (offset & 0x02000000)
offset -= 0x04000000;
@@ -111,9 +114,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
}
offset >>= 2;
+ offset &= 0x00ffffff;
- *(u32 *)loc &= 0xff000000;
- *(u32 *)loc |= offset & 0x00ffffff;
+ *(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
+ *(u32 *)loc |= __opcode_to_mem_arm(offset);
break;
case R_ARM_V4BX:
@@ -121,8 +125,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
* other bits to re-code instruction as
* MOV PC,Rm.
*/
- *(u32 *)loc &= 0xf000000f;
- *(u32 *)loc |= 0x01a0f000;
+ *(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
+ *(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
break;
case R_ARM_PREL31:
@@ -132,7 +136,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVT_ABS:
- offset = *(u32 *)loc;
+ offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
offset = (offset ^ 0x8000) - 0x8000;
@@ -140,16 +144,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
offset >>= 16;
- *(u32 *)loc &= 0xfff0f000;
- *(u32 *)loc |= ((offset & 0xf000) << 4) |
- (offset & 0x0fff);
+ tmp &= 0xfff0f000;
+ tmp |= ((offset & 0xf000) << 4) |
+ (offset & 0x0fff);
+
+ *(u32 *)loc = __opcode_to_mem_arm(tmp);
break;
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
- upper = *(u16 *)loc;
- lower = *(u16 *)(loc + 2);
+ upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+ lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
/*
* 25 bit signed address range (Thumb-2 BL and B.W
@@ -198,17 +204,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
sign = (offset >> 24) & 1;
j1 = sign ^ (~(offset >> 23) & 1);
j2 = sign ^ (~(offset >> 22) & 1);
- *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+ upper = (u16)((upper & 0xf800) | (sign << 10) |
((offset >> 12) & 0x03ff));
- *(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
- (j1 << 13) | (j2 << 11) |
- ((offset >> 1) & 0x07ff));
+ lower = (u16)((lower & 0xd000) |
+ (j1 << 13) | (j2 << 11) |
+ ((offset >> 1) & 0x07ff));
+
+ *(u16 *)loc = __opcode_to_mem_thumb16(upper);
+ *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
break;
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
- upper = *(u16 *)loc;
- lower = *(u16 *)(loc + 2);
+ upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+ lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
/*
* MOVT/MOVW instructions encoding in Thumb-2:
@@ -229,12 +238,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
offset >>= 16;
- *(u16 *)loc = (u16)((upper & 0xfbf0) |
- ((offset & 0xf000) >> 12) |
- ((offset & 0x0800) >> 1));
- *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
- ((offset & 0x0700) << 4) |
- (offset & 0x00ff));
+ upper = (u16)((upper & 0xfbf0) |
+ ((offset & 0xf000) >> 12) |
+ ((offset & 0x0800) >> 1));
+ lower = (u16)((lower & 0x8f00) |
+ ((offset & 0x0700) << 4) |
+ (offset & 0x00ff));
+ *(u16 *)loc = __opcode_to_mem_thumb16(upper);
+ *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
break;
#endif
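Relocation patching now round-trips every instruction word through __mem_to_opcode_arm()/__opcode_to_mem_arm() (and the thumb16 variants), because on a BE8 kernel data loads see instruction words byte-swapped. A userspace sketch of that round trip, with the swap forced on to model the BE8 case:

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

/* modelling the CONFIG_CPU_ENDIAN_BE8 case, where both helpers swap */
#define __mem_to_opcode_arm(x)	swab32(x)
#define __opcode_to_mem_arm(x)	swab32(x)

int main(void)
{
	uint32_t mem  = __opcode_to_mem_arm(0xeb000000);	/* an ARM BL, as stored */
	uint32_t insn = __mem_to_opcode_arm(mem);		/* back in opcode order */

	insn = (insn & 0xff000000) | 0x00123456;		/* patch the 24-bit offset */
	mem  = __opcode_to_mem_arm(insn);

	printf("opcode 0x%08x stored as 0x%08x\n", insn, mem);
	return 0;
}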
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e186ee1e63f6..bc3f2efa0d86 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -256,12 +256,11 @@ validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct pmu *leader_pmu = event->group_leader->pmu;
if (is_software_event(event))
return 1;
- if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ if (event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 8d6147b2001f..d85055cd24ba 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
{
- return &__get_cpu_var(cpu_hw_events);
+ return this_cpu_ptr(&cpu_hw_events);
}
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
new file mode 100644
index 000000000000..6e4379c67cbc
--- /dev/null
+++ b/arch/arm/kernel/perf_regs.c
@@ -0,0 +1,30 @@
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM_MAX))
+ return 0;
+
+ return regs->uregs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ return PERF_SAMPLE_REGS_ABI_32;
+}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 5d65438685d8..6a1b8a81b1ae 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -73,6 +73,8 @@ __setup("fpe=", fpe_setup);
#endif
extern void paging_init(const struct machine_desc *desc);
+extern void early_paging_init(const struct machine_desc *,
+ struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);
@@ -599,6 +601,8 @@ static void __init setup_processor(void)
elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
+ erratum_a15_798181_init();
+
feat_v6_fixup();
cacheid_init();
@@ -619,9 +623,10 @@ void __init dump_machine_table(void)
/* can't use cpu_relax() here as it may require MMU setup */;
}
-int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
+int __init arm_add_memory(u64 start, u64 size)
{
struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+ u64 aligned_start;
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
@@ -634,10 +639,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
* Size is appropriately rounded down, start is rounded up.
*/
size -= start & ~PAGE_MASK;
- bank->start = PAGE_ALIGN(start);
+ aligned_start = PAGE_ALIGN(start);
-#ifndef CONFIG_ARM_LPAE
- if (bank->start + size < bank->start) {
+#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+ if (aligned_start > ULONG_MAX) {
+ printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
+ "32-bit physical address space\n", (long long)start);
+ return -EINVAL;
+ }
+
+ if (aligned_start + size > ULONG_MAX) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
/*
@@ -645,10 +656,11 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
* 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
* This means we lose a page after masking.
*/
- size = ULONG_MAX - bank->start;
+ size = ULONG_MAX - aligned_start;
}
#endif
+ bank->start = aligned_start;
bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
/*
@@ -669,8 +681,8 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
static int __init early_mem(char *p)
{
static int usermem __initdata = 0;
- phys_addr_t size;
- phys_addr_t start;
+ u64 size;
+ u64 start;
char *endp;
/*
@@ -878,6 +890,8 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
+ early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
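arm_add_memory() and early_mem() switch to u64 so a mem= range that starts or ends beyond 4 GiB can be detected on a 32-bit phys_addr_t kernel instead of silently wrapping. A userspace sketch of the reject/truncate decisions, assuming 4 KiB pages and no LPAE:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096ULL
#define LIMIT_32BIT	0xffffffffULL		/* no LPAE/64-bit phys addresses */

static int add_memory_model(uint64_t start, uint64_t size)
{
	uint64_t aligned_start = (start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

	size -= start & (PAGE_SIZE - 1);
	if (aligned_start > LIMIT_32BIT) {
		printf("ignoring bank at 0x%llx\n", (unsigned long long)start);
		return -1;
	}
	if (aligned_start + size > LIMIT_32BIT) {
		size = LIMIT_32BIT - aligned_start;
		printf("truncating bank to 0x%llx bytes\n",
		       (unsigned long long)size);
	}
	return 0;
}

int main(void)
{
	add_memory_model(0x80000000ULL, 0x100000000ULL);	/* crosses 4 GiB */
	add_memory_model(0x100000000ULL, 0x40000000ULL);	/* entirely above it */
	return 0;
}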
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index ab3304225272..04d63880037f 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -21,29 +21,7 @@
#include <asm/unistd.h>
#include <asm/vfp.h>
-/*
- * For ARM syscalls, we encode the syscall number into the instruction.
- */
-#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-
-/*
- * With EABI, the syscall number has to be loaded into r7.
- */
-#define MOV_R7_NR_SIGRETURN (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-/*
- * For Thumb syscalls, we pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-static const unsigned long sigreturn_codes[7] = {
- MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
- MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
-};
+extern const unsigned long sigreturn_codes[7];
static unsigned long signal_return_offset;
@@ -375,12 +353,18 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
*/
thumb = handler & 1;
- if (thumb) {
- cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
- /* clear the If-Then Thumb-2 execution state */
- cpsr &= ~PSR_IT_MASK;
+ /*
+ * Clear the If-Then Thumb-2 execution state. The ARM spec
+ * requires this to be all 000s in ARM mode. The Snapdragon
+ * S4/Krait misbehaves on a Thumb=>ARM signal transition
+ * without this.
+ */
+ cpsr &= ~PSR_IT_MASK;
#endif
+
+ if (thumb) {
+ cpsr |= PSR_T_BIT;
} else
cpsr &= ~PSR_T_BIT;
}
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
new file mode 100644
index 000000000000..3c5d0f2170fd
--- /dev/null
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -0,0 +1,80 @@
+/*
+ * sigreturn_codes.S - code snippets for sigreturn syscalls
+ *
+ * Created by: Victor Kamensky, 2013-08-13
+ * Copyright: (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/unistd.h>
+
+/*
+ * For ARM syscalls, we encode the syscall number into the instruction.
+ * With EABI, the syscall number has to be loaded into r7. As result
+ * ARM syscall sequence snippet will have move and svc in .arm encoding
+ *
+ * For Thumb syscalls, we pass the syscall number via r7. We therefore
+ * need two 16-bit instructions in .thumb encoding
+ *
+ * Please note sigreturn_codes code are not executed in place. Instead
+ * they just copied by kernel into appropriate places. Code inside of
+ * arch/arm/kernel/signal.c is very sensitive to layout of these code
+ * snippets.
+ */
+
+#if __LINUX_ARM_ARCH__ <= 4
+ /*
+ * Note that we manually select the minimal architecture version
+ * that supports the required Thumb opcodes when building for
+ * earlier architectures. It is OK for this file to be combined
+ * with objects built for lower architecture variants, since these
+ * code snippets are only used as input data.
+ */
+ .arch armv4t
+#endif
+
+ .section .rodata
+ .global sigreturn_codes
+ .type sigreturn_codes, #object
+
+ .arm
+
+sigreturn_codes:
+
+ /* ARM sigreturn syscall code snippet */
+ mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+ swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+ /* Thumb sigreturn syscall code snippet */
+ .thumb
+ movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+ swi #0
+
+ /* ARM sigreturn_rt syscall code snippet */
+ .arm
+ mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+ swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+ /* Thumb sigreturn_rt syscall code snippet */
+ .thumb
+ movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+ swi #0
+
+ /*
+ * Note on the additional space: the setup_return() code in
+ * signal.c always copies two words regardless of whether the
+ * handler is Thumb or not, so we need one additional word
+ * after the real last entry.
+ */
+ .arm
+ .space 4
+
+ .size sigreturn_codes, . - sigreturn_codes
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index db1536b8b30b..b907d9b790ab 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -55,6 +55,7 @@
* specific registers and some other data for resume.
* r0 = suspend function arg0
* r1 = suspend function
+ * r2 = MPIDR value the resuming CPU will use
*/
ENTRY(__cpu_suspend)
stmfd sp!, {r4 - r11, lr}
@@ -67,23 +68,18 @@ ENTRY(__cpu_suspend)
mov r5, sp @ current virtual SP
add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn
sub sp, sp, r4 @ allocate CPU state on stack
- stmfd sp!, {r0, r1} @ save suspend func arg and pointer
- add r0, sp, #8 @ save pointer to save block
- mov r1, r4 @ size of save block
- mov r2, r5 @ virtual SP
ldr r3, =sleep_save_sp
+ stmfd sp!, {r0, r1} @ save suspend func arg and pointer
ldr r3, [r3, #SLEEP_SAVE_SP_VIRT]
- ALT_SMP(mrc p15, 0, r9, c0, c0, 5)
- ALT_UP_B(1f)
- ldr r8, =mpidr_hash
- /*
- * This ldmia relies on the memory layout of the mpidr_hash
- * struct mpidr_hash.
- */
- ldmia r8, {r4-r7} @ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts
- compute_mpidr_hash lr, r5, r6, r7, r9, r4
- add r3, r3, lr, lsl #2
-1:
+ ALT_SMP(ldr r0, =mpidr_hash)
+ ALT_UP_B(1f)
+ /* This ldmia relies on the memory layout of the mpidr_hash struct */
+ ldmia r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
+ compute_mpidr_hash r0, r6, r7, r8, r2, r1
+ add r3, r3, r0, lsl #2
+1: mov r2, r5 @ virtual SP
+ mov r1, r4 @ size of save block
+ add r0, sp, #8 @ pointer to save block
bl __cpu_suspend_save
adr lr, BSYM(cpu_suspend_abort)
ldmfd sp!, {r0, pc} @ call suspend fn
@@ -130,6 +126,7 @@ ENDPROC(cpu_resume_after_mmu)
.data
.align
ENTRY(cpu_resume)
+ARM_BE8(setend be) @ ensure we are in BE mode
mov r1, #0
ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
ALT_UP_B(1f)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 72024ea8a3a6..dc894ab3622b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
+#include <linux/irq_work.h>
#include <linux/atomic.h>
#include <asm/smp.h>
@@ -66,6 +67,8 @@ enum ipi_msg_type {
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_IRQ_WORK,
+ IPI_COMPLETION,
};
static DECLARE_COMPLETION(cpu_running);
@@ -80,7 +83,7 @@ void __init smp_set_ops(struct smp_operations *ops)
static unsigned long get_arch_pgd(pgd_t *pgd)
{
- phys_addr_t pgdir = virt_to_phys(pgd);
+ phys_addr_t pgdir = virt_to_idmap(pgd);
BUG_ON(pgdir & ARCH_PGD_MASK);
return pgdir >> ARCH_PGD_SHIFT;
}
@@ -448,6 +451,14 @@ void arch_send_call_function_single_ipi(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ if (is_smp())
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
static const char *ipi_types[NR_IPI] = {
#define S(x,s) [x] = s
S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -456,6 +467,8 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_IRQ_WORK, "IRQ work interrupts"),
+ S(IPI_COMPLETION, "completion interrupts"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -515,6 +528,19 @@ static void ipi_cpu_stop(unsigned int cpu)
cpu_relax();
}
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+ per_cpu(cpu_completion, cpu) = completion;
+ return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+ complete(per_cpu(cpu_completion, cpu));
+}
+
/*
* Main handler for inter-processor interrupts
*/
@@ -565,6 +591,20 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+#ifdef CONFIG_IRQ_WORK
+ case IPI_IRQ_WORK:
+ irq_enter();
+ irq_work_run();
+ irq_exit();
+ break;
+#endif
+
+ case IPI_COMPLETION:
+ irq_enter();
+ ipi_complete(cpu);
+ irq_exit();
+ break;
+
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
cpu, ipinr);
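IPI_COMPLETION lets one CPU register a struct completion, have another CPU targeted with the new IPI, and be woken when that CPU's interrupt handler calls complete() on it. A userspace model of the same rendezvous using pthreads, illustrative names only; compile with -lpthread:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;

static void ipi_complete_model(void)	/* models handle_IPI(IPI_COMPLETION) */
{
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *other_cpu(void *arg)
{
	ipi_complete_model();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, other_cpu, NULL);
	pthread_mutex_lock(&lock);	/* models wait_for_completion() */
	while (!done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("completion received");
	return 0;
}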
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 5bc1a63284e3..1aafa0d785eb 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -28,7 +28,7 @@
*/
unsigned int __init scu_get_core_count(void __iomem *scu_base)
{
- unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+ unsigned int ncores = readl_relaxed(scu_base + SCU_CONFIG);
return (ncores & 0x03) + 1;
}
@@ -42,19 +42,19 @@ void scu_enable(void __iomem *scu_base)
#ifdef CONFIG_ARM_ERRATA_764369
/* Cortex-A9 only */
if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
- scu_ctrl = __raw_readl(scu_base + 0x30);
+ scu_ctrl = readl_relaxed(scu_base + 0x30);
if (!(scu_ctrl & 1))
- __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+ writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30);
}
#endif
- scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+ scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
/* already enabled? */
if (scu_ctrl & 1)
return;
scu_ctrl |= 1;
- __raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+ writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
/*
* Ensure that the data accessed by CPU0 before the SCU was
@@ -80,9 +80,9 @@ int scu_power_mode(void __iomem *scu_base, unsigned int mode)
if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;
- val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+ val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
val |= mode;
- __raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+ writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
return 0;
}
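The __raw_readl()/__raw_writel() accessors do no byte swapping, so on a BE8 (big-endian) kernel they would misread the little-endian SCU registers; readl_relaxed()/writel_relaxed() fold in le32_to_cpu()/cpu_to_le32(). A small sketch of the difference for the core-count read above, modelling the big-endian load by hand:

#include <stdio.h>
#include <stdint.h>

static uint32_t le32_to_cpu_be(uint32_t x)	/* what a BE8 kernel must do */
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

int main(void)
{
	/* SCU_CONFIG holds 3 (i.e. 4 cores); a big-endian load sees it swapped */
	uint32_t raw_load = 0x03000000;

	printf("__raw_readl   -> %u cores (wrong on BE8)\n", (raw_load & 3) + 1);
	printf("readl_relaxed -> %u cores\n", (le32_to_cpu_be(raw_load) & 3) + 1);
	return 0;
}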
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 83ccca303df8..95d063620b76 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,6 +70,40 @@ static inline void ipi_flush_bp_all(void *ignored)
local_flush_bp_all();
}
+#ifdef CONFIG_ARM_ERRATA_798181
+bool (*erratum_a15_798181_handler)(void);
+
+static bool erratum_a15_798181_partial(void)
+{
+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+ dsb(ish);
+ return false;
+}
+
+static bool erratum_a15_798181_broadcast(void)
+{
+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+ dsb(ish);
+ return true;
+}
+
+void erratum_a15_798181_init(void)
+{
+ unsigned int midr = read_cpuid_id();
+ unsigned int revidr = read_cpuid(CPUID_REVIDR);
+
+ /* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
+ (revidr & 0x210) == 0x210) {
+ return;
+ }
+ if (revidr & 0x10)
+ erratum_a15_798181_handler = erratum_a15_798181_partial;
+ else
+ erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+}
+#endif
+
static void ipi_flush_tlb_a15_erratum(void *arg)
{
dmb();
@@ -80,7 +114,6 @@ static void broadcast_tlb_a15_erratum(void)
if (!erratum_a15_798181())
return;
- dummy_flush_tlb_a15_erratum();
smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
}
@@ -92,7 +125,6 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
if (!erratum_a15_798181())
return;
- dummy_flush_tlb_a15_erratum();
this_cpu = get_cpu();
a15_erratum_get_cpumask(this_cpu, mm, &mask);
smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 2985c9f0905d..6591e26fc13f 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -45,7 +45,7 @@ static void twd_set_mode(enum clock_event_mode mode,
case CLOCK_EVT_MODE_PERIODIC:
ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
| TWD_TIMER_CONTROL_PERIODIC;
- __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+ writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
twd_base + TWD_TIMER_LOAD);
break;
case CLOCK_EVT_MODE_ONESHOT:
@@ -58,18 +58,18 @@ static void twd_set_mode(enum clock_event_mode mode,
ctrl = 0;
}
- __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
}
static int twd_set_next_event(unsigned long evt,
struct clock_event_device *unused)
{
- unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+ unsigned long ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);
ctrl |= TWD_TIMER_CONTROL_ENABLE;
- __raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
- __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER);
+ writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
return 0;
}
@@ -82,8 +82,8 @@ static int twd_set_next_event(unsigned long evt,
*/
static int twd_timer_ack(void)
{
- if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
- __raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+ if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) {
+ writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
return 1;
}
@@ -211,15 +211,15 @@ static void twd_calibrate_rate(void)
waitjiffies += 5;
/* enable, no interrupt or reload */
- __raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL);
/* maximum value */
- __raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+ writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
while (get_jiffies_64() < waitjiffies)
udelay(10);
- count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+ count = readl_relaxed(twd_base + TWD_TIMER_COUNTER);
twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
@@ -277,7 +277,7 @@ static void twd_timer_setup(void)
* bother with the below.
*/
if (per_cpu(percpu_setup_called, cpu)) {
- __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
clockevents_register_device(clk);
enable_percpu_irq(clk->irq, 0);
return;
@@ -290,7 +290,7 @@ static void twd_timer_setup(void)
* The following is done once per CPU the first time .setup() is
* called.
*/
- __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
clk->name = "local_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 41cf3cbf756d..2835d35234ca 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -10,7 +10,7 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
-extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
+extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);
#ifdef CONFIG_MMU
@@ -21,6 +21,7 @@ extern void cpu_resume_mmu(void);
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
struct mm_struct *mm = current->active_mm;
+ u32 __mpidr = cpu_logical_map(smp_processor_id());
int ret;
if (!idmap_pgd)
@@ -32,7 +33,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* resume (indicated by a zero return code), we need to switch
* back to the correct page tables.
*/
- ret = __cpu_suspend(arg, fn);
+ ret = __cpu_suspend(arg, fn, __mpidr);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
@@ -44,7 +45,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
#else
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
- return __cpu_suspend(arg, fn);
+ u32 __mpidr = cpu_logical_map(smp_processor_id());
+ return __cpu_suspend(arg, fn, __mpidr);
}
#define idmap_pgd NULL
#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 8fcda140358d..6125f259b7b5 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -34,6 +34,7 @@
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
+#include <asm/opcodes.h>
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
@@ -341,15 +342,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
- unsigned short bkpt;
+ u16 bkpt;
+ u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
- unsigned long bkpt;
+ u32 bkpt;
+ u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif
if (probe_kernel_address((unsigned *)pc, bkpt))
return 0;
- return bkpt == BUG_INSTR_VALUE;
+ return bkpt == insn;
}
#endif
@@ -402,25 +405,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
if (thumb_mode(regs)) {
- instr = ((u16 *)pc)[0];
+ instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
if (is_wide_instruction(instr)) {
- instr <<= 16;
- instr |= ((u16 *)pc)[1];
+ u16 inst2;
+ inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
+ instr = __opcode_thumb32_compose(instr, inst2);
}
} else
#endif
- instr = *(u32 *) pc;
+ instr = __mem_to_opcode_arm(*(u32 *) pc);
} else if (thumb_mode(regs)) {
if (get_user(instr, (u16 __user *)pc))
goto die_sig;
+ instr = __mem_to_opcode_thumb16(instr);
if (is_wide_instruction(instr)) {
unsigned int instr2;
if (get_user(instr2, (u16 __user *)pc+1))
goto die_sig;
- instr <<= 16;
- instr |= instr2;
+ instr2 = __mem_to_opcode_thumb16(instr2);
+ instr = __opcode_thumb32_compose(instr, instr2);
}
} else if (get_user(instr, (u32 __user *)pc)) {
goto die_sig;
+ } else {
+ instr = __mem_to_opcode_arm(instr);
}
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index ebf5015508b5..466bd299b1a8 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
bool "Kernel-based Virtual Machine (KVM) support"
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_ARM_HOST
depends on ARM_VIRT_EXT && ARM_LPAE
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index d99bee4950e5..789bca9e64a7 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -19,6 +19,6 @@ kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
-obj-y += coproc.o coproc_a15.o mmio.o psci.o perf.o
+obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9c697db2787e..2a700e00528d 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -65,7 +65,7 @@ static bool vgic_present;
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
BUG_ON(preemptible());
- __get_cpu_var(kvm_arm_running_vcpu) = vcpu;
+ __this_cpu_write(kvm_arm_running_vcpu, vcpu);
}
/**
@@ -75,7 +75,7 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
BUG_ON(preemptible());
- return __get_cpu_var(kvm_arm_running_vcpu);
+ return __this_cpu_read(kvm_arm_running_vcpu);
}
/**
@@ -152,12 +152,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
{
return 0;
}
@@ -797,6 +798,19 @@ long kvm_arch_vm_ioctl(struct file *filp,
return -EFAULT;
return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
}
+ case KVM_ARM_PREFERRED_TARGET: {
+ int err;
+ struct kvm_vcpu_init init;
+
+ err = kvm_vcpu_preferred_target(&init);
+ if (err)
+ return err;
+
+ if (copy_to_user(argp, &init, sizeof(init)))
+ return -EFAULT;
+
+ return 0;
+ }
default:
return -EINVAL;
}
@@ -815,7 +829,7 @@ static void cpu_init_hyp_mode(void *dummy)
boot_pgd_ptr = kvm_mmu_get_boot_httbr();
pgd_ptr = kvm_mmu_get_httbr();
- stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+ stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
vector_ptr = (unsigned long)__kvm_hyp_vector;
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index db9cf692d4dd..78c0885d6501 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -71,6 +71,98 @@ int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+ /*
+ * Compute guest MPIDR. We build a virtual cluster out of the
+ * vcpu_id, but we read the 'U' bit from the underlying
+ * hardware directly.
+ */
+ vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+ ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
+ (vcpu->vcpu_id & 3));
+}
+
+/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
+static bool access_actlr(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+ return true;
+}
+
+/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
+static bool access_cbar(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p);
+ return read_zero(vcpu, p);
+}
+
+/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
+static bool access_l2ctlr(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+ return true;
+}
+
+static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+ u32 l2ctlr, ncores;
+
+ asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
+ l2ctlr &= ~(3 << 24);
+ ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+ /* How many cores in the current cluster and the next ones */
+ ncores -= (vcpu->vcpu_id & ~3);
+ /* Cap it to the maximum number of cores in a single cluster */
+ ncores = min(ncores, 3U);
+ l2ctlr |= (ncores & 3) << 24;
+
+ vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+}
+
+static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+ u32 actlr;
+
+ /* ACTLR contains SMP bit: make sure you create all cpus first! */
+ asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
+ /* Make the SMP bit consistent with the guest configuration */
+ if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
+ actlr |= 1U << 6;
+ else
+ actlr &= ~(1U << 6);
+
+ vcpu->arch.cp15[c1_ACTLR] = actlr;
+}
+
+/*
+ * TRM entries: A7:4.3.50, A15:4.3.49
+ * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
+ */
+static bool access_l2ectlr(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt1) = 0;
+ return true;
+}
+
/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
@@ -153,10 +245,22 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
* registers preceding 32-bit ones.
*/
static const struct coproc_reg cp15_regs[] = {
+ /* MPIDR: we use VMPIDR for guest access. */
+ { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
+ NULL, reset_mpidr, c0_MPIDR },
+
/* CSSELR: swapped by interrupt.S. */
{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
NULL, reset_unknown, c0_CSSELR },
+ /* ACTLR: trapped by HCR.TAC bit. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
+ access_actlr, reset_actlr, c1_ACTLR },
+
+ /* CPACR: swapped by interrupt.S. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
+ NULL, reset_val, c1_CPACR, 0x00000000 },
+
/* TTBR0/TTBR1: swapped by interrupt.S. */
{ CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
{ CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
@@ -195,6 +299,13 @@ static const struct coproc_reg cp15_regs[] = {
{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
/*
+ * L2CTLR access (guest wants to know #CPUs).
+ */
+ { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
+ access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
+ { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
+
+ /*
* Dummy performance monitor implementation.
*/
{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
@@ -234,6 +345,9 @@ static const struct coproc_reg cp15_regs[] = {
/* CNTKCTL: swapped by interrupt.S. */
{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
NULL, reset_val, c14_CNTKCTL, 0x00000000 },
+
+ /* The Configuration Base Address Register. */
+ { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
/* Target specific emulation tables */
@@ -241,6 +355,12 @@ static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
+ unsigned int i;
+
+ for (i = 1; i < table->num; i++)
+ BUG_ON(cmp_reg(&table->table[i-1],
+ &table->table[i]) >= 0);
+
target_tables[table->target] = table;
}
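reset_mpidr() moves into the common table and builds a virtual topology from the vcpu_id: the low two bits become the CPU number within a cluster (Aff0) and the rest becomes the cluster number (Aff1), while the SMP/U bits are copied from the host MPIDR. A host-side sketch of that composition, with the copied bits passed in as a parameter purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define MPIDR_LEVEL_BITS 8

/* smp_bits stands in for the U/SMP bits read from the host MPIDR */
static uint32_t guest_mpidr(uint32_t vcpu_id, uint32_t smp_bits)
{
	return smp_bits |
	       ((vcpu_id >> 2) << MPIDR_LEVEL_BITS) |	/* Aff1: cluster number */
	       (vcpu_id & 3);				/* Aff0: cpu in cluster */
}

int main(void)
{
	uint32_t id;

	for (id = 0; id < 6; id++)
		printf("vcpu %u -> MPIDR 0x%08x\n", id, guest_mpidr(id, 0x80000000));
	return 0;
}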
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index cf93472b9dd6..bb0cac1410cc 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -17,101 +17,12 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm_host.h>
-#include <asm/cputype.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_host.h>
-#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
#include <linux/init.h>
-static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
- /*
- * Compute guest MPIDR:
- * (Even if we present only one VCPU to the guest on an SMP
- * host we don't set the U bit in the MPIDR, or vice versa, as
- * revealing the underlying hardware properties is likely to
- * be the best choice).
- */
- vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
- | (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
-}
-
#include "coproc.h"
-/* A15 TRM 4.3.28: RO WI */
-static bool access_actlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
-
- *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
- return true;
-}
-
-/* A15 TRM 4.3.60: R/O. */
-static bool access_cbar(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return write_to_read_only(vcpu, p);
- return read_zero(vcpu, p);
-}
-
-/* A15 TRM 4.3.48: R/O WI. */
-static bool access_l2ctlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
-
- *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
- return true;
-}
-
-static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
- u32 l2ctlr, ncores;
-
- asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
- l2ctlr &= ~(3 << 24);
- ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
- l2ctlr |= (ncores & 3) << 24;
-
- vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
-}
-
-static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
- u32 actlr;
-
- /* ACTLR contains SMP bit: make sure you create all cpus first! */
- asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
- /* Make the SMP bit consistent with the guest configuration */
- if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
- actlr |= 1U << 6;
- else
- actlr &= ~(1U << 6);
-
- vcpu->arch.cp15[c1_ACTLR] = actlr;
-}
-
-/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
-static bool access_l2ectlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
-
- *vcpu_reg(vcpu, p->Rt1) = 0;
- return true;
-}
-
/*
* A15-specific CP15 registers.
* CRn denotes the primary register number, but is copied to the CRm in the
@@ -121,29 +32,9 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
* registers preceding 32-bit ones.
*/
static const struct coproc_reg a15_regs[] = {
- /* MPIDR: we use VMPIDR for guest access. */
- { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
- NULL, reset_mpidr, c0_MPIDR },
-
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
NULL, reset_val, c1_SCTLR, 0x00C50078 },
- /* ACTLR: trapped by HCR.TAC bit. */
- { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
- access_actlr, reset_actlr, c1_ACTLR },
- /* CPACR: swapped by interrupt.S. */
- { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
- NULL, reset_val, c1_CPACR, 0x00000000 },
-
- /*
- * L2CTLR access (guest wants to know #CPUs).
- */
- { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
- access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
- { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
-
- /* The Configuration Base Address Register. */
- { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
static struct kvm_coproc_target_table a15_target_table = {
@@ -154,12 +45,6 @@ static struct kvm_coproc_target_table a15_target_table = {
static int __init coproc_a15_init(void)
{
- unsigned int i;
-
- for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
- BUG_ON(cmp_reg(&a15_regs[i-1],
- &a15_regs[i]) >= 0);
-
kvm_register_target_coproc_table(&a15_target_table);
return 0;
}
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
new file mode 100644
index 000000000000..1df767331588
--- /dev/null
+++ b/arch/arm/kvm/coproc_a7.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Copyright (C) 2013 - ARM Ltd
+ *
+ * Authors: Rusty Russell <rusty@rustcorp.au>
+ * Christoffer Dall <c.dall@virtualopensystems.com>
+ * Jonathan Austin <jonathan.austin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/kvm_host.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
+#include <linux/init.h>
+
+#include "coproc.h"
+
+/*
+ * Cortex-A7 specific CP15 registers.
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
+ */
+static const struct coproc_reg a7_regs[] = {
+ /* SCTLR: swapped by interrupt.S. */
+ { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+ NULL, reset_val, c1_SCTLR, 0x00C50878 },
+};
+
+static struct kvm_coproc_target_table a7_target_table = {
+ .target = KVM_ARM_TARGET_CORTEX_A7,
+ .table = a7_regs,
+ .num = ARRAY_SIZE(a7_regs),
+};
+
+static int __init coproc_a7_init(void)
+{
+ kvm_register_target_coproc_table(&a7_target_table);
+ return 0;
+}
+late_initcall(coproc_a7_init);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index bdede9e7da51..d6c005283678 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -354,7 +354,7 @@ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
if (is_pabt) {
- /* Set DFAR and DFSR */
+ /* Set IFAR and IFSR */
vcpu->arch.cp15[c6_IFAR] = addr;
is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
/* Always give debug fault for now - should give guest a clue */
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 152d03612181..20f8d97904af 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -190,6 +190,8 @@ int __attribute_const__ kvm_target_cpu(void)
return -EINVAL;
switch (part_number) {
+ case ARM_CPU_PART_CORTEX_A7:
+ return KVM_ARM_TARGET_CORTEX_A7;
case ARM_CPU_PART_CORTEX_A15:
return KVM_ARM_TARGET_CORTEX_A15;
default:
@@ -202,7 +204,7 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
{
unsigned int i;
- /* We can only do a cortex A15 for now. */
+ /* We can only cope with guest==host and only on A15/A7 (for now). */
if (init->target != kvm_target_cpu())
return -EINVAL;
@@ -222,6 +224,26 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
return kvm_reset_vcpu(vcpu);
}
+int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+{
+ int target = kvm_target_cpu();
+
+ if (target < 0)
+ return -ENODEV;
+
+ memset(init, 0, sizeof(*init));
+
+ /*
+ * For now, we don't return any features.
+ * In future, we might use features to return target
+ * specific features available for the preferred
+ * target type.
+ */
+ init->target = (__u32)target;
+
+ return 0;
+}
+
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index df4c82d47ad7..a92079011a83 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -73,23 +73,29 @@ static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
/**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * kvm_handle_wfx - handle a WFI or WFE instruction trapped in a guest
* @vcpu: the vcpu pointer
* @run: the kvm_run structure pointer
*
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * world-switches and schedule other host processes until there is an
+ * incoming IRQ or FIQ to the VM.
*/
-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
trace_kvm_wfi(*vcpu_pc(vcpu));
- kvm_vcpu_block(vcpu);
+ if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE)
+ kvm_vcpu_on_spin(vcpu);
+ else
+ kvm_vcpu_block(vcpu);
+
return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
- [HSR_EC_WFI] = kvm_handle_wfi,
+ [HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
[HSR_EC_CP14_MR] = kvm_handle_cp14_access,
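The new handler above folds both trap reasons into one function: a single ISS bit in the Hyp Syndrome Register distinguishes WFE from WFI, and only the scheduling decision differs. A minimal stand-alone sketch of that dispatch; the bit value and the two helpers are stand-ins for illustration only (the kernel calls kvm_vcpu_on_spin() and kvm_vcpu_block()):

#include <stdio.h>

#define HSR_WFI_IS_WFE (1U << 0)   /* assumed bit position, for illustration */

/* Stand-ins for kvm_vcpu_on_spin()/kvm_vcpu_block(). */
static void yield_to_other_vcpus(void)  { puts("WFE: yield, retry later"); }
static void block_until_interrupt(void) { puts("WFI: sleep until IRQ/FIQ"); }

/* Mirror of kvm_handle_wfx(): one trap handler, two behaviours. */
static int handle_wfx(unsigned int hsr)
{
        if (hsr & HSR_WFI_IS_WFE)
                yield_to_other_vcpus();
        else
                block_until_interrupt();
        return 1; /* resume the guest */
}

int main(void)
{
        handle_wfx(0);               /* WFI path */
        handle_wfx(HSR_WFI_IS_WFE);  /* WFE path */
        return 0;
}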
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 0c25d9487d53..4cb5a93182e9 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -23,6 +23,68 @@
#include "trace.h"
+static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
+{
+ void *datap = NULL;
+ union {
+ u8 byte;
+ u16 hword;
+ u32 word;
+ u64 dword;
+ } tmp;
+
+ switch (len) {
+ case 1:
+ tmp.byte = data;
+ datap = &tmp.byte;
+ break;
+ case 2:
+ tmp.hword = data;
+ datap = &tmp.hword;
+ break;
+ case 4:
+ tmp.word = data;
+ datap = &tmp.word;
+ break;
+ case 8:
+ tmp.dword = data;
+ datap = &tmp.dword;
+ break;
+ }
+
+ memcpy(buf, datap, len);
+}
+
+static unsigned long mmio_read_buf(char *buf, unsigned int len)
+{
+ unsigned long data = 0;
+ union {
+ u16 hword;
+ u32 word;
+ u64 dword;
+ } tmp;
+
+ switch (len) {
+ case 1:
+ data = buf[0];
+ break;
+ case 2:
+ memcpy(&tmp.hword, buf, len);
+ data = tmp.hword;
+ break;
+ case 4:
+ memcpy(&tmp.word, buf, len);
+ data = tmp.word;
+ break;
+ case 8:
+ memcpy(&tmp.dword, buf, len);
+ data = tmp.dword;
+ break;
+ }
+
+ return data;
+}
+
/**
* kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
* @vcpu: The VCPU pointer
@@ -33,28 +95,27 @@
*/
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- unsigned long *dest;
+ unsigned long data;
unsigned int len;
int mask;
if (!run->mmio.is_write) {
- dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
- *dest = 0;
-
len = run->mmio.len;
if (len > sizeof(unsigned long))
return -EINVAL;
- memcpy(dest, run->mmio.data, len);
-
- trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
- *((u64 *)run->mmio.data));
+ data = mmio_read_buf(run->mmio.data, len);
if (vcpu->arch.mmio_decode.sign_extend &&
len < sizeof(unsigned long)) {
mask = 1U << ((len * 8) - 1);
- *dest = (*dest ^ mask) - mask;
+ data = (data ^ mask) - mask;
}
+
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+ data);
+ data = vcpu_data_host_to_guest(vcpu, data, len);
+ *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
}
return 0;
@@ -105,6 +166,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa)
{
struct kvm_exit_mmio mmio;
+ unsigned long data;
unsigned long rt;
int ret;
@@ -125,13 +187,15 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
rt = vcpu->arch.mmio_decode.rt;
+ data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
+
trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
KVM_TRACE_MMIO_READ_UNSATISFIED,
mmio.len, fault_ipa,
- (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
+ (mmio.is_write) ? data : 0);
if (mmio.is_write)
- memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
+ mmio_write_buf(mmio.data, mmio.len, data);
if (vgic_handle_mmio(vcpu, run, &mmio))
return 1;
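Two details of the rewritten MMIO return path are worth unpacking: the narrow value supplied by user space is first sign-extended with the xor/subtract trick, and only then converted to the guest's endianness via vcpu_data_host_to_guest() before landing in the destination register. The sign extension itself is plain integer arithmetic; a small stand-alone sketch (the helper name is illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

/* Sign-extend an MMIO value of 'len' bytes into a full-width long,
 * using the same xor/subtract trick as kvm_handle_mmio_return(). */
static unsigned long sign_extend(unsigned long data, unsigned int len)
{
        unsigned long mask;

        if (len < sizeof(unsigned long)) {
                mask = 1UL << ((len * 8) - 1);  /* sign bit of the narrow value */
                data = (data ^ mask) - mask;    /* propagates that bit upwards */
        }
        return data;
}

int main(void)
{
        /* A one-byte read of 0x80 becomes -128; 0x7f stays 127. */
        assert((long)sign_extend(0x80, 1) == -128);
        assert((long)sign_extend(0x7f, 1) == 127);
        printf("0x%lx\n", sign_extend(0x8000, 2));  /* 0xffff...8000 */
        return 0;
}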
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index b0de86b56c13..371958370de4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -19,6 +19,7 @@
#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
+#include <linux/hugetlb.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
@@ -41,6 +42,8 @@ static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
+#define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
+
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
/*
@@ -93,19 +96,29 @@ static bool page_empty(void *ptr)
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
- pmd_t *pmd_table = pmd_offset(pud, 0);
- pud_clear(pud);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- pmd_free(NULL, pmd_table);
+ if (pud_huge(*pud)) {
+ pud_clear(pud);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ } else {
+ pmd_t *pmd_table = pmd_offset(pud, 0);
+ pud_clear(pud);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ pmd_free(NULL, pmd_table);
+ }
put_page(virt_to_page(pud));
}
static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
- pte_t *pte_table = pte_offset_kernel(pmd, 0);
- pmd_clear(pmd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- pte_free_kernel(NULL, pte_table);
+ if (kvm_pmd_huge(*pmd)) {
+ pmd_clear(pmd);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ } else {
+ pte_t *pte_table = pte_offset_kernel(pmd, 0);
+ pmd_clear(pmd);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ pte_free_kernel(NULL, pte_table);
+ }
put_page(virt_to_page(pmd));
}
@@ -136,18 +149,32 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
continue;
}
+ if (pud_huge(*pud)) {
+ /*
+ * If we are dealing with a huge pud, just clear it and
+ * move on.
+ */
+ clear_pud_entry(kvm, pud, addr);
+ addr = pud_addr_end(addr, end);
+ continue;
+ }
+
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
addr = pmd_addr_end(addr, end);
continue;
}
- pte = pte_offset_kernel(pmd, addr);
- clear_pte_entry(kvm, pte, addr);
- next = addr + PAGE_SIZE;
+ if (!kvm_pmd_huge(*pmd)) {
+ pte = pte_offset_kernel(pmd, addr);
+ clear_pte_entry(kvm, pte, addr);
+ next = addr + PAGE_SIZE;
+ }
- /* If we emptied the pte, walk back up the ladder */
- if (page_empty(pte)) {
+ /*
+ * If the pmd entry is to be cleared, walk back up the ladder
+ */
+ if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
clear_pmd_entry(kvm, pmd, addr);
next = pmd_addr_end(addr, end);
if (page_empty(pmd) && !page_empty(pud)) {
@@ -420,29 +447,71 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
kvm->arch.pgd = NULL;
}
-
-static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
- phys_addr_t addr, const pte_t *new_pte, bool iomap)
+static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ phys_addr_t addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
- pte_t *pte, old_pte;
- /* Create 2nd stage page table mapping - Level 1 */
pgd = kvm->arch.pgd + pgd_index(addr);
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
if (!cache)
- return 0; /* ignore calls from kvm_set_spte_hva */
+ return NULL;
pmd = mmu_memory_cache_alloc(cache);
pud_populate(NULL, pud, pmd);
get_page(virt_to_page(pud));
}
- pmd = pmd_offset(pud, addr);
+ return pmd_offset(pud, addr);
+}
+
+static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+ *cache, phys_addr_t addr, const pmd_t *new_pmd)
+{
+ pmd_t *pmd, old_pmd;
+
+ pmd = stage2_get_pmd(kvm, cache, addr);
+ VM_BUG_ON(!pmd);
+
+ /*
+ * Mapping in huge pages should only happen through a fault. If a
+ * page is merged into a transparent huge page, the individual
+ * subpages of that huge page should be unmapped through MMU
+ * notifiers before we get here.
+ *
+ * Merging of CompoundPages is not supported; they should instead be
+ * split first, unmapped, merged, and mapped back in on demand.
+ */
+ VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
+
+ old_pmd = *pmd;
+ kvm_set_pmd(pmd, *new_pmd);
+ if (pmd_present(old_pmd))
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ else
+ get_page(virt_to_page(pmd));
+ return 0;
+}
+
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ phys_addr_t addr, const pte_t *new_pte, bool iomap)
+{
+ pmd_t *pmd;
+ pte_t *pte, old_pte;
- /* Create 2nd stage page table mapping - Level 2 */
+ /* Create stage-2 page table mapping - Level 1 */
+ pmd = stage2_get_pmd(kvm, cache, addr);
+ if (!pmd) {
+ /*
+ * Ignore calls from kvm_set_spte_hva for unallocated
+ * address ranges.
+ */
+ return 0;
+ }
+
+ /* Create stage-2 page mappings - Level 2 */
if (pmd_none(*pmd)) {
if (!cache)
return 0; /* ignore calls from kvm_set_spte_hva */
@@ -507,16 +576,60 @@ out:
return ret;
}
+static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
+{
+ pfn_t pfn = *pfnp;
+ gfn_t gfn = *ipap >> PAGE_SHIFT;
+
+ if (PageTransCompound(pfn_to_page(pfn))) {
+ unsigned long mask;
+ /*
+ * The address we faulted on is backed by a transparent huge
+ * page. However, because we map the compound huge page and
+ * not the individual tail page, we need to transfer the
+ * refcount to the head page. We have to be careful that the
+ * THP doesn't start to split while we are adjusting the
+ * refcounts.
+ *
+ * We are sure this doesn't happen, because mmu_notifier_retry
+ * was successful and we are holding the mmu_lock, so if this
+ * THP is trying to split, it will be blocked in the mmu
+ * notifier before touching any of the pages, specifically
+ * before being able to call __split_huge_page_refcount().
+ *
+ * We can therefore safely transfer the refcount from PG_tail
+ * to PG_head and switch the pfn from a tail page to the head
+ * page accordingly.
+ */
+ mask = PTRS_PER_PMD - 1;
+ VM_BUG_ON((gfn & mask) != (pfn & mask));
+ if (pfn & mask) {
+ *ipap &= PMD_MASK;
+ kvm_release_pfn_clean(pfn);
+ pfn &= ~mask;
+ kvm_get_pfn(pfn);
+ *pfnp = pfn;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- gfn_t gfn, struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot *memslot,
unsigned long fault_status)
{
- pte_t new_pte;
- pfn_t pfn;
int ret;
- bool write_fault, writable;
+ bool write_fault, writable, hugetlb = false, force_pte = false;
unsigned long mmu_seq;
+ gfn_t gfn = fault_ipa >> PAGE_SHIFT;
+ unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
+ struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ struct vm_area_struct *vma;
+ pfn_t pfn;
write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
if (fault_status == FSC_PERM && !write_fault) {
@@ -524,6 +637,26 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
+ /* Let's check if we will get back a huge page backed by hugetlbfs */
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma_intersection(current->mm, hva, hva + 1);
+ if (is_vm_hugetlb_page(vma)) {
+ hugetlb = true;
+ gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
+ } else {
+ /*
+ * Pages belonging to VMAs not aligned to the PMD mapping
+ * granularity cannot be mapped using block descriptors even
+ * if the pages belong to a THP for the process, because the
+ * stage-2 block descriptor will cover more than a single THP
+ * and we lose atomicity for unmapping, updates, and splits
+ * of the THP or other pages in the stage-2 block range.
+ */
+ if (vma->vm_start & ~PMD_MASK)
+ force_pte = true;
+ }
+ up_read(&current->mm->mmap_sem);
+
/* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
if (ret)
@@ -541,26 +674,40 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
*/
smp_rmb();
- pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
+ pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
if (is_error_pfn(pfn))
return -EFAULT;
- new_pte = pfn_pte(pfn, PAGE_S2);
- coherent_icache_guest_page(vcpu->kvm, gfn);
-
- spin_lock(&vcpu->kvm->mmu_lock);
- if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+ spin_lock(&kvm->mmu_lock);
+ if (mmu_notifier_retry(kvm, mmu_seq))
goto out_unlock;
- if (writable) {
- kvm_set_s2pte_writable(&new_pte);
- kvm_set_pfn_dirty(pfn);
+ if (!hugetlb && !force_pte)
+ hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
+
+ if (hugetlb) {
+ pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
+ new_pmd = pmd_mkhuge(new_pmd);
+ if (writable) {
+ kvm_set_s2pmd_writable(&new_pmd);
+ kvm_set_pfn_dirty(pfn);
+ }
+ coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
+ ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+ } else {
+ pte_t new_pte = pfn_pte(pfn, PAGE_S2);
+ if (writable) {
+ kvm_set_s2pte_writable(&new_pte);
+ kvm_set_pfn_dirty(pfn);
+ }
+ coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
+ ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
}
- stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
+
out_unlock:
- spin_unlock(&vcpu->kvm->mmu_lock);
+ spin_unlock(&kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
- return 0;
+ return ret;
}
/**
@@ -629,7 +776,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
memslot = gfn_to_memslot(vcpu->kvm, gfn);
- ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
+ ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
if (ret == 0)
ret = 1;
out_unlock:
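transparent_hugepage_adjust() above only remaps the fault as a single stage-2 block because the guest IPA and the host pfn are congruent modulo the huge-page size, so both can be snapped back to the head of the 2 MiB block together. A small arithmetic sketch of that adjustment, with 4 KiB pages and 512-entry PMDs hard-coded and the addresses invented for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PMD 512                         /* 2 MiB blocks of 4 KiB pages */
#define PMD_MASK     (~((uint64_t)PTRS_PER_PMD * 4096 - 1))

int main(void)
{
        uint64_t ipa = 0x40123000;               /* faulting guest address (invented) */
        uint64_t pfn = 0x89123;                  /* host pfn backing it, a THP tail page */
        uint64_t gfn = ipa >> PAGE_SHIFT;
        uint64_t mask = PTRS_PER_PMD - 1;

        /* Guest and host must share the same offset within the huge page... */
        assert((gfn & mask) == (pfn & mask));

        /* ...so both can be aligned down to the head of the 2 MiB block. */
        ipa &= PMD_MASK;
        pfn &= ~mask;

        printf("map ipa 0x%llx -> pfn 0x%llx as one 2 MiB block\n",
               (unsigned long long)ipa, (unsigned long long)pfn);
        return 0;
}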
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 86a693a02ba3..0881bf169fbc 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -18,6 +18,7 @@
#include <linux/kvm_host.h>
#include <linux/wait.h>
+#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>
@@ -34,22 +35,30 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
struct kvm *kvm = source_vcpu->kvm;
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu = NULL, *tmp;
wait_queue_head_t *wq;
unsigned long cpu_id;
+ unsigned long mpidr;
phys_addr_t target_pc;
+ int i;
cpu_id = *vcpu_reg(source_vcpu, 1);
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
- if (cpu_id >= atomic_read(&kvm->online_vcpus))
+ kvm_for_each_vcpu(i, tmp, kvm) {
+ mpidr = kvm_vcpu_get_mpidr(tmp);
+ if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
+ vcpu = tmp;
+ break;
+ }
+ }
+
+ if (!vcpu)
return KVM_PSCI_RET_INVAL;
target_pc = *vcpu_reg(source_vcpu, 2);
- vcpu = kvm_get_vcpu(kvm, cpu_id);
-
wq = kvm_arch_vcpu_wq(vcpu);
if (!waitqueue_active(wq))
return KVM_PSCI_RET_INVAL;
@@ -62,6 +71,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
vcpu_set_thumb(vcpu);
}
+ /* Propagate caller endianness */
+ if (kvm_vcpu_is_be(source_vcpu))
+ kvm_vcpu_set_be(vcpu);
+
*vcpu_pc(vcpu) = target_pc;
vcpu->arch.pause = false;
smp_mb(); /* Make sure the above is visible */
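The PSCI CPU_ON change above stops treating the caller-supplied cpu_id as a plain vcpu index and instead matches it against each vcpu's MPIDR affinity bits. A stand-alone sketch of that lookup; the mask value and the vcpu struct are stand-ins, not the kernel's definitions:

#include <stdio.h>

#define MPIDR_HWID_BITMASK 0xFFFFFF   /* affinity fields only; value assumed here */

struct vcpu { unsigned long mpidr; int id; };

/* Find the vcpu whose MPIDR affinity matches the PSCI CPU_ON target,
 * mirroring the kvm_for_each_vcpu() loop in kvm_psci_vcpu_on(). */
static struct vcpu *find_target(struct vcpu *vcpus, int n, unsigned long cpu_id)
{
        for (int i = 0; i < n; i++)
                if ((vcpus[i].mpidr & MPIDR_HWID_BITMASK) ==
                    (cpu_id & MPIDR_HWID_BITMASK))
                        return &vcpus[i];
        return NULL;                  /* caller returns KVM_PSCI_RET_INVAL */
}

int main(void)
{
        struct vcpu vcpus[] = { { 0x80000000, 0 }, { 0x80000001, 1 } };
        struct vcpu *t = find_target(vcpus, 2, 0x80000001UL);

        printf("target vcpu: %d\n", t ? t->id : -1);   /* prints 1 */
        return 0;
}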
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index c02ba4af599f..f558c073c023 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -30,16 +30,14 @@
#include <kvm/arm_arch_timer.h>
/******************************************************************************
- * Cortex-A15 Reset Values
+ * Cortex-A15 and Cortex-A7 Reset Values
*/
-static const int a15_max_cpu_idx = 3;
-
-static struct kvm_regs a15_regs_reset = {
+static struct kvm_regs cortexa_regs_reset = {
.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
};
-static const struct kvm_irq_level a15_vtimer_irq = {
+static const struct kvm_irq_level cortexa_vtimer_irq = {
{ .irq = 27 },
.level = 1,
};
@@ -62,12 +60,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
const struct kvm_irq_level *cpu_vtimer_irq;
switch (vcpu->arch.target) {
+ case KVM_ARM_TARGET_CORTEX_A7:
case KVM_ARM_TARGET_CORTEX_A15:
- if (vcpu->vcpu_id > a15_max_cpu_idx)
- return -EINVAL;
- reset_regs = &a15_regs_reset;
+ reset_regs = &cortexa_regs_reset;
vcpu->arch.midr = read_cpuid_id();
- cpu_vtimer_irq = &a15_vtimer_irq;
+ cpu_vtimer_irq = &cortexa_vtimer_irq;
break;
default:
return -ENODEV;
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index d6408d1ee543..e0c68d5bb7dc 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -10,6 +10,11 @@ UNWIND( .fnstart )
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
+#if __LINUX_ARM_ARCH__ >= 7
+ .arch_extension mp
+ ALT_SMP(W(pldw) [r1])
+ ALT_UP(W(nop))
+#endif
mov r3, r2, lsl r3
1: ldrex r2, [r1]
\instr r2, r2, r3
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 025f742dd4df..3e58d710013c 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -18,6 +18,7 @@
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
+#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>
@@ -40,7 +41,35 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
return 0;
pmd = pmd_offset(pud, addr);
- if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+ if (unlikely(pmd_none(*pmd)))
+ return 0;
+
+ /*
+ * A pmd can be bad if it refers to a HugeTLB or THP page.
+ *
+ * Both THP and HugeTLB pages have the same pmd layout
+ * and should not be manipulated by the pte functions.
+ *
+ * Lock the page table for the destination and check
+ * to see that it's still huge and whether or not we will
+ * need to fault on write, or if we have a splitting THP.
+ */
+ if (unlikely(pmd_thp_or_huge(*pmd))) {
+ ptl = &current->mm->page_table_lock;
+ spin_lock(ptl);
+ if (unlikely(!pmd_thp_or_huge(*pmd)
+ || pmd_hugewillfault(*pmd)
+ || pmd_trans_splitting(*pmd))) {
+ spin_unlock(ptl);
+ return 0;
+ }
+
+ *ptep = NULL;
+ *ptlp = ptl;
+ return 1;
+ }
+
+ if (unlikely(pmd_bad(*pmd)))
return 0;
pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
@@ -94,7 +123,10 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
from += tocopy;
n -= tocopy;
- pte_unmap_unlock(pte, ptl);
+ if (pte)
+ pte_unmap_unlock(pte, ptl);
+ else
+ spin_unlock(ptl);
}
if (!atomic)
up_read(&current->mm->mmap_sem);
@@ -147,7 +179,10 @@ __clear_user_memset(void __user *addr, unsigned long n)
addr += tocopy;
n -= tocopy;
- pte_unmap_unlock(pte, ptl);
+ if (pte)
+ pte_unmap_unlock(pte, ptl);
+ else
+ spin_unlock(ptl);
}
up_read(&current->mm->mmap_sem);
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index e0fda04e6aa1..90aab2d5a07f 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -98,7 +98,6 @@ obj-y += leds.o
# Power Management
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_AT91_SLOW_CLOCK) += pm_slowclock.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
ifeq ($(CONFIG_PM_DEBUG),y)
CFLAGS_pm.o += -DDEBUG
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 4aad93d54d6f..25805f2f6010 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -27,6 +27,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -327,6 +328,7 @@ static void __init at91rm9200_ioremap_registers(void)
{
at91rm9200_ioremap_st(AT91RM9200_BASE_ST);
at91_ioremap_ramc(0, AT91RM9200_BASE_MC, 256);
+ at91_pm_set_standby(at91rm9200_standby);
}
static void __init at91rm9200_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index ae10d14de700..d6a1fa85371d 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -28,6 +28,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -342,6 +343,7 @@ static void __init at91sam9260_ioremap_registers(void)
at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC);
at91_ioremap_matrix(AT91SAM9260_BASE_MATRIX);
+ at91_pm_set_standby(at91sam9_sdram_standby);
}
static void __init at91sam9260_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index e761e74ac64b..23ba1d8a1531 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -27,6 +27,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -284,6 +285,7 @@ static void __init at91sam9261_ioremap_registers(void)
at91sam926x_ioremap_pit(AT91SAM9261_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9261_BASE_SMC);
at91_ioremap_matrix(AT91SAM9261_BASE_MATRIX);
+ at91_pm_set_standby(at91sam9_sdram_standby);
}
static void __init at91sam9261_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 629ea5fc95cf..b2a34740146a 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -465,7 +465,7 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
static struct resource lcdc_resources[] = {
[0] = {
@@ -498,7 +498,7 @@ static struct platform_device at91_lcdc_device = {
.num_resources = ARRAY_SIZE(lcdc_resources),
};
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
{
if (!data) {
return;
@@ -559,7 +559,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
platform_device_register(&at91_lcdc_device);
}
#else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
#endif
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index e6fed6253a7f..7eccb0fc57bc 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -26,6 +26,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -321,6 +322,7 @@ static void __init at91sam9263_ioremap_registers(void)
at91sam9_ioremap_smc(0, AT91SAM9263_BASE_SMC0);
at91sam9_ioremap_smc(1, AT91SAM9263_BASE_SMC1);
at91_ioremap_matrix(AT91SAM9263_BASE_MATRIX);
+ at91_pm_set_standby(at91sam9_sdram_standby);
}
static void __init at91sam9263_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index 858c8aac2daf..4aeadddbc181 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -832,7 +832,7 @@ void __init at91_add_device_can(struct at91_can_data *data) {}
#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
static struct resource lcdc_resources[] = {
[0] = {
@@ -859,7 +859,7 @@ static struct platform_device at91_lcdc_device = {
.num_resources = ARRAY_SIZE(lcdc_resources),
};
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
{
if (!data)
return;
@@ -891,7 +891,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
platform_device_register(&at91_lcdc_device);
}
#else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
#endif
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 9f7a97c84c12..9405aa08b104 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -26,6 +26,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -370,6 +371,7 @@ static void __init at91sam9g45_ioremap_registers(void)
at91sam926x_ioremap_pit(AT91SAM9G45_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9G45_BASE_SMC);
at91_ioremap_matrix(AT91SAM9G45_BASE_MATRIX);
+ at91_pm_set_standby(at91_ddr_standby);
}
static void __init at91sam9g45_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index acb703e13331..cb36fa872d30 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -965,7 +965,7 @@ void __init at91_add_device_isi(struct isi_platform_data *data,
#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
static struct resource lcdc_resources[] = {
[0] = {
@@ -991,7 +991,7 @@ static struct platform_device at91_lcdc_device = {
.num_resources = ARRAY_SIZE(lcdc_resources),
};
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
{
if (!data)
return;
@@ -1037,7 +1037,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
platform_device_register(&at91_lcdc_device);
}
#else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
#endif
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 301e1728d353..0750ffb7e6b1 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -27,6 +27,7 @@
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
+#include "pm.h"
/* --------------------------------------------------------------------
* Clocks
@@ -287,6 +288,7 @@ static void __init at91sam9rl_ioremap_registers(void)
at91sam926x_ioremap_pit(AT91SAM9RL_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9RL_BASE_SMC);
at91_ioremap_matrix(AT91SAM9RL_BASE_MATRIX);
+ at91_pm_set_standby(at91sam9_sdram_standby);
}
static void __init at91sam9rl_initialize(void)
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 352468f265a9..a698bdab2cce 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -498,7 +498,7 @@ void __init at91_add_device_ac97(struct ac97c_platform_data *data) {}
#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
static struct resource lcdc_resources[] = {
[0] = {
@@ -525,7 +525,7 @@ static struct platform_device at91_lcdc_device = {
.num_resources = ARRAY_SIZE(lcdc_resources),
};
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
{
if (!data) {
return;
@@ -557,7 +557,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
platform_device_register(&at91_lcdc_device);
}
#else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
#endif
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index d3437624ca4e..473546b9408b 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -389,7 +389,7 @@ static struct fb_monspecs at91fb_default_stn_monspecs = {
| ATMEL_LCDC_IFWIDTH_4 \
| ATMEL_LCDC_SCANMOD_SINGLE)
-static void at91_lcdc_stn_power_control(int on)
+static void at91_lcdc_stn_power_control(struct atmel_lcdfb_pdata *pdata, int on)
{
/* backlight */
if (on) { /* power up */
@@ -401,7 +401,7 @@ static void at91_lcdc_stn_power_control(int on)
}
}
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
.default_bpp = 1,
.default_dmacon = ATMEL_LCDC_DMAEN,
.default_lcdcon2 = AT91SAM9261_DEFAULT_STN_LCDCON2,
@@ -445,7 +445,7 @@ static struct fb_monspecs at91fb_default_tft_monspecs = {
| ATMEL_LCDC_DISTYPE_TFT \
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
-static void at91_lcdc_tft_power_control(int on)
+static void at91_lcdc_tft_power_control(struct atmel_lcdfb_pdata *pdata, int on)
{
if (on)
at91_set_gpio_value(AT91_PIN_PA12, 0); /* power up */
@@ -453,7 +453,7 @@ static void at91_lcdc_tft_power_control(int on)
at91_set_gpio_value(AT91_PIN_PA12, 1); /* power down */
}
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
.lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
@@ -465,7 +465,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
#endif
#else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
#endif
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index 3284df05df14..8b4942cbb6d9 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -275,13 +275,13 @@ static struct fb_monspecs at91fb_default_monspecs = {
| ATMEL_LCDC_DISTYPE_TFT \
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
-static void at91_lcdc_power_control(int on)
+static void at91_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
{
at91_set_gpio_value(AT91_PIN_PA30, on);
}
/* Driver data */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
.lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
@@ -292,7 +292,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
};
#else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
#endif
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 2a94896a1375..ef39078c8ce2 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -284,7 +284,7 @@ static struct fb_monspecs at91fb_default_monspecs = {
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
/* Driver data */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
.lcdcon_is_backlight = true,
.default_bpp = 32,
.default_dmacon = ATMEL_LCDC_DMAEN,
@@ -295,7 +295,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
};
#else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
#endif
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index aa265dcf2128..604eecf6cd70 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -170,7 +170,7 @@ static struct fb_monspecs at91fb_default_monspecs = {
| ATMEL_LCDC_DISTYPE_TFT \
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
-static void at91_lcdc_power_control(int on)
+static void at91_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
{
if (on)
at91_set_gpio_value(AT91_PIN_PC1, 0); /* power up */
@@ -179,7 +179,7 @@ static void at91_lcdc_power_control(int on)
}
/* Driver data */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
.lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
@@ -191,7 +191,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
};
#else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
#endif
diff --git a/arch/arm/mach-at91/board.h b/arch/arm/mach-at91/board.h
index 4a234fb2ab3b..6c08b341167d 100644
--- a/arch/arm/mach-at91/board.h
+++ b/arch/arm/mach-at91/board.h
@@ -107,8 +107,8 @@ extern void __init at91_add_device_pwm(u32 mask);
extern void __init at91_add_device_ssc(unsigned id, unsigned pins);
/* LCD Controller */
-struct atmel_lcdfb_info;
-extern void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data);
+struct atmel_lcdfb_pdata;
+extern void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data);
/* AC97 */
extern void __init at91_add_device_ac97(struct ac97c_platform_data *data);
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
deleted file mode 100644
index 4ec6a6d9b9be..000000000000
--- a/arch/arm/mach-at91/cpuidle.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * based on arch/arm/mach-kirkwood/cpuidle.c
- *
- * CPU idle support for AT91 SoC
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
- * The cpu idle uses wait-for-interrupt and RAM self refresh in order
- * to implement two idle states -
- * #1 wait-for-interrupt
- * #2 wait-for-interrupt and RAM self refresh
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/cpuidle.h>
-#include <linux/io.h>
-#include <linux/export.h>
-#include <asm/proc-fns.h>
-#include <asm/cpuidle.h>
-#include <mach/cpu.h>
-
-#include "pm.h"
-
-#define AT91_MAX_STATES 2
-
-/* Actual code that puts the SoC in different idle states */
-static int at91_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- if (cpu_is_at91rm9200())
- at91rm9200_standby();
- else if (cpu_is_at91sam9g45())
- at91sam9g45_standby();
- else if (cpu_is_at91sam9263())
- at91sam9263_standby();
- else
- at91sam9_standby();
-
- return index;
-}
-
-static struct cpuidle_driver at91_idle_driver = {
- .name = "at91_idle",
- .owner = THIS_MODULE,
- .states[0] = ARM_CPUIDLE_WFI_STATE,
- .states[1] = {
- .enter = at91_enter_idle,
- .exit_latency = 10,
- .target_residency = 10000,
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .name = "RAM_SR",
- .desc = "WFI and DDR Self Refresh",
- },
- .state_count = AT91_MAX_STATES,
-};
-
-/* Initialize CPU idle by registering the idle states */
-static int __init at91_init_cpuidle(void)
-{
- return cpuidle_register(&at91_idle_driver, NULL);
-}
-
-device_initcall(at91_init_cpuidle);
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 15afb5d9271f..9986542e8060 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -39,6 +39,8 @@
#include "at91_rstc.h"
#include "at91_shdwc.h"
+static void (*at91_pm_standby)(void);
+
static void __init show_reset_status(void)
{
static char reset[] __initdata = "reset";
@@ -266,14 +268,8 @@ static int at91_pm_enter(suspend_state_t state)
* For ARM 926 based chips, this requirement is weaker
* as at91sam9 can access a RAM in self-refresh mode.
*/
- if (cpu_is_at91rm9200())
- at91rm9200_standby();
- else if (cpu_is_at91sam9g45())
- at91sam9g45_standby();
- else if (cpu_is_at91sam9263())
- at91sam9263_standby();
- else
- at91sam9_standby();
+ if (at91_pm_standby)
+ at91_pm_standby();
break;
case PM_SUSPEND_ON:
@@ -314,6 +310,18 @@ static const struct platform_suspend_ops at91_pm_ops = {
.end = at91_pm_end,
};
+static struct platform_device at91_cpuidle_device = {
+ .name = "cpuidle-at91",
+};
+
+void at91_pm_set_standby(void (*at91_standby)(void))
+{
+ if (at91_standby) {
+ at91_cpuidle_device.dev.platform_data = at91_standby;
+ at91_pm_standby = at91_standby;
+ }
+}
+
static int __init at91_pm_init(void)
{
#ifdef CONFIG_AT91_SLOW_CLOCK
@@ -325,6 +333,9 @@ static int __init at91_pm_init(void)
/* AT91RM9200 SDRAM low-power mode cannot be used with self-refresh. */
if (cpu_is_at91rm9200())
at91_ramc_write(0, AT91RM9200_SDRAMC_LPR, 0);
+
+ if (at91_cpuidle_device.dev.platform_data)
+ platform_device_register(&at91_cpuidle_device);
suspend_set_ops(&at91_pm_ops);
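The net effect of the pm.c change is that the SoC-specific standby routine is injected once, at init time, and consumed from two places: the suspend path calls it directly, and the new cpuidle-at91 platform device carries it as platform data. A stripped-down user-space analog of that registration pattern (all names here are stand-ins):

#include <stdio.h>

/* The patch replaces a chain of cpu_is_*() checks with a single callback
 * that each SoC variant registers at ioremap time. */
static void (*pm_standby)(void);

static void set_standby(void (*standby)(void))
{
        if (standby)
                pm_standby = standby;   /* also handed to the cpuidle driver */
}

static void sam9_sdram_standby(void) { puts("SDRAM self-refresh + WFI"); }

static void pm_enter_standby(void)
{
        if (pm_standby)                 /* nothing registered -> plain WFI only */
                pm_standby();
}

int main(void)
{
        set_standby(sam9_sdram_standby);  /* done from <soc>_ioremap_registers() */
        pm_enter_standby();
        return 0;
}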
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 2f5908f0b8c5..3ed190ce062b 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -11,9 +11,13 @@
#ifndef __ARCH_ARM_MACH_AT91_PM
#define __ARCH_ARM_MACH_AT91_PM
+#include <asm/proc-fns.h>
+
#include <mach/at91_ramc.h>
#include <mach/at91rm9200_sdramc.h>
+extern void at91_pm_set_standby(void (*at91_standby)(void));
+
/*
* The AT91RM9200 goes into self-refresh mode with this command, and will
* terminate self-refresh automatically on the next SDRAM access.
@@ -45,16 +49,18 @@ static inline void at91rm9200_standby(void)
/* We manage both DDRAM/SDRAM controllers, so we need more than one value to
* remember.
*/
-static inline void at91sam9g45_standby(void)
+static inline void at91_ddr_standby(void)
{
/* Those two values allow us to delay self-refresh activation
* to the maximum. */
- u32 lpr0, lpr1;
- u32 saved_lpr0, saved_lpr1;
+ u32 lpr0, lpr1 = 0;
+ u32 saved_lpr0, saved_lpr1 = 0;
- saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
- lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
- lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+ if (at91_ramc_base[1]) {
+ saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
+ lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
+ lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+ }
saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
@@ -62,25 +68,29 @@ static inline void at91sam9g45_standby(void)
/* self-refresh mode now */
at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
- at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
cpu_do_idle();
at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
- at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
}
/* We manage both DDRAM/SDRAM controllers, so we need more than one value to
* remember.
*/
-static inline void at91sam9263_standby(void)
+static inline void at91sam9_sdram_standby(void)
{
- u32 lpr0, lpr1;
- u32 saved_lpr0, saved_lpr1;
+ u32 lpr0, lpr1 = 0;
+ u32 saved_lpr0, saved_lpr1 = 0;
- saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
- lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
- lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+ if (at91_ramc_base[1]) {
+ saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
+ lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
+ lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+ }
saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
@@ -88,27 +98,14 @@ static inline void at91sam9263_standby(void)
/* self-refresh mode now */
at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
- at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
cpu_do_idle();
at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
- at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
-}
-
-static inline void at91sam9_standby(void)
-{
- u32 saved_lpr, lpr;
-
- saved_lpr = at91_ramc_read(0, AT91_SDRAMC_LPR);
-
- lpr = saved_lpr & ~AT91_SDRAMC_LPCB;
- at91_ramc_write(0, AT91_SDRAMC_LPR, lpr |
- AT91_SDRAMC_LPCB_SELF_REFRESH);
-
- cpu_do_idle();
-
- at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr);
+ if (at91_ramc_base[1])
+ at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
#endif
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index b17fbcf4d9e8..094b3459c288 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -23,6 +23,7 @@
#include "at91_shdwc.h"
#include "soc.h"
#include "generic.h"
+#include "pm.h"
struct at91_init_soc __initdata at91_boot_soc;
@@ -376,15 +377,16 @@ static void at91_dt_rstc(void)
}
static struct of_device_id ramc_ids[] = {
- { .compatible = "atmel,at91rm9200-sdramc" },
- { .compatible = "atmel,at91sam9260-sdramc" },
- { .compatible = "atmel,at91sam9g45-ddramc" },
+ { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
+ { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
+ { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
{ /*sentinel*/ }
};
static void at91_dt_ramc(void)
{
struct device_node *np;
+ const struct of_device_id *of_id;
np = of_find_matching_node(NULL, ramc_ids);
if (!np)
@@ -396,6 +398,12 @@ static void at91_dt_ramc(void)
/* the controller may have 2 banks */
at91_ramc_base[1] = of_iomap(np, 1);
+ of_id = of_match_node(ramc_ids, np);
+ if (!of_id)
+ pr_warn("AT91: ramc no standby function available\n");
+ else
+ at91_pm_set_standby(of_id->data);
+
of_node_put(np);
}
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index e026b19b23ea..a075b3e0c5c7 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -40,7 +40,6 @@ config ARCH_DAVINCI_DA850
bool "DA850/OMAP-L138/AM18x based system"
select ARCH_DAVINCI_DA8XX
select ARCH_HAS_CPUFREQ
- select CPU_FREQ_TABLE
select CP_INTC
config ARCH_DAVINCI_DA8XX
diff --git a/arch/arm/mach-davinci/sram.c b/arch/arm/mach-davinci/sram.c
index f18928b073f5..8540dddf1fbd 100644
--- a/arch/arm/mach-davinci/sram.c
+++ b/arch/arm/mach-davinci/sram.c
@@ -25,7 +25,6 @@ struct gen_pool *sram_get_gen_pool(void)
void *sram_alloc(size_t len, dma_addr_t *dma)
{
- unsigned long vaddr;
dma_addr_t dma_base = davinci_soc_info.sram_dma;
if (dma)
@@ -33,13 +32,7 @@ void *sram_alloc(size_t len, dma_addr_t *dma)
if (!sram_pool || (dma && !dma_base))
return NULL;
- vaddr = gen_pool_alloc(sram_pool, len);
- if (!vaddr)
- return NULL;
-
- if (dma)
- *dma = gen_pool_virt_to_phys(sram_pool, vaddr);
- return (void *)vaddr;
+ return gen_pool_dma_alloc(sram_pool, len, dma);
}
EXPORT_SYMBOL(sram_alloc);
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index a4e7ba828810..61d2906ccefb 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -28,6 +28,7 @@
#include <linux/of_address.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/platform_device.h>
#include <asm/proc-fns.h>
#include <asm/exception.h>
@@ -292,6 +293,16 @@ void exynos5_restart(enum reboot_mode mode, const char *cmd)
__raw_writel(val, addr);
}
+static struct platform_device exynos_cpuidle = {
+ .name = "exynos_cpuidle",
+ .id = -1,
+};
+
+void __init exynos_cpuidle_init(void)
+{
+ platform_device_register(&exynos_cpuidle);
+}
+
void __init exynos_init_late(void)
{
if (of_machine_is_compatible("samsung,exynos5440"))
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index f0fa2050d08d..ff9b6a9419b0 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -21,6 +21,7 @@ struct map_desc;
void exynos_init_io(void);
void exynos4_restart(enum reboot_mode mode, const char *cmd);
void exynos5_restart(enum reboot_mode mode, const char *cmd);
+void exynos_cpuidle_init(void);
void exynos_init_late(void);
void exynos_firmware_init(void);
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index ac139226d63c..ddbfe8709fe7 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/export.h>
#include <linux/time.h>
+#include <linux/platform_device.h>
#include <asm/proc-fns.h>
#include <asm/smp_scu.h>
@@ -192,7 +193,7 @@ static void __init exynos5_core_down_clk(void)
__raw_writel(tmp, EXYNOS5_PWR_CTRL2);
}
-static int __init exynos4_init_cpuidle(void)
+static int exynos_cpuidle_probe(struct platform_device *pdev)
{
int cpu_id, ret;
struct cpuidle_device *device;
@@ -205,7 +206,7 @@ static int __init exynos4_init_cpuidle(void)
ret = cpuidle_register_driver(&exynos4_idle_driver);
if (ret) {
- printk(KERN_ERR "CPUidle failed to register driver\n");
+ dev_err(&pdev->dev, "failed to register cpuidle driver\n");
return ret;
}
@@ -219,11 +220,20 @@ static int __init exynos4_init_cpuidle(void)
ret = cpuidle_register_device(device);
if (ret) {
- printk(KERN_ERR "CPUidle register device failed\n");
+ dev_err(&pdev->dev, "failed to register cpuidle device\n");
return ret;
}
}
return 0;
}
-device_initcall(exynos4_init_cpuidle);
+
+static struct platform_driver exynos_cpuidle_driver = {
+ .probe = exynos_cpuidle_probe,
+ .driver = {
+ .name = "exynos_cpuidle",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(exynos_cpuidle_driver);
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index 4b8f6e2ca163..4603e6bd424b 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -21,6 +21,8 @@
static void __init exynos4_dt_machine_init(void)
{
+ exynos_cpuidle_init();
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 7976ab333192..1fe075a70c1e 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -43,6 +43,8 @@ static void __init exynos5_dt_machine_init(void)
}
}
+ exynos_cpuidle_init();
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index 1fd2cf097e30..eb1fa5c84723 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -692,14 +692,14 @@ static void netwinder_led_set(struct led_classdev *cdev,
unsigned long flags;
u32 reg;
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
reg = nw_gpio_read();
if (b != LED_OFF)
reg &= ~led->mask;
else
reg |= led->mask;
nw_gpio_modify_op(led->mask, reg);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
static enum led_brightness netwinder_led_get(struct led_classdev *cdev)
@@ -709,9 +709,9 @@ static enum led_brightness netwinder_led_get(struct led_classdev *cdev)
unsigned long flags;
u32 reg;
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
reg = nw_gpio_read();
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
return (reg & led->mask) ? LED_OFF : LED_FULL;
}
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 6e1d72326454..0aded64a9ebc 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -4,11 +4,12 @@ config ARCH_HIGHBANK
select ARCH_HAS_CPUFREQ
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_HAS_OPP
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_AMBA
select ARM_ERRATA_764369 if SMP
select ARM_ERRATA_775420
- select ARM_ERRATA_798181
+ select ARM_ERRATA_798181 if SMP
select ARM_GIC
select ARM_PSCI
select ARM_TIMER_SP804
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 0f9f24116daa..d0cfb225ec9a 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -22,7 +22,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/phy.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
@@ -176,7 +176,7 @@ static void __init imx6q_opp_check_1p2ghz(struct device *cpu_dev)
val = readl_relaxed(base + OCOTP_CFG3);
val >>= OCOTP_CFG3_SPEED_SHIFT;
if ((val & 0x3) != OCOTP_CFG3_SPEED_1P2GHZ)
- if (opp_disable(cpu_dev, 1200000000))
+ if (dev_pm_opp_disable(cpu_dev, 1200000000))
pr_warn("failed to disable 1.2 GHz OPP\n");
put_node:
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 30e1ebe3a891..c342dc4e8a45 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -1,9 +1,5 @@
if ARCH_IXP4XX
-config ARCH_SUPPORTS_BIG_ENDIAN
- bool
- default y
-
menu "Intel IXP4xx Implementation Options"
comment "IXP4xx Platforms"
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 702232996c8c..cfadd974f5ce 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -191,7 +191,6 @@ static struct pxa3xx_nand_platform_data dkb_nand_info = {
#define SCLK_SOURCE_SELECT(x) (x << 30) /* 0x0 ~ 0x3 */
/* link config */
#define CFG_DUMBMODE(mode) (mode << 28) /* 0x0 ~ 0x6*/
-#define CFG_GRA_SWAPRB(x) (x << 0) /* 1: rbswap enabled */
static struct mmp_mach_path_config dkb_disp_config[] = {
[0] = {
.name = "mmp-parallel",
@@ -199,8 +198,7 @@ static struct mmp_mach_path_config dkb_disp_config[] = {
.output_type = PATH_OUT_PARALLEL,
.path_config = CFG_IOPADMODE(0x1)
| SCLK_SOURCE_SELECT(0x1),
- .link_config = CFG_DUMBMODE(0x2)
- | CFG_GRA_SWAPRB(0x1),
+ .link_config = CFG_DUMBMODE(0x2),
},
};
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 9eb63d724602..5e269d7263ce 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -1,5 +1,6 @@
config ARCH_MVEBU
bool "Marvell SOCs with Device Tree support" if ARCH_MULTI_V7
+ select ARCH_SUPPORTS_BIG_ENDIAN
select CLKSRC_MMIO
select COMMON_CLK
select GENERIC_CLOCKEVENTS
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 5476669ba905..ee7598fe75db 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -20,6 +20,8 @@
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
+#include <asm/assembler.h>
+
.text
/*
* r0: Coherency fabric base register address
@@ -29,6 +31,7 @@ ENTRY(ll_set_cpu_coherent)
/* Create bit by cpu index */
mov r3, #(1 << 24)
lsl r1, r3, r1
+ARM_BE8(rev r1, r1)
/* Add CPU to SMP group - Atomic */
add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
index 8a1b0c96e9ec..3dd80df428f7 100644
--- a/arch/arm/mach-mvebu/headsmp.S
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -21,12 +21,16 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
+
/*
* Armada XP specific entry point for secondary CPUs.
* We add the CPU to the coherency fabric and then jump to secondary
* startup
*/
ENTRY(armada_xp_secondary_startup)
+ ARM_BE8(setend be ) @ go BE8 if entered LE
+
/* Get coherency fabric base physical address */
adr r0, 1f
ldr r1, [r0]
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index fc20a61f6b2a..ac82512b9c8c 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
board_nand_data.nr_parts = nr_parts;
board_nand_data.devsize = nand_type;
- board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_DEFAULT;
+ board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW;
gpmc_nand_init(&board_nand_data, gpmc_t);
}
#endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 09f6149b59a9..d6ed819ff15c 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -25,7 +25,7 @@
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/cpu.h>
#include <linux/mtd/mtd.h>
@@ -516,11 +516,11 @@ static int __init beagle_opp_init(void)
return -ENODEV;
}
/* Enable MPU 1GHz and lower opps */
- r = opp_enable(mpu_dev, 800000000);
+ r = dev_pm_opp_enable(mpu_dev, 800000000);
/* TODO: MPU 1GHz needs SR and ABB */
/* Enable IVA 800MHz and lower opps */
- r |= opp_enable(iva_dev, 660000000);
+ r |= dev_pm_opp_enable(iva_dev, 660000000);
/* TODO: DSP 800MHz needs SR and ABB */
if (r) {
pr_err("%s: failed to enable higher opp %d\n",
@@ -529,8 +529,8 @@ static int __init beagle_opp_init(void)
* Cleanup - disable the higher freqs - we dont care
* about the results
*/
- opp_disable(mpu_dev, 800000000);
- opp_disable(iva_dev, 660000000);
+ dev_pm_opp_disable(mpu_dev, 800000000);
+ dev_pm_opp_disable(iva_dev, 660000000);
}
}
return 0;
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 51525faa0aec..81de56251955 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1341,14 +1341,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
#ifdef CONFIG_MTD_NAND
-static const char * const nand_ecc_opts[] = {
- [OMAP_ECC_HAMMING_CODE_DEFAULT] = "sw",
- [OMAP_ECC_HAMMING_CODE_HW] = "hw",
- [OMAP_ECC_HAMMING_CODE_HW_ROMCODE] = "hw-romcode",
- [OMAP_ECC_BCH4_CODE_HW] = "bch4",
- [OMAP_ECC_BCH8_CODE_HW] = "bch8",
-};
-
static const char * const nand_xfer_types[] = {
[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
[NAND_OMAP_POLLED] = "polled",
@@ -1378,13 +1370,41 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
gpmc_nand_data->cs = val;
gpmc_nand_data->of_node = child;
- if (!of_property_read_string(child, "ti,nand-ecc-opt", &s))
- for (val = 0; val < ARRAY_SIZE(nand_ecc_opts); val++)
- if (!strcasecmp(s, nand_ecc_opts[val])) {
- gpmc_nand_data->ecc_opt = val;
- break;
- }
+ /* Detect availability of ELM module */
+ gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
+ if (gpmc_nand_data->elm_of_node == NULL)
+ gpmc_nand_data->elm_of_node =
+ of_parse_phandle(child, "elm_id", 0);
+ if (gpmc_nand_data->elm_of_node == NULL)
+ pr_warn("%s: ti,elm-id property not found\n", __func__);
+
+ /* select ecc-scheme for NAND */
+ if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
+ pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
+ return -ENODEV;
+ }
+ if (!strcmp(s, "ham1") || !strcmp(s, "sw") ||
+ !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
+ gpmc_nand_data->ecc_opt =
+ OMAP_ECC_HAM1_CODE_HW;
+ else if (!strcmp(s, "bch4"))
+ if (gpmc_nand_data->elm_of_node)
+ gpmc_nand_data->ecc_opt =
+ OMAP_ECC_BCH4_CODE_HW;
+ else
+ gpmc_nand_data->ecc_opt =
+ OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
+ else if (!strcmp(s, "bch8"))
+ if (gpmc_nand_data->elm_of_node)
+ gpmc_nand_data->ecc_opt =
+ OMAP_ECC_BCH8_CODE_HW;
+ else
+ gpmc_nand_data->ecc_opt =
+ OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
+ else
+ pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__);
+ /* select data transfer mode for NAND controller */
if (!of_property_read_string(child, "ti,nand-xfer-type", &s))
for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++)
if (!strcasecmp(s, nand_xfer_types[val])) {
diff --git a/arch/arm/mach-omap2/omap-pm.h b/arch/arm/mach-omap2/omap-pm.h
index 67faa7b8fe92..1d777e63e05c 100644
--- a/arch/arm/mach-omap2/omap-pm.h
+++ b/arch/arm/mach-omap2/omap-pm.h
@@ -17,7 +17,7 @@
#include <linux/device.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
/*
* agent_id values for use with omap_pm_set_min_bus_tput():
diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
index 82fd8c72f750..a358a07e18f2 100644
--- a/arch/arm/mach-omap2/opp.c
+++ b/arch/arm/mach-omap2/opp.c
@@ -18,7 +18,7 @@
*/
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/cpu.h>
#include "omap_device.h"
@@ -85,14 +85,14 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
dev = &oh->od->pdev->dev;
}
- r = opp_add(dev, opp_def->freq, opp_def->u_volt);
+ r = dev_pm_opp_add(dev, opp_def->freq, opp_def->u_volt);
if (r) {
dev_err(dev, "%s: add OPP %ld failed for %s [%d] result=%d\n",
__func__, opp_def->freq,
opp_def->hwmod_name, i, r);
} else {
if (!opp_def->default_available)
- r = opp_disable(dev, opp_def->freq);
+ r = dev_pm_opp_disable(dev, opp_def->freq);
if (r)
dev_err(dev, "%s: disable %ld failed for %s [%d] result=%d\n",
__func__, opp_def->freq,
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 360b2daf54dd..e1b41416fbf1 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/err.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/cpu.h>
@@ -131,7 +131,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
{
struct voltagedomain *voltdm;
struct clk *clk;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long freq, bootup_volt;
struct device *dev;
@@ -172,7 +172,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
clk_put(clk);
rcu_read_lock();
- opp = opp_find_freq_ceil(dev, &freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("%s: unable to find boot up OPP for vdd_%s\n",
@@ -180,7 +180,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
goto exit;
}
- bootup_volt = opp_get_voltage(opp);
+ bootup_volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
if (!bootup_volt) {
pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index a8427115ee07..96100dbf5a2e 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -615,14 +615,12 @@ endmenu
config PXA25x
bool
select CPU_XSCALE
- select CPU_FREQ_TABLE if CPU_FREQ
help
Select code specific to PXA21x/25x/26x variants
config PXA27x
bool
select CPU_XSCALE
- select CPU_FREQ_TABLE if CPU_FREQ
help
Select code specific to PXA27x variants
@@ -635,7 +633,6 @@ config CPU_PXA26x
config PXA3xx
bool
select CPU_XSC3
- select CPU_FREQ_TABLE if CPU_FREQ
help
Select code specific to PXA3xx variants
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index f9423493ed36..584439bfa59f 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -310,6 +310,7 @@ static struct platform_pwm_backlight_data cm_x300_backlight_data = {
.max_brightness = 100,
.dft_brightness = 100,
.pwm_period_ns = 10000,
+ .enable_gpio = -1,
};
static struct platform_device cm_x300_backlight_device = {
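This and the long run of board files below all gain ".enable_gpio = -1" because the pwm-backlight driver's platform data grew an optional enable GPIO; GPIO 0 is a valid number, so boards without such a line must pass -1 explicitly or the driver would try to claim GPIO 0. A hedged sketch of the two cases (field values and the GPIO number are made up; the structure layout matches the diff):

#include <linux/pwm_backlight.h>

/* No dedicated backlight-enable line: -1 means "no GPIO". */
static struct platform_pwm_backlight_data bl_without_enable_gpio = {
	.max_brightness	= 100,
	.dft_brightness	= 100,
	.pwm_period_ns	= 10000,
	.enable_gpio	= -1,
};

/*
 * A GPIO gates the backlight: the core driver toggles it, which is what
 * lets the palmtc change further below delete its init/notify/exit hooks.
 */
static struct platform_pwm_backlight_data bl_with_enable_gpio = {
	.max_brightness	= 100,
	.dft_brightness	= 100,
	.pwm_period_ns	= 10000,
	.enable_gpio	= 16,		/* hypothetical GPIO number */
};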
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c
index 2d4a7b4d5d78..3aa264640c9d 100644
--- a/arch/arm/mach-pxa/colibri-pxa270-income.c
+++ b/arch/arm/mach-pxa/colibri-pxa270-income.c
@@ -189,6 +189,7 @@ static struct platform_pwm_backlight_data income_backlight_data = {
.max_brightness = 0x3ff,
.dft_brightness = 0x1ff,
.pwm_period_ns = 1000000,
+ .enable_gpio = -1,
};
static struct platform_device income_backlight = {
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index fe2eb8394dff..ab93441e596e 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -54,6 +54,7 @@ static struct platform_pwm_backlight_data ezx_backlight_data = {
.max_brightness = 1023,
.dft_brightness = 1023,
.pwm_period_ns = 78770,
+ .enable_gpio = -1,
};
static struct platform_device ezx_backlight_device = {
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 133109ec7332..a7c30eb0c8db 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -561,6 +561,7 @@ static struct platform_pwm_backlight_data backlight_data = {
.max_brightness = 200,
.dft_brightness = 100,
.pwm_period_ns = 30923,
+ .enable_gpio = -1,
};
static struct platform_device backlight = {
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 1255ee00f3d1..9f6ec167902a 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -269,6 +269,7 @@ static struct platform_pwm_backlight_data lpd270_backlight_data = {
.max_brightness = 1,
.dft_brightness = 1,
.pwm_period_ns = 78770,
+ .enable_gpio = -1,
};
static struct platform_device lpd270_backlight_device = {
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index f44532fc648b..fab30d666cc7 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -378,6 +378,7 @@ static struct platform_pwm_backlight_data backlight_data = {
.max_brightness = 272,
.dft_brightness = 100,
.pwm_period_ns = 30923,
+ .enable_gpio = -1,
.init = magician_backlight_init,
.notify = magician_backlight_notify,
.exit = magician_backlight_exit,
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index dd70343c8708..08ccc0718f31 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -338,6 +338,7 @@ static struct platform_pwm_backlight_data mainstone_backlight_data = {
.max_brightness = 1023,
.dft_brightness = 1023,
.pwm_period_ns = 78770,
+ .enable_gpio = -1,
};
static struct platform_device mainstone_backlight_device = {
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index acc9d3cc0762..f70583fee59f 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -186,6 +186,7 @@ static struct platform_pwm_backlight_data mioa701_backlight_data = {
.max_brightness = 100,
.dft_brightness = 50,
.pwm_period_ns = 4000 * 1024, /* Fl = 250kHz */
+ .enable_gpio = -1,
};
/*
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 17d4c53017ca..e54a296fb81f 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -322,6 +322,7 @@ static struct platform_pwm_backlight_data palm27x_backlight_data = {
.max_brightness = 0xfe,
.dft_brightness = 0x7e,
.pwm_period_ns = 3500 * 1024,
+ .enable_gpio = -1,
.init = palm27x_backlight_init,
.notify = palm27x_backlight_notify,
.exit = palm27x_backlight_exit,
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 100b176f7e88..7691c974ca4b 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -166,45 +166,12 @@ static inline void palmtc_keys_init(void) {}
* Backlight
******************************************************************************/
#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
-static int palmtc_backlight_init(struct device *dev)
-{
- int ret;
-
- ret = gpio_request(GPIO_NR_PALMTC_BL_POWER, "BL POWER");
- if (ret)
- goto err;
- ret = gpio_direction_output(GPIO_NR_PALMTC_BL_POWER, 1);
- if (ret)
- goto err2;
-
- return 0;
-
-err2:
- gpio_free(GPIO_NR_PALMTC_BL_POWER);
-err:
- return ret;
-}
-
-static int palmtc_backlight_notify(struct device *dev, int brightness)
-{
- /* backlight is on when GPIO16 AF0 is high */
- gpio_set_value(GPIO_NR_PALMTC_BL_POWER, brightness);
- return brightness;
-}
-
-static void palmtc_backlight_exit(struct device *dev)
-{
- gpio_free(GPIO_NR_PALMTC_BL_POWER);
-}
-
static struct platform_pwm_backlight_data palmtc_backlight_data = {
.pwm_id = 1,
.max_brightness = PALMTC_MAX_INTENSITY,
.dft_brightness = PALMTC_MAX_INTENSITY,
.pwm_period_ns = PALMTC_PERIOD_NS,
- .init = palmtc_backlight_init,
- .notify = palmtc_backlight_notify,
- .exit = palmtc_backlight_exit,
+ .enable_gpio = GPIO_NR_PALMTC_BL_POWER,
};
static struct platform_device palmtc_backlight = {
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index 0742721ced2d..956fd24ee6fd 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -165,6 +165,7 @@ static struct platform_pwm_backlight_data palmte2_backlight_data = {
.max_brightness = PALMTE2_MAX_INTENSITY,
.dft_brightness = PALMTE2_MAX_INTENSITY,
.pwm_period_ns = PALMTE2_PERIOD_NS,
+ .enable_gpio = -1,
.init = palmte2_backlight_init,
.notify = palmte2_backlight_notify,
.exit = palmte2_backlight_exit,
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 3133ba82c508..9a4e470f162b 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -153,6 +153,7 @@ static struct platform_pwm_backlight_data pcm990_backlight_data = {
.max_brightness = 1023,
.dft_brightness = 1023,
.pwm_period_ns = 78770,
+ .enable_gpio = -1,
};
static struct platform_device pcm990_backlight_device = {
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 969b0ba7fa70..8386dc30b3e4 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -539,6 +539,7 @@ static struct platform_pwm_backlight_data raumfeld_pwm_backlight_data = {
.dft_brightness = 100,
/* 10000 ns = 10 ms ^= 100 kHz */
.pwm_period_ns = 10000,
+ .enable_gpio = -1,
};
static struct platform_device raumfeld_pwm_backlight_device = {
diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c
index 4680efe55345..a71da84e784b 100644
--- a/arch/arm/mach-pxa/tavorevb.c
+++ b/arch/arm/mach-pxa/tavorevb.c
@@ -175,6 +175,7 @@ static struct platform_pwm_backlight_data tavorevb_backlight_data[] = {
.max_brightness = 100,
.dft_brightness = 100,
.pwm_period_ns = 100000,
+ .enable_gpio = -1,
},
[1] = {
/* secondary backlight */
@@ -182,6 +183,7 @@ static struct platform_pwm_backlight_data tavorevb_backlight_data[] = {
.max_brightness = 100,
.dft_brightness = 100,
.pwm_period_ns = 100000,
+ .enable_gpio = -1,
},
};
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 9c363c081d3f..29905b127ad9 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -401,6 +401,7 @@ static struct platform_pwm_backlight_data viper_backlight_data = {
.max_brightness = 100,
.dft_brightness = 100,
.pwm_period_ns = 1000000,
+ .enable_gpio = -1,
.init = viper_backlight_init,
.notify = viper_backlight_notify,
.exit = viper_backlight_exit,
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index 2513d8f4931f..e1a121b36cfa 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -206,6 +206,7 @@ static struct platform_pwm_backlight_data z2_backlight_data[] = {
.max_brightness = 1023,
.dft_brightness = 0,
.pwm_period_ns = 1260320,
+ .enable_gpio = -1,
},
[1] = {
/* LCD Backlight */
@@ -213,6 +214,7 @@ static struct platform_pwm_backlight_data z2_backlight_data[] = {
.max_brightness = 1023,
.dft_brightness = 512,
.pwm_period_ns = 1260320,
+ .enable_gpio = -1,
},
};
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 36cf7cf95ec1..77daea478e88 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -125,6 +125,7 @@ static struct platform_pwm_backlight_data zylonite_backlight_data = {
.max_brightness = 100,
.dft_brightness = 100,
.pwm_period_ns = 10000,
+ .enable_gpio = -1,
};
static struct platform_device zylonite_backlight_device = {
diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c
index 74dd47988b41..952b6a040d1f 100644
--- a/arch/arm/mach-s3c24xx/mach-h1940.c
+++ b/arch/arm/mach-s3c24xx/mach-h1940.c
@@ -504,6 +504,7 @@ static struct platform_pwm_backlight_data backlight_data = {
.dft_brightness = 50,
/* tcnt = 0x31 */
.pwm_period_ns = 36296,
+ .enable_gpio = -1,
.init = h1940_backlight_init,
.notify = h1940_backlight_notify,
.exit = h1940_backlight_exit,
diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
index 206b1f7546d1..034b7fe45c49 100644
--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
+++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
@@ -522,6 +522,7 @@ static struct platform_pwm_backlight_data rx1950_backlight_data = {
.max_brightness = 24,
.dft_brightness = 4,
.pwm_period_ns = 48000,
+ .enable_gpio = -1,
.init = rx1950_backlight_init,
.notify = rx1950_backlight_notify,
.exit = rx1950_backlight_exit,
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index aca7d16e195d..758e31b26550 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -114,6 +114,7 @@ static struct platform_pwm_backlight_data crag6410_backlight_data = {
.max_brightness = 1000,
.dft_brightness = 600,
.pwm_period_ns = 100000, /* about 1kHz */
+ .enable_gpio = -1,
};
static struct platform_device crag6410_backlight_device = {
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index e8064044ef79..614a03a92cf7 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -114,6 +114,7 @@ static struct platform_pwm_backlight_data hmt_backlight_data = {
.max_brightness = 100 * 256,
.dft_brightness = 40 * 256,
.pwm_period_ns = 1000000000 / (100 * 256 * 20),
+ .enable_gpio = -1,
.init = hmt_bl_init,
.notify = hmt_bl_notify,
.exit = hmt_bl_exit,
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c
index 0f47237be3b2..a6b338fd0470 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq.c
@@ -151,6 +151,7 @@ static struct platform_pwm_backlight_data smartq_backlight_data = {
.max_brightness = 1000,
.dft_brightness = 600,
.pwm_period_ns = 1000000000 / (1000 * 20),
+ .enable_gpio = -1,
.init = smartq_bl_init,
};
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 2a7b32ca5c96..d5ea938cc9a1 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -625,6 +625,7 @@ static struct samsung_bl_gpio_info smdk6410_bl_gpio_info = {
static struct platform_pwm_backlight_data smdk6410_bl_data = {
.pwm_id = 1,
+ .enable_gpio = -1,
};
static struct s3c_hsotg_plat smdk6410_hsotg_pdata;
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
index 0b00304c1e91..9efdcc03df3b 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
@@ -223,6 +223,7 @@ static struct samsung_bl_gpio_info smdk6440_bl_gpio_info = {
static struct platform_pwm_backlight_data smdk6440_bl_data = {
.pwm_id = 1,
+ .enable_gpio = -1,
};
static void __init smdk6440_map_io(void)
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
index 5949296e88fd..c3cacc067efe 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
@@ -242,6 +242,7 @@ static struct samsung_bl_gpio_info smdk6450_bl_gpio_info = {
static struct platform_pwm_backlight_data smdk6450_bl_data = {
.pwm_id = 1,
+ .enable_gpio = -1,
};
static void __init smdk6450_map_io(void)
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 7c57a221785e..9e256b9fc930 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -216,6 +216,7 @@ static struct samsung_bl_gpio_info smdkc100_bl_gpio_info = {
static struct platform_pwm_backlight_data smdkc100_bl_data = {
.pwm_id = 0,
+ .enable_gpio = -1,
};
static void __init smdkc100_map_io(void)
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index 6d72bb992e38..f52cc15c2d85 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -279,6 +279,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
static struct platform_pwm_backlight_data smdkv210_bl_data = {
.pwm_id = 3,
.pwm_period_ns = 1000,
+ .enable_gpio = -1,
};
static void __init smdkv210_map_io(void)
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index e838ba27e443..c9808c684152 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -512,6 +512,9 @@ static void __init assabet_map_io(void)
* Its called GPCLKR0 in my SA1110 manual.
*/
Ser1SDCR0 |= SDCR0_SUS;
+ MSC1 = (MSC1 & ~0xffff) |
+ MSC_NonBrst | MSC_32BitStMem |
+ MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0);
if (!machine_has_neponset())
sa1100_register_uart_fns(&assabet_port_fns);
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index f25b6119e028..d4ea142c4edd 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -42,74 +42,31 @@ EXPORT_SYMBOL(reset_status);
/*
* This table is setup for a 3.6864MHz Crystal.
*/
-static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
- 590, /* 59.0 MHz */
- 737, /* 73.7 MHz */
- 885, /* 88.5 MHz */
- 1032, /* 103.2 MHz */
- 1180, /* 118.0 MHz */
- 1327, /* 132.7 MHz */
- 1475, /* 147.5 MHz */
- 1622, /* 162.2 MHz */
- 1769, /* 176.9 MHz */
- 1917, /* 191.7 MHz */
- 2064, /* 206.4 MHz */
- 2212, /* 221.2 MHz */
- 2359, /* 235.9 MHz */
- 2507, /* 250.7 MHz */
- 2654, /* 265.4 MHz */
- 2802 /* 280.2 MHz */
+struct cpufreq_frequency_table sa11x0_freq_table[NR_FREQS+1] = {
+ { .frequency = 59000, /* 59.0 MHz */},
+ { .frequency = 73700, /* 73.7 MHz */},
+ { .frequency = 88500, /* 88.5 MHz */},
+ { .frequency = 103200, /* 103.2 MHz */},
+ { .frequency = 118000, /* 118.0 MHz */},
+ { .frequency = 132700, /* 132.7 MHz */},
+ { .frequency = 147500, /* 147.5 MHz */},
+ { .frequency = 162200, /* 162.2 MHz */},
+ { .frequency = 176900, /* 176.9 MHz */},
+ { .frequency = 191700, /* 191.7 MHz */},
+ { .frequency = 206400, /* 206.4 MHz */},
+ { .frequency = 221200, /* 221.2 MHz */},
+ { .frequency = 235900, /* 235.9 MHz */},
+ { .frequency = 250700, /* 250.7 MHz */},
+ { .frequency = 265400, /* 265.4 MHz */},
+ { .frequency = 280200, /* 280.2 MHz */},
+ { .frequency = CPUFREQ_TABLE_END, },
};
-/* rounds up(!) */
-unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
-{
- int i;
-
- khz /= 100;
-
- for (i = 0; i < NR_FREQS; i++)
- if (cclk_frequency_100khz[i] >= khz)
- break;
-
- return i;
-}
-
-unsigned int sa11x0_ppcr_to_freq(unsigned int idx)
-{
- unsigned int freq = 0;
- if (idx < NR_FREQS)
- freq = cclk_frequency_100khz[idx] * 100;
- return freq;
-}
-
-
-/* make sure that only the "userspace" governor is run -- anything else wouldn't make sense on
- * this platform, anyway.
- */
-int sa11x0_verify_speed(struct cpufreq_policy *policy)
-{
- unsigned int tmp;
- if (policy->cpu)
- return -EINVAL;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
- /* make sure that at least one frequency is within the policy */
- tmp = cclk_frequency_100khz[sa11x0_freq_to_ppcr(policy->min)] * 100;
- if (tmp > policy->max)
- policy->max = tmp;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
- return 0;
-}
-
unsigned int sa11x0_getspeed(unsigned int cpu)
{
if (cpu)
return 0;
- return cclk_frequency_100khz[PPCR & 0xf] * 100;
+ return sa11x0_freq_table[PPCR & 0xf].frequency;
}
/*
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index 9a33695c9492..0d92e119b36b 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -3,6 +3,7 @@
*
* Author: Nicolas Pitre
*/
+#include <linux/cpufreq.h>
#include <linux/reboot.h>
extern void sa1100_timer_init(void);
@@ -19,12 +20,8 @@ extern void sa11x0_init_late(void);
extern void sa1110_mb_enable(void);
extern void sa1110_mb_disable(void);
-struct cpufreq_policy;
-
-extern unsigned int sa11x0_freq_to_ppcr(unsigned int khz);
-extern int sa11x0_verify_speed(struct cpufreq_policy *policy);
+extern struct cpufreq_frequency_table sa11x0_freq_table[];
extern unsigned int sa11x0_getspeed(unsigned int cpu);
-extern unsigned int sa11x0_ppcr_to_freq(unsigned int idx);
struct flash_platform_data;
struct resource;
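Exporting sa11x0_freq_table as a standard cpufreq_frequency_table (terminated by CPUFREQ_TABLE_END, frequencies in kHz) lets the generic cpufreq core and the sa11x0 cpufreq drivers walk the table directly, which is why sa11x0_freq_to_ppcr(), sa11x0_ppcr_to_freq() and sa11x0_verify_speed() could be deleted. A sketch of how such a table is typically consumed, assuming only what the declaration above guarantees:

#include <linux/cpufreq.h>

extern struct cpufreq_frequency_table sa11x0_freq_table[];

/* Illustrative: report the highest usable frequency in the table, in kHz. */
static unsigned int example_max_freq_khz(void)
{
	struct cpufreq_frequency_table *pos;
	unsigned int max = 0;

	for (pos = sa11x0_freq_table; pos->frequency != CPUFREQ_TABLE_END; pos++) {
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)
			continue;		/* tables may mark holes this way */
		if (pos->frequency > max)
			max = pos->frequency;
	}
	return max;
}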
diff --git a/arch/arm/mach-sa1100/include/mach/gpio.h b/arch/arm/mach-sa1100/include/mach/gpio.h
deleted file mode 100644
index 6a9eecf3137e..000000000000
--- a/arch/arm/mach-sa1100/include/mach/gpio.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/gpio.h
- *
- * SA1100 GPIO wrappers for arch-neutral GPIO calls
- *
- * Written by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARCH_SA1100_GPIO_H
-#define __ASM_ARCH_SA1100_GPIO_H
-
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm-generic/gpio.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-static inline int gpio_get_value(unsigned gpio)
-{
- if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
- return GPLR & GPIO_GPIO(gpio);
- else
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
- if (value)
- GPSR = GPIO_GPIO(gpio);
- else
- GPCR = GPIO_GPIO(gpio);
- else
- __gpio_set_value(gpio, value);
-}
-
-#define gpio_cansleep __gpio_cansleep
-
-#endif
diff --git a/arch/arm/mach-sa1100/include/mach/h3xxx.h b/arch/arm/mach-sa1100/include/mach/h3xxx.h
index 7d9df16f04a2..c810620db53d 100644
--- a/arch/arm/mach-sa1100/include/mach/h3xxx.h
+++ b/arch/arm/mach-sa1100/include/mach/h3xxx.h
@@ -13,6 +13,8 @@
#ifndef _INCLUDE_H3XXX_H_
#define _INCLUDE_H3XXX_H_
+#include "hardware.h" /* Gives GPIO_MAX */
+
/* Physical memory regions corresponding to chip selects */
#define H3600_EGPIO_PHYS (SA1100_CS5_PHYS + 0x01000000)
#define H3600_BANK_2_PHYS SA1100_CS2_PHYS
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index bcbc94540e45..41e476e571d7 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -19,6 +19,7 @@
#include <mach/hardware.h>
#include <asm/setup.h>
+#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 8bc8e4c58847..958e3cbf0ac2 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -423,6 +423,7 @@ static struct platform_pwm_backlight_data pwm_backlight_data = {
.max_brightness = 255,
.dft_brightness = 255,
.pwm_period_ns = 33333, /* 30kHz */
+ .enable_gpio = -1,
};
static struct platform_device pwm_backlight_device = {
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index 939ddb8f482b..d71654bc8d54 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -30,7 +30,7 @@ config SOC_STIH415
default y
help
This enables support for STMicroelectronics Digital Consumer
- Electronics family StiH415 parts, primarily targetted at set-top-box
+ Electronics family StiH415 parts, primarily targeted at set-top-box
and other digital audio/video applications using Flattned Device
Trees.
@@ -39,7 +39,7 @@ config SOC_STIH416
default y
help
This enables support for STMicroelectronics Digital Consumer
- Electronics family StiH416 parts, primarily targetted at set-top-box
+ Electronics family StiH416 parts, primarily targeted at set-top-box
and other digital audio/video applications using Flattened Device
Trees.
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 0bf04a0bca9d..09e740f58b27 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -51,7 +51,7 @@ config ARCH_TEGRA_3x_SOC
config ARCH_TEGRA_114_SOC
bool "Enable support for Tegra114 family"
- select ARM_ERRATA_798181
+ select ARM_ERRATA_798181 if SMP
select ARM_L1_CACHE_SHIFT_6
select HAVE_ARM_ARCH_TIMER
select PINCTRL_TEGRA114
diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c
index d7aa52ea6cfc..bc471973cf04 100644
--- a/arch/arm/mach-tegra/apbio.c
+++ b/arch/arm/mach-tegra/apbio.c
@@ -114,7 +114,7 @@ static int do_dma_transfer(unsigned long apb_add,
dma_desc->callback = apb_dma_complete;
dma_desc->callback_param = NULL;
- INIT_COMPLETION(tegra_apb_wait);
+ reinit_completion(&tegra_apb_wait);
dmaengine_submit(dma_desc);
dma_async_issue_pending(tegra_apb_dma_chan);
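INIT_COMPLETION() (a macro that reset ->done in place) was replaced tree-wide by the reinit_completion() helper; init_completion() remains the one-time constructor. A minimal sketch of the submit-and-wait pattern this hunk follows, with hypothetical names:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(example_done);	/* one-time initialisation */

/* A (hypothetical) interrupt or DMA callback calls complete(&example_done). */
static int example_transfer(void (*start_hw)(void))
{
	/* Re-arm before kicking off new work; the old spelling was
	 * INIT_COMPLETION(example_done). */
	reinit_completion(&example_done);

	start_hw();

	if (!wait_for_completion_timeout(&example_done, msecs_to_jiffies(50)))
		return -ETIMEDOUT;
	return 0;
}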
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index c67f8ad5ccd5..0034d2cd6973 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -29,7 +29,6 @@ if ARCH_U8500
config UX500_SOC_DB8500
bool
- select CPU_FREQ_TABLE if CPU_FREQ
select MFD_DB8500_PRCMU
select PINCTRL_DB8500
select PINCTRL_DB8540
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index d7e7422527ca..4a70be485ff8 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -1,6 +1,7 @@
config ARCH_VEXPRESS
bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
select ARCH_REQUIRE_GPIOLIB
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
select ARM_TIMER_SP804
@@ -64,10 +65,22 @@ config ARCH_VEXPRESS_DCSCB
This is needed to provide CPU and cluster power management
on RTSM implementing big.LITTLE.
+config ARCH_VEXPRESS_SPC
+ bool "Versatile Express Serial Power Controller (SPC)"
+ select ARCH_HAS_CPUFREQ
+ select ARCH_HAS_OPP
+ select PM_OPP
+ help
+ The TC2 (A15x2 A7x3) versatile express core tile integrates a logic
+ block called Serial Power Controller (SPC) that provides the interface
+ between the dual cluster test-chip and the M3 microcontroller that
+ carries out power management.
+
config ARCH_VEXPRESS_TC2_PM
bool "Versatile Express TC2 power management"
depends on MCPM
select ARM_CCI
+ select ARCH_VEXPRESS_SPC
help
Support for CPU and cluster power management on Versatile Express
with a TC2 (A15x2 A7x3) big.LITTLE core tile.
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 505e64ab3eae..0997e0b7494c 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -8,7 +8,8 @@ obj-y := v2m.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
CFLAGS_dcscb.o += -march=armv7-a
-obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o spc.o
+obj-$(CONFIG_ARCH_VEXPRESS_SPC) += spc.o
+obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o
CFLAGS_tc2_pm.o += -march=armv7-a
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 3a6384c6c435..14d499688736 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -133,38 +133,8 @@ static void dcscb_power_down(void)
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
arch_spin_unlock(&dcscb_lock);
- /*
- * Flush all cache levels for this cluster.
- *
- * To do so we do:
- * - Clear the SCTLR.C bit to prevent further cache allocations
- * - Flush the whole cache
- * - Clear the ACTLR "SMP" bit to disable local coherency
- *
- * Let's do it in the safest possible way i.e. with
- * no memory access within the following sequence
- * including to the stack.
- *
- * Note: fp is preserved to the stack explicitly prior doing
- * this since adding it to the clobber list is incompatible
- * with having CONFIG_FRAME_POINTER=y.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_all \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ /* Flush all cache levels for this cluster. */
+ v7_exit_coherency_flush(all);
/*
* This is a harmless no-op. On platforms with a real
@@ -183,26 +153,8 @@ static void dcscb_power_down(void)
} else {
arch_spin_unlock(&dcscb_lock);
- /*
- * Flush the local CPU cache.
- * Let's do it in the safest possible way as above.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_louis \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ /* Disable and flush the local CPU cache. */
+ v7_exit_coherency_flush(louis);
}
__mcpm_cpu_down(cpu, cluster);
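DCSCB here, and TC2 further below, drop their duplicated "clear SCTLR.C, flush, clear the ACTLR SMP bit" assembly in favour of the v7_exit_coherency_flush() macro added to <asm/cacheflush.h> in this series (see the cacheflush.h entry in the diffstat); the macro performs the same sequence with no intervening memory accesses. Sketch of the intended usage, with a hypothetical caller:

#include <linux/types.h>
#include <asm/cacheflush.h>

static void example_cpu_power_down(bool last_man_in_cluster)
{
	if (last_man_in_cluster)
		/* Disable allocation and flush every cache level of the cluster. */
		v7_exit_coherency_flush(all);
	else
		/* Only flush to the Level of Unification Inner Shareable. */
		v7_exit_coherency_flush(louis);
}

Note that the Makefile keeps CFLAGS_dcscb.o and CFLAGS_tc2_pm.o at -march=armv7-a, so the v7-only instructions inside the macro still assemble.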
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index eefb029197ca..033d34dcbd3f 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -17,14 +17,31 @@
* GNU General Public License for more details.
*/
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
+#include <linux/semaphore.h>
#include <asm/cacheflush.h>
#define SPCLOG "vexpress-spc: "
+#define PERF_LVL_A15 0x00
+#define PERF_REQ_A15 0x04
+#define PERF_LVL_A7 0x08
+#define PERF_REQ_A7 0x0c
+#define COMMS 0x10
+#define COMMS_REQ 0x14
+#define PWC_STATUS 0x18
+#define PWC_FLAG 0x1c
+
/* SPC wake-up IRQs status and mask */
#define WAKE_INT_MASK 0x24
#define WAKE_INT_RAW 0x28
@@ -36,12 +53,45 @@
#define A15_BX_ADDR0 0x68
#define A7_BX_ADDR0 0x78
+/* SPC system config interface registers */
+#define SYSCFG_WDATA 0x70
+#define SYSCFG_RDATA 0x74
+
+/* A15/A7 OPP virtual register base */
+#define A15_PERFVAL_BASE 0xC10
+#define A7_PERFVAL_BASE 0xC30
+
+/* Config interface control bits */
+#define SYSCFG_START (1 << 31)
+#define SYSCFG_SCC (6 << 20)
+#define SYSCFG_STAT (14 << 20)
+
/* wake-up interrupt masks */
#define GBL_WAKEUP_INT_MSK (0x3 << 10)
/* TC2 static dual-cluster configuration */
#define MAX_CLUSTERS 2
+/*
+ * Even though the SPC takes max 3-5 ms to complete any OPP/COMMS
+ * operation, the operation could start just before jiffie is about
+ * to be incremented. So setting timeout value of 20ms = 2jiffies@100Hz
+ */
+#define TIMEOUT_US 20000
+
+#define MAX_OPPS 8
+#define CA15_DVFS 0
+#define CA7_DVFS 1
+#define SPC_SYS_CFG 2
+#define STAT_COMPLETE(type) ((1 << 0) << (type << 2))
+#define STAT_ERR(type) ((1 << 1) << (type << 2))
+#define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type))
+
+struct ve_spc_opp {
+ unsigned long freq;
+ unsigned long u_volt;
+};
+
struct ve_spc_drvdata {
void __iomem *baseaddr;
/*
@@ -49,6 +99,12 @@ struct ve_spc_drvdata {
* It corresponds to A15 processors MPIDR[15:8] bitfield
*/
u32 a15_clusid;
+ uint32_t cur_rsp_mask;
+ uint32_t cur_rsp_stat;
+ struct semaphore sem;
+ struct completion done;
+ struct ve_spc_opp *opps[MAX_CLUSTERS];
+ int num_opps[MAX_CLUSTERS];
};
static struct ve_spc_drvdata *info;
@@ -157,8 +213,197 @@ void ve_spc_powerdown(u32 cluster, bool enable)
writel_relaxed(enable, info->baseaddr + pwdrn_reg);
}
-int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
+static int ve_spc_get_performance(int cluster, u32 *freq)
+{
+ struct ve_spc_opp *opps = info->opps[cluster];
+ u32 perf_cfg_reg = 0;
+ u32 perf;
+
+ perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;
+
+ perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
+ if (perf >= info->num_opps[cluster])
+ return -EINVAL;
+
+ opps += perf;
+ *freq = opps->freq;
+
+ return 0;
+}
+
+/* find closest match to given frequency in OPP table */
+static int ve_spc_round_performance(int cluster, u32 freq)
+{
+ int idx, max_opp = info->num_opps[cluster];
+ struct ve_spc_opp *opps = info->opps[cluster];
+ u32 fmin = 0, fmax = ~0, ftmp;
+
+ freq /= 1000; /* OPP entries in kHz */
+ for (idx = 0; idx < max_opp; idx++, opps++) {
+ ftmp = opps->freq;
+ if (ftmp >= freq) {
+ if (ftmp <= fmax)
+ fmax = ftmp;
+ } else {
+ if (ftmp >= fmin)
+ fmin = ftmp;
+ }
+ }
+ if (fmax != ~0)
+ return fmax * 1000;
+ else
+ return fmin * 1000;
+}
+
+static int ve_spc_find_performance_index(int cluster, u32 freq)
+{
+ int idx, max_opp = info->num_opps[cluster];
+ struct ve_spc_opp *opps = info->opps[cluster];
+
+ for (idx = 0; idx < max_opp; idx++, opps++)
+ if (opps->freq == freq)
+ break;
+ return (idx == max_opp) ? -EINVAL : idx;
+}
+
+static int ve_spc_waitforcompletion(int req_type)
+{
+ int ret = wait_for_completion_interruptible_timeout(
+ &info->done, usecs_to_jiffies(TIMEOUT_US));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
+ return ret;
+}
+
+static int ve_spc_set_performance(int cluster, u32 freq)
+{
+ u32 perf_cfg_reg, perf_stat_reg;
+ int ret, perf, req_type;
+
+ if (cluster_is_a15(cluster)) {
+ req_type = CA15_DVFS;
+ perf_cfg_reg = PERF_LVL_A15;
+ perf_stat_reg = PERF_REQ_A15;
+ } else {
+ req_type = CA7_DVFS;
+ perf_cfg_reg = PERF_LVL_A7;
+ perf_stat_reg = PERF_REQ_A7;
+ }
+
+ perf = ve_spc_find_performance_index(cluster, freq);
+
+ if (perf < 0)
+ return perf;
+
+ if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
+ return -ETIME;
+
+ init_completion(&info->done);
+ info->cur_rsp_mask = RESPONSE_MASK(req_type);
+
+ writel(perf, info->baseaddr + perf_cfg_reg);
+ ret = ve_spc_waitforcompletion(req_type);
+
+ info->cur_rsp_mask = 0;
+ up(&info->sem);
+
+ return ret;
+}
+
+static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
+{
+ int ret;
+
+ if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
+ return -ETIME;
+
+ init_completion(&info->done);
+ info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);
+
+ /* Set the control value */
+ writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
+ ret = ve_spc_waitforcompletion(SPC_SYS_CFG);
+
+ if (ret == 0)
+ *data = readl(info->baseaddr + SYSCFG_RDATA);
+
+ info->cur_rsp_mask = 0;
+ up(&info->sem);
+
+ return ret;
+}
+
+static irqreturn_t ve_spc_irq_handler(int irq, void *data)
+{
+ struct ve_spc_drvdata *drv_data = data;
+ uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);
+
+ if (info->cur_rsp_mask & status) {
+ info->cur_rsp_stat = status;
+ complete(&drv_data->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * +--------------------------+
+ * | 31 20 | 19 0 |
+ * +--------------------------+
+ * | u_volt | freq(kHz) |
+ * +--------------------------+
+ */
+#define MULT_FACTOR 20
+#define VOLT_SHIFT 20
+#define FREQ_MASK (0xFFFFF)
+static int ve_spc_populate_opps(uint32_t cluster)
{
+ uint32_t data = 0, off, ret, idx;
+ struct ve_spc_opp *opps;
+
+ opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL);
+ if (!opps)
+ return -ENOMEM;
+
+ info->opps[cluster] = opps;
+
+ off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
+ for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
+ ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
+ if (!ret) {
+ opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
+ opps->u_volt = data >> VOLT_SHIFT;
+ } else {
+ break;
+ }
+ }
+ info->num_opps[cluster] = idx;
+
+ return ret;
+}
+
+static int ve_init_opp_table(struct device *cpu_dev)
+{
+ int cluster = topology_physical_package_id(cpu_dev->id);
+ int idx, ret = 0, max_opp = info->num_opps[cluster];
+ struct ve_spc_opp *opps = info->opps[cluster];
+
+ for (idx = 0; idx < max_opp; idx++, opps++) {
+ ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
+ opps->freq, opps->u_volt);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
+{
+ int ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
pr_err(SPCLOG "unable to allocate mem\n");
@@ -168,6 +413,25 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
info->baseaddr = baseaddr;
info->a15_clusid = a15_clusid;
+ if (irq <= 0) {
+ pr_err(SPCLOG "Invalid IRQ %d\n", irq);
+ kfree(info);
+ return -EINVAL;
+ }
+
+ init_completion(&info->done);
+
+ readl_relaxed(info->baseaddr + PWC_STATUS);
+
+ ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
+ | IRQF_ONESHOT, "vexpress-spc", info);
+ if (ret) {
+ pr_err(SPCLOG "IRQ %d request failed\n", irq);
+ kfree(info);
+ return -ENODEV;
+ }
+
+ sema_init(&info->sem, 1);
/*
* Multi-cluster systems may need this data when non-coherent, during
* cluster power-up/power-down. Make sure driver info reaches main
@@ -178,3 +442,103 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
return 0;
}
+
+struct clk_spc {
+ struct clk_hw hw;
+ int cluster;
+};
+
+#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
+static unsigned long spc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+ u32 freq;
+
+ if (ve_spc_get_performance(spc->cluster, &freq))
+ return -EIO;
+
+ return freq * 1000;
+}
+
+static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+
+ return ve_spc_round_performance(spc->cluster, drate);
+}
+
+static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+
+ return ve_spc_set_performance(spc->cluster, rate / 1000);
+}
+
+static struct clk_ops clk_spc_ops = {
+ .recalc_rate = spc_recalc_rate,
+ .round_rate = spc_round_rate,
+ .set_rate = spc_set_rate,
+};
+
+static struct clk *ve_spc_clk_register(struct device *cpu_dev)
+{
+ struct clk_init_data init;
+ struct clk_spc *spc;
+
+ spc = kzalloc(sizeof(*spc), GFP_KERNEL);
+ if (!spc) {
+ pr_err("could not allocate spc clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spc->hw.init = &init;
+ spc->cluster = topology_physical_package_id(cpu_dev->id);
+
+ init.name = dev_name(cpu_dev);
+ init.ops = &clk_spc_ops;
+ init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+ init.num_parents = 0;
+
+ return devm_clk_register(cpu_dev, &spc->hw);
+}
+
+static int __init ve_spc_clk_init(void)
+{
+ int cpu;
+ struct clk *clk;
+
+ if (!info)
+ return 0; /* Continue only if SPC is initialised */
+
+ if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
+ pr_err("failed to build OPP table\n");
+ return -ENODEV;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_warn("failed to get cpu%d device\n", cpu);
+ continue;
+ }
+ clk = ve_spc_clk_register(cpu_dev);
+ if (IS_ERR(clk)) {
+ pr_warn("failed to register cpu%d clock\n", cpu);
+ continue;
+ }
+ if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
+ pr_warn("failed to register cpu%d clock lookup\n", cpu);
+ continue;
+ }
+
+ if (ve_init_opp_table(cpu_dev))
+ pr_warn("failed to initialise cpu%d opp table\n", cpu);
+ }
+
+ platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
+ return 0;
+}
+module_init(ve_spc_clk_init);
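The OPP entries read back from the SCC are packed one per 32-bit word, as the comment above ve_spc_populate_opps() shows: bits [19:0] hold the frequency (multiplied by MULT_FACTOR to give kHz) and bits [31:20] are stored unmodified as the u_volt value handed to dev_pm_opp_add(). A tiny stand-alone decode of a hypothetical register value, using the same constants:

#include <stdint.h>
#include <stdio.h>

#define MULT_FACTOR	20
#define VOLT_SHIFT	20
#define FREQ_MASK	0xFFFFF

int main(void)
{
	uint32_t data = (900u << VOLT_SHIFT) | 50000u;	/* hypothetical PERFVAL word */

	unsigned long freq_khz = (data & FREQ_MASK) * MULT_FACTOR;	/* 1000000 kHz */
	unsigned long u_volt   = data >> VOLT_SHIFT;			/* 900, unit as stored */

	printf("opp: %lu kHz @ %lu\n", freq_khz, u_volt);
	return 0;
}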
diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h
index 5f7e4a446a17..dbd44c3720f9 100644
--- a/arch/arm/mach-vexpress/spc.h
+++ b/arch/arm/mach-vexpress/spc.h
@@ -15,7 +15,7 @@
#ifndef __SPC_H_
#define __SPC_H_
-int __init ve_spc_init(void __iomem *base, u32 a15_clusid);
+int __init ve_spc_init(void __iomem *base, u32 a15_clusid, int irq);
void ve_spc_global_wakeup_irq(bool set);
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index e6eb48192912..05a364c5077a 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>
@@ -156,32 +157,7 @@ static void tc2_pm_down(u64 residency)
: : "r" (0x400) );
}
- /*
- * We need to disable and flush the whole (L1 and L2) cache.
- * Let's do it in the safest possible way i.e. with
- * no memory access within the following sequence
- * including the stack.
- *
- * Note: fp is preserved to the stack explicitly prior doing
- * this since adding it to the clobber list is incompatible
- * with having CONFIG_FRAME_POINTER=y.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_all \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ v7_exit_coherency_flush(all);
cci_disable_port_by_cpu(mpidr);
@@ -197,26 +173,7 @@ static void tc2_pm_down(u64 residency)
arch_spin_unlock(&tc2_pm_lock);
- /*
- * We need to disable and flush only the L1 cache.
- * Let's do it in the safest possible way as above.
- */
- asm volatile(
- "str fp, [sp, #-4]! \n\t"
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_louis \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb \n\t"
- "ldr fp, [sp], #4"
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","lr","memory");
+ v7_exit_coherency_flush(louis);
}
__mcpm_cpu_down(cpu, cluster);
@@ -311,7 +268,7 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
static int __init tc2_pm_init(void)
{
- int ret;
+ int ret, irq;
void __iomem *scc;
u32 a15_cluster_id, a7_cluster_id, sys_info;
struct device_node *np;
@@ -336,13 +293,15 @@ static int __init tc2_pm_init(void)
tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
+ irq = irq_of_parse_and_map(np, 0);
+
/*
* A subset of the SCC registers is also used to communicate
* with the SPC (power controller). We need to be able to
* drive it very early in the boot process to power up
* processors, so we initialize the SPC driver here.
*/
- ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id);
+ ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
if (ret)
return ret;
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 5f252569c689..9a7bd137c8fd 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -44,6 +44,10 @@ static struct of_device_id zynq_of_bus_ids[] __initdata = {
{}
};
+static struct platform_device zynq_cpuidle_device = {
+ .name = "cpuidle-zynq",
+};
+
/**
* zynq_init_machine - System specific initialization, intended to be
* called from board specific initialization.
@@ -56,6 +60,8 @@ static void __init zynq_init_machine(void)
l2x0_of_init(0x02060000, 0xF0F0FFFF);
of_platform_bus_probe(NULL, zynq_of_bus_ids, NULL);
+
+ platform_device_register(&zynq_cpuidle_device);
}
static void __init zynq_timer_init(void)
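Registering the "cpuidle-zynq" platform device at machine-init time is what allows a matching platform driver to probe by name; the binding is the ordinary platform-bus string match. A skeletal driver side, sketched only to show the pairing (the probe body is hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_cpuidle_probe(struct platform_device *pdev)
{
	/* a real driver would register its cpuidle states here */
	return 0;
}

static struct platform_driver example_cpuidle_driver = {
	.probe	= example_cpuidle_probe,
	.driver	= {
		.name	= "cpuidle-zynq",	/* must match the device name above */
	},
};
module_platform_driver(example_cpuidle_driver);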
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index cd2c88e7a8f7..1f8fed94c2a4 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -952,3 +952,9 @@ config ARCH_HAS_BARRIERS
help
This option allows the use of custom mandatory barriers
included via the mach/barriers.h file.
+
+config ARCH_SUPPORTS_BIG_ENDIAN
+ bool
+ help
+ This option specifies the architecture can support big endian
+ operation.
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 80741992a9fc..3815a8262af0 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -38,9 +38,8 @@ ENTRY(v6_early_abort)
bne do_DataAbort
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r3, r3
-#endif
+ ARM_BE8(rev r3, r3)
+
do_ldrd_abort tmp=ip, insn=r3
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f4585b89078..924036473b16 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -25,6 +25,7 @@
#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>
+#include <asm/opcodes.h>
#include "fault.h"
@@ -762,21 +763,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (thumb_mode(regs)) {
u16 *ptr = (u16 *)(instrptr & ~1);
fault = probe_kernel_address(ptr, tinstr);
+ tinstr = __mem_to_opcode_thumb16(tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2 = 0;
fault = probe_kernel_address(ptr + 1, tinst2);
- instr = (tinstr << 16) | tinst2;
+ tinst2 = __mem_to_opcode_thumb16(tinst2);
+ instr = __opcode_thumb32_compose(tinstr, tinst2);
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
}
- } else
+ } else {
fault = probe_kernel_address(instrptr, instr);
+ instr = __mem_to_opcode_arm(instr);
+ }
if (fault) {
type = TYPE_FAULT;
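The alignment fixup now funnels every fetched instruction through the <asm/opcodes.h> endian helpers, so a BE8 kernel (big-endian data, little-endian instructions) decodes the same canonical opcode values as a little-endian one. __opcode_thumb32_compose() is simply the (first << 16) | second combination the removed line did by hand; a worked value, assuming the helpers behave as described:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t first = 0xf8d0, second = 0x1000;	/* halfwords already in CPU order */

	/* What __opcode_thumb32_compose(first, second) boils down to: */
	uint32_t instr = ((uint32_t)first << 16) | second;

	printf("0x%08x\n", instr);	/* 0xf8d01000 == "ldr.w r1, [r0]" */
	return 0;
}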
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1272ed202dde..79f8b39801a8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
static u64 get_coherent_dma_mask(struct device *dev)
{
- u64 mask = (u64)arm_dma_limit;
+ u64 mask = (u64)DMA_BIT_MASK(32);
if (dev) {
mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
return 0;
}
- if ((~mask) & (u64)arm_dma_limit) {
- dev_warn(dev, "coherent DMA mask %#llx is smaller "
- "than system GFP_DMA mask %#llx\n",
- mask, (u64)arm_dma_limit);
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then fail the
+ * allocation.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+ dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+ mask);
+ dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+ return 0;
+ }
+
+ /*
+ * Now check that the mask, when translated to a PFN,
+ * fits within the allowable addresses which we can
+ * allocate.
+ */
+ if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+ dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+ mask,
+ dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+ arm_dma_pfn_limit + 1);
return 0;
}
}
@@ -687,7 +707,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -700,7 +720,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
*/
int dma_supported(struct device *dev, u64 mask)
{
- if (mask < (u64)arm_dma_limit)
+ unsigned long limit;
+
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then we must
+ * indicate that DMA to this device is not supported.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
return 0;
+
+ /*
+ * Translate the device's DMA mask to a PFN limit. This
+ * PFN number includes the page which we can DMA to.
+ */
+ limit = dma_to_pfn(dev, mask);
+
+ if (limit < arm_dma_pfn_limit)
+ return 0;
+
return 1;
}
EXPORT_SYMBOL(dma_supported);
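The rewritten checks compare the device mask against arm_dma_pfn_limit (the last page frame the DMA zone can reach) instead of the raw byte limit, and additionally reject masks that cannot even be represented in dma_addr_t. A worked example with made-up numbers, assuming 4 KiB pages, RAM starting at physical address 0, and a DMA zone covering the first 512 MiB:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t arm_dma_limit = 0x1fffffffULL;		/* last DMA-able byte (hypothetical) */
	unsigned long arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;	/* 0x1ffff */

	uint64_t mask = 0x0fffffffULL;			/* device claims 28-bit addressing */
	unsigned long limit = mask >> PAGE_SHIFT;	/* what dma_to_pfn() yields when RAM starts at 0 */

	/* Mirrors the new dma_supported(): the mask must reach the end of the DMA zone. */
	printf("dma_supported: %d\n", limit >= arm_dma_pfn_limit ? 1 : 0);	/* prints 0 */
	return 0;
}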
diff --git a/arch/arm/mm/extable.c b/arch/arm/mm/extable.c
index 9d285626bc7d..312e15e6d00b 100644
--- a/arch/arm/mm/extable.c
+++ b/arch/arm/mm/extable.c
@@ -9,8 +9,13 @@ int fixup_exception(struct pt_regs *regs)
const struct exception_table_entry *fixup;
fixup = search_exception_tables(instruction_pointer(regs));
- if (fixup)
+ if (fixup) {
regs->ARM_pc = fixup->fixup;
+#ifdef CONFIG_THUMB2_KERNEL
+ /* Clear the IT state to avoid nasty surprises in the fixup */
+ regs->ARM_cpsr &= ~PSR_IT_MASK;
+#endif
+ }
return fixup != NULL;
}
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 2a5907b5c8d2..ff379ac115df 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -65,7 +65,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
return ret;
}
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
/*
* If we are using split PTE locks, then we need to take the page
* lock here. Otherwise we are using shared mm->page_table_lock
@@ -84,10 +84,10 @@ static inline void do_pte_unlock(spinlock_t *ptl)
{
spin_unlock(ptl);
}
-#else /* !USE_SPLIT_PTLOCKS */
+#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn)
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 83cb3ac27095..8e0e52eb76b5 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -10,6 +10,7 @@
#include <asm/system_info.h>
pgd_t *idmap_pgd;
+phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
@@ -67,8 +68,9 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start,
unsigned long addr, end;
unsigned long next;
- addr = virt_to_phys(text_start);
- end = virt_to_phys(text_end);
+ addr = virt_to_idmap(text_start);
+ end = virt_to_idmap(text_end);
+ pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);
prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
@@ -90,8 +92,6 @@ static int __init init_static_idmap(void)
if (!idmap_pgd)
return -ENOMEM;
- pr_info("Setting up static identity map for 0x%p - 0x%p\n",
- __idmap_text_start, __idmap_text_end);
identity_mapping_add(idmap_pgd, __idmap_text_start,
__idmap_text_end, 0);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ca907f805c57..3e8f106ee5fe 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -209,6 +209,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
* so a successful GFP_DMA allocation will always satisfy this.
*/
phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
@@ -231,6 +232,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
+ arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
@@ -418,12 +420,10 @@ void __init bootmem_init(void)
* This doesn't seem to be used by the Linux memory manager any
* more, but is used by ll_rw_block. If we can get rid of it, we
* also get rid of some of the stuff above as well.
- *
- * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
- * the system, not the maximum PFN.
*/
- max_low_pfn = max_low - PHYS_PFN_OFFSET;
- max_pfn = max_high - PHYS_PFN_OFFSET;
+ min_low_pfn = min;
+ max_low_pfn = max_low;
+ max_pfn = max_high;
}
/*
@@ -529,7 +529,7 @@ static inline void free_area_high(unsigned long pfn, unsigned long end)
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
- unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+ unsigned long max_low = max_low_pfn;
struct memblock_region *mem, *res;
/* set highmem page free */
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9ad8f0f..d5a982d15a88 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm);
#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
+extern unsigned long arm_dma_pfn_limit;
#else
#define arm_dma_limit ((phys_addr_t)~0)
+#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
#endif
extern phys_addr_t arm_lowmem_limit;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 0c6356255fe3..d27158c38eb0 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
}
/*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
- return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+ return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
#ifdef CONFIG_STRICT_DEVMEM
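valid_mmap_phys_addr_range() used to cap /dev/mem mappings at pfn 0x100000 (the 4 GiB boundary, a leftover from not supporting supersection mappings); the new test accepts anything the page tables can express, i.e. up to PHYS_MASK. A stand-alone rendering of the new bound, assuming an LPAE configuration where PHYS_MASK covers 40 bits and 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PHYS_MASK	((1ULL << 40) - 1)	/* assumed LPAE value */

static int valid_range(unsigned long pfn, uint64_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

int main(void)
{
	printf("%d\n", valid_range(0x0ffffff0, (uint64_t)16 << PAGE_SHIFT));	/* 1: ends exactly at the 1 TiB limit */
	printf("%d\n", valid_range(0x10000000, (uint64_t)1 << PAGE_SHIFT));	/* 0: beyond PHYS_MASK */
	return 0;
}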
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b1d17eeb59b8..78eeeca78f5a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -28,6 +28,8 @@
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
+#include <asm/procinfo.h>
+#include <asm/memory.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -1315,6 +1317,86 @@ static void __init map_lowmem(void)
}
}
+#ifdef CONFIG_ARM_LPAE
+/*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(const struct machine_desc *mdesc,
+ struct proc_info_list *procinfo)
+{
+ pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
+ unsigned long map_start, map_end;
+ pgd_t *pgd0, *pgdk;
+ pud_t *pud0, *pudk, *pud_start;
+ pmd_t *pmd0, *pmdk;
+ phys_addr_t phys;
+ int i;
+
+ if (!(mdesc->init_meminfo))
+ return;
+
+ /* remap kernel code and data */
+ map_start = init_mm.start_code;
+ map_end = init_mm.brk;
+
+ /* get a handle on things... */
+ pgd0 = pgd_offset_k(0);
+ pud_start = pud0 = pud_offset(pgd0, 0);
+ pmd0 = pmd_offset(pud0, 0);
+
+ pgdk = pgd_offset_k(map_start);
+ pudk = pud_offset(pgdk, map_start);
+ pmdk = pmd_offset(pudk, map_start);
+
+ mdesc->init_meminfo();
+
+ /* Run the patch stub to update the constants */
+ fixup_pv_table(&__pv_table_begin,
+ (&__pv_table_end - &__pv_table_begin) << 2);
+
+ /*
+ * Cache cleaning operations for self-modifying code
+ * We should clean the entries by MVA but running a
+ * for loop over every pv_table entry pointer would
+ * just complicate the code.
+ */
+ flush_cache_louis();
+ dsb();
+ isb();
+
+ /* remap level 1 table */
+ for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
+ set_pud(pud0,
+ __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
+ pmd0 += PTRS_PER_PMD;
+ }
+
+ /* remap pmds for kernel mapping */
+ phys = __pa(map_start) & PMD_MASK;
+ do {
+ *pmdk++ = __pmd(phys | pmdprot);
+ phys += PMD_SIZE;
+ } while (phys < map_end);
+
+ flush_cache_all();
+ cpu_switch_mm(pgd0, &init_mm);
+ cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+ local_flush_bp_all();
+ local_flush_tlb_all();
+}
+
+#else
+
+void __init early_paging_init(const struct machine_desc *mdesc,
+ struct proc_info_list *procinfo)
+{
+ if (mdesc->init_meminfo)
+ mdesc->init_meminfo();
+}
+
+#endif
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
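
early_paging_init() only rebuilds the page tables when the machine descriptor supplies an init_meminfo() hook; that hook is the place where a platform switches its view of RAM to an alias above 4GB before the pv-table constants are re-patched. A hedged sketch of what such a hook could look like (board name, helper and the alias switch are hypothetical, not part of this patch):

/* Hypothetical LPAE platform, for illustration only. */
static void __init example_init_meminfo(void)
{
	/*
	 * Point the kernel's phys/virt translation at the high RAM
	 * alias; early_paging_init() then recreates the mappings and
	 * re-runs fixup_pv_table() against the new offset.
	 */
	example_switch_to_high_ram_alias();	/* hypothetical SoC helper */
}

DT_MACHINE_START(EXAMPLE_LPAE, "Example LPAE board")
	.init_meminfo	= example_init_meminfo,
	/* ... remaining machine_desc fields ... */
MACHINE_END
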
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 34d4ab217bab..5c668b7a31f9 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -296,6 +296,15 @@ void __init sanity_check_meminfo(void)
}
/*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(const struct machine_desc *mdesc,
+ struct proc_info_list *procinfo)
+{
+}
+
+/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 1128064fddcb..45dc29f85d56 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -220,9 +220,7 @@ __v6_setup:
#endif /* CONFIG_MMU */
adr r5, v6_crval
ldmia r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r6, r6, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear bits them
orr r0, r0, r6 @ set them
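
ARM_BE8() replaces the open-coded #ifdef CONFIG_CPU_ENDIAN_BE8 blocks here and in proc-v7.S: it emits its argument only when the kernel is built big-endian (BE8) and expands to nothing otherwise. A minimal sketch of the expected shape of such a helper (the real definition lives in asm/assembler.h; this is an assumption of its form, not a quote of it):

/* Sketch: keep BE8-only instructions out of little-endian builds. */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif
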
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c63d9bdee51e..60920f62fdf5 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -367,9 +367,7 @@ __v7_setup:
#endif
adr r5, v7_crval
ldmia r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r6, r6, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
#ifdef CONFIG_SWP_EMULATE
orr r5, r5, #(1 << 10) @ set SW bit in "clear"
bic r6, r6, #(1 << 10) @ clear it in "mmuset"
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 99b44e0e8d86..9ed155ad0f97 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -19,6 +19,7 @@
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
+#include <asm/opcodes.h>
#include "bpf_jit_32.h"
@@ -113,8 +114,11 @@ static u32 jit_udiv(u32 dividend, u32 divisor)
static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
+ inst |= (cond << 28);
+ inst = __opcode_to_mem_arm(inst);
+
if (ctx->target != NULL)
- ctx->target[ctx->idx] = inst | (cond << 28);
+ ctx->target[ctx->idx] = inst;
ctx->idx++;
}
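
With BE8 support, the JIT can no longer store the raw instruction word: a BE8 kernel accesses data big-endian while instructions are still fetched little-endian, so the word has to be converted to memory order before being written, which is what __opcode_to_mem_arm() provides here. A standalone sketch of that idea (the swap shown illustrates the intent; it is not the kernel's exact macro):

#include <stdint.h>
#include <stdio.h>

/* On a BE8 build this would byte-swap; on a little-endian build it is a no-op. */
static uint32_t opcode_to_mem_sketch(uint32_t inst)
{
#ifdef CONFIG_CPU_ENDIAN_BE8
	return __builtin_bswap32(inst);
#else
	return inst;
#endif
}

int main(void)
{
	uint32_t inst = 0xe1a00000;	/* "mov r0, r0" with the AL condition in bits 31:28 */
	printf("%#x\n", (unsigned int)opcode_to_mem_sketch(inst));
	return 0;
}
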
diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c
index d51f9565567c..be4ad0b21c08 100644
--- a/arch/arm/plat-samsung/dev-backlight.c
+++ b/arch/arm/plat-samsung/dev-backlight.c
@@ -70,6 +70,7 @@ static struct samsung_bl_drvdata samsung_dfl_bl_data __initdata = {
.max_brightness = 255,
.dft_brightness = 255,
.pwm_period_ns = 78770,
+ .enable_gpio = -1,
.init = samsung_bl_init,
.exit = samsung_bl_exit,
},
@@ -121,6 +122,10 @@ void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
samsung_bl_data->lth_brightness = bl_data->lth_brightness;
if (bl_data->pwm_period_ns)
samsung_bl_data->pwm_period_ns = bl_data->pwm_period_ns;
+ if (bl_data->enable_gpio >= 0)
+ samsung_bl_data->enable_gpio = bl_data->enable_gpio;
+ if (bl_data->enable_gpio_flags)
+ samsung_bl_data->enable_gpio_flags = bl_data->enable_gpio_flags;
if (bl_data->init)
samsung_bl_data->init = bl_data->init;
if (bl_data->notify)
diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S
index 2677bc3762d7..40f27e52de75 100644
--- a/arch/arm/plat-versatile/headsmp.S
+++ b/arch/arm/plat-versatile/headsmp.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
/*
* Realview/Versatile Express specific entry point for secondary CPUs.
@@ -17,6 +18,7 @@
* until we're ready for them to initialise.
*/
ENTRY(versatile_secondary_startup)
+ ARM_BE8(setend be)
mrc p15, 0, r0, c0, c0, 5
bic r0, #0xff000000
adr r4, 1f
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 52b8f40b1c73..2f37e1d6cb45 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -642,9 +642,9 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
void *hcpu)
{
- if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
- vfp_force_reload((long)hcpu, current_thread_info());
- } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ if (action == CPU_DYING || action == CPU_DYING_FROZEN)
+ vfp_current_hw_state[(long)hcpu] = NULL;
+ else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
vfp_enable(NULL);
return NOTIFY_OK;
}
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 43841033afd3..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
new file mode 100644
index 000000000000..b0e77de99148
--- /dev/null
+++ b/arch/arm/xen/mm.c
@@ -0,0 +1,65 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+ unsigned int address_bits,
+ dma_addr_t *dma_handle)
+{
+ if (!xen_initial_domain())
+ return -EINVAL;
+
+ /* we assume that dom0 is mapped 1:1 for now */
+ *dma_handle = pstart;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+{
+ return;
+}
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
+struct dma_map_ops *xen_dma_ops;
+EXPORT_SYMBOL_GPL(xen_dma_ops);
+
+static struct dma_map_ops xen_swiotlb_dma_ops = {
+ .mapping_error = xen_swiotlb_dma_mapping_error,
+ .alloc = xen_swiotlb_alloc_coherent,
+ .free = xen_swiotlb_free_coherent,
+ .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = xen_swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+ .map_sg = xen_swiotlb_map_sg_attrs,
+ .unmap_sg = xen_swiotlb_unmap_sg_attrs,
+ .map_page = xen_swiotlb_map_page,
+ .unmap_page = xen_swiotlb_unmap_page,
+ .dma_supported = xen_swiotlb_dma_supported,
+ .set_dma_mask = xen_swiotlb_set_dma_mask,
+};
+
+int __init xen_mm_init(void)
+{
+ if (!xen_initial_domain())
+ return 0;
+ xen_swiotlb_init(1, false);
+ xen_dma_ops = &xen_swiotlb_dma_ops;
+ return 0;
+}
+arch_initcall(xen_mm_init);
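
xen_mm_init() installs the swiotlb-xen DMA operations only in the initial domain, which is also the only case covered by the 1:1 mapping assumption in xen_create_contiguous_region(). A hedged sketch of how a caller might dispatch on the exported xen_dma_ops pointer (the dispatcher itself is hypothetical; only xen_dma_ops and xen_initial_domain() come from this code):

/* Illustration only: prefer the Xen DMA ops once they have been installed. */
static inline struct dma_map_ops *example_get_dma_ops(void)
{
	if (xen_initial_domain() && xen_dma_ops)
		return xen_dma_ops;
	return NULL;	/* fall back to the platform's usual DMA ops */
}
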
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
new file mode 100644
index 000000000000..23732cdff551
--- /dev/null
+++ b/arch/arm/xen/p2m.c
@@ -0,0 +1,208 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/rwlock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+struct xen_p2m_entry {
+ unsigned long pfn;
+ unsigned long mfn;
+ unsigned long nr_pages;
+ struct rb_node rbnode_mach;
+ struct rb_node rbnode_phys;
+};
+
+rwlock_t p2m_lock;
+struct rb_root phys_to_mach = RB_ROOT;
+static struct rb_root mach_to_phys = RB_ROOT;
+
+static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
+{
+ struct rb_node **link = &phys_to_mach.rb_node;
+ struct rb_node *parent = NULL;
+ struct xen_p2m_entry *entry;
+ int rc = 0;
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
+
+ if (new->mfn == entry->mfn)
+ goto err_out;
+ if (new->pfn == entry->pfn)
+ goto err_out;
+
+ if (new->pfn < entry->pfn)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+ rb_link_node(&new->rbnode_phys, parent, link);
+ rb_insert_color(&new->rbnode_phys, &phys_to_mach);
+ goto out;
+
+err_out:
+ rc = -EINVAL;
+ pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+ __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+ return rc;
+}
+
+unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+ struct rb_node *n = phys_to_mach.rb_node;
+ struct xen_p2m_entry *entry;
+ unsigned long irqflags;
+
+ read_lock_irqsave(&p2m_lock, irqflags);
+ while (n) {
+ entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+ if (entry->pfn <= pfn &&
+ entry->pfn + entry->nr_pages > pfn) {
+ read_unlock_irqrestore(&p2m_lock, irqflags);
+ return entry->mfn + (pfn - entry->pfn);
+ }
+ if (pfn < entry->pfn)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+ read_unlock_irqrestore(&p2m_lock, irqflags);
+
+ return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__pfn_to_mfn);
+
+static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
+{
+ struct rb_node **link = &mach_to_phys.rb_node;
+ struct rb_node *parent = NULL;
+ struct xen_p2m_entry *entry;
+ int rc = 0;
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
+
+ if (new->mfn == entry->mfn)
+ goto err_out;
+ if (new->pfn == entry->pfn)
+ goto err_out;
+
+ if (new->mfn < entry->mfn)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+ rb_link_node(&new->rbnode_mach, parent, link);
+ rb_insert_color(&new->rbnode_mach, &mach_to_phys);
+ goto out;
+
+err_out:
+ rc = -EINVAL;
+ pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+ __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+ return rc;
+}
+
+unsigned long __mfn_to_pfn(unsigned long mfn)
+{
+ struct rb_node *n = mach_to_phys.rb_node;
+ struct xen_p2m_entry *entry;
+ unsigned long irqflags;
+
+ read_lock_irqsave(&p2m_lock, irqflags);
+ while (n) {
+ entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
+ if (entry->mfn <= mfn &&
+ entry->mfn + entry->nr_pages > mfn) {
+ read_unlock_irqrestore(&p2m_lock, irqflags);
+ return entry->pfn + (mfn - entry->mfn);
+ }
+ if (mfn < entry->mfn)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+ read_unlock_irqrestore(&p2m_lock, irqflags);
+
+ return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__mfn_to_pfn);
+
+bool __set_phys_to_machine_multi(unsigned long pfn,
+ unsigned long mfn, unsigned long nr_pages)
+{
+ int rc;
+ unsigned long irqflags;
+ struct xen_p2m_entry *p2m_entry;
+ struct rb_node *n = phys_to_mach.rb_node;
+
+ if (mfn == INVALID_P2M_ENTRY) {
+ write_lock_irqsave(&p2m_lock, irqflags);
+ while (n) {
+ p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+ if (p2m_entry->pfn <= pfn &&
+ p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
+ rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
+ rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
+ write_unlock_irqrestore(&p2m_lock, irqflags);
+ kfree(p2m_entry);
+ return true;
+ }
+ if (pfn < p2m_entry->pfn)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+ write_unlock_irqrestore(&p2m_lock, irqflags);
+ return true;
+ }
+
+ p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
+ if (!p2m_entry) {
+ pr_warn("cannot allocate xen_p2m_entry\n");
+ return false;
+ }
+ p2m_entry->pfn = pfn;
+ p2m_entry->nr_pages = nr_pages;
+ p2m_entry->mfn = mfn;
+
+ write_lock_irqsave(&p2m_lock, irqflags);
+ if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0 ||
+ (rc = xen_add_mach_to_phys_entry(p2m_entry)) < 0) {
+ write_unlock_irqrestore(&p2m_lock, irqflags);
+ kfree(p2m_entry); /* do not leak the entry on a duplicate insert */
+ return false;
+ }
+ write_unlock_irqrestore(&p2m_lock, irqflags);
+ return true;
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ return __set_phys_to_machine_multi(pfn, mfn, 1);
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine);
+
+int p2m_init(void)
+{
+ rwlock_init(&p2m_lock);
+ return 0;
+}
+arch_initcall(p2m_init);
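
The pair of red-black trees gives logarithmic pfn<->mfn translation for whole ranges of foreign pages, and passing INVALID_P2M_ENTRY removes a previously registered range. A hedged usage sketch built on the functions above (the pfn/mfn numbers are made up):

/* Illustration only: register a 16-page foreign range, translate, then drop it. */
static void p2m_usage_sketch(void)
{
	unsigned long pfn = 0x80000, mfn = 0x123450, nr = 16;

	if (!__set_phys_to_machine_multi(pfn, mfn, nr))
		return;

	/* any page inside the range resolves with the same offset */
	WARN_ON(__pfn_to_mfn(pfn + 3) != mfn + 3);
	WARN_ON(__mfn_to_pfn(mfn + 3) != pfn + 3);

	/* INVALID_P2M_ENTRY removes the whole range again */
	__set_phys_to_machine_multi(pfn, INVALID_P2M_ENTRY, nr);
}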