Diffstat (limited to 'arch')
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/arc/boot/dts/nsimosci.dts12
-rw-r--r--arch/arc/configs/nsimosci_defconfig1
-rw-r--r--arch/arc/include/asm/delay.h5
-rw-r--r--arch/arc/include/asm/irqflags.h7
-rw-r--r--arch/arc/include/asm/ptrace.h2
-rw-r--r--arch/arc/include/asm/sections.h1
-rw-r--r--arch/arc/include/asm/spinlock.h9
-rw-r--r--arch/arc/include/asm/syscall.h5
-rw-r--r--arch/arc/include/asm/uaccess.h4
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h1
-rw-r--r--arch/arc/kernel/devtree.c2
-rw-r--r--arch/arc/kernel/entry.S24
-rw-r--r--arch/arc/kernel/head.S7
-rw-r--r--arch/arc/kernel/irq.c3
-rw-r--r--arch/arc/kernel/ptrace.c6
-rw-r--r--arch/arc/kernel/setup.c3
-rw-r--r--arch/arc/kernel/signal.c25
-rw-r--r--arch/arc/kernel/unaligned.c6
-rw-r--r--arch/arc/lib/strchr-700.S10
-rw-r--r--arch/arc/mm/fault.c6
-rw-r--r--arch/arc/mm/init.c5
-rw-r--r--arch/arm/Kconfig141
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/Makefile1
-rw-r--r--arch/arm/boot/compressed/atags_to_fdt.c44
-rw-r--r--arch/arm/boot/compressed/head.S9
-rw-r--r--arch/arm/boot/dts/Makefile9
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi3
-rw-r--r--arch/arm/boot/dts/armada-xp-gp.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78230.dtsi16
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78260.dtsi78
-rw-r--r--arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts2
-rw-r--r--arch/arm/boot/dts/at91rm9200.dtsi6
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi18
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi8
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi8
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi8
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi10
-rw-r--r--arch/arm/boot/dts/bcm2835.dtsi4
-rw-r--r--arch/arm/boot/dts/clcd-panels.dtsi52
-rw-r--r--arch/arm/boot/dts/cros5250-common.dtsi12
-rw-r--r--arch/arm/boot/dts/exynos5250-arndale.dts1
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi2
-rw-r--r--arch/arm/boot/dts/imx23.dtsi8
-rw-r--r--arch/arm/boot/dts/imx28.dtsi8
-rw-r--r--arch/arm/boot/dts/imx53.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6dl.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi4
-rw-r--r--arch/arm/boot/dts/integratorcp.dts9
-rw-r--r--arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts159
-rw-r--r--arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts165
-rw-r--r--arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts177
-rw-r--r--arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts171
-rw-r--r--arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts183
-rw-r--r--arch/arm/boot/dts/rtsm_ve-motherboard.dtsi231
-rw-r--r--arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts244
-rw-r--r--arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts358
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi4
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi4
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi4
-rw-r--r--arch/arm/boot/dts/vexpress-v2m-rs1.dtsi1
-rw-r--r--arch/arm/boot/dts/vexpress-v2m.dtsi1
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts4
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts168
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca5s.dts4
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca9.dts4
-rw-r--r--arch/arm/common/Kconfig46
-rw-r--r--arch/arm/common/Makefile5
-rw-r--r--arch/arm/common/bL_switcher.c864
-rw-r--r--arch/arm/common/bL_switcher_dummy_if.c71
-rw-r--r--arch/arm/common/fiq_debugger.c1376
-rw-r--r--arch/arm/common/fiq_debugger_ringbuf.h94
-rw-r--r--arch/arm/common/fiq_glue.S25
-rw-r--r--arch/arm/common/fiq_glue_setup.c53
-rw-r--r--arch/arm/common/mcpm_entry.c12
-rw-r--r--arch/arm/common/mcpm_head.S18
-rw-r--r--arch/arm/configs/multi_v7_defconfig2
-rw-r--r--arch/arm/crypto/aes-armv4.S6
-rw-r--r--arch/arm/include/asm/a.out-core.h45
-rw-r--r--arch/arm/include/asm/arch_timer.h59
-rw-r--r--arch/arm/include/asm/assembler.h7
-rw-r--r--arch/arm/include/asm/atomic.h26
-rw-r--r--arch/arm/include/asm/bL_switcher.h83
-rw-r--r--arch/arm/include/asm/bug.h10
-rw-r--r--arch/arm/include/asm/cacheflush.h47
-rw-r--r--arch/arm/include/asm/cp15.h14
-rw-r--r--arch/arm/include/asm/div64.h2
-rw-r--r--arch/arm/include/asm/dma-contiguous.h3
-rw-r--r--arch/arm/include/asm/elf.h8
-rw-r--r--arch/arm/include/asm/fiq_debugger.h64
-rw-r--r--arch/arm/include/asm/fiq_glue.h3
-rw-r--r--arch/arm/include/asm/ftrace.h10
-rw-r--r--arch/arm/include/asm/futex.h6
-rw-r--r--arch/arm/include/asm/hardirq.h2
-rw-r--r--arch/arm/include/asm/hardware/coresight.h10
-rw-r--r--arch/arm/include/asm/hardware/debug-pl01x.S2
-rw-r--r--arch/arm/include/asm/io.h8
-rw-r--r--arch/arm/include/asm/jump_label.h2
-rw-r--r--arch/arm/include/asm/kgdb.h3
-rw-r--r--arch/arm/include/asm/kvm_asm.h22
-rw-r--r--arch/arm/include/asm/mach/arch.h5
-rw-r--r--arch/arm/include/asm/mcpm.h8
-rw-r--r--arch/arm/include/asm/mmu.h5
-rw-r--r--arch/arm/include/asm/mmu_context.h30
-rw-r--r--arch/arm/include/asm/outercache.h4
-rw-r--r--arch/arm/include/asm/page.h2
-rw-r--r--arch/arm/include/asm/pgtable-2level.h1
-rw-r--r--arch/arm/include/asm/pgtable.h2
-rw-r--r--arch/arm/include/asm/pmu.h12
-rw-r--r--arch/arm/include/asm/processor.h4
-rw-r--r--arch/arm/include/asm/psci.h21
-rw-r--r--arch/arm/include/asm/smp.h1
-rw-r--r--arch/arm/include/asm/spinlock.h82
-rw-r--r--arch/arm/include/asm/syscall.h6
-rw-r--r--arch/arm/include/asm/thread_info.h1
-rw-r--r--arch/arm/include/asm/tlb.h7
-rw-r--r--arch/arm/include/asm/topology.h34
-rw-r--r--arch/arm/include/asm/uaccess.h3
-rw-r--r--arch/arm/include/asm/unistd.h3
-rw-r--r--arch/arm/include/uapi/asm/Kbuild1
-rw-r--r--arch/arm/include/uapi/asm/a.out.h34
-rw-r--r--arch/arm/include/uapi/asm/hwcap.h3
-rw-r--r--arch/arm/include/uapi/asm/unistd.h6
-rw-r--r--arch/arm/kernel/Makefile8
-rw-r--r--arch/arm/kernel/calls.S4
-rw-r--r--arch/arm/kernel/crash_dump.c2
-rw-r--r--arch/arm/kernel/devtree.c2
-rw-r--r--arch/arm/kernel/entry-armv.S109
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/fiq.c19
-rw-r--r--arch/arm/kernel/head.S13
-rw-r--r--arch/arm/kernel/hw_breakpoint.c3
-rw-r--r--arch/arm/kernel/hyp-stub.S2
-rw-r--r--arch/arm/kernel/machine_kexec.c8
-rw-r--r--arch/arm/kernel/module.c57
-rw-r--r--arch/arm/kernel/perf_event.c41
-rw-r--r--arch/arm/kernel/perf_event_cpu.c117
-rw-r--r--arch/arm/kernel/perf_event_v7.c57
-rw-r--r--arch/arm/kernel/process.c53
-rw-r--r--arch/arm/kernel/psci.c49
-rw-r--r--arch/arm/kernel/psci_smp.c84
-rw-r--r--arch/arm/kernel/setup.c55
-rw-r--r--arch/arm/kernel/signal.c72
-rw-r--r--arch/arm/kernel/signal.h12
-rw-r--r--arch/arm/kernel/sigreturn_codes.S80
-rw-r--r--arch/arm/kernel/sleep.S33
-rw-r--r--arch/arm/kernel/smp.c28
-rw-r--r--arch/arm/kernel/smp_scu.c14
-rw-r--r--arch/arm/kernel/smp_tlb.c18
-rw-r--r--arch/arm/kernel/smp_twd.c26
-rw-r--r--arch/arm/kernel/stacktrace.c20
-rw-r--r--arch/arm/kernel/topology.c135
-rw-r--r--arch/arm/kernel/traps.c80
-rw-r--r--arch/arm/kernel/vmlinux.lds.S17
-rw-r--r--arch/arm/kvm/arm.c30
-rw-r--r--arch/arm/kvm/coproc.c28
-rw-r--r--arch/arm/kvm/coproc.h3
-rw-r--r--arch/arm/kvm/coproc_a15.c6
-rw-r--r--arch/arm/kvm/interrupts.S16
-rw-r--r--arch/arm/kvm/interrupts_head.S14
-rw-r--r--arch/arm/kvm/mmu.c34
-rw-r--r--arch/arm/mach-at91/Makefile2
-rw-r--r--arch/arm/mach-at91/at91sam9260.c2
-rw-r--r--arch/arm/mach-at91/at91sam9261.c2
-rw-r--r--arch/arm/mach-at91/at91sam9263.c3
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c3
-rw-r--r--arch/arm/mach-at91/at91sam9n12.c6
-rw-r--r--arch/arm/mach-at91/at91sam9rl.c3
-rw-r--r--arch/arm/mach-at91/at91sam9x5.c6
-rw-r--r--arch/arm/mach-at91/generic.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9n12.h5
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9x5.h5
-rw-r--r--arch/arm/mach-at91/include/mach/sama5d3.h5
-rw-r--r--arch/arm/mach-at91/sam9_smc.c2
-rw-r--r--arch/arm/mach-at91/sama5d3.c12
-rw-r--r--arch/arm/mach-at91/sysirq_mask.c75
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c1
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c1
-rw-r--r--arch/arm/mach-ebsa110/core.c2
-rw-r--r--arch/arm/mach-exynos/mach-exynos5-dt.c26
-rw-r--r--arch/arm/mach-footbridge/common.c3
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c5
-rw-r--r--arch/arm/mach-footbridge/dc21285.c4
-rw-r--r--arch/arm/mach-footbridge/ebsa285.c22
-rw-r--r--arch/arm/mach-highbank/Kconfig1
-rw-r--r--arch/arm/mach-highbank/highbank.c15
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c2
-rw-r--r--arch/arm/mach-imx/devices/platform-ipu-core.c2
-rw-r--r--arch/arm/mach-imx/mm-imx3.c2
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c3
-rw-r--r--arch/arm/mach-iop13xx/io.c2
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig4
-rw-r--r--arch/arm/mach-ixp4xx/common.c2
-rw-r--r--arch/arm/mach-msm/common.h2
-rw-r--r--arch/arm/mach-msm/io.c2
-rw-r--r--arch/arm/mach-mvebu/Kconfig1
-rw-r--r--arch/arm/mach-mvebu/coherency_ll.S3
-rw-r--r--arch/arm/mach-mvebu/headsmp.S3
-rw-r--r--arch/arm/mach-mxs/pm.h4
-rw-r--r--arch/arm/mach-omap1/board-h2.c2
-rw-r--r--arch/arm/mach-omap1/board-h3.c2
-rw-r--r--arch/arm/mach-omap1/board-innovator.c2
-rw-r--r--arch/arm/mach-omap1/board-osk.c2
-rw-r--r--arch/arm/mach-omap2/cclock3xxx_data.c3
-rw-r--r--arch/arm/mach-omap2/control.c3
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c25
-rw-r--r--arch/arm/mach-omap2/gpmc.c4
-rw-r--r--arch/arm/mach-omap2/irq.c10
-rw-r--r--arch/arm/mach-omap2/mux.c6
-rw-r--r--arch/arm/mach-omap2/omap4-common.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c47
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c23
-rw-r--r--arch/arm/mach-omap2/pm.h2
-rw-r--r--arch/arm/mach-pxa/reset.c8
-rw-r--r--arch/arm/mach-pxa/tosa.c102
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2410.c161
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2440.c3
-rw-r--r--arch/arm/mach-sa1100/assabet.c3
-rw-r--r--arch/arm/mach-sa1100/include/mach/collie.h2
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c4
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c2
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c4
-rw-r--r--arch/arm/mach-shmobile/setup-emev2.c8
-rw-r--r--arch/arm/mach-shmobile/setup-r8a73a4.c2
-rw-r--r--arch/arm/mach-tegra/common.c11
-rw-r--r--arch/arm/mach-versatile/include/mach/platform.h2
-rw-r--r--arch/arm/mach-versatile/pci.c47
-rw-r--r--arch/arm/mach-vexpress/Kconfig21
-rw-r--r--arch/arm/mach-vexpress/Makefile8
-rw-r--r--arch/arm/mach-vexpress/core.h2
-rw-r--r--arch/arm/mach-vexpress/dcscb.c236
-rw-r--r--arch/arm/mach-vexpress/dcscb_setup.S38
-rw-r--r--arch/arm/mach-vexpress/include/mach/tc2.h10
-rw-r--r--arch/arm/mach-vexpress/platsmp.c22
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c277
-rw-r--r--arch/arm/mach-vexpress/tc2_pm_psci.c173
-rw-r--r--arch/arm/mach-vexpress/tc2_pm_setup.S68
-rw-r--r--arch/arm/mach-vexpress/v2m.c29
-rw-r--r--arch/arm/mach-virt/Makefile1
-rw-r--r--arch/arm/mach-virt/platsmp.c50
-rw-r--r--arch/arm/mach-virt/virt.c3
-rw-r--r--arch/arm/mm/Kconfig46
-rw-r--r--arch/arm/mm/abort-ev6.S5
-rw-r--r--arch/arm/mm/alignment.c9
-rw-r--r--arch/arm/mm/cache-v7.S14
-rw-r--r--arch/arm/mm/context.c55
-rw-r--r--arch/arm/mm/dma-mapping.c8
-rw-r--r--arch/arm/mm/extable.c7
-rw-r--r--arch/arm/mm/fault.c13
-rw-r--r--arch/arm/mm/idmap.c7
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm/mm/ioremap.c10
-rw-r--r--arch/arm/mm/mmap.c8
-rw-r--r--arch/arm/mm/mmu.c24
-rw-r--r--arch/arm/mm/nommu.c6
-rw-r--r--arch/arm/mm/pgd.c3
-rw-r--r--arch/arm/mm/proc-macros.S19
-rw-r--r--arch/arm/mm/proc-v6.S7
-rw-r--r--arch/arm/mm/proc-v7-2level.S9
-rw-r--r--arch/arm/mm/proc-v7-3level.S20
-rw-r--r--arch/arm/mm/proc-v7.S17
-rw-r--r--arch/arm/net/bpf_jit_32.c12
-rw-r--r--arch/arm/plat-samsung/include/plat/clock.h5
-rw-r--r--arch/arm/plat-samsung/s5p-dev-mfc.c4
-rw-r--r--arch/arm/plat-versatile/headsmp.S4
-rw-r--r--arch/arm/xen/enlighten.c6
-rw-r--r--arch/arm64/Kconfig257
-rw-r--r--arch/arm64/Kconfig.debug14
-rw-r--r--arch/arm64/Makefile19
-rw-r--r--arch/arm64/boot/.gitignore1
-rw-r--r--arch/arm64/boot/Makefile13
-rw-r--r--arch/arm64/boot/dts/Makefile18
-rw-r--r--arch/arm64/boot/dts/apm-mustang.dts30
-rw-r--r--arch/arm64/boot/dts/apm-storm.dtsi398
-rw-r--r--arch/arm64/boot/dts/clcd-panels.dtsi52
-rw-r--r--arch/arm64/boot/dts/foundation-v8.dts2
-rw-r--r--arch/arm64/boot/dts/fvp-base-gicv2-psci.dts294
-rw-r--r--arch/arm64/boot/dts/juno.dts498
-rw-r--r--arch/arm64/boot/dts/rtsm_ve-aemv8a.dts51
-rw-r--r--arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi9
-rw-r--r--arch/arm64/configs/defconfig23
-rw-r--r--arch/arm64/crypto/Kconfig53
-rw-r--r--arch/arm64/crypto/Makefile38
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-core.S222
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-glue.c297
-rw-r--r--arch/arm64/crypto/aes-ce-cipher.c155
-rw-r--r--arch/arm64/crypto/aes-ce.S133
-rw-r--r--arch/arm64/crypto/aes-glue.c446
-rw-r--r--arch/arm64/crypto/aes-modes.S532
-rw-r--r--arch/arm64/crypto/aes-neon.S382
-rw-r--r--arch/arm64/crypto/ghash-ce-core.S87
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c156
-rw-r--r--arch/arm64/crypto/sha1-ce-core.S153
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c174
-rw-r--r--arch/arm64/crypto/sha2-ce-core.S156
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c255
-rw-r--r--arch/arm64/include/asm/Kbuild4
-rw-r--r--arch/arm64/include/asm/arch_timer.h65
-rw-r--r--arch/arm64/include/asm/assembler.h31
-rw-r--r--arch/arm64/include/asm/atomic.h55
-rw-r--r--arch/arm64/include/asm/bL_switcher.h54
-rw-r--r--arch/arm64/include/asm/barrier.h5
-rw-r--r--arch/arm64/include/asm/cacheflush.h22
-rw-r--r--arch/arm64/include/asm/cmpxchg.h54
-rw-r--r--arch/arm64/include/asm/compat.h21
-rw-r--r--arch/arm64/include/asm/cpu_ops.h65
-rw-r--r--arch/arm64/include/asm/cpufeature.h29
-rw-r--r--arch/arm64/include/asm/cputype.h31
-rw-r--r--arch/arm64/include/asm/debug-monitors.h94
-rw-r--r--arch/arm64/include/asm/device.h3
-rw-r--r--arch/arm64/include/asm/dma-contiguous.h28
-rw-r--r--arch/arm64/include/asm/dma-mapping.h25
-rw-r--r--arch/arm64/include/asm/efi.h14
-rw-r--r--arch/arm64/include/asm/elf.h21
-rw-r--r--arch/arm64/include/asm/esr.h2
-rw-r--r--arch/arm64/include/asm/fixmap.h67
-rw-r--r--arch/arm64/include/asm/fpsimd.h23
-rw-r--r--arch/arm64/include/asm/fpsimdmacros.h35
-rw-r--r--arch/arm64/include/asm/ftrace.h59
-rw-r--r--arch/arm64/include/asm/futex.h11
-rw-r--r--arch/arm64/include/asm/hardirq.h2
-rw-r--r--arch/arm64/include/asm/hugetlb.h117
-rw-r--r--arch/arm64/include/asm/hwcap.h20
-rw-r--r--arch/arm64/include/asm/insn.h113
-rw-r--r--arch/arm64/include/asm/io.h11
-rw-r--r--arch/arm64/include/asm/irq.h1
-rw-r--r--arch/arm64/include/asm/irqflags.h23
-rw-r--r--arch/arm64/include/asm/jump_label.h52
-rw-r--r--arch/arm64/include/asm/kgdb.h84
-rw-r--r--arch/arm64/include/asm/memory.h22
-rw-r--r--arch/arm64/include/asm/mmu.h6
-rw-r--r--arch/arm64/include/asm/mmu_context.h6
-rw-r--r--arch/arm64/include/asm/neon.h18
-rw-r--r--arch/arm64/include/asm/page.h9
-rw-r--r--arch/arm64/include/asm/percpu.h49
-rw-r--r--arch/arm64/include/asm/pgtable-2level-hwdef.h4
-rw-r--r--arch/arm64/include/asm/pgtable-2level-types.h2
-rw-r--r--arch/arm64/include/asm/pgtable-3level-types.h2
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h31
-rw-r--r--arch/arm64/include/asm/pgtable.h292
-rw-r--r--arch/arm64/include/asm/proc-fns.h3
-rw-r--r--arch/arm64/include/asm/processor.h9
-rw-r--r--arch/arm64/include/asm/psci.h23
-rw-r--r--arch/arm64/include/asm/ptrace.h8
-rw-r--r--arch/arm64/include/asm/sigcontext.h31
-rw-r--r--arch/arm64/include/asm/smp.h15
-rw-r--r--arch/arm64/include/asm/smp_plat.h13
-rw-r--r--arch/arm64/include/asm/spinlock.h92
-rw-r--r--arch/arm64/include/asm/spinlock_types.h10
-rw-r--r--arch/arm64/include/asm/suspend.h27
-rw-r--r--arch/arm64/include/asm/syscall.h7
-rw-r--r--arch/arm64/include/asm/thread_info.h21
-rw-r--r--arch/arm64/include/asm/timex.h6
-rw-r--r--arch/arm64/include/asm/tlb.h13
-rw-r--r--arch/arm64/include/asm/tlbflush.h51
-rw-r--r--arch/arm64/include/asm/topology.h70
-rw-r--r--arch/arm64/include/asm/uaccess.h43
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/virt.h3
-rw-r--r--arch/arm64/include/asm/word-at-a-time.h94
-rw-r--r--arch/arm64/include/uapi/asm/Kbuild1
-rw-r--r--arch/arm64/include/uapi/asm/byteorder.h4
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h7
-rw-r--r--arch/arm64/include/uapi/asm/perf_regs.h40
-rw-r--r--arch/arm64/kernel/Makefile20
-rw-r--r--arch/arm64/kernel/arm64ksyms.c10
-rw-r--r--arch/arm64/kernel/asm-offsets.c44
-rw-r--r--arch/arm64/kernel/cpu_ops.c99
-rw-r--r--arch/arm64/kernel/cputable.c2
-rw-r--r--arch/arm64/kernel/debug-monitors.c157
-rw-r--r--arch/arm64/kernel/early_printk.c8
-rw-r--r--arch/arm64/kernel/efi-entry.S109
-rw-r--r--arch/arm64/kernel/efi-stub.c79
-rw-r--r--arch/arm64/kernel/efi.c469
-rw-r--r--arch/arm64/kernel/entry-fpsimd.S24
-rw-r--r--arch/arm64/kernel/entry-ftrace.S218
-rw-r--r--arch/arm64/kernel/entry.S47
-rw-r--r--arch/arm64/kernel/fpsimd.c225
-rw-r--r--arch/arm64/kernel/ftrace.c177
-rw-r--r--arch/arm64/kernel/head.S279
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c205
-rw-r--r--arch/arm64/kernel/insn.c304
-rw-r--r--arch/arm64/kernel/irq.c61
-rw-r--r--arch/arm64/kernel/jump_label.c58
-rw-r--r--arch/arm64/kernel/kgdb.c336
-rw-r--r--arch/arm64/kernel/kuser32.S55
-rw-r--r--arch/arm64/kernel/module.c156
-rw-r--r--arch/arm64/kernel/perf_event.c204
-rw-r--r--arch/arm64/kernel/perf_regs.c46
-rw-r--r--arch/arm64/kernel/process.c58
-rw-r--r--arch/arm64/kernel/psci.c196
-rw-r--r--arch/arm64/kernel/ptrace.c201
-rw-r--r--arch/arm64/kernel/return_address.c55
-rw-r--r--arch/arm64/kernel/setup.c190
-rw-r--r--arch/arm64/kernel/signal.c42
-rw-r--r--arch/arm64/kernel/signal32.c46
-rw-r--r--arch/arm64/kernel/sleep.S184
-rw-r--r--arch/arm64/kernel/smp.c261
-rw-r--r--arch/arm64/kernel/smp_psci.c53
-rw-r--r--arch/arm64/kernel/smp_spin_table.c60
-rw-r--r--arch/arm64/kernel/stacktrace.c10
-rw-r--r--arch/arm64/kernel/suspend.c140
-rw-r--r--arch/arm64/kernel/sys32.S22
-rw-r--r--arch/arm64/kernel/time.c6
-rw-r--r--arch/arm64/kernel/topology.c590
-rw-r--r--arch/arm64/kernel/traps.c5
-rw-r--r--arch/arm64/kernel/vdso.c85
-rw-r--r--arch/arm64/kernel/vdso/Makefile6
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S7
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S50
-rw-r--r--arch/arm64/lib/Makefile8
-rw-r--r--arch/arm64/lib/bitops.S3
-rw-r--r--arch/arm64/lib/strncpy_from_user.S50
-rw-r--r--arch/arm64/lib/strnlen_user.S47
-rw-r--r--arch/arm64/mm/Makefile3
-rw-r--r--arch/arm64/mm/cache.S98
-rw-r--r--arch/arm64/mm/copypage.c2
-rw-r--r--arch/arm64/mm/dma-mapping.c310
-rw-r--r--arch/arm64/mm/fault.c67
-rw-r--r--arch/arm64/mm/flush.c38
-rw-r--r--arch/arm64/mm/hugetlbpage.c70
-rw-r--r--arch/arm64/mm/init.c56
-rw-r--r--arch/arm64/mm/ioremap.c96
-rw-r--r--arch/arm64/mm/mm.h1
-rw-r--r--arch/arm64/mm/mmu.c185
-rw-r--r--arch/arm64/mm/pgd.c11
-rw-r--r--arch/arm64/mm/proc-macros.S3
-rw-r--r--arch/arm64/mm/proc.S89
-rw-r--r--arch/arm64/mm/tlb.S71
-rw-r--r--arch/avr32/Makefile2
-rw-r--r--arch/avr32/boards/mimc200/fram.c1
-rw-r--r--arch/avr32/boot/u-boot/head.S35
-rw-r--r--arch/avr32/kernel/entry-avr32b.S3
-rw-r--r--arch/avr32/kernel/head.S20
-rw-r--r--arch/avr32/kernel/time.c9
-rw-r--r--arch/blackfin/include/asm/ftrace.h11
-rw-r--r--arch/c6x/kernel/devicetree.c3
-rw-r--r--arch/c6x/mm/init.c1
-rw-r--r--arch/cris/include/asm/io.h1
-rw-r--r--arch/ia64/include/asm/processor.h2
-rw-r--r--arch/ia64/include/asm/tlb.h9
-rw-r--r--arch/ia64/kernel/efi.c59
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/emu/natfeat.c23
-rw-r--r--arch/m68k/include/asm/div64.h9
-rw-r--r--arch/metag/include/asm/barrier.h3
-rw-r--r--arch/metag/include/asm/processor.h2
-rw-r--r--arch/metag/mm/init.c5
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/microblaze/kernel/prom.c11
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/ath79/clock.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2
-rw-r--r--arch/mips/cavium-octeon/setup.c23
-rw-r--r--arch/mips/include/asm/io.h5
-rw-r--r--arch/mips/include/asm/jump_label.h2
-rw-r--r--arch/mips/include/asm/mipsregs.h1
-rw-r--r--arch/mips/include/asm/reg.h260
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mips/include/uapi/asm/unistd.h30
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c32
-rw-r--r--arch/mips/kernel/irq-gic.c6
-rw-r--r--arch/mips/kernel/irq-msc01.c2
-rw-r--r--arch/mips/kernel/prom.c3
-rw-r--r--arch/mips/kernel/ptrace.c3
-rw-r--r--arch/mips/kernel/scall32-o32.S8
-rw-r--r--arch/mips/kernel/scall64-64.S6
-rw-r--r--arch/mips/kernel/scall64-n32.S6
-rw-r--r--arch/mips/kernel/scall64-o32.S8
-rw-r--r--arch/mips/kernel/unaligned.c1
-rw-r--r--arch/mips/kvm/kvm_mips.c15
-rw-r--r--arch/mips/kvm/kvm_mips_emul.c7
-rw-r--r--arch/mips/lantiq/dts/easy50712.dts1
-rw-r--r--arch/mips/mm/c-r4k.c5
-rw-r--r--arch/mips/mm/dma-default.c16
-rw-r--r--arch/mips/mm/tlbex.c1
-rw-r--r--arch/mips/power/hibernate.S1
-rw-r--r--arch/mips/ralink/dts/mt7620a_eval.dts1
-rw-r--r--arch/mips/ralink/dts/rt2880_eval.dts1
-rw-r--r--arch/mips/ralink/dts/rt3052_eval.dts1
-rw-r--r--arch/mips/ralink/dts/rt3883_eval.dts1
-rw-r--r--arch/openrisc/kernel/entry.S59
-rw-r--r--arch/openrisc/kernel/prom.c3
-rw-r--r--arch/openrisc/kernel/signal.c198
-rw-r--r--arch/parisc/include/asm/cacheflush.h10
-rw-r--r--arch/parisc/include/asm/ftrace.h10
-rw-r--r--arch/parisc/include/asm/page.h4
-rw-r--r--arch/parisc/include/asm/parisc-device.h3
-rw-r--r--arch/parisc/include/asm/processor.h2
-rw-r--r--arch/parisc/include/asm/socket.h11
-rw-r--r--arch/parisc/include/asm/special_insns.h9
-rw-r--r--arch/parisc/include/asm/tlbflush.h5
-rw-r--r--arch/parisc/include/uapi/asm/signal.h2
-rw-r--r--arch/parisc/include/uapi/asm/socket.h11
-rw-r--r--arch/parisc/kernel/cache.c166
-rw-r--r--arch/parisc/kernel/hardware.c3
-rw-r--r--arch/parisc/kernel/head.S4
-rw-r--r--arch/parisc/kernel/inventory.c1
-rw-r--r--arch/parisc/kernel/sys_parisc.c25
-rw-r--r--arch/parisc/kernel/syscall_table.S2
-rw-r--r--arch/parisc/kernel/traps.c6
-rw-r--r--arch/parisc/lib/memcpy.c79
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/Makefile4
-rw-r--r--arch/powerpc/include/asm/compat.h4
-rw-r--r--arch/powerpc/include/asm/exception-64s.h10
-rw-r--r--arch/powerpc/include/asm/jump_label.h2
-rw-r--r--arch/powerpc/include/asm/module.h5
-rw-r--r--arch/powerpc/include/asm/page.h10
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h2
-rw-r--r--arch/powerpc/include/asm/pgalloc-32.h6
-rw-r--r--arch/powerpc/include/asm/pgalloc-64.h6
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h7
-rw-r--r--arch/powerpc/include/asm/processor.h4
-rw-r--r--arch/powerpc/include/asm/prom.h3
-rw-r--r--arch/powerpc/include/asm/pte-hash64-64k.h32
-rw-r--r--arch/powerpc/include/asm/reg.h33
-rw-r--r--arch/powerpc/include/asm/smp.h4
-rw-r--r--arch/powerpc/include/asm/switch_to.h9
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/include/asm/topology.h10
-rw-r--r--arch/powerpc/include/uapi/asm/cputable.h1
-rw-r--r--arch/powerpc/kernel/align.c10
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/cacheinfo.c3
-rw-r--r--arch/powerpc/kernel/cputable.c3
-rw-r--r--arch/powerpc/kernel/crash_dump.c8
-rw-r--r--arch/powerpc/kernel/entry_64.S36
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S57
-rw-r--r--arch/powerpc/kernel/fadump.c4
-rw-r--r--arch/powerpc/kernel/head_64.S1
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c3
-rw-r--r--arch/powerpc/kernel/iommu.c2
-rw-r--r--arch/powerpc/kernel/lparcfg.c22
-rw-r--r--arch/powerpc/kernel/process.c54
-rw-r--r--arch/powerpc/kernel/prom.c71
-rw-r--r--arch/powerpc/kernel/ptrace.c4
-rw-r--r--arch/powerpc/kernel/reloc_64.S1
-rw-r--r--arch/powerpc/kernel/rtas.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kernel/signal_32.c78
-rw-r--r--arch/powerpc/kernel/signal_64.c16
-rw-r--r--arch/powerpc/kernel/sysfs.c18
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/kernel/tm.S90
-rw-r--r--arch/powerpc/kernel/traps.c55
-rw-r--r--arch/powerpc/kernel/vio.c12
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S3
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv.c7
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S2
-rw-r--r--arch/powerpc/kvm/book3s_xics.c1
-rw-r--r--arch/powerpc/kvm/e500_mmu.c2
-rw-r--r--arch/powerpc/lib/checksum_64.S58
-rw-r--r--arch/powerpc/lib/crtsavres.S186
-rw-r--r--arch/powerpc/lib/sstep.c2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c22
-rw-r--r--arch/powerpc/mm/numa.c136
-rw-r--r--arch/powerpc/mm/slice.c2
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c7
-rw-r--r--arch/powerpc/perf/core-book3s.c68
-rw-r--r--arch/powerpc/perf/power8-pmu.c24
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/52xx/efika.c4
-rw-r--r--arch/powerpc/platforms/chrp/setup.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal.c12
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c33
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c1
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c22
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c2
-rw-r--r--arch/powerpc/platforms/pseries/setup.c35
-rw-r--r--arch/s390/Kconfig8
-rw-r--r--arch/s390/crypto/aes_s390.c118
-rw-r--r--arch/s390/crypto/des_s390.c98
-rw-r--r--arch/s390/include/asm/bitops.h2
-rw-r--r--arch/s390/include/asm/ccwdev.h2
-rw-r--r--arch/s390/include/asm/jump_label.h2
-rw-r--r--arch/s390/include/asm/lowcore.h11
-rw-r--r--arch/s390/include/asm/tlb.h8
-rw-r--r--arch/s390/include/uapi/asm/statfs.h10
-rw-r--r--arch/s390/kernel/entry.S1
-rw-r--r--arch/s390/kernel/entry64.S1
-rw-r--r--arch/s390/kernel/head64.S7
-rw-r--r--arch/s390/kernel/ptrace.c9
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/smp.c4
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/kvm/diag.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c21
-rw-r--r--arch/s390/lib/uaccess_pt.c3
-rw-r--r--arch/s390/mm/init.c1
-rw-r--r--arch/s390/mm/page-states.c10
-rw-r--r--arch/s390/net/bpf_jit_comp.c30
-rw-r--r--arch/s390/oprofile/init.c2
-rw-r--r--arch/score/Kconfig3
-rw-r--r--arch/score/Makefile4
-rw-r--r--arch/score/include/asm/checksum.h93
-rw-r--r--arch/score/include/asm/io.h1
-rw-r--r--arch/score/include/asm/pgalloc.h2
-rw-r--r--arch/score/kernel/entry.S4
-rw-r--r--arch/score/kernel/process.c4
-rw-r--r--arch/score/kernel/vmlinux.lds.S1
-rw-r--r--arch/sh/include/asm/ftrace.h10
-rw-r--r--arch/sh/include/asm/tlb.h6
-rw-r--r--arch/sh/kernel/dumpstack.c2
-rw-r--r--arch/sh/kernel/kgdb.c1
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c5
-rw-r--r--arch/sh/lib/Makefile2
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/include/asm/jump_label.h2
-rw-r--r--arch/sparc/include/asm/pgtable_64.h10
-rw-r--r--arch/sparc/include/asm/tlbflush_64.h12
-rw-r--r--arch/sparc/include/asm/uaccess_64.h4
-rw-r--r--arch/sparc/kernel/asm-offsets.c2
-rw-r--r--arch/sparc/kernel/ds.c5
-rw-r--r--arch/sparc/kernel/entry.S2
-rw-r--r--arch/sparc/kernel/ktlb.S3
-rw-r--r--arch/sparc/kernel/ldc.c2
-rw-r--r--arch/sparc/kernel/pci.c4
-rw-r--r--arch/sparc/kernel/process_64.c4
-rw-r--r--arch/sparc/kernel/smp_64.c6
-rw-r--r--arch/sparc/kernel/sys32.S2
-rw-r--r--arch/sparc/kernel/syscalls.S12
-rw-r--r--arch/sparc/kernel/trampoline_64.S2
-rw-r--r--arch/sparc/kernel/unaligned_64.c12
-rw-r--r--arch/sparc/lib/NG2memcpy.S1
-rw-r--r--arch/sparc/lib/ksyms.c9
-rw-r--r--arch/sparc/math-emu/math_32.c2
-rw-r--r--arch/sparc/mm/fault_64.c102
-rw-r--r--arch/sparc/mm/hypersparc.S8
-rw-r--r--arch/sparc/mm/init_64.c27
-rw-r--r--arch/sparc/mm/swift.S8
-rw-r--r--arch/sparc/mm/tsb.c14
-rw-r--r--arch/sparc/mm/tsunami.S6
-rw-r--r--arch/sparc/mm/viking.S10
-rw-r--r--arch/sparc/net/bpf_jit_comp.c25
-rw-r--r--arch/tile/include/asm/compat.h1
-rw-r--r--arch/tile/include/asm/percpu.h34
-rw-r--r--arch/um/include/asm/tlb.h6
-rw-r--r--arch/um/include/shared/os.h1
-rw-r--r--arch/um/kernel/Makefile2
-rw-r--r--arch/um/kernel/exitcode.c4
-rw-r--r--arch/um/kernel/maccess.c24
-rw-r--r--arch/um/os-Linux/process.c52
-rw-r--r--arch/unicore32/mm/alignment.c1
-rw-r--r--arch/x86/Kconfig33
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/boot/Makefile6
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/boot/compressed/eboot.c146
-rw-r--r--arch/x86/boot/compressed/eboot.h52
-rw-r--r--arch/x86/boot/compressed/head_32.S14
-rw-r--r--arch/x86/boot/compressed/head_64.S9
-rw-r--r--arch/x86/boot/header.S26
-rw-r--r--arch/x86/boot/tools/build.c37
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_asm.S29
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c14
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c2
-rw-r--r--arch/x86/ia32/ia32_signal.c2
-rw-r--r--arch/x86/include/asm/bootparam_utils.h4
-rw-r--r--arch/x86/include/asm/checksum_32.h22
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/dma-contiguous.h1
-rw-r--r--arch/x86/include/asm/e820.h2
-rw-r--r--arch/x86/include/asm/efi.h2
-rw-r--r--arch/x86/include/asm/espfix.h16
-rw-r--r--arch/x86/include/asm/fpu-internal.h13
-rw-r--r--arch/x86/include/asm/hugetlb.h1
-rw-r--r--arch/x86/include/asm/irqflags.h2
-rw-r--r--arch/x86/include/asm/jump_label.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/mce.h13
-rw-r--r--arch/x86/include/asm/mmu_context.h20
-rw-r--r--arch/x86/include/asm/pgtable.h11
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h2
-rw-r--r--arch/x86/include/asm/ptrace.h16
-rw-r--r--arch/x86/include/asm/setup.h2
-rw-r--r--arch/x86/include/asm/spinlock.h4
-rw-r--r--arch/x86/include/asm/topology.h3
-rw-r--r--arch/x86/include/asm/xen/page.h31
-rw-r--r--arch/x86/include/asm/xor_avx.h4
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h1
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/acpi/sleep.c18
-rw-r--r--arch/x86/kernel/amd_nb.c13
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c10
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/cpu/intel.c5
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c21
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c16
-rw-r--r--arch/x86/kernel/cpu/perf_event.c15
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c53
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c4
-rw-r--r--arch/x86/kernel/devicetree.c3
-rw-r--r--arch/x86/kernel/e820.c5
-rw-r--r--arch/x86/kernel/early-quirks.c15
-rw-r--r--arch/x86/kernel/entry_32.S34
-rw-r--r--arch/x86/kernel/entry_64.S79
-rw-r--r--arch/x86/kernel/espfix_64.c208
-rw-r--r--arch/x86/kernel/ftrace.c95
-rw-r--r--arch/x86/kernel/head_32.S7
-rw-r--r--arch/x86/kernel/head_64.S8
-rw-r--r--arch/x86/kernel/i387.c17
-rw-r--r--arch/x86/kernel/ldt.c5
-rw-r--r--arch/x86/kernel/microcode_amd.c2
-rw-r--r--arch/x86/kernel/paravirt_patch_64.c2
-rw-r--r--arch/x86/kernel/pci-dma.c4
-rw-r--r--arch/x86/kernel/process.c6
-rw-r--r--arch/x86/kernel/quirks.c2
-rw-r--r--arch/x86/kernel/reboot.c16
-rw-r--r--arch/x86/kernel/resource.c8
-rw-r--r--arch/x86/kernel/setup.c25
-rw-r--r--arch/x86/kernel/signal.c6
-rw-r--r--arch/x86/kernel/smpboot.c7
-rw-r--r--arch/x86/kernel/sys_x86_64.c2
-rw-r--r--arch/x86/kernel/vsyscall_64.c8
-rw-r--r--arch/x86/kvm/emulate.c9
-rw-r--r--arch/x86/kvm/i8254.c18
-rw-r--r--arch/x86/kvm/irq.c2
-rw-r--r--arch/x86/kvm/lapic.c154
-rw-r--r--arch/x86/kvm/lapic.h4
-rw-r--r--arch/x86/kvm/mmu.c12
-rw-r--r--arch/x86/kvm/paging_tmpl.h8
-rw-r--r--arch/x86/kvm/svm.c6
-rw-r--r--arch/x86/kvm/vmx.c13
-rw-r--r--arch/x86/kvm/x86.c45
-rw-r--r--arch/x86/kvm/x86.h2
-rw-r--r--arch/x86/lib/csum-wrappers_64.c12
-rw-r--r--arch/x86/mm/dump_pagetables.c39
-rw-r--r--arch/x86/mm/fault.c14
-rw-r--r--arch/x86/mm/hugetlbpage.c187
-rw-r--r--arch/x86/mm/init.c4
-rw-r--r--arch/x86/mm/ioremap.c26
-rw-r--r--arch/x86/mm/mmap.c6
-rw-r--r--arch/x86/net/bpf_jit.S2
-rw-r--r--arch/x86/net/bpf_jit_comp.c14
-rw-r--r--arch/x86/pci/i386.c4
-rw-r--r--arch/x86/platform/efi/efi.c169
-rw-r--r--arch/x86/realmode/rm/Makefile3
-rw-r--r--arch/x86/syscalls/syscall_32.tbl5
-rw-r--r--arch/x86/syscalls/syscall_64.tbl11
-rw-r--r--arch/x86/xen/p2m.c10
-rw-r--r--arch/x86/xen/setup.c22
-rw-r--r--arch/x86/xen/smp.c20
-rw-r--r--arch/x86/xen/time.c17
-rw-r--r--arch/xtensa/include/asm/ftrace.h33
-rw-r--r--arch/xtensa/include/asm/traps.h44
-rw-r--r--arch/xtensa/kernel/entry.S60
-rw-r--r--arch/xtensa/kernel/head.S9
-rw-r--r--arch/xtensa/kernel/setup.c46
-rw-r--r--arch/xtensa/kernel/signal.c2
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c12
760 files changed, 22729 insertions, 6365 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index a4429bcd609e..4c0a1d03ae0d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -331,6 +331,7 @@ config HAVE_ARCH_SECCOMP_FILTER
- secure_computing is called from a ptrace_event()-safe context
- secure_computing return value is checked and a return value of -1
results in the system call being skipped immediately.
+ - seccomp syscall wired up
config SECCOMP_FILTER
def_bool y
@@ -404,6 +405,12 @@ config CLONE_BACKWARDS2
help
Architecture has the first two arguments of clone(2) swapped.
+config CLONE_BACKWARDS3
+ bool
+ help
+ Architecture has tls passed as the 3rd argument of clone(2),
+ not the 5th one.
+
config ODD_RT_SIGACTION
bool
help
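
The arch/Kconfig hunks above tighten a syscall-entry contract: for
HAVE_ARCH_SECCOMP_FILTER, a -1 from secure_computing() must cause the
system call to be skipped immediately. That contract can be sketched
as follows (hypothetical arch code, assuming the 3.x-era
secure_computing() that returns an int; no real architecture is
quoted here):

    #include <linux/seccomp.h>

    /* Hypothetical syscall-entry check illustrating the contract in
     * the help text: if secure_computing() reports -1, the filter
     * rejected the call and it must be skipped before any argument
     * is acted on. */
    static long example_syscall_trace_enter(long syscall_nr)
    {
            if (secure_computing(syscall_nr) == -1)
                    return -1;              /* skip the system call */
            return syscall_nr;              /* dispatch as usual */
    }
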
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index ea16d782af58..4f31b2eb5cdf 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -11,13 +11,16 @@
/ {
compatible = "snps,nsimosci";
- clock-frequency = <80000000>; /* 80 MHZ */
+ clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;
chosen {
- bootargs = "console=tty0 consoleblank=0";
+ /* this is for console on PGU */
+ /* bootargs = "console=tty0 consoleblank=0"; */
+ /* this is for console on serial */
+ bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
};
aliases {
@@ -44,15 +47,14 @@
};
uart0: serial@c0000000 {
- compatible = "snps,dw-apb-uart";
+ compatible = "ns8250";
reg = <0xc0000000 0x2000>;
interrupts = <11>;
- #clock-frequency = <80000000>;
clock-frequency = <3686400>;
baud = <115200>;
reg-shift = <2>;
reg-io-width = <4>;
- status = "okay";
+ no-loopback-test = <1>;
};
pgu0: pgu@c9000000 {
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 446c96c24eff..00788e741ce7 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -54,6 +54,7 @@ CONFIG_SERIO_ARC_PS2=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_ARC=y
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 442ce5d0f709..43de30256981 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
{
unsigned long loops;
- /* (long long) cast ensures 64 bit MPY - real or emulated
+ /* (u64) cast ensures 64 bit MPY - real or emulated
* HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
*/
- loops = ((long long)(usecs * 4295 * HZ) *
- (long long)(loops_per_jiffy)) >> 32;
+ loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
__delay(loops);
}
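
The rewritten multiply is easier to trust with a worked check: 4295 is
a fixed-point approximation of 2^32 / 10^6 (4294.967296), so the final
">> 32" turns the chain of multiplies into a divide-by-one-million
without any division instruction. A small host-side check, with sample
values for HZ and loops_per_jiffy (both assumed, not taken from any
real board):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t usecs = 100, hz = 100, lpj = 50000;    /* assumed */
            uint64_t approx = (usecs * 4295 * hz * lpj) >> 32;
            uint64_t exact  = usecs * hz * lpj / 1000000;

            /* prints approx=500 exact=500 */
            printf("approx=%llu exact=%llu\n",
                   (unsigned long long)approx, (unsigned long long)exact);
            return 0;
    }
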
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index eac071668201..c29d56587bf0 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -137,13 +137,6 @@ static inline void arch_unmask_irq(unsigned int irq)
flag \scratch
.endm
-.macro IRQ_DISABLE_SAVE scratch, save
- lr \scratch, [status32]
- mov \save, \scratch /* Make a copy */
- bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
- flag \scratch
-.endm
-
.macro IRQ_ENABLE scratch
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 6179de7e07c2..2046a89a57cf 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -52,12 +52,14 @@ struct pt_regs {
/*to distinguish bet excp, syscall, irq */
union {
+ struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
/* so that assembly code is same for LE/BE */
unsigned long orig_r8:16, event:16;
#else
unsigned long event:16, orig_r8:16;
#endif
+ };
long orig_r8_word;
};
};
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
index 6fc1159dfefe..764f1e3ba752 100644
--- a/arch/arc/include/asm/sections.h
+++ b/arch/arc/include/asm/sections.h
@@ -11,7 +11,6 @@
#include <asm-generic/sections.h>
-extern char _int_vec_base_lds[];
extern char __arc_dccm_base[];
extern char __dtb_start[];
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b0..b6a8c2dfbe6e 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+ unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r" (tmp)
+ : "r"(&(lock->slock))
+ : "memory");
+
smp_mb();
}
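
The substance of this change is that unlock now goes through the same
atomic EX (exchange) instruction that the lock and trylock paths use,
instead of a plain store that could race with them. In portable C11
terms the new unlock is roughly the following (an analogy only, not
the kernel code; 0 as the "unlocked" encoding is assumed):

    #include <stdatomic.h>

    /* Release by exchanging, so the unlock takes part in the same
     * atomic read-modify-write protocol as the lock-side EX loop. */
    static void example_unlock(atomic_uint *slock)
    {
            atomic_exchange_explicit(slock, 0, memory_order_release);
    }
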
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 33ab3048e9b2..29de09804306 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -18,7 +18,7 @@ static inline long
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
if (user_mode(regs) && in_syscall(regs))
- return regs->orig_r8;
+ return regs->r8;
else
return -1;
}
@@ -26,8 +26,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
- /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
- regs->r8 = regs->orig_r8;
+ regs->r0 = regs->orig_r0;
}
static inline long
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375b..30c9baffa96f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
* Because it essentially checks if buffer end is within limit and @len is
* non-ngeative, which implies that buffer start will be within limit too.
*
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
* compile time constant, causing first sub-expression to be compile time
* subsumed.
*
@@ -53,7 +53,7 @@
*
*/
#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
- (((addr)+(sz)) <= get_fs()))
+ ((addr) <= (get_fs() - (sz))))
#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
likely(__user_ok((addr), (sz))))
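
The rewritten bound check also closes an integer-overflow hole: in the
old form, an addr near ULONG_MAX makes (addr)+(sz) wrap around and slip
under the limit. A host-side demonstration (the limit value is made up):

    #include <assert.h>
    #include <limits.h>

    static int old_ok(unsigned long addr, unsigned long sz, unsigned long limit)
    {
            return addr + sz <= limit;                 /* can wrap around */
    }

    static int new_ok(unsigned long addr, unsigned long sz, unsigned long limit)
    {
            return sz <= limit && addr <= limit - sz;  /* cannot wrap */
    }

    int main(void)
    {
            unsigned long limit = 0x80000000UL;        /* assumed user limit */

            assert(old_ok(ULONG_MAX - 1, 4, limit));   /* bogus accept */
            assert(!new_ok(ULONG_MAX - 1, 4, limit));  /* correctly rejected */
            return 0;
    }
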
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 30333cec0fef..ef9d79a3db25 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -11,6 +11,7 @@
#ifndef _UAPI__ASM_ARC_PTRACE_H
#define _UAPI__ASM_ARC_PTRACE_H
+#define PTRACE_GET_THREAD_AREA 25
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index bdee3a812052..afdd13cf881c 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -40,7 +40,7 @@ struct machine_desc * __init setup_machine_fdt(void *dt)
const char *model, *compat;
void *clk;
char manufacturer[16];
- unsigned long len;
+ int len;
/* check device tree validity */
if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 0c6d664d4a83..6f3cd0fb4b54 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -498,7 +498,7 @@ tracesys_exit:
trap_with_param:
; stop_pc info by gdb needs this info
- stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
+ st orig_r8_IS_BRKPT, [sp, PT_orig_r8]
mov r0, r12
lr r1, [efa]
@@ -589,11 +589,7 @@ ARC_ENTRY ret_from_exception
; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
-#ifdef CONFIG_PREEMPT
bbit0 r8, STATUS_U_BIT, resume_kernel_mode
-#else
- bbit0 r8, STATUS_U_BIT, restore_regs
-#endif
; Before returning to User mode check-for-and-complete any pending work
; such as rescheduling/signal-delivery etc.
@@ -653,10 +649,15 @@ resume_user_mode_begin:
b resume_user_mode_begin ; unconditionally back to U mode ret chks
; for single exit point from this block
-#ifdef CONFIG_PREEMPT
-
resume_kernel_mode:
+ ; Disable Interrupts from this point on
+ ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
+ ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
+ IRQ_DISABLE r9
+
+#ifdef CONFIG_PREEMPT
+
; Can't preempt if preemption disabled
GET_CURR_THR_INFO_FROM_SP r10
ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -666,8 +667,6 @@ resume_kernel_mode:
ld r9, [r10, THREAD_INFO_FLAGS]
bbit0 r9, TIF_NEED_RESCHED, restore_regs
- IRQ_DISABLE r9
-
; Invoke PREEMPTION
bl preempt_schedule_irq
@@ -680,12 +679,11 @@ resume_kernel_mode:
;
; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
; IRQ shd definitely not happen between now and rtie
+; All 2 entry points to here already disable interrupts
restore_regs :
- ; Disable Interrupts while restoring reg-file back
- ; XXX can this be optimised out
- IRQ_DISABLE_SAVE r9, r10 ;@r10 has prisitine (pre-disable) copy
+ lr r10, [status32]
#ifdef CONFIG_ARC_CURR_IN_REG
; Restore User R25
@@ -723,7 +721,7 @@ not_exception:
; things to what they were, before returning from L2 context
;----------------------------------------------------------------
- ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
+ ld r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path
ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 006dec3fc353..0f944f024513 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -27,11 +27,16 @@ stext:
; Don't clobber r0-r4 yet. It might have bootloader provided info
;-------------------------------------------------------------------
+ sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
+
#ifdef CONFIG_SMP
; Only Boot (Master) proceeds. Others wait in platform dependent way
; IDENTITY Reg [ 3 2 1 0 ]
; (cpu-id) ^^^ => Zero for UP ARC700
; => #Core-ID if SMP (Master 0)
+ ; Note that non-boot CPUs might not land here if halt-on-reset and
+ ; instead breath life from @first_lines_of_secondary, but we still
+ ; need to make sure only boot cpu takes this path.
GET_CPU_ID r5
cmp r5, 0
jnz arc_platform_smp_wait_to_boot
@@ -96,6 +101,8 @@ stext:
first_lines_of_secondary:
+ sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
+
; setup per-cpu idle task as "current" on this CPU
ld r0, [@secondary_idle_tsk]
SET_CURR_TASK_ON_CPU r0, r1
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 8115fa531575..a199471ce01e 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -24,7 +24,6 @@
* -Needed for each CPU (hence not foldable into init_IRQ)
*
* what it does ?
- * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
* -Disable all IRQs (on CPU side)
* -Optionally, setup the High priority Interrupts as Level 2 IRQs
*/
@@ -32,8 +31,6 @@ void __cpuinit arc_init_IRQ(void)
{
int level_mask = 0;
- write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
-
/* Disable all IRQs: enable them as devices request */
write_aux_reg(AUX_IENABLE, 0);
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index c6a81c58d0f3..f8a36ed9e0d5 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -92,7 +92,7 @@ static int genregs_set(struct task_struct *target,
REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */
REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
REG_IGNORE_ONE(efa); /* efa update invalid */
- REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
+ REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
return ret;
}
@@ -136,6 +136,10 @@ long arch_ptrace(struct task_struct *child, long request,
pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
switch (request) {
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_thread_info(child)->thr_ptr,
+ (unsigned long __user *)data);
+ break;
default:
ret = ptrace_request(child, request, addr, data);
break;
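
From the tracer's side the new request behaves like a one-word read:
the TLS pointer is stored through the "data" argument. A debugger-side
sketch (assumes the tracee is already attached and stopped, and that
the C library exposes request 25 as PTRACE_GET_THREAD_AREA):

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    static void show_tls(pid_t pid)
    {
            unsigned long thr_ptr;

            /* on ARC, addr is unused; data receives the thread pointer */
            if (ptrace(PTRACE_GET_THREAD_AREA, pid, NULL, &thr_ptr) == 0)
                    printf("tracee TLS pointer: %#lx\n", thr_ptr);
    }
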
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index b2b3731dd1e9..2d7786b69a8a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -47,10 +47,7 @@ void __cpuinit read_arc_build_cfg_regs(void)
READ_BCR(AUX_IDENTITY, cpu->core);
cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
-
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
- if (cpu->vec_base == 0)
- cpu->vec_base = (unsigned int)_int_vec_base_lds;
READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
cpu->uncached_base = uncached_space.start << 24;
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index ee6ef2f60a28..7e95e1a86510 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
{
struct rt_sigframe __user *sf;
unsigned int magic;
- int err;
struct pt_regs *regs = current_pt_regs();
/* Always make any pending restarted system calls return -EINTR */
@@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
goto badframe;
- err = restore_usr_regs(regs, sf);
- err |= __get_user(magic, &sf->sigret_magic);
- if (err)
+ if (__get_user(magic, &sf->sigret_magic))
goto badframe;
if (unlikely(is_do_ss_needed(magic)))
if (restore_altstack(&sf->uc.uc_stack))
goto badframe;
+ if (restore_usr_regs(regs, sf))
+ goto badframe;
+
/* Don't restart from sigreturn */
syscall_wont_restart(regs);
@@ -191,6 +191,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
return 1;
/*
+ * w/o SA_SIGINFO, struct ucontext is partially populated (only
+ * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+ * during signal handler execution. This works for SA_SIGINFO as well
+ * although the semantics are now overloaded (the same reg state can be
+ * inspected by userland: but are they allowed to fiddle with it ?
+ */
+ err |= stash_usr_regs(sf, regs, set);
+
+ /*
* SA_SIGINFO requires 3 args to signal handler:
* #1: sig-no (common to any handler)
* #2: struct siginfo
@@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
magic = MAGIC_SIGALTSTK;
}
- /*
- * w/o SA_SIGINFO, struct ucontext is partially populated (only
- * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
- * during signal handler execution. This works for SA_SIGINFO as well
- * although the semantics are now overloaded (the same reg state can be
- * inspected by userland: but are they allowed to fiddle with it ?
- */
- err |= stash_usr_regs(sf, regs, set);
err |= __put_user(magic, &sf->sigret_magic);
if (err)
return err;
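
The comment being moved describes state that userland can legitimately
observe: with SA_SIGINFO, the registers stashed here surface as the
handler's third argument. A minimal, portable userspace illustration
(a sketch, not ARC-specific):

    #include <signal.h>
    #include <stdio.h>
    #include <ucontext.h>

    static volatile sig_atomic_t got;

    static void handler(int sig, siginfo_t *info, void *ctx)
    {
            ucontext_t *uc = ctx;   /* uc->uc_mcontext: the saved regs */

            (void)uc;
            (void)info;
            got = sig;
    }

    int main(void)
    {
            struct sigaction sa = { 0 };

            sa.sa_sigaction = handler;
            sa.sa_flags = SA_SIGINFO;
            sigaction(SIGUSR1, &sa, NULL);
            raise(SIGUSR1);
            printf("caught signal %d\n", (int)got);
            return 0;
    }
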
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 4cd81633febd..116d3e09b5b5 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -233,6 +233,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
+
+ /* handle zero-overhead-loop */
+ if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+ regs->ret = regs->lp_start;
+ regs->lp_count--;
+ }
}
return 0;
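
The new branch emulates ARC's zero-overhead-loop (ZOL) hardware: once
software advances the PC past the last instruction of a hardware loop,
it must also perform the loop-back that the LP machinery would have
done. The rule, restated as standalone C (the struct mirrors just the
pt_regs fields involved; a sketch):

    struct zol_regs {
            unsigned long ret;                      /* resume PC */
            unsigned long lp_start, lp_end, lp_count;
    };

    static void emulate_zol_loopback(struct zol_regs *regs)
    {
            /* what the LP hardware would do at the loop's last insn:
             * branch back to lp_start and consume one iteration */
            if (regs->ret == regs->lp_end && regs->lp_count) {
                    regs->ret = regs->lp_start;
                    regs->lp_count--;
            }
    }
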
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d477..9c548c7cf001 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
ld.a r2,[r0,4]
sub r12,r6,r7
bic r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
and r7,r12,r4
breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
b .Lfound_char ; Likewise this one.
+#else
+ and r12,r12,r4
+ breq r12,0,.Loop ; For speed, we want this branch to be unaligned.
+ lsr_s r12,r12,7
+ bic r2,r7,r6
+ b.d .Lfound_char_b
+ and_s r2,r2,r12
+#endif
; /* We require this code address to be unaligned for speed... */
.Laligned:
ld_s r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
lsr r7,r7,7
bic r2,r7,r6
+.Lfound_char_b:
norm r2,r2
sub_s r0,r0,4
asr_s r2,r2,3
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 689ffd86d5e9..331a0846628e 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -16,7 +16,7 @@
#include <linux/kdebug.h>
#include <asm/pgalloc.h>
-static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+static int handle_vmalloc_fault(unsigned long address)
{
/*
* Synchronize this task's top level page-table
@@ -26,7 +26,7 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
- pgd = pgd_offset_fast(mm, address);
+ pgd = pgd_offset_fast(current->active_mm, address);
pgd_k = pgd_offset_k(address);
if (!pgd_present(*pgd_k))
@@ -72,7 +72,7 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
* nothing more.
*/
if (address >= VMALLOC_START && address <= VMALLOC_END) {
- ret = handle_vmalloc_fault(mm, address);
+ ret = handle_vmalloc_fault(address);
if (unlikely(ret))
goto bad_area_nosemaphore;
else
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 4a177365b2c4..7991e08d606b 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -157,9 +157,8 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
#endif
#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
- pr_err("%s(%lx, %lx)\n", __func__, start, end);
+ pr_err("%s(%llx, %llx)\n", __func__, start, end);
}
#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 351e48e45cbb..993973b061d4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,6 +4,7 @@ config ARM
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT if MMU
@@ -19,10 +20,9 @@ config ARM
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HARDIRQS_SW_RESEND
- select HAVE_AOUT
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
- select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
select HAVE_C_RECORDMCOUNT
@@ -213,7 +213,8 @@ config VECTORS_BASE
default DRAM_BASE if REMAP_VECTORS_TO_RAM
default 0x00000000
help
- The base address of exception vectors.
+ The base address of exception vectors. This must be two pages
+ in size.
config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime" if EMBEDDED
@@ -474,6 +475,7 @@ config ARCH_IXP4XX
bool "IXP4xx-based"
depends on MMU
select ARCH_HAS_DMA_SET_COHERENT_MASK
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select CPU_XSCALE
@@ -1494,6 +1496,109 @@ config SCHED_SMT
MultiThreading at a cost of slightly increased overhead in some
places. If unsure say N here.
+config DISABLE_CPU_SCHED_DOMAIN_BALANCE
+ bool "(EXPERIMENTAL) Disable CPU level scheduler load-balancing"
+ help
+ Disables scheduler load-balancing at CPU sched domain level.
+
+config SCHED_HMP
+ bool "(EXPERIMENTAL) Heterogenous multiprocessor scheduling"
+ depends on DISABLE_CPU_SCHED_DOMAIN_BALANCE && SCHED_MC && FAIR_GROUP_SCHED && !SCHED_AUTOGROUP
+ help
+ Experimental scheduler optimizations for heterogeneous platforms.
+ Attempts to introspectively select task affinity to optimize power
+ and performance. Basic support for multiple (>2) cpu types is in place,
+ but it has only been tested with two types of cpus.
+ There is currently no support for migration of task groups, hence
+ !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled
+ between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE).
+ When turned on, this option adds sys/kernel/hmp directory which
+ contains the following files:
+ up_threshold - the load average threshold used for up migration
+ (0 - 1023)
+ down_threshold - the load average threshold used for down migration
+ (0 - 1023)
+ hmp_domains - a list of cpumasks for the present HMP domains,
+ starting with the 'biggest' and ending with the
+ 'smallest'.
+ Note that both the threshold files can be written at runtime to
+ control scheduler behaviour.
+
+config SCHED_HMP_PRIO_FILTER
+ bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
+ depends on SCHED_HMP
+ help
+ Enables task priority based HMP migration filter. Any task with
+ a NICE value above the threshold will always be on low-power cpus
+ with less compute capacity.
+
+config SCHED_HMP_PRIO_FILTER_VAL
+ int "NICE priority threshold"
+ default 5
+ depends on SCHED_HMP_PRIO_FILTER
+
+config HMP_FAST_CPU_MASK
+ string "HMP scheduler fast CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the fast CPUs in the system as a list string,
+ e.g. cpuid 0+1 should be specified as 0-1.
+
+config HMP_SLOW_CPU_MASK
+ string "HMP scheduler slow CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the slow CPUs in the system as a list string,
+ e.g. cpuid 0+1 should be specified as 0-1.
+
+config HMP_VARIABLE_SCALE
+ bool "Allows changing the load tracking scale through sysfs"
+ depends on SCHED_HMP
+ help
+ When turned on, this option exports the load average period value
+ for the load tracking patches through sysfs.
+ The values can be modified to change the rate of load accumulation
+ used for HMP migration. 'load_avg_period_ms' is the time in ms to
+ reach a load average of 0.5 for an idle task of 0 load average
+ ratio which becomes 100% busy.
+ For example, with load_avg_period_ms = 128 and up_threshold = 512,
+ a running task with a load of 0 will be migrated to a bigger CPU after
+ 128ms, because after 128ms its load_avg_ratio is 0.5 and the real
+ up_threshold is 0.5.
+ This patch has the same behavior as changing the Y of the load
+ average computation to
+ (1002/1024)^(LOAD_AVG_PERIOD/load_avg_period_ms)
+ but removes intermediate overflows in computation.
+
+config HMP_FREQUENCY_INVARIANT_SCALE
+ bool "(EXPERIMENTAL) Frequency-Invariant Tracked Load for HMP"
+ depends on SCHED_HMP && CPU_FREQ
+ help
+ Scales the current load contribution in line with the frequency
+ of the CPU that the task was executed on.
+ In this version, we use a simple linear scale derived from the
+ maximum frequency reported by CPUFreq.
+ Restricting tracked load to be scaled by the CPU's frequency
+ represents the consumption of possible compute capacity
+ (rather than consumption of actual instantaneous capacity as
+ normal) and allows the HMP migration's simple threshold
+ migration strategy to interact more predictably with CPUFreq's
+ asynchronous compute capacity changes.
+
+config SCHED_HMP_LITTLE_PACKING
+ bool "Small task packing for HMP"
+ depends on SCHED_HMP
+ default n
+ help
+ Allows the HMP Scheduler to pack small tasks into CPUs in the
+ smallest HMP domain.
+ Controlled by two sysfs files in sys/kernel/hmp.
+ packing_enable: 1 to enable, 0 to disable packing. Default 1.
+ packing_limit: runqueue load ratio where a RQ is considered
+ to be full. Default is NICE_0_LOAD * 9/8.
+
config HAVE_ARM_SCU
bool
help
@@ -1521,6 +1626,31 @@ config MCPM
for (multi-)cluster based systems, such as big.LITTLE based
systems.
+config BIG_LITTLE
+ bool "big.LITTLE support (Experimental)"
+ depends on CPU_V7 && SMP
+ select MCPM
+ help
+ This option enables support for the big.LITTLE architecture.
+
+config BL_SWITCHER
+ bool "big.LITTLE switcher support"
+ depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
+ select CPU_PM
+ select ARM_CPU_SUSPEND
+ help
+ The big.LITTLE "switcher" provides the core functionality to
+ transparently handle transition between a cluster of A15's
+ and a cluster of A7's in a big.LITTLE system.
+
+config BL_SWITCHER_DUMMY_IF
+ tristate "Simple big.LITTLE switcher user interface"
+ depends on BL_SWITCHER && DEBUG_KERNEL
+ help
+ This is a simple and dummy char dev interface to control
+ the big.LITTLE switcher core code. It is meant for
+ debugging purposes only.
+
choice
prompt "Memory split"
default VMSPLIT_3G
@@ -1681,6 +1811,11 @@ config OABI_COMPAT
in memory differs between the legacy ABI and the new ARM EABI
(only for non "thumb" binaries). This option adds a tiny
overhead to all syscalls and produces a slightly larger kernel.
+
+ The seccomp filter system will not be available when this is
+ selected, since there is no way yet to sensibly distinguish
+ between calling conventions during filtering.
+
If you know you'll be using only pure EABI user space then you
can say N here. If this option is not selected and you attempt
to execute a legacy ABI binary then the result will be
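
Back in the SCHED_HMP additions, the HMP_VARIABLE_SCALE help text is
easy to sanity-check numerically: (1002/1024)^32 is almost exactly 0.5,
so scaling the per-millisecond decay exponent by 32/load_avg_period_ms
makes an always-busy task's load ratio (1 - y^t) reach 0.5 after
load_avg_period_ms milliseconds, as the worked example in the help
claims. A host-side check (assumes the stock LOAD_AVG_PERIOD of 32;
build with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            int load_avg_period_ms = 128;   /* the sysfs-tunable value */
            double y = pow(1002.0 / 1024.0, 32.0 / load_avg_period_ms);

            /* prints "load after 128 ms = 0.501" */
            printf("load after %d ms = %.3f\n", load_avg_period_ms,
                   1.0 - pow(y, load_avg_period_ms));
            return 0;
    }
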
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 883e4bec807f..9d36200374f0 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -16,6 +16,7 @@ LDFLAGS :=
LDFLAGS_vmlinux :=-p --no-undefined -X
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
+LDFLAGS_MODULE += --be8
endif
OBJCOPYFLAGS :=-O binary -R .comment -S
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 84aa2caf07ed..085bb96493a3 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -14,6 +14,7 @@
ifneq ($(MACHINE),)
include $(srctree)/$(MACHINE)/Makefile.boot
endif
+include $(srctree)/arch/arm/boot/dts/Makefile
# Note: the following conditions must always be true:
# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index aabc02a68482..d1153c8a765a 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -53,6 +53,17 @@ static const void *getprop(const void *fdt, const char *node_path,
return fdt_getprop(fdt, offset, property, len);
}
+static uint32_t get_cell_size(const void *fdt)
+{
+ int len;
+ uint32_t cell_size = 1;
+ const uint32_t *size_len = getprop(fdt, "/", "#size-cells", &len);
+
+ if (size_len)
+ cell_size = fdt32_to_cpu(*size_len);
+ return cell_size;
+}
+
static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
{
char cmdline[COMMAND_LINE_SIZE];
@@ -95,9 +106,11 @@ static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
int atags_to_fdt(void *atag_list, void *fdt, int total_space)
{
struct tag *atag = atag_list;
- uint32_t mem_reg_property[2 * NR_BANKS];
+	/* In the case of a 64-bit memory size, we need to reserve two
+	 * cells for the address and two for the size of each bank */
+ uint32_t mem_reg_property[2 * 2 * NR_BANKS];
int memcount = 0;
- int ret;
+ int ret, memsize;
/* make sure we've got an aligned pointer */
if ((u32)atag_list & 0x3)
@@ -137,8 +150,25 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
continue;
if (!atag->u.mem.size)
continue;
- mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
- mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
+ memsize = get_cell_size(fdt);
+
+ if (memsize == 2) {
+			/* if memsize is 2, each value needs
+			 * two 32-bit cells, i.e. the values
+			 * are 64 bits wide */
+ uint64_t *mem_reg_prop64 =
+ (uint64_t *)mem_reg_property;
+ mem_reg_prop64[memcount++] =
+ cpu_to_fdt64(atag->u.mem.start);
+ mem_reg_prop64[memcount++] =
+ cpu_to_fdt64(atag->u.mem.size);
+ } else {
+ mem_reg_property[memcount++] =
+ cpu_to_fdt32(atag->u.mem.start);
+ mem_reg_property[memcount++] =
+ cpu_to_fdt32(atag->u.mem.size);
+ }
+
} else if (atag->hdr.tag == ATAG_INITRD2) {
uint32_t initrd_start, initrd_size;
initrd_start = atag->u.initrd.start;
@@ -150,8 +180,10 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
}
}
- if (memcount)
- setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount);
+ if (memcount) {
+ setprop(fdt, "/memory", "reg", mem_reg_property,
+ 4 * memcount * memsize);
+ }
return fdt_pack(fdt);
}
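The length passed to setprop above folds both layouts into one expression; a stand-alone sketch (not kernel code) of the arithmetic, where memcount counts 32-bit cells when memsize == 1 and 64-bit values when memsize == 2:

#include <stdio.h>

int main(void)
{
	int memsize, memcount;

	memsize = 1;	/* #size-cells = <1> */
	memcount = 2;	/* start + size as 32-bit cells */
	printf("32-bit bank: %d bytes\n", 4 * memcount * memsize); /* 8 */

	memsize = 2;	/* #size-cells = <2> */
	memcount = 2;	/* start + size as 64-bit values */
	printf("64-bit bank: %d bytes\n", 4 * memcount * memsize); /* 16 */
	return 0;
}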
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index a7cd67383883..a8264aa9b03a 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -135,6 +135,7 @@ start:
.word _edata @ zImage end address
THUMB( .thumb )
1:
+ ARM_BE8( setend be ) @ go BE8 if compiled for BE8
mrs r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install @ get into SVC mode, reversibly
@@ -679,9 +680,7 @@ __armv4_mmu_cache_on:
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x0030
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
@@ -708,9 +707,7 @@ __armv7_mmu_cache_on:
orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
@ (needed for ARM1176)
#ifdef CONFIG_MMU
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index f0895c581a89..00baf9f5766a 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -202,7 +202,14 @@ dtb-$(CONFIG_ARCH_VERSATILE) += versatile-ab.dtb \
dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2p-ca5s.dtb \
vexpress-v2p-ca9.dtb \
vexpress-v2p-ca15-tc1.dtb \
- vexpress-v2p-ca15_a7.dtb
+ vexpress-v2p-ca15_a7.dtb \
+ rtsm_ve-cortex_a9x2.dtb \
+ rtsm_ve-cortex_a9x4.dtb \
+ rtsm_ve-cortex_a15x1.dtb \
+ rtsm_ve-cortex_a15x2.dtb \
+ rtsm_ve-cortex_a15x4.dtb \
+ rtsm_ve-v2p-ca15x1-ca7x1.dtb \
+ rtsm_ve-v2p-ca15x4-ca7x4.dtb
dtb-$(CONFIG_ARCH_VIRT) += xenvm-4.2.dtb
dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
wm8505-ref.dtb \
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 550eb772c30e..ddd068bb1457 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -51,7 +51,7 @@
coherency-fabric@20200 {
compatible = "marvell,coherency-fabric";
- reg = <0x20200 0xb0>, <0x21810 0x1c>;
+ reg = <0x20200 0xb0>, <0x21010 0x1c>;
};
serial@12000 {
@@ -92,6 +92,7 @@
#size-cells = <0>;
compatible = "marvell,orion-mdio";
reg = <0x72004 0x4>;
+ clocks = <&gateclk 4>;
};
ethernet@70000 {
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 76db557adbe7..f97550420fcc 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -124,7 +124,7 @@
/* Device Bus parameters are required */
/* Read parameters */
- devbus,bus-width = <8>;
+ devbus,bus-width = <16>;
devbus,turn-off-ps = <60000>;
devbus,badr-skew-ps = <0>;
devbus,acc-first-ps = <124000>;
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index f8eaa383e07f..f94cdbc579cb 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -81,7 +81,7 @@
/*
* MV78230 has 2 PCIe units Gen2.0: One unit can be
* configured as x4 or quad x1 lanes. One unit is
- * x4/x1.
+ * x1 only.
*/
pcie-controller {
compatible = "marvell,armada-xp-pcie";
@@ -94,10 +94,10 @@
bus-range = <0x00 0xff>;
ranges = <0x82000000 0 0x40000 0x40000 0 0x00002000 /* Port 0.0 registers */
- 0x82000000 0 0x42000 0x42000 0 0x00002000 /* Port 2.0 registers */
0x82000000 0 0x44000 0x44000 0 0x00002000 /* Port 0.1 registers */
0x82000000 0 0x48000 0x48000 0 0x00002000 /* Port 0.2 registers */
0x82000000 0 0x4c000 0x4c000 0 0x00002000 /* Port 0.3 registers */
+ 0x82000000 0 0x80000 0x80000 0 0x00002000 /* Port 1.0 registers */
0x82000000 0 0xe0000000 0xe0000000 0 0x08000000 /* non-prefetchable memory */
0x81000000 0 0 0xe8000000 0 0x00100000>; /* downstream I/O */
@@ -165,19 +165,19 @@
status = "disabled";
};
- pcie@9,0 {
+ pcie@5,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
- reg = <0x4800 0 0 0 0>;
+ assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+ reg = <0x2800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 99>;
- marvell,pcie-port = <2>;
+ interrupt-map = <0 0 0 0 &mpic 62>;
+ marvell,pcie-port = <1>;
marvell,pcie-lane = <0>;
- clocks = <&gateclk 26>;
+ clocks = <&gateclk 9>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index f4029f015aff..55cdd58c155f 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -101,7 +101,7 @@
/*
* MV78260 has 3 PCIe units Gen2.0: Two units can be
* configured as x4 or quad x1 lanes. One unit is
- * x4/x1.
+ * x4 only.
*/
pcie-controller {
compatible = "marvell,armada-xp-pcie";
@@ -119,7 +119,9 @@
0x82000000 0 0x48000 0x48000 0 0x00002000 /* Port 0.2 registers */
0x82000000 0 0x4c000 0x4c000 0 0x00002000 /* Port 0.3 registers */
0x82000000 0 0x80000 0x80000 0 0x00002000 /* Port 1.0 registers */
- 0x82000000 0 0x82000 0x82000 0 0x00002000 /* Port 3.0 registers */
+ 0x82000000 0 0x84000 0x84000 0 0x00002000 /* Port 1.1 registers */
+ 0x82000000 0 0x88000 0x88000 0 0x00002000 /* Port 1.2 registers */
+ 0x82000000 0 0x8c000 0x8c000 0 0x00002000 /* Port 1.3 registers */
0x82000000 0 0xe0000000 0xe0000000 0 0x08000000 /* non-prefetchable memory */
0x81000000 0 0 0xe8000000 0 0x00100000>; /* downstream I/O */
@@ -187,35 +189,83 @@
status = "disabled";
};
- pcie@9,0 {
+ pcie@5,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
- reg = <0x4800 0 0 0 0>;
+ assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+ reg = <0x2800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 99>;
- marvell,pcie-port = <2>;
+ interrupt-map = <0 0 0 0 &mpic 62>;
+ marvell,pcie-port = <1>;
marvell,pcie-lane = <0>;
- clocks = <&gateclk 26>;
+ clocks = <&gateclk 9>;
+ status = "disabled";
+ };
+
+ pcie@6,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
+ reg = <0x3000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 63>;
+ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <1>;
+ clocks = <&gateclk 10>;
+ status = "disabled";
+ };
+
+ pcie@7,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
+ reg = <0x3800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 64>;
+ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <2>;
+ clocks = <&gateclk 11>;
status = "disabled";
};
- pcie@10,0 {
+ pcie@8,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x82000 0 0x2000>;
- reg = <0x5000 0 0 0 0>;
+ assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
+ reg = <0x4000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 103>;
- marvell,pcie-port = <3>;
+ interrupt-map = <0 0 0 0 &mpic 65>;
+ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <3>;
+ clocks = <&gateclk 12>;
+ status = "disabled";
+ };
+
+ pcie@9,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
+ reg = <0x4800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 99>;
+ marvell,pcie-port = <2>;
marvell,pcie-lane = <0>;
- clocks = <&gateclk 27>;
+ clocks = <&gateclk 26>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index fdea75c73411..9746d0e7fcb4 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -152,7 +152,7 @@
/* Device Bus parameters are required */
/* Read parameters */
- devbus,bus-width = <8>;
+ devbus,bus-width = <16>;
devbus,turn-off-ps = <60000>;
devbus,badr-skew-ps = <0>;
devbus,acc-first-ps = <124000>;
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index 5d3ed5aafc69..0af879a4eafa 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -35,8 +35,12 @@
ssc2 = &ssc2;
};
cpus {
- cpu@0 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
compatible = "arm,arm920t";
+ device_type = "cpu";
};
};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 84c4bef2d726..0dbdb846f90a 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -32,8 +32,12 @@
ssc0 = &ssc0;
};
cpus {
- cpu@0 {
- compatible = "arm,arm926ejs";
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
+ compatible = "arm,arm926ej-s";
+ device_type = "cpu";
};
};
@@ -340,6 +344,14 @@
};
};
+ i2c_gpio0 {
+ pinctrl_i2c_gpio0: i2c_gpio0-0 {
+ atmel,pins =
+ <0 23 0x0 0x3 /* PA23 gpio I2C_SDA pin */
+ 0 24 0x0 0x3>; /* PA24 gpio I2C_SCL pin */
+ };
+ };
+
pioA: gpio@fffff400 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@@ -592,6 +604,8 @@
i2c-gpio,delay-us = <2>; /* ~100 kHz */
#address-cells = <1>;
#size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c_gpio0>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 94b58ab2cc08..fcd38f89904e 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -29,8 +29,12 @@
ssc1 = &ssc1;
};
cpus {
- cpu@0 {
- compatible = "arm,arm926ejs";
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
+ compatible = "arm,arm926ej-s";
+ device_type = "cpu";
};
};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index bf18a735c37d..479a0622cdb8 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -35,8 +35,12 @@
ssc1 = &ssc1;
};
cpus {
- cpu@0 {
- compatible = "arm,arm926ejs";
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
+ compatible = "arm,arm926ej-s";
+ device_type = "cpu";
};
};
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 8d25f889928e..a92ec78349a2 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -31,8 +31,12 @@
ssc0 = &ssc0;
};
cpus {
- cpu@0 {
- compatible = "arm,arm926ejs";
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
+ compatible = "arm,arm926ej-s";
+ device_type = "cpu";
};
};
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d30e48bd1e9d..28ba79893877 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
chosen {
- bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
};
memory {
- reg = <0x20000000 0x10000000>;
+ reg = <0x20000000 0x8000000>;
};
clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 1145ac330fb7..2b2b6923d16b 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -33,8 +33,12 @@
ssc0 = &ssc0;
};
cpus {
- cpu@0 {
- compatible = "arm,arm926ejs";
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
+ compatible = "arm,arm926ej-s";
+ device_type = "cpu";
};
};
@@ -643,7 +647,7 @@
};
rtc@fffffeb0 {
- compatible = "atmel,at91rm9200-rtc";
+ compatible = "atmel,at91sam9x5-rtc";
reg = <0xfffffeb0 0x40>;
interrupts = <1 4 7>;
status = "disabled";
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 1e12aeff403b..aa537ed13f0a 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -85,6 +85,8 @@
reg = <0x7e205000 0x1000>;
interrupts = <2 21>;
clocks = <&clk_i2c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -93,6 +95,8 @@
reg = <0x7e804000 0x1000>;
interrupts = <2 21>;
clocks = <&clk_i2c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/clcd-panels.dtsi b/arch/arm/boot/dts/clcd-panels.dtsi
new file mode 100644
index 000000000000..0b0ff6ead4b2
--- /dev/null
+++ b/arch/arm/boot/dts/clcd-panels.dtsi
@@ -0,0 +1,52 @@
+/*
+ * ARM Ltd. Versatile Express
+ *
+ */
+
+/ {
+ panels {
+ panel@0 {
+ compatible = "panel";
+ mode = "VGA";
+ refresh = <60>;
+ xres = <640>;
+ yres = <480>;
+ pixclock = <39721>;
+ left_margin = <40>;
+ right_margin = <24>;
+ upper_margin = <32>;
+ lower_margin = <11>;
+ hsync_len = <96>;
+ vsync_len = <2>;
+ sync = <0>;
+ vmode = "FB_VMODE_NONINTERLACED";
+
+ tim2 = "TIM2_BCD", "TIM2_IPC";
+ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+ bpp = <16>;
+ };
+
+ panel@1 {
+ compatible = "panel";
+ mode = "XVGA";
+ refresh = <60>;
+ xres = <1024>;
+ yres = <768>;
+ pixclock = <15748>;
+ left_margin = <152>;
+ right_margin = <48>;
+ upper_margin = <23>;
+ lower_margin = <3>;
+ hsync_len = <104>;
+ vsync_len = <4>;
+ sync = <0>;
+ vmode = "FB_VMODE_NONINTERLACED";
+
+ tim2 = "TIM2_BCD", "TIM2_IPC";
+ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+ bpp = <16>;
+ };
+ };
+};
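The panel timings above can be sanity-checked against the claimed refresh rate; a small stand-alone check (illustrative, following the struct fb_videomode convention of pixclock in picoseconds per pixel):

#include <stdio.h>

int main(void)
{
	double pixclk_hz = 1e12 / 39721.0;	/* ~25.175 MHz for VGA */
	int htotal = 640 + 40 + 24 + 96;	/* xres + margins + hsync_len */
	int vtotal = 480 + 32 + 11 + 2;		/* yres + margins + vsync_len */

	/* prints ~59.94 Hz, matching refresh = <60> above */
	printf("refresh = %.2f Hz\n", pixclk_hz / (htotal * vtotal));
	return 0;
}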
diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi
index 3f0239ec1bc5..49d8da2779a6 100644
--- a/arch/arm/boot/dts/cros5250-common.dtsi
+++ b/arch/arm/boot/dts/cros5250-common.dtsi
@@ -27,6 +27,13 @@
i2c2_bus: i2c2-bus {
samsung,pin-pud = <0>;
};
+
+ max77686_irq: max77686-irq {
+ samsung,pins = "gpx3-2";
+ samsung,pin-function = <0>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
};
i2c@12C60000 {
@@ -35,6 +42,11 @@
max77686@09 {
compatible = "maxim,max77686";
+ interrupt-parent = <&gpx3>;
+ interrupts = <2 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&max77686_irq>;
+ wakeup-source;
reg = <0x09>;
voltage-regulators {
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 02cfc76d002f..b64cb43a7295 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -263,6 +263,7 @@
regulator-name = "vdd_g3d";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
+ regulator-always-on;
regulator-boot-on;
op_mode = <1>;
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index fc9fb3d526e2..cdbdc4dfef22 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -545,7 +545,7 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0x10800000 0x1000>;
interrupts = <0 33 0>;
- clocks = <&clock 271>;
+ clocks = <&clock 346>;
clock-names = "apb_pclk";
#dma-cells = <1>;
#dma-channels = <8>;
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 73fd7d0887b5..587ceef81e45 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -23,8 +23,12 @@
};
cpus {
- cpu@0 {
- compatible = "arm,arm926ejs";
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
+ compatible = "arm,arm926ej-s";
+ device_type = "cpu";
};
};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 600f7cb51f3e..4c10a1968c0e 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -32,8 +32,12 @@
};
cpus {
- cpu@0 {
- compatible = "arm,arm926ejs";
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
+ compatible = "arm,arm926ej-s";
+ device_type = "cpu";
};
};
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index eb83aa039b8b..e524316998f4 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -71,7 +71,7 @@
ipu: ipu@18000000 {
#crtc-cells = <1>;
compatible = "fsl,imx53-ipu";
- reg = <0x18000000 0x080000000>;
+ reg = <0x18000000 0x08000000>;
interrupts = <11 10>;
clocks = <&clks 59>, <&clks 110>, <&clks 61>;
clock-names = "bus", "di0", "di1";
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 5bcdf3a90bb3..62dc78126795 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -18,12 +18,14 @@
cpu@0 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <0>;
next-level-cache = <&L2>;
};
cpu@1 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <1>;
next-level-cache = <&L2>;
};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 21e675848bd1..dc54a72a3bcd 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -18,6 +18,7 @@
cpu@0 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <0>;
next-level-cache = <&L2>;
operating-points = <
@@ -39,18 +40,21 @@
cpu@1 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <1>;
next-level-cache = <&L2>;
};
cpu@2 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <2>;
next-level-cache = <&L2>;
};
cpu@3 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <3>;
next-level-cache = <&L2>;
};
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index ff1aea0ee043..72693a69f830 100644
--- a/arch/arm/boot/dts/integratorcp.dts
+++ b/arch/arm/boot/dts/integratorcp.dts
@@ -9,11 +9,6 @@
model = "ARM Integrator/CP";
compatible = "arm,integrator-cp";
- aliases {
- arm,timer-primary = &timer2;
- arm,timer-secondary = &timer1;
- };
-
chosen {
bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
};
@@ -24,14 +19,18 @@
};
timer0: timer@13000000 {
+ /* TIMER0 runs @ 25MHz */
compatible = "arm,integrator-cp-timer";
+ status = "disabled";
};
timer1: timer@13000100 {
+ /* TIMER1 runs @ 1MHz */
compatible = "arm,integrator-cp-timer";
};
timer2: timer@13000200 {
+ /* TIMER2 runs @ 1MHz */
compatible = "arm,integrator-cp-timer";
};
diff --git a/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts b/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts
new file mode 100644
index 000000000000..c9eee916aa7e
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts
@@ -0,0 +1,159 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA15x1CT
+ *
+ * RTSM_VE_Cortex_A15x1.lisa
+ */
+
+/dts-v1/;
+
+/ {
+ model = "RTSM_VE_CortexA15x1";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a15x1", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
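The long interrupt-map above is mechanical; here is a sketch of how a child interrupt specifier resolves through it (an illustration of the generic interrupt-map matching rule, not kernel code):

#include <stdio.h>

int main(void)
{
	/* interrupt-map-mask = <0 0 63>: only the low six bits of
	 * the third child cell select a row, so motherboard IRQ n
	 * maps to GIC SPI n with trigger type 4 (level high) */
	unsigned int child_irq = 42;	/* e.g. the virtio_block node */
	unsigned int row = child_irq & 63;

	printf("child %u -> <&gic 0 %u 4>\n", child_irq, row);
	return 0;
}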
diff --git a/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts b/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts
new file mode 100644
index 000000000000..853a166e3c32
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts
@@ -0,0 +1,165 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA15x2CT
+ *
+ * RTSM_VE_Cortex_A15x2.lisa
+ */
+
+/dts-v1/;
+
+/ {
+ model = "RTSM_VE_CortexA15x2";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a15x2", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts b/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts
new file mode 100644
index 000000000000..c1947a3a5c88
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts
@@ -0,0 +1,177 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA15x4CT
+ *
+ * RTSM_VE_Cortex_A15x4.lisa
+ */
+
+/dts-v1/;
+
+/ {
+ model = "RTSM_VE_CortexA15x4";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a15x4", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <2>;
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <3>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts b/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts
new file mode 100644
index 000000000000..fca6b2f79677
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts
@@ -0,0 +1,171 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA9MPx2CT
+ *
+ * RTSM_VE_Cortex_A9x2.lisa
+ */
+
+/dts-v1/;
+
+/ {
+ model = "RTSM_VE_CortexA9x2";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a9x2", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <1>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
+
+ scu@2c000000 {
+ compatible = "arm,cortex-a9-scu";
+ reg = <0x2c000000 0x58>;
+ };
+
+ timer@2c000600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x2c000600 0x20>;
+ interrupts = <1 13 0xf04>;
+ };
+
+ watchdog@2c000620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0x2c000620 0x20>;
+ interrupts = <1 14 0xf04>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x2c001000 0x1000>,
+ <0x2c000100 0x100>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x08000000 0x04000000>,
+ <1 0 0x14000000 0x04000000>,
+ <2 0 0x18000000 0x04000000>,
+ <3 0 0x1c000000 0x04000000>,
+ <4 0 0x0c000000 0x04000000>,
+ <5 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts b/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts
new file mode 100644
index 000000000000..fd8a6ed97a04
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts
@@ -0,0 +1,183 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA9MPx4CT
+ *
+ * RTSM_VE_Cortex_A9x4.lisa
+ */
+
+/dts-v1/;
+
+/ {
+ model = "RTSM_VE_CortexA9x4";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a9x4", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <1>;
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <2>;
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <3>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
+
+ scu@2c000000 {
+ compatible = "arm,cortex-a9-scu";
+ reg = <0x2c000000 0x58>;
+ };
+
+ timer@2c000600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x2c000600 0x20>;
+ interrupts = <1 13 0xf04>;
+ };
+
+ watchdog@2c000620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0x2c000620 0x20>;
+ interrupts = <1 14 0xf04>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x2c001000 0x1000>,
+ <0x2c000100 0x100>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x08000000 0x04000000>,
+ <1 0 0x14000000 0x04000000>,
+ <2 0 0x18000000 0x04000000>,
+ <3 0 0x1c000000 0x04000000>,
+ <4 0 0x0c000000 0x04000000>,
+ <5 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi
new file mode 100644
index 000000000000..a2d895ee5faa
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi
@@ -0,0 +1,231 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * Motherboard component
+ *
+ * VEMotherBoard.lisa
+ */
+
+ motherboard {
+ compatible = "arm,vexpress,v2m-p1", "simple-bus";
+ arm,hbi = <0x190>;
+ arm,vexpress,site = <0>;
+ arm,v2m-memory-map = "rs1";
+ #address-cells = <2>; /* SMB chipselect number and offset */
+ #size-cells = <1>;
+ #interrupt-cells = <1>;
+ ranges;
+
+ flash@0,00000000 {
+ compatible = "arm,vexpress-flash", "cfi-flash";
+ reg = <0 0x00000000 0x04000000>,
+ <4 0x00000000 0x04000000>;
+ bank-width = <4>;
+ };
+
+ vram@2,00000000 {
+ compatible = "arm,vexpress-vram";
+ reg = <2 0x00000000 0x00800000>;
+ };
+
+ ethernet@2,02000000 {
+ compatible = "smsc,lan91c111";
+ reg = <2 0x02000000 0x10000>;
+ interrupts = <15>;
+ };
+
+ iofpga@3,00000000 {
+ compatible = "arm,amba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 3 0 0x200000>;
+
+ v2m_sysreg: sysreg@010000 {
+ compatible = "arm,vexpress-sysreg";
+ reg = <0x010000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ v2m_sysctl: sysctl@020000 {
+ compatible = "arm,sp810", "arm,primecell";
+ reg = <0x020000 0x1000>;
+ clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&smbclk>;
+ clock-names = "refclk", "timclk", "apb_pclk";
+ #clock-cells = <1>;
+ clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+ };
+
+ aaci@040000 {
+ compatible = "arm,pl041", "arm,primecell";
+ reg = <0x040000 0x1000>;
+ interrupts = <11>;
+ clocks = <&smbclk>;
+ clock-names = "apb_pclk";
+ };
+
+ mmci@050000 {
+ compatible = "arm,pl180", "arm,primecell";
+ reg = <0x050000 0x1000>;
+ interrupts = <9 10>;
+ cd-gpios = <&v2m_sysreg 0 0>;
+ wp-gpios = <&v2m_sysreg 1 0>;
+ max-frequency = <12000000>;
+ vmmc-supply = <&v2m_fixed_3v3>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "mclk", "apb_pclk";
+ };
+
+ kmi@060000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x060000 0x1000>;
+ interrupts = <12>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ kmi@070000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x070000 0x1000>;
+ interrupts = <13>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ v2m_serial0: uart@090000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x090000 0x1000>;
+ interrupts = <5>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ v2m_serial1: uart@0a0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0a0000 0x1000>;
+ interrupts = <6>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ v2m_serial2: uart@0b0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0b0000 0x1000>;
+ interrupts = <7>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ v2m_serial3: uart@0c0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0c0000 0x1000>;
+ interrupts = <8>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ wdt@0f0000 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0f0000 0x1000>;
+ interrupts = <0>;
+ clocks = <&v2m_refclk32khz>, <&smbclk>;
+ clock-names = "wdogclk", "apb_pclk";
+ };
+
+ v2m_timer01: timer@110000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x110000 0x1000>;
+ interrupts = <2>;
+ clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&smbclk>;
+ clock-names = "timclken1", "timclken2", "apb_pclk";
+ };
+
+ v2m_timer23: timer@120000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x120000 0x1000>;
+ interrupts = <3>;
+ clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&smbclk>;
+ clock-names = "timclken1", "timclken2", "apb_pclk";
+ };
+
+ rtc@170000 {
+ compatible = "arm,pl031", "arm,primecell";
+ reg = <0x170000 0x1000>;
+ interrupts = <4>;
+ clocks = <&smbclk>;
+ clock-names = "apb_pclk";
+ };
+
+ clcd@1f0000 {
+ compatible = "arm,pl111", "arm,primecell";
+ reg = <0x1f0000 0x1000>;
+ interrupts = <14>;
+ clocks = <&v2m_oscclk1>, <&smbclk>;
+ clock-names = "v2m:oscclk1", "apb_pclk";
+ mode = "VGA";
+ use_dma = <0>;
+ framebuffer = <0x18000000 0x00180000>;
+ };
+
+ virtio_block@0130000 {
+ compatible = "virtio,mmio";
+ reg = <0x130000 0x200>;
+ interrupts = <42>;
+ };
+
+ };
+
+ v2m_fixed_3v3: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ v2m_clk24mhz: clk24mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "v2m:clk24mhz";
+ };
+
+ v2m_refclk1mhz: refclk1mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = "v2m:refclk1mhz";
+ };
+
+ v2m_refclk32khz: refclk32khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "v2m:refclk32khz";
+ };
+
+ mcc {
+ compatible = "simple-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ v2m_oscclk1: osc@1 {
+ /* CLCD clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <23750000 63500000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk1";
+ };
+
+ muxfpga@0 {
+ compatible = "arm,vexpress-muxfpga";
+ arm,vexpress-sysreg,func = <7 0>;
+ };
+
+ shutdown@0 {
+ compatible = "arm,vexpress-shutdown";
+ arm,vexpress-sysreg,func = <8 0>;
+ };
+ };
+ };
diff --git a/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts b/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts
new file mode 100644
index 000000000000..fe8cf5dc8570
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts
@@ -0,0 +1,244 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA15x4CT
+ * ARMCortexA7x4CT
+ * RTSM_VE_Cortex_A15x1_A7x1.lisa
+ */
+
+/dts-v1/;
+
+/memreserve/ 0xff000000 0x01000000;
+
+/ {
+ model = "RTSM_VE_CortexA15x1-A7x1";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a15x1_a7x1", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ clusters {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cluster0: cluster@0 {
+ reg = <0>;
+// freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core0: core@0 {
+ reg = <0>;
+ };
+
+ };
+ };
+
+ cluster1: cluster@1 {
+ reg = <1>;
+// freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core1: core@0 {
+ reg = <0>;
+ };
+
+ };
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ cluster = <&cluster0>;
+ core = <&core0>;
+// clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control1>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ cluster = <&cluster1>;
+ core = <&core1>;
+// clock-frequency = <800000000>;
+ cci-control-port = <&cci_control2>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ cci@2c090000 {
+ compatible = "arm,cci-400", "arm,cci";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0 0x2c090000 0 0x1000>;
+ ranges = <0x0 0x0 0x2c090000 0x10000>;
+
+ cci_control1: slave-if@4000 {
+ compatible = "arm,cci-400-ctrl-if";
+ interface-type = "ace";
+ reg = <0x4000 0x1000>;
+ };
+
+ cci_control2: slave-if@5000 {
+ compatible = "arm,cci-400-ctrl-if";
+ interface-type = "ace";
+ reg = <0x5000 0x1000>;
+ };
+ };
+
+ dcscb@60000000 {
+ compatible = "arm,rtsm,dcscb";
+ reg = <0 0x60000000 0 0x1000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+
+ gic-cpuif@0 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <0>;
+ cpu = <&cpu0>;
+ };
+ gic-cpuif@1 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <1>;
+ cpu = <&cpu1>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts b/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts
new file mode 100644
index 000000000000..f715285131d8
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts
@@ -0,0 +1,358 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA15x4CT
+ * ARMCortexA7x4CT
+ * RTSM_VE_Cortex_A15x4_A7x4.lisa
+ */
+
+/dts-v1/;
+
+/memreserve/ 0xff000000 0x01000000;
+
+/ {
+ model = "RTSM_VE_CortexA15x4-A7x4";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a15x4_a7x4", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ clusters {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cluster0: cluster@0 {
+ reg = <0>;
+// freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core0: core@0 {
+ reg = <0>;
+ };
+
+ core1: core@1 {
+ reg = <1>;
+ };
+
+ core2: core@2 {
+ reg = <2>;
+ };
+
+ core3: core@3 {
+ reg = <3>;
+ };
+
+ };
+ };
+
+ cluster1: cluster@1 {
+ reg = <1>;
+// freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core4: core@0 {
+ reg = <0>;
+ };
+
+ core5: core@1 {
+ reg = <1>;
+ };
+
+ core6: core@2 {
+ reg = <2>;
+ };
+
+ core7: core@3 {
+ reg = <3>;
+ };
+
+ };
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ cluster = <&cluster0>;
+ core = <&core0>;
+// clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control1>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ cluster = <&cluster0>;
+ core = <&core1>;
+// clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control1>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <2>;
+ cluster = <&cluster0>;
+ core = <&core2>;
+// clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control1>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <3>;
+ cluster = <&cluster0>;
+ core = <&core3>;
+// clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control1>;
+ };
+
+ cpu4: cpu@4 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ cluster = <&cluster1>;
+ core = <&core4>;
+// clock-frequency = <800000000>;
+ cci-control-port = <&cci_control2>;
+ };
+
+ cpu5: cpu@5 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x101>;
+ cluster = <&cluster1>;
+ core = <&core5>;
+// clock-frequency = <800000000>;
+ cci-control-port = <&cci_control2>;
+ };
+
+ cpu6: cpu@6 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x102>;
+ cluster = <&cluster1>;
+ core = <&core6>;
+// clock-frequency = <800000000>;
+ cci-control-port = <&cci_control2>;
+ };
+
+ cpu7: cpu@7 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x103>;
+ cluster = <&cluster1>;
+ core = <&core7>;
+// clock-frequency = <800000000>;
+ cci-control-port = <&cci_control2>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ cci@2c090000 {
+ compatible = "arm,cci-400", "arm,cci";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0 0x2c090000 0 0x1000>;
+ ranges = <0x0 0x0 0x2c090000 0x10000>;
+
+ cci_control1: slave-if@4000 {
+ compatible = "arm,cci-400-ctrl-if";
+ interface-type = "ace";
+ reg = <0x4000 0x1000>;
+ };
+
+ cci_control2: slave-if@5000 {
+ compatible = "arm,cci-400-ctrl-if";
+ interface-type = "ace";
+ reg = <0x5000 0x1000>;
+ };
+ };
+
+ dcscb@60000000 {
+ compatible = "arm,rtsm,dcscb";
+ reg = <0 0x60000000 0 0x1000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+
+ gic-cpuif@0 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <0>;
+ cpu = <&cpu0>;
+ };
+ gic-cpuif@1 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <1>;
+ cpu = <&cpu1>;
+ };
+ gic-cpuif@2 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <2>;
+ cpu = <&cpu2>;
+ };
+ gic-cpuif@3 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <3>;
+ cpu = <&cpu3>;
+ };
+ gic-cpuif@4 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <4>;
+ cpu = <&cpu4>;
+ };
+ gic-cpuif@5 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <5>;
+ cpu = <&cpu5>;
+ };
+ gic-cpuif@6 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <6>;
+ cpu = <&cpu6>;
+ };
+ gic-cpuif@7 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <7>;
+ cpu = <&cpu7>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 5000e0d42849..642775d7ca67 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -35,8 +35,12 @@
ssc1 = &ssc1;
};
cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
cpu@0 {
+ device_type = "cpu";
compatible = "arm,cortex-a5";
+ reg = <0x0>;
};
};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index e7ef619a70a2..06ef8b625dba 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -16,8 +16,12 @@
interrupt-parent = <&intc>;
cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
cpu@0 {
+ device_type = "cpu";
compatible = "arm,cortex-a8";
+ reg = <0x0>;
};
};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 31fa38f8cc98..d2852547b572 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -17,8 +17,12 @@
interrupt-parent = <&intc>;
cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
cpu@0 {
+ device_type = "cpu";
compatible = "arm,cortex-a8";
+ reg = <0x0>;
};
};
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index ac870fb3fa0d..9584232ee6b6 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -228,6 +228,7 @@
};
clcd@1f0000 {
+ status = "disabled";
compatible = "arm,pl111", "arm,primecell";
reg = <0x1f0000 0x1000>;
interrupts = <14>;
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index f1420368355b..6593398c11ae 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -227,6 +227,7 @@
};
clcd@1f000 {
+ status = "disabled";
compatible = "arm,pl111", "arm,primecell";
reg = <0x1f000 0x1000>;
interrupts = <14>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 9420053acc14..cc6a8c0cfe33 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -9,6 +9,8 @@
/dts-v1/;
+/memreserve/ 0xbf000000 0x01000000;
+
/ {
model = "V2P-CA15";
arm,hbi = <0x237>;
@@ -57,6 +59,8 @@
interrupts = <0 85 4>;
clocks = <&oscclk5>;
clock-names = "pxlclk";
+ mode = "1024x768-16@60";
+ framebuffer = <0 0xff000000 0 0x01000000>;
};
memory-controller@2b0a0000 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index d2803be4e1a8..f1dc620c5c45 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -9,11 +9,13 @@
/dts-v1/;
+/memreserve/ 0xff000000 0x01000000;
+
/ {
model = "V2P-CA15_CA7";
arm,hbi = <0x249>;
arm,vexpress,site = <0xf>;
- compatible = "arm,vexpress,v2p-ca15_a7", "arm,vexpress";
+ compatible = "arm,vexpress,v2p-ca15_a7", "arm,vexpress", "arm,generic";
interrupt-parent = <&gic>;
#address-cells = <2>;
#size-cells = <2>;
@@ -29,44 +31,106 @@
i2c1 = &v2m_i2c_pcie;
};
- cpus {
+ clusters {
#address-cells = <1>;
#size-cells = <0>;
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a15";
+ cluster0: cluster@0 {
reg = <0>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core0: core@0 {
+ reg = <0>;
+ };
+
+ core1: core@1 {
+ reg = <1>;
+ };
+
+ };
};
- cpu1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a15";
+ cluster1: cluster@1 {
reg = <1>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core2: core@0 {
+ reg = <0>;
+ };
+
+ core3: core@1 {
+ reg = <1>;
+ };
+
+ core4: core@2 {
+ reg = <2>;
+ };
+ };
};
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
cpu2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x100>;
+ cluster = <&cluster1>;
+ core = <&core2>;
+ clock-frequency = <800000000>;
+ cci-control-port = <&cci_control2>;
};
cpu3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x101>;
+ cluster = <&cluster1>;
+ core = <&core3>;
+ clock-frequency = <800000000>;
+ cci-control-port = <&cci_control2>;
};
cpu4: cpu@4 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x102>;
+ cluster = <&cluster1>;
+ core = <&core4>;
+ clock-frequency = <800000000>;
+ cci-control-port = <&cci_control2>;
+ };
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ cluster = <&cluster0>;
+ core = <&core0>;
+ clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control1>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ cluster = <&cluster0>;
+ core = <&core1>;
+ clock-frequency = <1000000000>;
+ cci-control-port = <&cci_control1>;
};
};
memory@80000000 {
device_type = "memory";
- reg = <0 0x80000000 0 0x40000000>;
+ reg = <0 0x80000000 0 0x80000000>;
};
wdt@2a490000 {
@@ -81,6 +145,8 @@
compatible = "arm,hdlcd";
reg = <0 0x2b000000 0 0x1000>;
interrupts = <0 85 4>;
+ mode = "1024x768-16@60";
+ framebuffer = <0 0xff000000 0 0x01000000>;
clocks = <&oscclk5>;
clock-names = "pxlclk";
};
@@ -102,6 +168,64 @@
<0 0x2c004000 0 0x2000>,
<0 0x2c006000 0 0x2000>;
interrupts = <1 9 0xf04>;
+
+ gic-cpuif@0 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <0>;
+ cpu = <&cpu0>;
+ };
+ gic-cpuif@1 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <1>;
+ cpu = <&cpu1>;
+ };
+ gic-cpuif@2 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <2>;
+ cpu = <&cpu2>;
+ };
+
+ gic-cpuif@3 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <3>;
+ cpu = <&cpu3>;
+ };
+
+ gic-cpuif@4 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <4>;
+ cpu = <&cpu4>;
+ };
+ };
+
+ cci@2c090000 {
+ compatible = "arm,cci-400";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0 0x2c090000 0 0x1000>;
+ ranges = <0x0 0x0 0x2c090000 0x10000>;
+
+ cci_control1: slave-if@4000 {
+ compatible = "arm,cci-400-ctrl-if";
+ interface-type = "ace";
+ reg = <0x4000 0x1000>;
+ };
+
+ cci_control2: slave-if@5000 {
+ compatible = "arm,cci-400-ctrl-if";
+ interface-type = "ace";
+ reg = <0x5000 0x1000>;
+ };
+ };
+
+ cci-pmu@2c099000 {
+ compatible = "arm,cci-400-pmu";
+ reg = <0 0x2c099000 0 0x6000>;
+ interrupts = <0 101 4>,
+ <0 102 4>,
+ <0 103 4>,
+ <0 104 4>,
+ <0 105 4>;
};
memory-controller@7ffd0000 {
@@ -125,6 +249,12 @@
clock-names = "apb_pclk";
};
+ spc@7fff0000 {
+ compatible = "arm,vexpress-spc,v2p-ca15_a7","arm,vexpress-spc";
+ reg = <0 0x7fff0000 0 0x1000>;
+ interrupts = <0 95 4>;
+ };
+
timer {
compatible = "arm,armv7-timer";
interrupts = <1 13 0xf08>,
@@ -133,12 +263,21 @@
<1 10 0xf08>;
};
- pmu {
+ pmu_a15 {
compatible = "arm,cortex-a15-pmu";
+ cluster = <&cluster0>;
interrupts = <0 68 4>,
<0 69 4>;
};
+ pmu_a7 {
+ compatible = "arm,cortex-a7-pmu";
+ cluster = <&cluster1>;
+ interrupts = <0 128 4>,
+ <0 129 4>,
+ <0 130 4>;
+ };
+
oscclk6a: oscclk6a {
/* Reference 24MHz clock */
compatible = "fixed-clock";
@@ -147,6 +286,15 @@
clock-output-names = "oscclk6a";
};
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_suspend = <0x80100001>;
+ cpu_off = <0x80100002>;
+ cpu_on = <0x80100003>;
+ migrate = <0x80100004>;
+ };
+
dcc {
compatible = "arm,vexpress,config-bus";
arm,vexpress,config-bridge = <&v2m_sysreg>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index c544a5504591..cf633ed6a1b4 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -9,6 +9,8 @@
/dts-v1/;
+/memreserve/ 0xbf000000 0x01000000;
+
/ {
model = "V2P-CA5s";
arm,hbi = <0x225>;
@@ -59,6 +61,8 @@
interrupts = <0 85 4>;
clocks = <&oscclk3>;
clock-names = "pxlclk";
+ mode = "640x480-16@60";
+ framebuffer = <0xbf000000 0x01000000>;
};
memory-controller@2a150000 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index 62d9b225dcce..f83706bd3f9a 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -9,6 +9,8 @@
/dts-v1/;
+/include/ "clcd-panels.dtsi"
+
/ {
model = "V2P-CA9";
arm,hbi = <0x191>;
@@ -73,6 +75,8 @@
interrupts = <0 44 4>;
clocks = <&oscclk1>, <&oscclk2>;
clock-names = "clcdclk", "apb_pclk";
+ mode = "XVGA";
+ use_dma = <1>;
};
memory-controller@100e0000 {
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 992d4046bb8a..ce01364a96e3 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -21,49 +21,3 @@ config SHARP_SCOOP
config FIQ_GLUE
bool
select FIQ
-
-config FIQ_DEBUGGER
- bool "FIQ Mode Serial Debugger"
- select FIQ
- select FIQ_GLUE
- default n
- help
- The FIQ serial debugger can accept commands even when the
- kernel is unresponsive due to being stuck with interrupts
- disabled.
-
-
-config FIQ_DEBUGGER_NO_SLEEP
- bool "Keep serial debugger active"
- depends on FIQ_DEBUGGER
- default n
- help
- Enables the serial debugger at boot. Passing
- fiq_debugger.no_sleep on the kernel command line will
- override this config option.
-
-config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
- bool "Don't disable wakeup IRQ when debugger is active"
- depends on FIQ_DEBUGGER
- default n
- help
- Don't disable the wakeup irq when enabling the uart clock. This will
- cause extra interrupts, but it makes the serial debugger usable
- on some MSM radio builds that ignore the uart clock request in power
- collapse.
-
-config FIQ_DEBUGGER_CONSOLE
- bool "Console on FIQ Serial Debugger port"
- depends on FIQ_DEBUGGER
- default n
- help
- Enables a console so that printk messages are displayed on
- the debugger serial port as they occur.
-
-config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
- bool "Put the FIQ debugger into console mode by default"
- depends on FIQ_DEBUGGER_CONSOLE
- default n
- help
- If enabled, this puts the fiq debugger into console mode by default.
- Otherwise, the fiq debugger will start out in debug mode.
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 384abdc09b62..505c479202b6 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -4,7 +4,6 @@
obj-y += firmware.o
-obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger.o
obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o
obj-$(CONFIG_ICST) += icst.o
obj-$(CONFIG_SA1111) += sa1111.o
@@ -16,5 +15,9 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
+obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o
+obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
+
AFLAGS_mcpm_head.o := -march=armv7-a
AFLAGS_vlock.o := -march=armv7-a
+CFLAGS_REMOVE_mcpm_entry.o = -pg
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
new file mode 100644
index 000000000000..8fee70dfb302
--- /dev/null
+++ b/arch/arm/common/bL_switcher.c
@@ -0,0 +1,864 @@
+/*
+ * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/notifier.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/moduleparam.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/suspend.h>
+#include <asm/mcpm.h>
+#include <asm/bL_switcher.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power_cpu_migrate.h>
+
+
+/*
+ * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
+ * __attribute_const__ and we don't want the compiler to assume any
+ * constness here as the value _does_ change along some code paths.
+ */
+
+static int read_mpidr(void)
+{
+ unsigned int id;
+ asm volatile ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (id));
+ return id & MPIDR_HWID_BITMASK;
+}
+
+/*
+ * Get a global nanosecond time stamp for tracing.
+ */
+static s64 get_ns(void)
+{
+ struct timespec ts;
+ getnstimeofday(&ts);
+ return timespec_to_ns(&ts);
+}
+
+/*
+ * bL switcher core code.
+ */
+
+static void bL_do_switch(void *_arg)
+{
+ unsigned ib_mpidr, ib_cpu, ib_cluster;
+ long volatile handshake, **handshake_ptr = _arg;
+
+ pr_debug("%s\n", __func__);
+
+ ib_mpidr = cpu_logical_map(smp_processor_id());
+ ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+ ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+ /* Advertise our handshake location */
+ if (handshake_ptr) {
+ handshake = 0;
+ *handshake_ptr = &handshake;
+ } else
+ handshake = -1;
+
+ /*
+ * Our state has been saved at this point. Let's release our
+ * inbound CPU.
+ */
+ mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
+ sev();
+
+ /*
+ * From this point, we must assume that our counterpart CPU might
+ * have taken over in its parallel world already, as if execution
+ * just returned from cpu_suspend(). It is therefore important to
+ * be very careful not to make any change the other guy is not
+ * expecting. This is why we need stack isolation.
+ *
+ * Fancy under cover tasks could be performed here. For now
+ * we have none.
+ */
+
+ /*
+ * Let's wait until our inbound is alive.
+ */
+ while (!handshake) {
+ wfe();
+ smp_mb();
+ }
+
+ /* Let's put ourselves down. */
+ mcpm_cpu_power_down();
+
+ /* should never get here */
+ BUG();
+}
+
+/*
+ * Stack isolation. To ensure 'current' remains valid, we just use another
+ * piece of our thread's stack space which should be fairly lightly used.
+ * The selected area starts just above the thread_info structure located
+ * at the very bottom of the stack, aligned to a cache line, and indexed
+ * with the cluster number.
+ */
+#define STACK_SIZE 512
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+static int bL_switchpoint(unsigned long _arg)
+{
+ unsigned int mpidr = read_mpidr();
+ unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ void *stack = current_thread_info() + 1;
+ stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
+ stack += clusterid * STACK_SIZE + STACK_SIZE;
+ call_with_stack(bL_do_switch, (void *)_arg, stack);
+ BUG();
+}
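
To make the slot arithmetic concrete: each cluster gets a STACK_SIZE (512-byte) slot just above thread_info, and the pointer handed to call_with_stack() is the top of that slot, since ARM stacks grow downwards. A standalone sketch of the computation, assuming 64-byte L1 cache lines and a hypothetical address for the word just above thread_info:

    #include <stdio.h>
    #include <stdint.h>

    #define STACK_SIZE 512
    #define CACHE_LINE 64			/* assumed L1_CACHE_BYTES */

    int main(void)
    {
    	uintptr_t above_ti = 0x8000e034;	/* hypothetical address */
    	uintptr_t base = (above_ti + CACHE_LINE - 1) & ~(uintptr_t)(CACHE_LINE - 1);
    	int cl;

    	for (cl = 0; cl < 2; cl++)		/* one slot per cluster */
    		printf("cluster %d sp = %#lx\n", cl,
    		       (unsigned long)(base + cl * STACK_SIZE + STACK_SIZE));
    	return 0;
    }
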
+
+/*
+ * Generic switcher interface
+ */
+
+static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
+static int bL_switcher_cpu_pairing[NR_CPUS];
+
+/*
+ * bL_switch_to - Switch to a specific cluster for the current CPU
+ * @new_cluster_id: the ID of the cluster to switch to.
+ *
+ * This function must be called on the CPU to be switched.
+ * Returns 0 on success, else a negative status code.
+ */
+static int bL_switch_to(unsigned int new_cluster_id)
+{
+ unsigned int mpidr, this_cpu, that_cpu;
+ unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
+ struct completion inbound_alive;
+ struct tick_device *tdev;
+ enum clock_event_mode tdev_mode;
+ long volatile *handshake_ptr;
+ int ipi_nr, ret;
+
+ this_cpu = smp_processor_id();
+ ob_mpidr = read_mpidr();
+ ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
+ ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
+ BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
+
+ if (new_cluster_id == ob_cluster)
+ return 0;
+
+ that_cpu = bL_switcher_cpu_pairing[this_cpu];
+ ib_mpidr = cpu_logical_map(that_cpu);
+ ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+ ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+ pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
+ this_cpu, ob_mpidr, ib_mpidr);
+
+ /* Close the gate for our entry vectors */
+ mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
+ mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
+
+ /* Install our "inbound alive" notifier. */
+ init_completion(&inbound_alive);
+ ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
+ ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
+ mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
+
+ /*
+ * Let's wake up the inbound CPU now in case it requires some delay
+ * to come online, but leave it gated in our entry vector code.
+ */
+ ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
+ if (ret) {
+ pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Raise an SGI on the inbound CPU to make sure it doesn't stall
+ * in a possible WFI, such as in bL_power_down().
+ */
+ gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
+
+ /*
+ * Wait for the inbound to come up. This allows for other
+ * tasks to be scheduled in the meantime.
+ */
+ wait_for_completion(&inbound_alive);
+ mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
+
+ /*
+ * From this point we are entering the switch critical zone
+ * and can't sleep/schedule anymore.
+ */
+ local_irq_disable();
+ local_fiq_disable();
+ trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+
+ /* redirect GIC's SGIs to our counterpart */
+ gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
+
+ tdev = tick_get_device(this_cpu);
+ if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
+ tdev = NULL;
+ if (tdev) {
+ tdev_mode = tdev->evtdev->mode;
+ clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+ }
+
+ ret = cpu_pm_enter();
+
+ /* we cannot tolerate errors at this point */
+ if (ret)
+ panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);
+
+ /*
+ * Swap the physical CPUs in the logical map for this logical CPU.
+ * This must be flushed to RAM as the resume code
+ * needs to access it while the caches are still disabled.
+ */
+ cpu_logical_map(this_cpu) = ib_mpidr;
+ cpu_logical_map(that_cpu) = ob_mpidr;
+ sync_cache_w(&cpu_logical_map(this_cpu));
+
+ /* Let's do the actual CPU switch. */
+ ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
+ if (ret > 0)
+ panic("%s: cpu_suspend() returned %d\n", __func__, ret);
+
+ /* We are executing on the inbound CPU at this point */
+ mpidr = read_mpidr();
+ pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
+ BUG_ON(mpidr != ib_mpidr);
+
+ mcpm_cpu_powered_up();
+
+ ret = cpu_pm_exit();
+
+ if (tdev) {
+ clockevents_set_mode(tdev->evtdev, tdev_mode);
+ clockevents_program_event(tdev->evtdev,
+ tdev->evtdev->next_event, 1);
+ }
+
+ trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+ local_fiq_enable();
+ local_irq_enable();
+
+ *handshake_ptr = 1;
+ dsb_sev();
+
+ if (ret)
+ pr_err("%s exiting with error %d\n", __func__, ret);
+ return ret;
+}
+
+struct bL_thread {
+ spinlock_t lock;
+ struct task_struct *task;
+ wait_queue_head_t wq;
+ int wanted_cluster;
+ struct completion started;
+ bL_switch_completion_handler completer;
+ void *completer_cookie;
+};
+
+static struct bL_thread bL_threads[NR_CPUS];
+
+static int bL_switcher_thread(void *arg)
+{
+ struct bL_thread *t = arg;
+ struct sched_param param = { .sched_priority = 1 };
+ int cluster;
+ bL_switch_completion_handler completer;
+ void *completer_cookie;
+
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+ complete(&t->started);
+
+ do {
+ if (signal_pending(current))
+ flush_signals(current);
+ wait_event_interruptible(t->wq,
+ t->wanted_cluster != -1 ||
+ kthread_should_stop());
+
+ spin_lock(&t->lock);
+ cluster = t->wanted_cluster;
+ completer = t->completer;
+ completer_cookie = t->completer_cookie;
+ t->wanted_cluster = -1;
+ t->completer = NULL;
+ spin_unlock(&t->lock);
+
+ if (cluster != -1) {
+ bL_switch_to(cluster);
+
+ if (completer)
+ completer(completer_cookie);
+ }
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
+{
+ struct task_struct *task;
+
+ task = kthread_create_on_node(bL_switcher_thread, arg,
+ cpu_to_node(cpu), "kswitcher_%d", cpu);
+ if (!IS_ERR(task)) {
+ kthread_bind(task, cpu);
+ wake_up_process(task);
+ } else
+ pr_err("%s failed for CPU %d\n", __func__, cpu);
+ return task;
+}
+
+/*
+ * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
+ * with completion notification via a callback
+ *
+ * @cpu: the CPU to switch
+ * @new_cluster_id: the ID of the cluster to switch to.
+ * @completer: switch completion callback. If non-NULL,
+ * @completer(@completer_cookie) will be called on completion of
+ * the switch, in non-atomic context.
+ * @completer_cookie: opaque context argument for @completer.
+ *
+ * This function causes a cluster switch on the given CPU by waking up
+ * the appropriate switcher thread. This function may or may not return
+ * before the switch has occurred.
+ *
+ * If a @completer callback function is supplied, it will be called when
+ * the switch is complete. This can be used to determine asynchronously
+ * when the switch is complete, regardless of when bL_switch_request()
+ * returns. When @completer is supplied, no new switch request is permitted
+ * for the affected CPU until after the switch is complete, and @completer
+ * has returned.
+ */
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+ bL_switch_completion_handler completer,
+ void *completer_cookie)
+{
+ struct bL_thread *t;
+
+ if (cpu >= ARRAY_SIZE(bL_threads)) {
+ pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
+ return -EINVAL;
+ }
+
+ t = &bL_threads[cpu];
+
+ if (IS_ERR(t->task))
+ return PTR_ERR(t->task);
+ if (!t->task)
+ return -ESRCH;
+
+ spin_lock(&t->lock);
+ if (t->completer) {
+ spin_unlock(&t->lock);
+ return -EBUSY;
+ }
+ t->completer = completer;
+ t->completer_cookie = completer_cookie;
+ t->wanted_cluster = new_cluster_id;
+ spin_unlock(&t->lock);
+ wake_up(&t->wq);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bL_switch_request_cb);
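
A minimal sketch of a caller, assuming the usual completion-on-stack pattern (not taken from this patch); the kernel-doc above guarantees the completer runs in non-atomic context, so completing a sleeping waiter is safe:

    #include <linux/completion.h>
    #include <asm/bL_switcher.h>

    static void example_switch_done(void *cookie)
    {
    	complete(cookie);	/* switch finished; wake the waiter */
    }

    /* Hypothetical caller: move logical CPU 0 onto cluster 1 and block
     * until the switch has completed. */
    static int example_switch_and_wait(void)
    {
    	DECLARE_COMPLETION_ONSTACK(done);
    	int ret;

    	ret = bL_switch_request_cb(0, 1, example_switch_done, &done);
    	if (ret)
    		return ret;
    	wait_for_completion(&done);
    	return 0;
    }
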
+
+/*
+ * Detach an outstanding switch request.
+ *
+ * The switcher will continue with the switch request in the background,
+ * but the completer function will not be called.
+ *
+ * This may be necessary if the completer is in a kernel module which is
+ * about to be unloaded.
+ */
+void bL_switch_request_detach(unsigned int cpu,
+ bL_switch_completion_handler completer)
+{
+ struct bL_thread *t;
+
+ if (cpu >= ARRAY_SIZE(bL_threads)) {
+ pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
+ return;
+ }
+
+ t = &bL_threads[cpu];
+
+ if (IS_ERR(t->task) || !t->task)
+ return;
+
+ spin_lock(&t->lock);
+ if (t->completer == completer)
+ t->completer = NULL;
+ spin_unlock(&t->lock);
+}
+EXPORT_SYMBOL_GPL(bL_switch_request_detach);
+
+/*
+ * Activation and configuration code.
+ */
+
+static DEFINE_MUTEX(bL_switcher_activation_lock);
+static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
+static unsigned int bL_switcher_active;
+static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
+static cpumask_t bL_switcher_removed_logical_cpus;
+
+int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);
+
+int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
+
+static int bL_activation_notify(unsigned long val)
+{
+ int ret;
+
+ ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
+ if (ret & NOTIFY_STOP_MASK)
+ pr_err("%s: notifier chain failed with status 0x%x\n",
+ __func__, ret);
+ return notifier_to_errno(ret);
+}
+
+static void bL_switcher_restore_cpus(void)
+{
+ int i;
+
+ for_each_cpu(i, &bL_switcher_removed_logical_cpus)
+ cpu_up(i);
+}
+
+static int bL_switcher_halve_cpus(void)
+{
+ int i, j, cluster_0, gic_id, ret;
+ unsigned int cpu, cluster, mask;
+ cpumask_t available_cpus;
+
+ /* First pass to validate what we have */
+ mask = 0;
+ for_each_online_cpu(i) {
+ cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+ if (cluster >= 2) {
+ pr_err("%s: only dual cluster systems are supported\n", __func__);
+ return -EINVAL;
+ }
+ if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
+ return -EINVAL;
+ mask |= (1 << cluster);
+ }
+ if (mask != 3) {
+ pr_err("%s: no CPU pairing possible\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Now let's do the pairing. We match each CPU with another CPU
+ * from a different cluster. To get a uniform scheduling behavior
+ * without fiddling with CPU topology and compute capacity data,
+ * we'll use logical CPUs initially belonging to the same cluster.
+ */
+ memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
+ cpumask_copy(&available_cpus, cpu_online_mask);
+ cluster_0 = -1;
+ for_each_cpu(i, &available_cpus) {
+ int match = -1;
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+ if (cluster_0 == -1)
+ cluster_0 = cluster;
+ if (cluster != cluster_0)
+ continue;
+ cpumask_clear_cpu(i, &available_cpus);
+ for_each_cpu(j, &available_cpus) {
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
+ /*
+ * Let's remember the last match to create "odd"
+ * pairing on purpose in order for other code not
+ * to assume any relation between physical and
+ * logical CPU numbers.
+ */
+ if (cluster != cluster_0)
+ match = j;
+ }
+ if (match != -1) {
+ bL_switcher_cpu_pairing[i] = match;
+ cpumask_clear_cpu(match, &available_cpus);
+ pr_info("CPU%d paired with CPU%d\n", i, match);
+ }
+ }
+
+ /*
+ * Now we disable the unwanted CPUs i.e. everything that has no
+ * pairing information (that includes the pairing counterparts).
+ */
+ cpumask_clear(&bL_switcher_removed_logical_cpus);
+ for_each_online_cpu(i) {
+ cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+
+ /* Let's take note of the GIC ID for this CPU */
+ gic_id = gic_get_cpu_id(i);
+ if (gic_id < 0) {
+ pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
+ bL_switcher_restore_cpus();
+ return -EINVAL;
+ }
+ bL_gic_id[cpu][cluster] = gic_id;
+ pr_info("GIC ID for CPU %u cluster %u is %u\n",
+ cpu, cluster, gic_id);
+
+ if (bL_switcher_cpu_pairing[i] != -1) {
+ bL_switcher_cpu_original_cluster[i] = cluster;
+ continue;
+ }
+
+ ret = cpu_down(i);
+ if (ret) {
+ bL_switcher_restore_cpus();
+ return ret;
+ }
+ cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
+ }
+
+ return 0;
+}
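
Worked example of the pairing pass, assuming a hypothetical layout with logical CPUs 0-3 on cluster 0 and 4-7 on cluster 1: because the scan keeps the last other-cluster candidate, the result is 0<->7, 1<->6, 2<->5, 3<->4. A standalone simulation of that inner loop:

    #include <stdio.h>

    int main(void)
    {
    	int cluster[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };	/* hypothetical */
    	int available[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };
    	int i, j;

    	for (i = 0; i < 8; i++) {
    		int match = -1;
    		if (cluster[i] != 0 || !available[i])
    			continue;
    		available[i] = 0;
    		for (j = 0; j < 8; j++)		/* remember the last match */
    			if (available[j] && cluster[j] != 0)
    				match = j;
    		if (match != -1) {
    			available[match] = 0;
    			printf("CPU%d paired with CPU%d\n", i, match);
    		}
    	}
    	return 0;
    }
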
+
+/* Determine the logical CPU a given physical CPU is grouped on. */
+int bL_switcher_get_logical_index(u32 mpidr)
+{
+ int cpu;
+
+ if (!bL_switcher_active)
+ return -EUNATCH;
+
+ mpidr &= MPIDR_HWID_BITMASK;
+ for_each_online_cpu(cpu) {
+ int pairing = bL_switcher_cpu_pairing[cpu];
+ if (pairing == -1)
+ continue;
+ if ((mpidr == cpu_logical_map(cpu)) ||
+ (mpidr == cpu_logical_map(pairing)))
+ return cpu;
+ }
+ return -EINVAL;
+}
+
+static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
+{
+ trace_cpu_migrate_current(get_ns(), read_mpidr());
+}
+
+int bL_switcher_trace_trigger(void)
+{
+ int ret;
+
+ preempt_disable();
+
+ bL_switcher_trace_trigger_cpu(NULL);
+ ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
+
+ preempt_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
+
+static int bL_switcher_enable(void)
+{
+ int cpu, ret;
+
+ mutex_lock(&bL_switcher_activation_lock);
+ cpu_hotplug_driver_lock();
+ if (bL_switcher_active) {
+ cpu_hotplug_driver_unlock();
+ mutex_unlock(&bL_switcher_activation_lock);
+ return 0;
+ }
+
+ pr_info("big.LITTLE switcher initializing\n");
+
+ ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
+ if (ret)
+ goto error;
+
+ ret = bL_switcher_halve_cpus();
+ if (ret)
+ goto error;
+
+ bL_switcher_trace_trigger();
+
+ for_each_online_cpu(cpu) {
+ struct bL_thread *t = &bL_threads[cpu];
+ spin_lock_init(&t->lock);
+ init_waitqueue_head(&t->wq);
+ init_completion(&t->started);
+ t->wanted_cluster = -1;
+ t->task = bL_switcher_thread_create(cpu, t);
+ }
+
+ bL_switcher_active = 1;
+ bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+ pr_info("big.LITTLE switcher initialized\n");
+ goto out;
+
+error:
+ pr_warning("big.LITTLE switcher initialization failed\n");
+ bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+ cpu_hotplug_driver_unlock();
+ mutex_unlock(&bL_switcher_activation_lock);
+ return ret;
+}
+
+#ifdef CONFIG_SYSFS
+
+static void bL_switcher_disable(void)
+{
+ unsigned int cpu, cluster;
+ struct bL_thread *t;
+ struct task_struct *task;
+
+ mutex_lock(&bL_switcher_activation_lock);
+ cpu_hotplug_driver_lock();
+
+ if (!bL_switcher_active)
+ goto out;
+
+ if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
+ bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+ goto out;
+ }
+
+ bL_switcher_active = 0;
+
+ /*
+ * To deactivate the switcher, we must shut down the switcher
+ * threads to prevent any other requests from being accepted.
+ * Then, if the final cluster for a given logical CPU is not the
+ * same as the original one, we'll recreate a switcher thread
+ * just for the purpose of switching the CPU back without any
+ * possibility for interference from external requests.
+ */
+ for_each_online_cpu(cpu) {
+ t = &bL_threads[cpu];
+ task = t->task;
+ t->task = NULL;
+ if (!task || IS_ERR(task))
+ continue;
+ kthread_stop(task);
+ /* no more switch may happen on this CPU at this point */
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ if (cluster == bL_switcher_cpu_original_cluster[cpu])
+ continue;
+ init_completion(&t->started);
+ t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
+ task = bL_switcher_thread_create(cpu, t);
+ if (!IS_ERR(task)) {
+ wait_for_completion(&t->started);
+ kthread_stop(task);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ if (cluster == bL_switcher_cpu_original_cluster[cpu])
+ continue;
+ }
+ /* If execution gets here, we're in trouble. */
+ pr_crit("%s: unable to restore original cluster for CPU %d\n",
+ __func__, cpu);
+ pr_crit("%s: CPU %d can't be restored\n",
+ __func__, bL_switcher_cpu_pairing[cpu]);
+ cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
+ &bL_switcher_removed_logical_cpus);
+ }
+
+ bL_switcher_restore_cpus();
+ bL_switcher_trace_trigger();
+
+ bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+ cpu_hotplug_driver_unlock();
+ mutex_unlock(&bL_switcher_activation_lock);
+}
+
+static ssize_t bL_switcher_active_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", bL_switcher_active);
+}
+
+static ssize_t bL_switcher_active_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+
+ switch (buf[0]) {
+ case '0':
+ bL_switcher_disable();
+ ret = 0;
+ break;
+ case '1':
+ ret = bL_switcher_enable();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return (ret >= 0) ? count : ret;
+}
+
+static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = bL_switcher_trace_trigger();
+
+ return ret ? ret : count;
+}
+
+static struct kobj_attribute bL_switcher_active_attr =
+ __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
+
+static struct kobj_attribute bL_switcher_trace_trigger_attr =
+ __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
+
+static struct attribute *bL_switcher_attrs[] = {
+ &bL_switcher_active_attr.attr,
+ &bL_switcher_trace_trigger_attr.attr,
+ NULL,
+};
+
+static struct attribute_group bL_switcher_attr_group = {
+ .attrs = bL_switcher_attrs,
+};
+
+static struct kobject *bL_switcher_kobj;
+
+static int __init bL_switcher_sysfs_init(void)
+{
+ int ret;
+
+ bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
+ if (!bL_switcher_kobj)
+ return -ENOMEM;
+ ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
+ if (ret)
+ kobject_put(bL_switcher_kobj);
+ return ret;
+}
+
+#endif /* CONFIG_SYSFS */
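
For reference, the attribute group above appears as /sys/kernel/bL_switcher/ (the kobject is created under kernel_kobj). A user-space sketch of driving the `active` attribute, equivalent to echoing '0' or '1' from a shell; the helper name is illustrative:

    #include <fcntl.h>
    #include <unistd.h>

    /* Hypothetical test helper: the store method above only looks at
     * the first character, '0' to disable or '1' to enable. */
    static int set_switcher_active(int enable)
    {
    	int fd = open("/sys/kernel/bL_switcher/active", O_WRONLY);

    	if (fd < 0)
    		return -1;
    	if (write(fd, enable ? "1" : "0", 1) != 1) {
    		close(fd);
    		return -1;
    	}
    	return close(fd);
    }
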
+
+bool bL_switcher_get_enabled(void)
+{
+ mutex_lock(&bL_switcher_activation_lock);
+
+ return bL_switcher_active;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);
+
+void bL_switcher_put_enabled(void)
+{
+ mutex_unlock(&bL_switcher_activation_lock);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
+
+/*
+ * Veto any CPU hotplug operation while the switcher is active.
+ * We're just not ready to deal with that given the trickery involved.
+ */
+static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ if (bL_switcher_active)
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block bL_switcher_hotplug_notifier =
+ { &bL_switcher_hotplug_callback, NULL, 0 };
+
+#ifdef CONFIG_SCHED_HMP
+static bool no_bL_switcher = true;
+#else
+static bool no_bL_switcher;
+#endif
+core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
+
+static int __init bL_switcher_init(void)
+{
+ int ret;
+
+ if (MAX_NR_CLUSTERS != 2) {
+ pr_err("%s: only dual cluster systems are supported\n", __func__);
+ return -EINVAL;
+ }
+
+ register_cpu_notifier(&bL_switcher_hotplug_notifier);
+
+ if (!no_bL_switcher) {
+ ret = bL_switcher_enable();
+ if (ret)
+ return ret;
+ }
+
+#ifdef CONFIG_SYSFS
+ ret = bL_switcher_sysfs_init();
+ if (ret)
+ pr_err("%s: unable to create sysfs entry\n", __func__);
+#endif
+
+ return 0;
+}
+
+late_initcall(bL_switcher_init);
diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
new file mode 100644
index 000000000000..5e2dd197e728
--- /dev/null
+++ b/arch/arm/common/bL_switcher_dummy_if.c
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
+ *
+ * Created by: Nicolas Pitre, November 2012
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * Dummy interface to user space for debugging purposes only.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/uaccess.h>
+#include <asm/bL_switcher.h>
+
+static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ unsigned char val[3];
+ unsigned int cpu, cluster;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ if (len < 3)
+ return -EINVAL;
+
+ if (copy_from_user(val, buf, 3))
+ return -EFAULT;
+
+ /* format: <cpu#>,<cluster#> */
+ if (val[0] < '0' || val[0] > '4' ||
+ val[1] != ',' ||
+ val[2] < '0' || val[2] > '1')
+ return -EINVAL;
+
+ cpu = val[0] - '0';
+ cluster = val[2] - '0';
+ ret = bL_switch_request(cpu, cluster);
+
+ return ret ? : len;
+}
+
+static const struct file_operations bL_switcher_fops = {
+ .write = bL_switcher_write,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice bL_switcher_device = {
+ MISC_DYNAMIC_MINOR,
+ "b.L_switcher",
+ &bL_switcher_fops
+};
+
+static int __init bL_switcher_dummy_if_init(void)
+{
+ return misc_register(&bL_switcher_device);
+}
+
+static void __exit bL_switcher_dummy_if_exit(void)
+{
+ misc_deregister(&bL_switcher_device);
+}
+
+module_init(bL_switcher_dummy_if_init);
+module_exit(bL_switcher_dummy_if_exit);
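
With a typical udev setup the misc device registered above shows up as /dev/b.L_switcher (the path is an assumption; only the device name comes from this patch). A user-space sketch matching the "<cpu#>,<cluster#>" format the write handler parses, which requires at least three bytes:

    #include <fcntl.h>
    #include <unistd.h>

    /* Hypothetical test: ask logical CPU 0 to switch to cluster 1. */
    static int request_switch_cpu0_cluster1(void)
    {
    	int fd = open("/dev/b.L_switcher", O_WRONLY);
    	ssize_t n;

    	if (fd < 0)
    		return -1;
    	n = write(fd, "0,1", 3);
    	close(fd);
    	return n == 3 ? 0 : -1;
    }
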
diff --git a/arch/arm/common/fiq_debugger.c b/arch/arm/common/fiq_debugger.c
deleted file mode 100644
index 65b943c76300..000000000000
--- a/arch/arm/common/fiq_debugger.c
+++ /dev/null
@@ -1,1376 +0,0 @@
-/*
- * arch/arm/common/fiq_debugger.c
- *
- * Serial Debugger Interface accessed through an FIQ interrupt.
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <stdarg.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/console.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/kernel_stat.h>
-#include <linux/kmsg_dump.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/timer.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/wakelock.h>
-
-#include <asm/fiq_debugger.h>
-#include <asm/fiq_glue.h>
-#include <asm/stacktrace.h>
-
-#include <linux/uaccess.h>
-
-#include "fiq_debugger_ringbuf.h"
-
-#define DEBUG_MAX 64
-#define MAX_UNHANDLED_FIQ_COUNT 1000000
-
-#define MAX_FIQ_DEBUGGER_PORTS 4
-
-#define THREAD_INFO(sp) ((struct thread_info *) \
- ((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
-
-struct fiq_debugger_state {
- struct fiq_glue_handler handler;
-
- int fiq;
- int uart_irq;
- int signal_irq;
- int wakeup_irq;
- bool wakeup_irq_no_set_wake;
- struct clk *clk;
- struct fiq_debugger_pdata *pdata;
- struct platform_device *pdev;
-
- char debug_cmd[DEBUG_MAX];
- int debug_busy;
- int debug_abort;
-
- char debug_buf[DEBUG_MAX];
- int debug_count;
-
- bool no_sleep;
- bool debug_enable;
- bool ignore_next_wakeup_irq;
- struct timer_list sleep_timer;
- spinlock_t sleep_timer_lock;
- bool uart_enabled;
- struct wake_lock debugger_wake_lock;
- bool console_enable;
- int current_cpu;
- atomic_t unhandled_fiq_count;
- bool in_fiq;
-
- struct work_struct work;
- spinlock_t work_lock;
- char work_cmd[DEBUG_MAX];
-
-#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
- spinlock_t console_lock;
- struct console console;
- struct tty_port tty_port;
- struct fiq_debugger_ringbuf *tty_rbuf;
- bool syslog_dumping;
-#endif
-
- unsigned int last_irqs[NR_IRQS];
- unsigned int last_local_timer_irqs[NR_CPUS];
-};
-
-#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
-struct tty_driver *fiq_tty_driver;
-#endif
-
-#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
-static bool initial_no_sleep = true;
-#else
-static bool initial_no_sleep;
-#endif
-
-#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
-static bool initial_debug_enable = true;
-static bool initial_console_enable = true;
-#else
-static bool initial_debug_enable;
-static bool initial_console_enable;
-#endif
-
-static bool fiq_kgdb_enable;
-
-module_param_named(no_sleep, initial_no_sleep, bool, 0644);
-module_param_named(debug_enable, initial_debug_enable, bool, 0644);
-module_param_named(console_enable, initial_console_enable, bool, 0644);
-module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644);
-
-#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
-static inline void enable_wakeup_irq(struct fiq_debugger_state *state) {}
-static inline void disable_wakeup_irq(struct fiq_debugger_state *state) {}
-#else
-static inline void enable_wakeup_irq(struct fiq_debugger_state *state)
-{
- if (state->wakeup_irq < 0)
- return;
- enable_irq(state->wakeup_irq);
- if (!state->wakeup_irq_no_set_wake)
- enable_irq_wake(state->wakeup_irq);
-}
-static inline void disable_wakeup_irq(struct fiq_debugger_state *state)
-{
- if (state->wakeup_irq < 0)
- return;
- disable_irq_nosync(state->wakeup_irq);
- if (!state->wakeup_irq_no_set_wake)
- disable_irq_wake(state->wakeup_irq);
-}
-#endif
-
-static inline bool debug_have_fiq(struct fiq_debugger_state *state)
-{
- return (state->fiq >= 0);
-}
-
-static void debug_force_irq(struct fiq_debugger_state *state)
-{
- unsigned int irq = state->signal_irq;
-
- if (WARN_ON(!debug_have_fiq(state)))
- return;
- if (state->pdata->force_irq) {
- state->pdata->force_irq(state->pdev, irq);
- } else {
- struct irq_chip *chip = irq_get_chip(irq);
- if (chip && chip->irq_retrigger)
- chip->irq_retrigger(irq_get_irq_data(irq));
- }
-}
-
-static void debug_uart_enable(struct fiq_debugger_state *state)
-{
- if (state->clk)
- clk_enable(state->clk);
- if (state->pdata->uart_enable)
- state->pdata->uart_enable(state->pdev);
-}
-
-static void debug_uart_disable(struct fiq_debugger_state *state)
-{
- if (state->pdata->uart_disable)
- state->pdata->uart_disable(state->pdev);
- if (state->clk)
- clk_disable(state->clk);
-}
-
-static void debug_uart_flush(struct fiq_debugger_state *state)
-{
- if (state->pdata->uart_flush)
- state->pdata->uart_flush(state->pdev);
-}
-
-static void debug_putc(struct fiq_debugger_state *state, char c)
-{
- state->pdata->uart_putc(state->pdev, c);
-}
-
-static void debug_puts(struct fiq_debugger_state *state, char *s)
-{
- unsigned c;
- while ((c = *s++)) {
- if (c == '\n')
- debug_putc(state, '\r');
- debug_putc(state, c);
- }
-}
-
-static void debug_prompt(struct fiq_debugger_state *state)
-{
- debug_puts(state, "debug> ");
-}
-
-static void dump_kernel_log(struct fiq_debugger_state *state)
-{
- char buf[512];
- size_t len;
- struct kmsg_dumper dumper = { .active = true };
-
- kmsg_dump_rewind_nolock(&dumper);
- while (kmsg_dump_get_line_nolock(&dumper, true, buf,
- sizeof(buf) - 1, &len)) {
- buf[len] = 0;
- debug_puts(state, buf);
- }
-}
-
-static char *mode_name(unsigned cpsr)
-{
- switch (cpsr & MODE_MASK) {
- case USR_MODE: return "USR";
- case FIQ_MODE: return "FIQ";
- case IRQ_MODE: return "IRQ";
- case SVC_MODE: return "SVC";
- case ABT_MODE: return "ABT";
- case UND_MODE: return "UND";
- case SYSTEM_MODE: return "SYS";
- default: return "???";
- }
-}
-
-static int debug_printf(void *cookie, const char *fmt, ...)
-{
- struct fiq_debugger_state *state = cookie;
- char buf[256];
- va_list ap;
-
- va_start(ap, fmt);
- vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
- debug_puts(state, buf);
- return state->debug_abort;
-}
-
-/* Safe outside fiq context */
-static int debug_printf_nfiq(void *cookie, const char *fmt, ...)
-{
- struct fiq_debugger_state *state = cookie;
- char buf[256];
- va_list ap;
- unsigned long irq_flags;
-
- va_start(ap, fmt);
- vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
- local_irq_save(irq_flags);
- debug_puts(state, buf);
- debug_uart_flush(state);
- local_irq_restore(irq_flags);
- return state->debug_abort;
-}
-
-static void dump_regs(struct fiq_debugger_state *state, unsigned *regs)
-{
- debug_printf(state, " r0 %08x r1 %08x r2 %08x r3 %08x\n",
- regs[0], regs[1], regs[2], regs[3]);
- debug_printf(state, " r4 %08x r5 %08x r6 %08x r7 %08x\n",
- regs[4], regs[5], regs[6], regs[7]);
- debug_printf(state, " r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n",
- regs[8], regs[9], regs[10], regs[11],
- mode_name(regs[16]));
- if ((regs[16] & MODE_MASK) == USR_MODE)
- debug_printf(state, " ip %08x sp %08x lr %08x pc %08x "
- "cpsr %08x\n", regs[12], regs[13], regs[14],
- regs[15], regs[16]);
- else
- debug_printf(state, " ip %08x sp %08x lr %08x pc %08x "
- "cpsr %08x spsr %08x\n", regs[12], regs[13],
- regs[14], regs[15], regs[16], regs[17]);
-}
-
-struct mode_regs {
- unsigned long sp_svc;
- unsigned long lr_svc;
- unsigned long spsr_svc;
-
- unsigned long sp_abt;
- unsigned long lr_abt;
- unsigned long spsr_abt;
-
- unsigned long sp_und;
- unsigned long lr_und;
- unsigned long spsr_und;
-
- unsigned long sp_irq;
- unsigned long lr_irq;
- unsigned long spsr_irq;
-
- unsigned long r8_fiq;
- unsigned long r9_fiq;
- unsigned long r10_fiq;
- unsigned long r11_fiq;
- unsigned long r12_fiq;
- unsigned long sp_fiq;
- unsigned long lr_fiq;
- unsigned long spsr_fiq;
-};
-
-void __naked get_mode_regs(struct mode_regs *regs)
-{
- asm volatile (
- "mrs r1, cpsr\n"
- "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
- "stmia r0!, {r13 - r14}\n"
- "mrs r2, spsr\n"
- "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
- "stmia r0!, {r2, r13 - r14}\n"
- "mrs r2, spsr\n"
- "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
- "stmia r0!, {r2, r13 - r14}\n"
- "mrs r2, spsr\n"
- "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
- "stmia r0!, {r2, r13 - r14}\n"
- "mrs r2, spsr\n"
- "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
- "stmia r0!, {r2, r8 - r14}\n"
- "mrs r2, spsr\n"
- "stmia r0!, {r2}\n"
- "msr cpsr_c, r1\n"
- "bx lr\n");
-}
-
-
-static void dump_allregs(struct fiq_debugger_state *state, unsigned *regs)
-{
- struct mode_regs mode_regs;
- dump_regs(state, regs);
- get_mode_regs(&mode_regs);
- debug_printf(state, " svc: sp %08x lr %08x spsr %08x\n",
- mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
- debug_printf(state, " abt: sp %08x lr %08x spsr %08x\n",
- mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
- debug_printf(state, " und: sp %08x lr %08x spsr %08x\n",
- mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
- debug_printf(state, " irq: sp %08x lr %08x spsr %08x\n",
- mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
- debug_printf(state, " fiq: r8 %08x r9 %08x r10 %08x r11 %08x "
- "r12 %08x\n",
- mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
- mode_regs.r11_fiq, mode_regs.r12_fiq);
- debug_printf(state, " fiq: sp %08x lr %08x spsr %08x\n",
- mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
-}
-
-static void dump_irqs(struct fiq_debugger_state *state)
-{
- int n;
- struct irq_desc *desc;
-
- debug_printf(state, "irqnr total since-last status name\n");
- for_each_irq_desc(n, desc) {
- struct irqaction *act = desc->action;
- if (!act && !kstat_irqs(n))
- continue;
- debug_printf(state, "%5d: %10u %11u %8x %s\n", n,
- kstat_irqs(n),
- kstat_irqs(n) - state->last_irqs[n],
- desc->status_use_accessors,
- (act && act->name) ? act->name : "???");
- state->last_irqs[n] = kstat_irqs(n);
- }
-}
-
-struct stacktrace_state {
- struct fiq_debugger_state *state;
- unsigned int depth;
-};
-
-static int report_trace(struct stackframe *frame, void *d)
-{
- struct stacktrace_state *sts = d;
-
- if (sts->depth) {
- debug_printf(sts->state,
- " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
- frame->pc, frame->pc, frame->lr, frame->lr,
- frame->sp, frame->fp);
- sts->depth--;
- return 0;
- }
- debug_printf(sts->state, " ...\n");
-
- return sts->depth == 0;
-}
-
-struct frame_tail {
- struct frame_tail *fp;
- unsigned long sp;
- unsigned long lr;
-} __attribute__((packed));
-
-static struct frame_tail *user_backtrace(struct fiq_debugger_state *state,
- struct frame_tail *tail)
-{
- struct frame_tail buftail[2];
-
- /* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
- debug_printf(state, " invalid frame pointer %p\n", tail);
- return NULL;
- }
- if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
- debug_printf(state,
- " failed to copy frame pointer %p\n", tail);
- return NULL;
- }
-
- debug_printf(state, " %p\n", buftail[0].lr);
-
- /* frame pointers should strictly progress back up the stack
- * (towards higher addresses) */
- if (tail >= buftail[0].fp)
- return NULL;
-
- return buftail[0].fp-1;
-}
-
-void dump_stacktrace(struct fiq_debugger_state *state,
- struct pt_regs * const regs, unsigned int depth, void *ssp)
-{
- struct frame_tail *tail;
- struct thread_info *real_thread_info = THREAD_INFO(ssp);
- struct stacktrace_state sts;
-
- sts.depth = depth;
- sts.state = state;
- *current_thread_info() = *real_thread_info;
-
- if (!current)
- debug_printf(state, "current NULL\n");
- else
- debug_printf(state, "pid: %d comm: %s\n",
- current->pid, current->comm);
- dump_regs(state, (unsigned *)regs);
-
- if (!user_mode(regs)) {
- struct stackframe frame;
- frame.fp = regs->ARM_fp;
- frame.sp = regs->ARM_sp;
- frame.lr = regs->ARM_lr;
- frame.pc = regs->ARM_pc;
- debug_printf(state,
- " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
- regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
- regs->ARM_sp, regs->ARM_fp);
- walk_stackframe(&frame, report_trace, &sts);
- return;
- }
-
- tail = ((struct frame_tail *) regs->ARM_fp) - 1;
- while (depth-- && tail && !((unsigned long) tail & 3))
- tail = user_backtrace(state, tail);
-}
-
-static void do_ps(struct fiq_debugger_state *state)
-{
- struct task_struct *g;
- struct task_struct *p;
- unsigned task_state;
- static const char stat_nam[] = "RSDTtZX";
-
- debug_printf(state, "pid ppid prio task pc\n");
- read_lock(&tasklist_lock);
- do_each_thread(g, p) {
- task_state = p->state ? __ffs(p->state) + 1 : 0;
- debug_printf(state,
- "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
- debug_printf(state, "%-13.13s %c", p->comm,
- task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
- if (task_state == TASK_RUNNING)
- debug_printf(state, " running\n");
- else
- debug_printf(state, " %08lx\n", thread_saved_pc(p));
- } while_each_thread(g, p);
- read_unlock(&tasklist_lock);
-}
-
-#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
-static void begin_syslog_dump(struct fiq_debugger_state *state)
-{
- state->syslog_dumping = true;
-}
-
-static void end_syslog_dump(struct fiq_debugger_state *state)
-{
- state->syslog_dumping = false;
-}
-#else
-extern int do_syslog(int type, char __user *buf, int count);
-static void begin_syslog_dump(struct fiq_debugger_state *state)
-{
- do_syslog(5 /* clear */, NULL, 0);
-}
-
-static void end_syslog_dump(struct fiq_debugger_state *state)
-{
- dump_kernel_log(state);
-}
-#endif
-
-static void do_sysrq(struct fiq_debugger_state *state, char rq)
-{
- if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) {
- debug_printf(state, "sysrq-g blocked\n");
- return;
- }
- begin_syslog_dump(state);
- handle_sysrq(rq);
- end_syslog_dump(state);
-}
-
-#ifdef CONFIG_KGDB
-static void do_kgdb(struct fiq_debugger_state *state)
-{
- if (!fiq_kgdb_enable) {
- debug_printf(state, "kgdb through fiq debugger not enabled\n");
- return;
- }
-
- debug_printf(state, "enabling console and triggering kgdb\n");
- state->console_enable = true;
- handle_sysrq('g');
-}
-#endif
-
-static void debug_schedule_work(struct fiq_debugger_state *state, char *cmd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&state->work_lock, flags);
- if (state->work_cmd[0] != '\0') {
- debug_printf(state, "work command processor busy\n");
- spin_unlock_irqrestore(&state->work_lock, flags);
- return;
- }
-
- strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd));
- spin_unlock_irqrestore(&state->work_lock, flags);
-
- schedule_work(&state->work);
-}
-
-static void debug_work(struct work_struct *work)
-{
- struct fiq_debugger_state *state;
- char work_cmd[DEBUG_MAX];
- char *cmd;
- unsigned long flags;
-
- state = container_of(work, struct fiq_debugger_state, work);
-
- spin_lock_irqsave(&state->work_lock, flags);
-
- strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd));
- state->work_cmd[0] = '\0';
-
- spin_unlock_irqrestore(&state->work_lock, flags);
-
- cmd = work_cmd;
- if (!strncmp(cmd, "reboot", 6)) {
- cmd += 6;
- while (*cmd == ' ')
- cmd++;
- if (*cmd != '\0')
- kernel_restart(cmd);
- else
- kernel_restart(NULL);
- } else {
- debug_printf(state, "unknown work command '%s'\n", work_cmd);
- }
-}
-
-/* This function CANNOT be called in FIQ context */
-static void debug_irq_exec(struct fiq_debugger_state *state, char *cmd)
-{
- if (!strcmp(cmd, "ps"))
- do_ps(state);
- if (!strcmp(cmd, "sysrq"))
- do_sysrq(state, 'h');
- if (!strncmp(cmd, "sysrq ", 6))
- do_sysrq(state, cmd[6]);
-#ifdef CONFIG_KGDB
- if (!strcmp(cmd, "kgdb"))
- do_kgdb(state);
-#endif
- if (!strncmp(cmd, "reboot", 6))
- debug_schedule_work(state, cmd);
-}
-
-static void debug_help(struct fiq_debugger_state *state)
-{
- debug_printf(state, "FIQ Debugger commands:\n"
- " pc PC status\n"
- " regs Register dump\n"
- " allregs Extended Register dump\n"
- " bt Stack trace\n"
- " reboot [<c>] Reboot with command <c>\n"
- " reset [<c>] Hard reset with command <c>\n"
- " irqs Interupt status\n"
- " kmsg Kernel log\n"
- " version Kernel version\n");
- debug_printf(state, " sleep Allow sleep while in FIQ\n"
- " nosleep Disable sleep while in FIQ\n"
- " console Switch terminal to console\n"
- " cpu Current CPU\n"
- " cpu <number> Switch to CPU<number>\n");
- debug_printf(state, " ps Process list\n"
- " sysrq sysrq options\n"
- " sysrq <param> Execute sysrq with <param>\n");
-#ifdef CONFIG_KGDB
- debug_printf(state, " kgdb Enter kernel debugger\n");
-#endif
-}
-
-static void take_affinity(void *info)
-{
- struct fiq_debugger_state *state = info;
- struct cpumask cpumask;
-
- cpumask_clear(&cpumask);
- cpumask_set_cpu(get_cpu(), &cpumask);
-
- irq_set_affinity(state->uart_irq, &cpumask);
-}
-
-static void switch_cpu(struct fiq_debugger_state *state, int cpu)
-{
- if (!debug_have_fiq(state))
- smp_call_function_single(cpu, take_affinity, state, false);
- state->current_cpu = cpu;
-}
-
-static bool debug_fiq_exec(struct fiq_debugger_state *state,
- const char *cmd, unsigned *regs, void *svc_sp)
-{
- bool signal_helper = false;
-
- if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
- debug_help(state);
- } else if (!strcmp(cmd, "pc")) {
- debug_printf(state, " pc %08x cpsr %08x mode %s\n",
- regs[15], regs[16], mode_name(regs[16]));
- } else if (!strcmp(cmd, "regs")) {
- dump_regs(state, regs);
- } else if (!strcmp(cmd, "allregs")) {
- dump_allregs(state, regs);
- } else if (!strcmp(cmd, "bt")) {
- dump_stacktrace(state, (struct pt_regs *)regs, 100, svc_sp);
- } else if (!strncmp(cmd, "reset", 5)) {
- cmd += 5;
- while (*cmd == ' ')
- cmd++;
- if (*cmd) {
- char tmp_cmd[32];
- strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
- machine_restart(tmp_cmd);
- } else {
- machine_restart(NULL);
- }
- } else if (!strcmp(cmd, "irqs")) {
- dump_irqs(state);
- } else if (!strcmp(cmd, "kmsg")) {
- dump_kernel_log(state);
- } else if (!strcmp(cmd, "version")) {
- debug_printf(state, "%s\n", linux_banner);
- } else if (!strcmp(cmd, "sleep")) {
- state->no_sleep = false;
- debug_printf(state, "enabling sleep\n");
- } else if (!strcmp(cmd, "nosleep")) {
- state->no_sleep = true;
- debug_printf(state, "disabling sleep\n");
- } else if (!strcmp(cmd, "console")) {
- debug_printf(state, "console mode\n");
- debug_uart_flush(state);
- state->console_enable = true;
- } else if (!strcmp(cmd, "cpu")) {
- debug_printf(state, "cpu %d\n", state->current_cpu);
- } else if (!strncmp(cmd, "cpu ", 4)) {
- unsigned long cpu = 0;
- if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
- switch_cpu(state, cpu);
- else
- debug_printf(state, "invalid cpu\n");
- debug_printf(state, "cpu %d\n", state->current_cpu);
- } else {
- if (state->debug_busy) {
- debug_printf(state,
- "command processor busy. trying to abort.\n");
- state->debug_abort = -1;
- } else {
- strcpy(state->debug_cmd, cmd);
- state->debug_busy = 1;
- }
-
- return true;
- }
- if (!state->console_enable)
- debug_prompt(state);
-
- return signal_helper;
-}
-
-static void sleep_timer_expired(unsigned long data)
-{
- struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&state->sleep_timer_lock, flags);
- if (state->uart_enabled && !state->no_sleep) {
- if (state->debug_enable && !state->console_enable) {
- state->debug_enable = false;
- debug_printf_nfiq(state, "suspending fiq debugger\n");
- }
- state->ignore_next_wakeup_irq = true;
- debug_uart_disable(state);
- state->uart_enabled = false;
- enable_wakeup_irq(state);
- }
- wake_unlock(&state->debugger_wake_lock);
- spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
-}
-
-static void handle_wakeup(struct fiq_debugger_state *state)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&state->sleep_timer_lock, flags);
- if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
- state->ignore_next_wakeup_irq = false;
- } else if (!state->uart_enabled) {
- wake_lock(&state->debugger_wake_lock);
- debug_uart_enable(state);
- state->uart_enabled = true;
- disable_wakeup_irq(state);
- mod_timer(&state->sleep_timer, jiffies + HZ / 2);
- }
- spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
-}
-
-static irqreturn_t wakeup_irq_handler(int irq, void *dev)
-{
- struct fiq_debugger_state *state = dev;
-
- if (!state->no_sleep)
- debug_puts(state, "WAKEUP\n");
- handle_wakeup(state);
-
- return IRQ_HANDLED;
-}
-
-static void debug_handle_console_irq_context(struct fiq_debugger_state *state)
-{
-#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
- if (state->tty_port.ops) {
- int i;
- int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
- for (i = 0; i < count; i++) {
- int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
- tty_insert_flip_char(&state->tty_port, c, TTY_NORMAL);
- if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
- pr_warn("fiq tty failed to consume byte\n");
- }
- tty_flip_buffer_push(&state->tty_port);
- }
-#endif
-}
-
-static void debug_handle_irq_context(struct fiq_debugger_state *state)
-{
- if (!state->no_sleep) {
- unsigned long flags;
-
- spin_lock_irqsave(&state->sleep_timer_lock, flags);
- wake_lock(&state->debugger_wake_lock);
- mod_timer(&state->sleep_timer, jiffies + HZ * 5);
- spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
- }
- debug_handle_console_irq_context(state);
- if (state->debug_busy) {
- debug_irq_exec(state, state->debug_cmd);
- if (!state->console_enable)
- debug_prompt(state);
- state->debug_busy = 0;
- }
-}
-
-static int debug_getc(struct fiq_debugger_state *state)
-{
- return state->pdata->uart_getc(state->pdev);
-}
-
-static bool debug_handle_uart_interrupt(struct fiq_debugger_state *state,
- int this_cpu, void *regs, void *svc_sp)
-{
- int c;
- static int last_c;
- int count = 0;
- bool signal_helper = false;
-
- if (this_cpu != state->current_cpu) {
- if (state->in_fiq)
- return false;
-
- if (atomic_inc_return(&state->unhandled_fiq_count) !=
- MAX_UNHANDLED_FIQ_COUNT)
- return false;
-
- debug_printf(state, "fiq_debugger: cpu %d not responding, "
- "reverting to cpu %d\n", state->current_cpu,
- this_cpu);
-
- atomic_set(&state->unhandled_fiq_count, 0);
- switch_cpu(state, this_cpu);
- return false;
- }
-
- state->in_fiq = true;
-
- while ((c = debug_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
- count++;
- if (!state->debug_enable) {
- if ((c == 13) || (c == 10)) {
- state->debug_enable = true;
- state->debug_count = 0;
- debug_prompt(state);
- }
- } else if (c == FIQ_DEBUGGER_BREAK) {
- state->console_enable = false;
- debug_puts(state, "fiq debugger mode\n");
- state->debug_count = 0;
- debug_prompt(state);
-#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
- } else if (state->console_enable && state->tty_rbuf) {
- fiq_debugger_ringbuf_push(state->tty_rbuf, c);
- signal_helper = true;
-#endif
- } else if ((c >= ' ') && (c < 127)) {
- if (state->debug_count < (DEBUG_MAX - 1)) {
- state->debug_buf[state->debug_count++] = c;
- debug_putc(state, c);
- }
- } else if ((c == 8) || (c == 127)) {
- if (state->debug_count > 0) {
- state->debug_count--;
- debug_putc(state, 8);
- debug_putc(state, ' ');
- debug_putc(state, 8);
- }
- } else if ((c == 13) || (c == 10)) {
- if (c == '\r' || (c == '\n' && last_c != '\r')) {
- debug_putc(state, '\r');
- debug_putc(state, '\n');
- }
- if (state->debug_count) {
- state->debug_buf[state->debug_count] = 0;
- state->debug_count = 0;
- signal_helper |=
- debug_fiq_exec(state, state->debug_buf,
- regs, svc_sp);
- } else {
- debug_prompt(state);
- }
- }
- last_c = c;
- }
- if (!state->console_enable)
- debug_uart_flush(state);
- if (state->pdata->fiq_ack)
- state->pdata->fiq_ack(state->pdev, state->fiq);
-
- /* poke sleep timer if necessary */
- if (state->debug_enable && !state->no_sleep)
- signal_helper = true;
-
- atomic_set(&state->unhandled_fiq_count, 0);
- state->in_fiq = false;
-
- return signal_helper;
-}
-
-static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
-{
- struct fiq_debugger_state *state =
- container_of(h, struct fiq_debugger_state, handler);
- unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
- bool need_irq;
-
- need_irq = debug_handle_uart_interrupt(state, this_cpu, regs, svc_sp);
- if (need_irq)
- debug_force_irq(state);
-}
-
-/*
- * When not using FIQs, we only use this single interrupt as an entry point.
- * This just effectively takes over the UART interrupt and does all the work
- * in this context.
- */
-static irqreturn_t debug_uart_irq(int irq, void *dev)
-{
- struct fiq_debugger_state *state = dev;
- bool not_done;
-
- handle_wakeup(state);
-
- /* handle the debugger irq in regular context */
- not_done = debug_handle_uart_interrupt(state, smp_processor_id(),
- get_irq_regs(),
- current_thread_info());
- if (not_done)
- debug_handle_irq_context(state);
-
- return IRQ_HANDLED;
-}
-
-/*
- * If FIQs are used, not everything can happen in fiq context.
- * FIQ handler does what it can and then signals this interrupt to finish the
- * job in irq context.
- */
-static irqreturn_t debug_signal_irq(int irq, void *dev)
-{
- struct fiq_debugger_state *state = dev;
-
- if (state->pdata->force_irq_ack)
- state->pdata->force_irq_ack(state->pdev, state->signal_irq);
-
- debug_handle_irq_context(state);
-
- return IRQ_HANDLED;
-}
-
-static void debug_resume(struct fiq_glue_handler *h)
-{
- struct fiq_debugger_state *state =
- container_of(h, struct fiq_debugger_state, handler);
- if (state->pdata->uart_resume)
- state->pdata->uart_resume(state->pdev);
-}
-
-#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
-struct tty_driver *debug_console_device(struct console *co, int *index)
-{
- *index = co->index;
- return fiq_tty_driver;
-}
-
-static void debug_console_write(struct console *co,
- const char *s, unsigned int count)
-{
- struct fiq_debugger_state *state;
- unsigned long flags;
-
- state = container_of(co, struct fiq_debugger_state, console);
-
- if (!state->console_enable && !state->syslog_dumping)
- return;
-
- debug_uart_enable(state);
- spin_lock_irqsave(&state->console_lock, flags);
- while (count--) {
- if (*s == '\n')
- debug_putc(state, '\r');
- debug_putc(state, *s++);
- }
- debug_uart_flush(state);
- spin_unlock_irqrestore(&state->console_lock, flags);
- debug_uart_disable(state);
-}
-
-static struct console fiq_debugger_console = {
- .name = "ttyFIQ",
- .device = debug_console_device,
- .write = debug_console_write,
- .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
-};
-
-int fiq_tty_open(struct tty_struct *tty, struct file *filp)
-{
- int line = tty->index;
- struct fiq_debugger_state **states = tty->driver->driver_state;
- struct fiq_debugger_state *state = states[line];
-
- return tty_port_open(&state->tty_port, tty, filp);
-}
-
-void fiq_tty_close(struct tty_struct *tty, struct file *filp)
-{
- tty_port_close(tty->port, tty, filp);
-}
-
-int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- int i;
- int line = tty->index;
- struct fiq_debugger_state **states = tty->driver->driver_state;
- struct fiq_debugger_state *state = states[line];
-
- if (!state->console_enable)
- return count;
-
- debug_uart_enable(state);
- spin_lock_irq(&state->console_lock);
- for (i = 0; i < count; i++)
- debug_putc(state, *buf++);
- spin_unlock_irq(&state->console_lock);
- debug_uart_disable(state);
-
- return count;
-}
-
-int fiq_tty_write_room(struct tty_struct *tty)
-{
- return 16;
-}
-
-#ifdef CONFIG_CONSOLE_POLL
-static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options)
-{
- return 0;
-}
-
-static int fiq_tty_poll_get_char(struct tty_driver *driver, int line)
-{
- struct fiq_debugger_state **states = driver->driver_state;
- struct fiq_debugger_state *state = states[line];
- int c = NO_POLL_CHAR;
-
- debug_uart_enable(state);
- if (debug_have_fiq(state)) {
- int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
- if (count > 0) {
- c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
- fiq_debugger_ringbuf_consume(state->tty_rbuf, 1);
- }
- } else {
- c = debug_getc(state);
- if (c == FIQ_DEBUGGER_NO_CHAR)
- c = NO_POLL_CHAR;
- }
- debug_uart_disable(state);
-
- return c;
-}
-
-static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch)
-{
- struct fiq_debugger_state **states = driver->driver_state;
- struct fiq_debugger_state *state = states[line];
- debug_uart_enable(state);
- debug_putc(state, ch);
- debug_uart_disable(state);
-}
-#endif
-
-static const struct tty_port_operations fiq_tty_port_ops;
-
-static const struct tty_operations fiq_tty_driver_ops = {
- .write = fiq_tty_write,
- .write_room = fiq_tty_write_room,
- .open = fiq_tty_open,
- .close = fiq_tty_close,
-#ifdef CONFIG_CONSOLE_POLL
- .poll_init = fiq_tty_poll_init,
- .poll_get_char = fiq_tty_poll_get_char,
- .poll_put_char = fiq_tty_poll_put_char,
-#endif
-};
-
-static int fiq_debugger_tty_init(void)
-{
- int ret;
- struct fiq_debugger_state **states = NULL;
-
- states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL);
- if (!states) {
- pr_err("Failed to allocate fiq debugger state structres\n");
- return -ENOMEM;
- }
-
- fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS);
- if (!fiq_tty_driver) {
- pr_err("Failed to allocate fiq debugger tty\n");
- ret = -ENOMEM;
- goto err_free_state;
- }
-
- fiq_tty_driver->owner = THIS_MODULE;
- fiq_tty_driver->driver_name = "fiq-debugger";
- fiq_tty_driver->name = "ttyFIQ";
- fiq_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- fiq_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- fiq_tty_driver->init_termios = tty_std_termios;
- fiq_tty_driver->flags = TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV;
- fiq_tty_driver->driver_state = states;
-
- fiq_tty_driver->init_termios.c_cflag =
- B115200 | CS8 | CREAD | HUPCL | CLOCAL;
- fiq_tty_driver->init_termios.c_ispeed = 115200;
- fiq_tty_driver->init_termios.c_ospeed = 115200;
-
- tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops);
-
- ret = tty_register_driver(fiq_tty_driver);
- if (ret) {
- pr_err("Failed to register fiq tty: %d\n", ret);
- goto err_free_tty;
- }
-
- pr_info("Registered FIQ tty driver\n");
- return 0;
-
-err_free_tty:
- put_tty_driver(fiq_tty_driver);
- fiq_tty_driver = NULL;
-err_free_state:
- kfree(states);
- return ret;
-}
-
-static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state)
-{
- int ret;
- struct device *tty_dev;
- struct fiq_debugger_state **states = fiq_tty_driver->driver_state;
-
- states[state->pdev->id] = state;
-
- state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
- if (!state->tty_rbuf) {
- pr_err("Failed to allocate fiq debugger ringbuf\n");
- ret = -ENOMEM;
- goto err;
- }
-
- tty_port_init(&state->tty_port);
- state->tty_port.ops = &fiq_tty_port_ops;
-
- tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver,
- state->pdev->id, &state->pdev->dev);
- if (IS_ERR(tty_dev)) {
- pr_err("Failed to register fiq debugger tty device\n");
- ret = PTR_ERR(tty_dev);
- goto err;
- }
-
- device_set_wakeup_capable(tty_dev, 1);
-
- pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id);
-
- return 0;
-
-err:
- fiq_debugger_ringbuf_free(state->tty_rbuf);
- state->tty_rbuf = NULL;
- return ret;
-}
-#endif
-
-static int fiq_debugger_dev_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct fiq_debugger_state *state = platform_get_drvdata(pdev);
-
- if (state->pdata->uart_dev_suspend)
- return state->pdata->uart_dev_suspend(pdev);
- return 0;
-}
-
-static int fiq_debugger_dev_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct fiq_debugger_state *state = platform_get_drvdata(pdev);
-
- if (state->pdata->uart_dev_resume)
- return state->pdata->uart_dev_resume(pdev);
- return 0;
-}
-
-static int fiq_debugger_probe(struct platform_device *pdev)
-{
- int ret;
- struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
- struct fiq_debugger_state *state;
- int fiq;
- int uart_irq;
-
- if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS)
- return -EINVAL;
-
- if (!pdata->uart_getc || !pdata->uart_putc)
- return -EINVAL;
- if ((pdata->uart_enable && !pdata->uart_disable) ||
- (!pdata->uart_enable && pdata->uart_disable))
- return -EINVAL;
-
- fiq = platform_get_irq_byname(pdev, "fiq");
- uart_irq = platform_get_irq_byname(pdev, "uart_irq");
-
- /* uart_irq mode and fiq mode are mutually exclusive, but one of them
- * is required */
- if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
- return -EINVAL;
- if (fiq >= 0 && !pdata->fiq_enable)
- return -EINVAL;
-
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- setup_timer(&state->sleep_timer, sleep_timer_expired,
- (unsigned long)state);
- state->pdata = pdata;
- state->pdev = pdev;
- state->no_sleep = initial_no_sleep;
- state->debug_enable = initial_debug_enable;
- state->console_enable = initial_console_enable;
-
- state->fiq = fiq;
- state->uart_irq = uart_irq;
- state->signal_irq = platform_get_irq_byname(pdev, "signal");
- state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
-
- INIT_WORK(&state->work, debug_work);
- spin_lock_init(&state->work_lock);
-
- platform_set_drvdata(pdev, state);
-
- spin_lock_init(&state->sleep_timer_lock);
-
- if (state->wakeup_irq < 0 && debug_have_fiq(state))
- state->no_sleep = true;
- state->ignore_next_wakeup_irq = !state->no_sleep;
-
- wake_lock_init(&state->debugger_wake_lock,
- WAKE_LOCK_SUSPEND, "serial-debug");
-
- state->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(state->clk))
- state->clk = NULL;
-
- /* do not call pdata->uart_enable here since uart_init may still
- * need to do some initialization before uart_enable can work.
- * So, only try to manage the clock during init.
- */
- if (state->clk)
- clk_enable(state->clk);
-
- if (pdata->uart_init) {
- ret = pdata->uart_init(pdev);
- if (ret)
- goto err_uart_init;
- }
-
- debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n",
- state->no_sleep ? "" : "twice ");
-
- if (debug_have_fiq(state)) {
- state->handler.fiq = debug_fiq;
- state->handler.resume = debug_resume;
- ret = fiq_glue_register_handler(&state->handler);
- if (ret) {
- pr_err("%s: could not install fiq handler\n", __func__);
- goto err_register_fiq;
- }
-
- pdata->fiq_enable(pdev, state->fiq, 1);
- } else {
- ret = request_irq(state->uart_irq, debug_uart_irq,
- IRQF_NO_SUSPEND, "debug", state);
- if (ret) {
- pr_err("%s: could not install irq handler\n", __func__);
- goto err_register_irq;
- }
-
- /* for irq-only mode, we want this irq to wake us up, if it
- * can.
- */
- enable_irq_wake(state->uart_irq);
- }
-
- if (state->clk)
- clk_disable(state->clk);
-
- if (state->signal_irq >= 0) {
- ret = request_irq(state->signal_irq, debug_signal_irq,
- IRQF_TRIGGER_RISING, "debug-signal", state);
- if (ret)
- pr_err("serial_debugger: could not install signal_irq");
- }
-
- if (state->wakeup_irq >= 0) {
- ret = request_irq(state->wakeup_irq, wakeup_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_DISABLED,
- "debug-wakeup", state);
- if (ret) {
- pr_err("serial_debugger: "
- "could not install wakeup irq\n");
- state->wakeup_irq = -1;
- } else {
- ret = enable_irq_wake(state->wakeup_irq);
- if (ret) {
- pr_err("serial_debugger: "
- "could not enable wakeup\n");
- state->wakeup_irq_no_set_wake = true;
- }
- }
- }
- if (state->no_sleep)
- handle_wakeup(state);
-
-#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
- spin_lock_init(&state->console_lock);
- state->console = fiq_debugger_console;
- state->console.index = pdev->id;
- if (!console_set_on_cmdline)
- add_preferred_console(state->console.name,
- state->console.index, NULL);
- register_console(&state->console);
- fiq_debugger_tty_init_one(state);
-#endif
- return 0;
-
-err_register_irq:
-err_register_fiq:
- if (pdata->uart_free)
- pdata->uart_free(pdev);
-err_uart_init:
- if (state->clk)
- clk_disable(state->clk);
- if (state->clk)
- clk_put(state->clk);
- wake_lock_destroy(&state->debugger_wake_lock);
- platform_set_drvdata(pdev, NULL);
- kfree(state);
- return ret;
-}
-
-static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
- .suspend = fiq_debugger_dev_suspend,
- .resume = fiq_debugger_dev_resume,
-};
-
-static struct platform_driver fiq_debugger_driver = {
- .probe = fiq_debugger_probe,
- .driver = {
- .name = "fiq_debugger",
- .pm = &fiq_debugger_dev_pm_ops,
- },
-};
-
-static int __init fiq_debugger_init(void)
-{
-#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
- fiq_debugger_tty_init();
-#endif
- return platform_driver_register(&fiq_debugger_driver);
-}
-
-postcore_initcall(fiq_debugger_init);
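
For context on the probe() constraints above -- a board had to supply at least uart_getc/uart_putc and exactly one of the "fiq" or "uart_irq" interrupts -- here is a minimal sketch of the platform wiring this driver expected. MY_UART_IRQ and the two callbacks are hypothetical placeholders, not taken from any real board file:

#include <linux/platform_device.h>
#include <asm/fiq_debugger.h>

static int my_uart_getc(struct platform_device *pdev)
{
        return FIQ_DEBUGGER_NO_CHAR;    /* stub: poll the UART RX FIFO */
}

static void my_uart_putc(struct platform_device *pdev, unsigned int c)
{
        /* stub: write c to the UART TX register */
}

static struct fiq_debugger_pdata my_pdata = {
        .uart_getc = my_uart_getc,
        .uart_putc = my_uart_putc,
};

static struct resource my_resources[] = {
        {
                .name  = "uart_irq",    /* IRQ-only mode; "fiq" is the alternative */
                .start = MY_UART_IRQ,   /* hypothetical interrupt number */
                .end   = MY_UART_IRQ,
                .flags = IORESOURCE_IRQ,
        },
};

static struct platform_device my_fiq_debugger = {
        .name          = "fiq_debugger",
        .id            = 0,             /* becomes ttyFIQ0 */
        .dev           = { .platform_data = &my_pdata },
        .resource      = my_resources,
        .num_resources = ARRAY_SIZE(my_resources),
};
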
diff --git a/arch/arm/common/fiq_debugger_ringbuf.h b/arch/arm/common/fiq_debugger_ringbuf.h
deleted file mode 100644
index 2649b5581088..000000000000
--- a/arch/arm/common/fiq_debugger_ringbuf.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * arch/arm/common/fiq_debugger_ringbuf.c
- *
- * simple lockless ringbuffer
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-struct fiq_debugger_ringbuf {
- int len;
- int head;
- int tail;
- u8 buf[];
-};
-
-
-static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
-{
- struct fiq_debugger_ringbuf *rbuf;
-
- rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
- if (rbuf == NULL)
- return NULL;
-
- rbuf->len = len;
- rbuf->head = 0;
- rbuf->tail = 0;
- smp_mb();
-
- return rbuf;
-}
-
-static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
-{
- kfree(rbuf);
-}
-
-static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
-{
- int level = rbuf->head - rbuf->tail;
-
- if (level < 0)
- level = rbuf->len + level;
-
- return level;
-}
-
-static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
-{
- return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
-}
-
-static inline u8
-fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
-{
- return rbuf->buf[(rbuf->tail + i) % rbuf->len];
-}
-
-static inline int
-fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
-{
- count = min(count, fiq_debugger_ringbuf_level(rbuf));
-
- rbuf->tail = (rbuf->tail + count) % rbuf->len;
- smp_mb();
-
- return count;
-}
-
-static inline int
-fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
-{
- if (fiq_debugger_ringbuf_room(rbuf) == 0)
- return 0;
-
- rbuf->buf[rbuf->head] = datum;
- smp_mb();
- rbuf->head = (rbuf->head + 1) % rbuf->len;
- smp_mb();
-
- return 1;
-}
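
The header removed above implemented a classic single-producer/single-consumer ring: head == tail means empty, and one slot is always left unused so that a full buffer (room() == 0) stays distinguishable from an empty one (level() == 0). A standalone userspace rendering of the same index arithmetic, for illustration only -- the smp_mb() barriers are dropped since this sketch is single-threaded:

#include <stdio.h>
#include <stdlib.h>

struct ringbuf { int len, head, tail; unsigned char buf[]; };

static int level(struct ringbuf *r)
{
        int l = r->head - r->tail;
        return l < 0 ? l + r->len : l;          /* (head - tail) mod len */
}

static int room(struct ringbuf *r)
{
        return r->len - level(r) - 1;           /* one slot kept unused */
}

static int push(struct ringbuf *r, unsigned char c)
{
        if (room(r) == 0)
                return 0;                       /* full */
        r->buf[r->head] = c;
        r->head = (r->head + 1) % r->len;
        return 1;
}

int main(void)
{
        struct ringbuf *r = calloc(1, sizeof(*r) + 4);
        int a, b, c, d;

        r->len = 4;                             /* capacity is len - 1 = 3 */
        a = push(r, 'a');
        b = push(r, 'b');
        c = push(r, 'c');
        d = push(r, 'd');
        printf("%d %d %d %d\n", a, b, c, d);    /* prints: 1 1 1 0 */
        free(r);
        return 0;
}
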
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
index 9e3455a09f8f..24b42cec4813 100644
--- a/arch/arm/common/fiq_glue.S
+++ b/arch/arm/common/fiq_glue.S
@@ -22,13 +22,14 @@
/* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
ENTRY(fiq_glue)
- /* store pc, cpsr from previous mode */
+ /* store pc, cpsr from previous mode, reserve space for spsr */
mrs r12, spsr
- sub r11, lr, #4
+ sub lr, lr, #4
subs r10, #1
bne nested_fiq
- stmfd sp!, {r11-r12, lr}
+ str r12, [sp, #-8]!
+ str lr, [sp, #-4]!
/* store r8-r14 from previous mode */
sub sp, sp, #(7 * 4)
@@ -85,12 +86,15 @@ fiq_from_usr_mode_exit:
msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
ldmfd sp!, {r0-r7}
- add sp, sp, #(7 * 4)
- ldmfd sp!, {r11-r12, lr}
+ ldr lr, [sp, #(4 * 7)]
+ ldr r12, [sp, #(4 * 8)]
+ add sp, sp, #(10 * 4)
exit_fiq:
msr spsr_cxsf, r12
add r10, #1
- movs pc, r11
+ cmp r11, #0
+ moveqs pc, lr
+ bx r11 /* jump to custom fiq return function */
nested_fiq:
orr r12, r12, #(PSR_F_BIT)
@@ -98,14 +102,17 @@ nested_fiq:
fiq_glue_end:
-ENTRY(fiq_glue_setup) /* func, data, sp */
- mrs r3, cpsr
+ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
+ stmfd sp!, {r4}
+ mrs r4, cpsr
msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
movs r8, r0
mov r9, r1
mov sp, r2
+ mov r11, r3
moveq r10, #0
movne r10, #1
- msr cpsr_c, r3
+ msr cpsr_c, r4
+ ldmfd sp!, {r4}
bx lr
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
index 4044c7db95c8..8cb1b611c6d5 100644
--- a/arch/arm/common/fiq_glue_setup.c
+++ b/arch/arm/common/fiq_glue_setup.c
@@ -18,20 +18,23 @@
#include <asm/fiq_glue.h>
extern unsigned char fiq_glue, fiq_glue_end;
-extern void fiq_glue_setup(void *func, void *data, void *sp);
+extern void fiq_glue_setup(void *func, void *data, void *sp,
+ fiq_return_handler_t fiq_return_handler);
static struct fiq_handler fiq_debbuger_fiq_handler = {
.name = "fiq_glue",
};
DEFINE_PER_CPU(void *, fiq_stack);
static struct fiq_glue_handler *current_handler;
+static fiq_return_handler_t fiq_return_handler;
static DEFINE_MUTEX(fiq_glue_lock);
static void fiq_glue_setup_helper(void *info)
{
struct fiq_glue_handler *handler = info;
fiq_glue_setup(handler->fiq, handler,
- __get_cpu_var(fiq_stack) + THREAD_START_SP);
+ __get_cpu_var(fiq_stack) + THREAD_START_SP,
+ fiq_return_handler);
}
int fiq_glue_register_handler(struct fiq_glue_handler *handler)
@@ -80,6 +83,49 @@ err_busy:
return ret;
}
+static void fiq_glue_update_return_handler(void (*fiq_return)(void))
+{
+ fiq_return_handler = fiq_return;
+ if (current_handler)
+ on_each_cpu(fiq_glue_setup_helper, current_handler, true);
+}
+
+int fiq_glue_set_return_handler(void (*fiq_return)(void))
+{
+ int ret;
+
+ mutex_lock(&fiq_glue_lock);
+ if (fiq_return_handler) {
+ ret = -EBUSY;
+ goto err_busy;
+ }
+ fiq_glue_update_return_handler(fiq_return);
+ ret = 0;
+err_busy:
+ mutex_unlock(&fiq_glue_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(fiq_glue_set_return_handler);
+
+int fiq_glue_clear_return_handler(void (*fiq_return)(void))
+{
+ int ret;
+
+ mutex_lock(&fiq_glue_lock);
+ if (WARN_ON(fiq_return_handler != fiq_return)) {
+ ret = -EINVAL;
+ goto err_inval;
+ }
+ fiq_glue_update_return_handler(NULL);
+ ret = 0;
+err_inval:
+ mutex_unlock(&fiq_glue_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(fiq_glue_clear_return_handler);
+
/**
* fiq_glue_resume - Restore fiqs after suspend or low power idle states
*
@@ -93,7 +139,8 @@ void fiq_glue_resume(void)
if (!current_handler)
return;
fiq_glue_setup(current_handler->fiq, current_handler,
- __get_cpu_var(fiq_stack) + THREAD_START_SP);
+ __get_cpu_var(fiq_stack) + THREAD_START_SP,
+ fiq_return_handler);
if (current_handler->resume)
current_handler->resume(current_handler);
}
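
The new set/clear pair is deliberately single-client (the -EBUSY path above). A sketch of the expected pairing; my_fiq_return stands in for a real return stub, which would normally be written in assembly:

#include <asm/fiq_glue.h>

extern void my_fiq_return(void);        /* hypothetical assembly stub */

static int my_client_init(void)
{
        /* Fails with -EBUSY if another client already owns the hook. */
        return fiq_glue_set_return_handler(my_fiq_return);
}

static void my_client_exit(void)
{
        /* Must pass back the same handler, or the WARN_ON above fires. */
        fiq_glue_clear_return_handler(my_fiq_return);
}
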
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 370236dd1a03..4a2b32fd53a1 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -27,6 +27,18 @@ void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
+extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];
+
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val)
+{
+ unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
+ poke[0] = poke_phys_addr;
+ poke[1] = poke_val;
+ __cpuc_flush_dcache_area((void *)poke, 8);
+ outer_clean_range(__pa(poke), __pa(poke + 2));
+}
+
static const struct mcpm_platform_ops *platform_ops;
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
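
mcpm_set_early_poke() records a physical address/value pair and cleans it through both cache levels so that a cache-cold CPU entering mcpm_entry_point can apply it before the MMU or caches are on; a zero address disarms the poke. A hedged sketch -- MY_WAKE_REG is a hypothetical power-controller register, not a real platform address:

#include <asm/mcpm.h>

#define MY_WAKE_REG 0x7fff0c00UL        /* hypothetical physical address */

static void arm_wake_poke(unsigned int cpu, unsigned int cluster)
{
        /* Have the incoming CPU store (1 << cpu) to MY_WAKE_REG very early. */
        mcpm_set_early_poke(cpu, cluster, MY_WAKE_REG, 1 << cpu);
}

static void disarm_wake_poke(unsigned int cpu, unsigned int cluster)
{
        mcpm_set_early_poke(cpu, cluster, 0, 0);  /* address 0 = no-op */
}
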
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index 8178705c4b24..0decb3c07165 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -15,6 +15,7 @@
#include <linux/linkage.h>
#include <asm/mcpm.h>
+#include <asm/assembler.h>
#include "vlock.h"
@@ -47,6 +48,7 @@
ENTRY(mcpm_entry_point)
+ ARM_BE8(setend be)
THUMB( adr r12, BSYM(1f) )
THUMB( bx r12 )
THUMB( .thumb )
@@ -71,12 +73,19 @@ ENTRY(mcpm_entry_point)
* position independent way.
*/
adr r5, 3f
- ldmia r5, {r6, r7, r8, r11}
+ ldmia r5, {r0, r6, r7, r8, r11}
+ add r0, r5, r0 @ r0 = mcpm_entry_early_pokes
add r6, r5, r6 @ r6 = mcpm_entry_vectors
ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
add r8, r5, r8 @ r8 = mcpm_sync
add r11, r5, r11 @ r11 = first_man_locks
+ @ Perform an early poke, if any
+ add r0, r0, r4, lsl #3
+ ldmia r0, {r0, r1}
+ teq r0, #0
+ strne r1, [r0]
+
mov r0, #MCPM_SYNC_CLUSTER_SIZE
mla r8, r0, r10, r8 @ r8 = sync cluster base
@@ -195,7 +204,8 @@ mcpm_entry_gated:
.align 2
-3: .word mcpm_entry_vectors - .
+3: .word mcpm_entry_early_pokes - .
+ .word mcpm_entry_vectors - 3b
.word mcpm_power_up_setup_phys - 3b
.word mcpm_sync - 3b
.word first_man_locks - 3b
@@ -214,6 +224,10 @@ first_man_locks:
ENTRY(mcpm_entry_vectors)
.space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+ .type mcpm_entry_early_pokes, #object
+ENTRY(mcpm_entry_early_pokes)
+ .space 8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
.type mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
.space 4 @ set by mcpm_sync_init()
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2e67a272df70..9ce8ba1a1433 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1,6 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
CONFIG_ARCH_MVEBU=y
CONFIG_MACH_ARMADA_370=y
CONFIG_ARCH_SIRF=y
@@ -22,6 +23,7 @@ CONFIG_AEABI=y
CONFIG_HIGHMEM=y
CONFIG_HIGHPTE=y
CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_NET=y
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index 19d6cd6f29f9..3a14ea8fe97e 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -148,7 +148,7 @@ AES_Te:
@ const AES_KEY *key) {
.align 5
ENTRY(AES_encrypt)
- sub r3,pc,#8 @ AES_encrypt
+ adr r3,AES_encrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
mov r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
.align 5
ENTRY(private_AES_set_encrypt_key)
_armv4_AES_set_encrypt_key:
- sub r3,pc,#8 @ AES_set_encrypt_key
+ adr r3,_armv4_AES_set_encrypt_key
teq r0,#0
moveq r0,#-1
beq .Labrt
@@ -843,7 +843,7 @@ AES_Td:
@ const AES_KEY *key) {
.align 5
ENTRY(AES_decrypt)
- sub r3,pc,#8 @ AES_decrypt
+ adr r3,AES_decrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
mov r11,r2
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
deleted file mode 100644
index 92f10cb5c70c..000000000000
--- a/arch/arm/include/asm/a.out-core.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-#include <linux/elfcore.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
-{
- struct task_struct *tsk = current;
-
- dump->magic = CMAGIC;
- dump->start_code = tsk->mm->start_code;
- dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
-
- dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
- dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
- dump->u_ssize = 0;
-
- memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
-
- if (dump->start_stack < 0x04000000)
- dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
-
- dump->regs = *regs;
- dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 7c1bfc0aea0c..4b26d14e41b3 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -17,7 +17,8 @@ int arch_timer_arch_init(void);
* nicely work out which register we want, and chuck away the rest of
* the code. At least it does so with a recent GCC (4.6.3).
*/
-static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
@@ -28,9 +29,7 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
break;
}
- }
-
- if (access == ARCH_TIMER_VIRT_ACCESS) {
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
@@ -44,7 +43,8 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
isb();
}
-static inline u32 arch_timer_reg_read(const int access, const int reg)
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
u32 val = 0;
@@ -57,9 +57,7 @@ static inline u32 arch_timer_reg_read(const int access, const int reg)
asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
break;
}
- }
-
- if (access == ARCH_TIMER_VIRT_ACCESS) {
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
@@ -80,15 +78,6 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline u64 arch_counter_get_cntpct(void)
-{
- u64 cval;
-
- isb();
- asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
- return cval;
-}
-
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
@@ -98,17 +87,43 @@ static inline u64 arch_counter_get_cntvct(void)
return cval;
}
-static inline void __cpuinit arch_counter_set_user_access(void)
+static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
-
asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
+ return cntkctl;
+}
- /* disable user access to everything */
- cntkctl &= ~((3 << 8) | (7 << 0));
-
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
+
+static inline void __cpuinit arch_counter_set_user_access(void)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+
+ /* Disable user access to both physical/virtual counters/timers */
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_VCT_ACCESS_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
+ arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+ cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+ /* Set the divider and enable virtual event stream */
+ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+ | ARCH_TIMER_VIRT_EVT_EN;
+ arch_timer_set_cntkctl(cntkctl);
+ elf_hwcap |= HWCAP_EVTSTRM;
+}
+
#endif
#endif
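
arch_timer_evtstrm_enable() programs CNTKCTL so that a chosen bit of the counter generates periodic events, which lets WFE-based spinning wake up without interrupts; it also advertises the feature through HWCAP_EVTSTRM. A hedged sketch of picking the divider for a target stream rate -- MY_EVT_STREAM_FREQ and my_configure_evtstream are illustrative, not the actual clocksource driver code:

#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arch_timer.h>

#define MY_EVT_STREAM_FREQ 10000        /* hypothetical target, ~10 kHz */

static void my_configure_evtstream(u32 timer_rate_hz)
{
        /* A higher divider selects a higher counter bit, i.e. a slower
         * event stream; the field is 4 bits wide, hence the cap at 15. */
        int div = ilog2(timer_rate_hz / MY_EVT_STREAM_FREQ);

        arch_timer_evtstrm_enable(min(div, 15));
}
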
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 05ee9eebad6b..e780afbcee54 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -53,6 +53,13 @@
#define put_byte_3 lsl #0
#endif
+/* Select code for any configuration running in BE8 mode */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ARM_BE8(code...) code
+#else
+#define ARM_BE8(code...)
+#endif
+
/*
* Data preload for architectures that support it
*/
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..6447a0b7b127 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -301,8 +301,8 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_add\n"
"1: ldrexd %0, %H0, [%3]\n"
-" adds %0, %0, %4\n"
-" adc %H0, %H0, %H4\n"
+" adds %Q0, %Q0, %Q4\n"
+" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -320,8 +320,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
-" adds %0, %0, %4\n"
-" adc %H0, %H0, %H4\n"
+" adds %Q0, %Q0, %Q4\n"
+" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -341,8 +341,8 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+" subs %Q0, %Q0, %Q4\n"
+" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -360,8 +360,8 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+" subs %Q0, %Q0, %Q4\n"
+" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
@@ -428,9 +428,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, #1\n"
-" sbc %H0, %H0, #0\n"
-" teq %H0, #0\n"
+" subs %Q0, %Q0, #1\n"
+" sbc %R0, %R0, #0\n"
+" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
@@ -459,8 +459,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
" beq 2f\n"
-" adds %0, %0, %6\n"
-" adc %H0, %H0, %H6\n"
+" adds %Q0, %Q0, %Q6\n"
+" adc %R0, %R0, %R6\n"
" strexd %2, %0, %H0, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 000000000000..482383b45c91
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,83 @@
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+ bL_switch_completion_handler completer,
+ void *completer_cookie);
+static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+ return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+#ifdef CONFIG_BL_SWITCHER
+
+void bL_switch_request_detach(unsigned int cpu,
+ bL_switch_completion_handler completer);
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled. Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled(). These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline void bL_switch_request_detach(unsigned int cpu,
+ bL_switch_completion_handler completer) { }
+
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
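
bL_switch_request_cb() is asynchronous: it queues the migration and invokes the completer once the switch has taken place. A common pattern is to block on a completion, as the sketch below does; switch_and_wait is illustrative and assumes the completer runs whenever the request returns 0:

#include <linux/completion.h>
#include <asm/bL_switcher.h>

static void switch_done(void *cookie)
{
        complete(cookie);
}

static int switch_and_wait(unsigned int cpu, unsigned int cluster)
{
        struct completion done;
        int ret;

        init_completion(&done);
        ret = bL_switch_request_cb(cpu, cluster, switch_done, &done);
        if (ret == 0)
                wait_for_completion(&done);
        return ret;
}
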
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7af5c6c3653a..b274bde24905 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -2,6 +2,8 @@
#define _ASMARM_BUG_H
#include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/opcodes.h>
#ifdef CONFIG_BUG
@@ -12,10 +14,10 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
#define BUG_INSTR_VALUE 0xde02
-#define BUG_INSTR_TYPE ".hword "
+#define BUG_INSTR(__value) __inst_thumb16(__value)
#else
#define BUG_INSTR_VALUE 0xe7f001f2
-#define BUG_INSTR_TYPE ".word "
+#define BUG_INSTR(__value) __inst_arm(__value)
#endif
@@ -33,7 +35,7 @@
#define __BUG(__file, __line, __value) \
do { \
- asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
+ asm volatile("1:\t" BUG_INSTR(__value) "\n" \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
@@ -48,7 +50,7 @@ do { \
#define __BUG(__file, __line, __value) \
do { \
- asm volatile(BUG_INSTR_TYPE #__value); \
+ asm volatile(BUG_INSTR(__value) "\n"); \
unreachable(); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3e94af33aa6f..455e6637c881 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -213,6 +213,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
+ dsb();
}
/*
@@ -437,4 +438,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and so without any intervening memory access in between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ * however some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ * fp is preserved to the stack explicitly prior to disabling the cache,
+ * since adding it to the clobber list is incompatible with having
+ * CONFIG_FRAME_POINTER=y. ip is saved as well in case r12-clobbering
+ * trampolines are inserted by the linker, and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+ asm volatile( \
+ "stmfd sp!, {fp, ip} \n\t" \
+ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
+ "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
+ "isb \n\t" \
+ "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+ "clrex \n\t" \
+ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+ "isb \n\t" \
+ "dsb \n\t" \
+ "ldmfd sp!, {fp, ip}" \
+ : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+ "r9","r10","lr","memory" )
+
#endif
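
The macro's level argument is a bare token pasted into the callee's name through __stringify, so callers write the cache level literally: louis (Level of Unification Inner Shareable) for one CPU leaving coherency, all when the last man takes the whole cluster down. A sketch of a power-down path; my_cpu_power_down is hypothetical:

#include <asm/cacheflush.h>

static void my_cpu_power_down(bool last_man)
{
        if (last_man)
                v7_exit_coherency_flush(all);   /* bl v7_flush_dcache_all */
        else
                v7_exit_coherency_flush(louis); /* bl v7_flush_dcache_louis */

        /* From here the CPU is out of coherency: no ldrex/strex, just
         * spin in WFI until the power controller takes it down. */
        while (1)
                wfi();
}
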
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 1f3262e99d81..cedd3721318b 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -61,6 +61,20 @@ static inline void set_cr(unsigned int val)
isb();
}
+static inline unsigned int get_auxcr(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val));
+ return val;
+}
+
+static inline void set_auxcr(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR"
+ : : "r" (val));
+ isb();
+}
+
#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index fe92ccf1d0b0..a66061aef29c 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -156,7 +156,7 @@
/* Select the best insn combination to perform the */ \
/* actual __m * __n / (__p << 64) operation. */ \
if (!__c) { \
- asm ( "umull %Q0, %R0, %1, %Q2\n\t" \
+ asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
"mov %Q0, #0" \
: "=&r" (__res) \
: "r" (__m), "r" (__n) \
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
index 3ed37b4d93da..4f8e9e5514b1 100644
--- a/arch/arm/include/asm/dma-contiguous.h
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -2,10 +2,9 @@
#define ASMARM_DMA_CONTIGUOUS_H
#ifdef __KERNEL__
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA
#include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1c4800..f4b46d39b9cf 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -19,8 +19,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fp elf_fpregset_t;
-#define EM_ARM 40
-
#define EF_ARM_EABI_MASK 0xff000000
#define EF_ARM_EABI_UNKNOWN 0x00000000
#define EF_ARM_EABI_VER1 0x01000000
@@ -130,4 +128,10 @@ struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
+#ifdef CONFIG_MMU
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+#endif
+
#endif
diff --git a/arch/arm/include/asm/fiq_debugger.h b/arch/arm/include/asm/fiq_debugger.h
deleted file mode 100644
index 4d274883ba6a..000000000000
--- a/arch/arm/include/asm/fiq_debugger.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * arch/arm/include/asm/fiq_debugger.h
- *
- * Copyright (C) 2010 Google, Inc.
- * Author: Colin Cross <ccross@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
-#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
-
-#include <linux/serial_core.h>
-
-#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
-#define FIQ_DEBUGGER_BREAK 0x00ff0100
-
-#define FIQ_DEBUGGER_FIQ_IRQ_NAME "fiq"
-#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal"
-#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup"
-
-/**
- * struct fiq_debugger_pdata - fiq debugger platform data
- * @uart_resume: used to restore uart state right before enabling
- * the fiq.
- * @uart_enable: Do the work necessary to communicate with the uart
- * hw (enable clocks, etc.). This must be ref-counted.
- * @uart_disable: Do the work necessary to disable the uart hw
- * (disable clocks, etc.). This must be ref-counted.
- * @uart_dev_suspend: called during PM suspend, generally not needed
- * for real fiq mode debugger.
- * @uart_dev_resume: called during PM resume, generally not needed
- * for real fiq mode debugger.
- */
-struct fiq_debugger_pdata {
- int (*uart_init)(struct platform_device *pdev);
- void (*uart_free)(struct platform_device *pdev);
- int (*uart_resume)(struct platform_device *pdev);
- int (*uart_getc)(struct platform_device *pdev);
- void (*uart_putc)(struct platform_device *pdev, unsigned int c);
- void (*uart_flush)(struct platform_device *pdev);
- void (*uart_enable)(struct platform_device *pdev);
- void (*uart_disable)(struct platform_device *pdev);
-
- int (*uart_dev_suspend)(struct platform_device *pdev);
- int (*uart_dev_resume)(struct platform_device *pdev);
-
- void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
- bool enable);
- void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
-
- void (*force_irq)(struct platform_device *pdev, unsigned int irq);
- void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
-};
-
-#endif
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
index d54c29db97a8..a9e244f9f197 100644
--- a/arch/arm/include/asm/fiq_glue.h
+++ b/arch/arm/include/asm/fiq_glue.h
@@ -18,8 +18,11 @@ struct fiq_glue_handler {
void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
void (*resume)(struct fiq_glue_handler *h);
};
+typedef void (*fiq_return_handler_t)(void);
int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return);
+int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return);
#ifdef CONFIG_FIQ_GLUE
void fiq_glue_resume(void);
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index f89515adac60..39eb16b0066f 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -52,15 +52,7 @@ extern inline void *return_address(unsigned int level)
#endif
-#define HAVE_ARCH_CALLER_ADDR
-
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 ((unsigned long)return_address(1))
-#define CALLER_ADDR2 ((unsigned long)return_address(2))
-#define CALLER_ADDR3 ((unsigned long)return_address(3))
-#define CALLER_ADDR4 ((unsigned long)return_address(4))
-#define CALLER_ADDR5 ((unsigned long)return_address(5))
-#define CALLER_ADDR6 ((unsigned long)return_address(6))
+#define ftrace_return_address(n) return_address(n)
#endif /* ifndef __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf597f6e6..2aff798fbef4 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,11 +3,6 @@
#ifdef __KERNEL__
-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
-/* ARM doesn't provide unprivileged exclusive memory accessors */
-#include <asm-generic/futex.h>
-#else
-
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
@@ -164,6 +159,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}
-#endif /* !(CPU_USE_DOMAINS && SMP) */
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 3d7351c844aa..fe3ea776dc34 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 7
+#define NR_IPI 8
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 4aee45da6d59..fc53019c304b 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -31,9 +31,9 @@
#define TRACER_TIMEOUT 10000
-#define etm_writel(t, id, v, x) \
- (__raw_writel((v), (t)->etm_regs[(id)] + (x)))
-#define etm_readl(t, id, x) (__raw_readl((t)->etm_regs[(id)] + (x)))
+#define etm_writel(t, v, x) \
+ (writel_relaxed((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
/* CoreSight Management Registers */
#define CSMR_LOCKACCESS 0xfb0
@@ -169,8 +169,8 @@
#define ETBFF_STOPFL BIT(12)
#define etb_writel(t, v, x) \
- (__raw_writel((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
+ (writel_relaxed((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
#define etm_lock(t, id) \
do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
diff --git a/arch/arm/include/asm/hardware/debug-pl01x.S b/arch/arm/include/asm/hardware/debug-pl01x.S
index f9fd083eff63..6489d1ffe3c8 100644
--- a/arch/arm/include/asm/hardware/debug-pl01x.S
+++ b/arch/arm/include/asm/hardware/debug-pl01x.S
@@ -18,12 +18,14 @@
.macro waituart,rd,rx
1001: ldr \rd, [\rx, #UART01x_FR]
+ ARM_BE8( rev \rd, \rd )
tst \rd, #UART01x_FR_TXFF
bne 1001b
.endm
.macro busyuart,rd,rx
1001: ldr \rd, [\rx, #UART01x_FR]
+ ARM_BE8( rev \rd, \rd )
tst \rd, #UART01x_FR_BUSY
bne 1001b
.endm
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 652b56086de7..d070741b2b37 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -130,16 +130,16 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
*/
extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
size_t, unsigned int, void *);
-extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
+extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
extern void __arm_iounmap(volatile void __iomem *addr);
-extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
extern void (*arch_iounmap)(volatile void __iomem *);
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index bfc198c75913..863c892b4aaa 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -16,7 +16,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\n\t"
+ asm_volatile_goto("1:\n\t"
JUMP_LABEL_NOP "\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 48066ce9ea34..0a9d5dd93294 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -11,6 +11,7 @@
#define __ARM_KGDB_H__
#include <linux/ptrace.h>
+#include <asm/opcodes.h>
/*
* GDB assumes that we're a user process being debugged, so
@@ -41,7 +42,7 @@
static inline void arch_kgdb_breakpoint(void)
{
- asm(".word 0xe7ffdeff");
+ asm(__inst_arm(0xe7ffdeff));
}
extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 18d50322a9e2..4bb08e3e52bc 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -37,16 +37,18 @@
#define c5_AIFSR 15 /* Auxiliary Instruction Fault Status R */
#define c6_DFAR 16 /* Data Fault Address Register */
#define c6_IFAR 17 /* Instruction Fault Address Register */
-#define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */
-#define c10_PRRR 19 /* Primary Region Remap Register */
-#define c10_NMRR 20 /* Normal Memory Remap Register */
-#define c12_VBAR 21 /* Vector Base Address Register */
-#define c13_CID 22 /* Context ID Register */
-#define c13_TID_URW 23 /* Thread ID, User R/W */
-#define c13_TID_URO 24 /* Thread ID, User R/O */
-#define c13_TID_PRIV 25 /* Thread ID, Privileged */
-#define c14_CNTKCTL 26 /* Timer Control Register (PL1) */
-#define NR_CP15_REGS 27 /* Number of regs (incl. invalid) */
+#define c7_PAR 18 /* Physical Address Register */
+#define c7_PAR_high 19 /* PAR top 32 bits */
+#define c9_L2CTLR 20 /* Cortex A15 L2 Control Register */
+#define c10_PRRR 21 /* Primary Region Remap Register */
+#define c10_NMRR 22 /* Normal Memory Remap Register */
+#define c12_VBAR 23 /* Vector Base Address Register */
+#define c13_CID 24 /* Context ID Register */
+#define c13_TID_URW 25 /* Thread ID, User R/W */
+#define c13_TID_URO 26 /* Thread ID, User R/O */
+#define c13_TID_PRIV 27 /* Thread ID, Privileged */
+#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */
+#define NR_CP15_REGS 29 /* Number of regs (incl. invalid) */
#define ARM_EXCEPTION_RESET 0
#define ARM_EXCEPTION_UNDEFINED 1
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 308ad7d6f98b..75bf07910b81 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -8,6 +8,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/types.h>
+
#ifndef __ASSEMBLY__
struct tag;
@@ -16,8 +18,10 @@ struct pt_regs;
struct smp_operations;
#ifdef CONFIG_SMP
#define smp_ops(ops) (&(ops))
+#define smp_init_ops(ops) (&(ops))
#else
#define smp_ops(ops) (struct smp_operations *)NULL
+#define smp_init_ops(ops) (bool (*)(void))NULL
#endif
struct machine_desc {
@@ -41,6 +45,7 @@ struct machine_desc {
unsigned char reserve_lp2 :1; /* never has lp2 */
char restart_mode; /* default restart mode */
struct smp_operations *smp; /* SMP operations */
+ bool (*smp_init)(void);
void (*fixup)(struct tag *, char **,
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 0f7b7620e9a5..7626a7fd4938 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -42,6 +42,14 @@ extern void mcpm_entry_point(void);
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
/*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated. The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
* CPU/cluster power operations API for higher subsystems to use.
*/
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index e3d55547e755..64fd15159b7d 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,14 +6,17 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
atomic64_t id;
+#else
+ int switch_pending;
#endif
unsigned int vmalloc_seq;
+ unsigned long sigpage;
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
-#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
+#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
#else
#define ASID(mm) (0)
#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a7b85e0d0cc1..e0b10f19d679 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -27,7 +27,15 @@ void __check_vmalloc_seq(struct mm_struct *mm);
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
-DECLARE_PER_CPU(atomic64_t, active_asids);
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask);
+#else /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
#else /* !CONFIG_CPU_HAS_ASID */
@@ -47,7 +55,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
* on non-ASID CPUs, the old mm will remain valid until the
* finish_arch_post_lock_switch() call.
*/
- set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+ mm->context.switch_pending = 1;
else
cpu_switch_mm(mm->pgd, mm);
}
@@ -56,9 +64,21 @@ static inline void check_and_switch_context(struct mm_struct *mm,
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
- if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
- struct mm_struct *mm = current->mm;
- cpu_switch_mm(mm->pgd, mm);
+ struct mm_struct *mm = current->mm;
+
+ if (mm && mm->context.switch_pending) {
+ /*
+ * Preemption must be disabled during cpu_switch_mm() as we
+ * have some stateful cache flush implementations. Check
+ * switch_pending again in case we were preempted and the
+ * switch to this mm was already done.
+ */
+ preempt_disable();
+ if (mm->context.switch_pending) {
+ mm->context.switch_pending = 0;
+ cpu_switch_mm(mm->pgd, mm);
+ }
+ preempt_enable_no_resched();
}
}
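
The second switch_pending test under preempt_disable() closes a narrow window. Reduced to a trace (illustrative only, not part of the patch):

/*
 * Interleaving the re-check above guards against:
 *
 *   thread A: reads mm->context.switch_pending == 1
 *             -- preempted before preempt_disable() --
 *   thread B: (same mm) runs this path to completion, clearing
 *             switch_pending and calling cpu_switch_mm()
 *   thread A: resumes, disables preemption, re-reads the flag as 0
 *             and skips the now-redundant cpu_switch_mm(), which
 *             must not run concurrently with the stateful cache
 *             flush implementations named in the comment.
 */
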
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 12f71a190422..f94784f0e3a6 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -37,10 +37,10 @@ struct outer_cache_fns {
void (*resume)(void);
};
-#ifdef CONFIG_OUTER_CACHE
-
extern struct outer_cache_fns outer_cache;
+#ifdef CONFIG_OUTER_CACHE
+
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.inv_range)
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 812a4944e783..cbdc7a21f869 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
+#ifdef CONFIG_KUSER_HELPERS
#define __HAVE_ARCH_GATE_AREA 1
+#endif
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level-types.h>
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386ee..c98c9c89b95c 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,6 +140,7 @@
#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
+#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9bcd262a9008..5aac06fcc97e 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -58,7 +58,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);
* mapping to be mapped at. This is particularly important for
* non-high vector CPUs.
*/
-#define FIRST_USER_ADDRESS PAGE_SIZE
+#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
/*
* Use TASK_SIZE as the ceiling argument for free_pgtables() and
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index f24edad26c70..0cd7824ca762 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -62,9 +62,19 @@ struct pmu_hw_events {
raw_spinlock_t pmu_lock;
};
+struct cpupmu_regs {
+ u32 pmc;
+ u32 pmcntenset;
+ u32 pmuseren;
+ u32 pmintenset;
+ u32 pmxevttype[8];
+ u32 pmxevtcnt[8];
+};
+
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
+ cpumask_t valid_cpus;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct perf_event *event);
@@ -81,6 +91,8 @@ struct arm_pmu {
int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
+ void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);
int num_events;
atomic_t active_events;
struct mutex reserve_mutex;
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 06e7d509eaac..413f3876341c 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -54,7 +54,6 @@ struct thread_struct {
#define start_thread(regs,pc,sp) \
({ \
- unsigned long *stack = (unsigned long *)sp; \
memset(regs->uregs, 0, sizeof(regs->uregs)); \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
@@ -65,9 +64,6 @@ struct thread_struct {
regs->ARM_cpsr |= PSR_ENDSTATE; \
regs->ARM_pc = pc & ~1; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
nommu_start_thread(regs); \
})
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index ce0dbe7c1625..f0a8627c9f1c 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -16,6 +16,10 @@
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+#define PSCI_POWER_STATE_AFFINITY_LEVEL0 0
+#define PSCI_POWER_STATE_AFFINITY_LEVEL1 1
+#define PSCI_POWER_STATE_AFFINITY_LEVEL2 2
+#define PSCI_POWER_STATE_AFFINITY_LEVEL3 3
struct psci_power_state {
u16 id;
@@ -32,5 +36,22 @@ struct psci_operations {
};
extern struct psci_operations psci_ops;
+extern struct smp_operations psci_smp_ops;
+#ifdef CONFIG_ARM_PSCI
+void psci_init(void);
+bool psci_smp_available(void);
+#else
+static inline void psci_init(void) { }
+static inline bool psci_smp_available(void) { return false; }
+#endif
+
+#ifdef CONFIG_ARM_PSCI
+extern int __init psci_probe(void);
+#else
+static inline int psci_probe(void)
+{
+ return -ENODEV;
+}
+#endif
#endif /* __ASM_ARM_PSCI_H */
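
With the stubs above, callers need no #ifdefs of their own. A minimal sketch (the pr_info strings are illustrative) of how generic boot code can use them:

static void __init example_choose_smp_ops(void)
{
	psci_init();	/* compiles to a no-op when CONFIG_ARM_PSCI=n */

	if (psci_smp_available())
		pr_info("using PSCI-provided SMP operations\n");
	else
		pr_info("falling back to the machine's SMP operations\n");
}

This is the same shape setup_arch() adopts later in this series.
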
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index c5aa088c0a8b..67a18a5ed9fa 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -81,6 +81,7 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+extern int register_ipi_completion(struct completion *completion, int cpu);
extern void smp_send_all_cpu_backtrace(void);
struct smp_operations {
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 6220e9fdf4c7..b07c09e5a0ac 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -97,19 +97,22 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
- unsigned long tmp;
+ unsigned long contended, res;
u32 slock;
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" subs %1, %0, %0, ror #16\n"
-" addeq %0, %0, %3\n"
-" strexeq %1, %0, [%2]"
- : "=&r" (slock), "=&r" (tmp)
- : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
- : "cc");
-
- if (tmp == 0) {
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%3]\n"
+ " mov %2, #0\n"
+ " subs %1, %0, %0, ror #16\n"
+ " addeq %0, %0, %4\n"
+ " strexeq %2, %0, [%3]"
+ : "=&r" (slock), "=&r" (contended), "=&r" (res)
+ : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
+ : "cc");
+ } while (res);
+
+ if (!contended) {
smp_mb();
return 1;
} else {
@@ -165,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp;
-
- __asm__ __volatile__(
-" ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "cc");
-
- if (tmp == 0) {
+ unsigned long contended, res;
+
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " teq %0, #0\n"
+ " strexeq %1, %3, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+ } while (res);
+
+ if (!contended) {
smp_mb();
return 1;
} else {
@@ -251,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp, tmp2 = 1;
-
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" adds %0, %0, #1\n"
-" strexpl %1, %0, [%2]\n"
- : "=&r" (tmp), "+r" (tmp2)
- : "r" (&rw->lock)
- : "cc");
-
- smp_mb();
- return tmp2 == 0;
+ unsigned long contended, res;
+
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " adds %0, %0, #1\n"
+ " strexpl %1, %0, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock)
+ : "cc");
+ } while (res);
+
+ /* If the lock is negative, then it is already held for write. */
+ if (contended < 0x80000000) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
}
/* read_can_lock - would read_trylock() succeed? */
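
All three trylock rewrites share one idea: `contended` records the observed lock word while `res` records only the strex status, and the loop retries when the store-exclusive fails spuriously (e.g. after an interrupt) rather than misreporting contention. A hedged C11 analogue of the same shape, using the write-lock encoding from above:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Sketch: compare_exchange_weak may fail spuriously, like strex.
 * Loop on spurious failure, but bail out on real contention. */
static bool example_write_trylock(_Atomic uint32_t *lock)
{
    uint32_t contended = atomic_load_explicit(lock, memory_order_relaxed);

    if (contended != 0)
        return false;                       /* really contended */

    while (!atomic_compare_exchange_weak_explicit(lock, &contended,
                                                  0x80000000u,
                                                  memory_order_acquire,
                                                  memory_order_relaxed)) {
        if (contended != 0)
            return false;                   /* lost the race */
        /* else: spurious failure, retry as the asm loop does */
    }
    return true;
}
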
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index f1d96d4e8092..73ddd7239b33 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
const unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 1995d1a84060..f00b5692cd9d 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
-#define TIF_SWITCH_MM 22 /* deferred switch_mm */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index bdf2b8458ec1..aa9b4ac3fdf6 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
+ unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
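
The new signature encodes a full-mm teardown as the range (0, ~0UL): end + 1 wraps to 0 (defined behaviour for unsigned arithmetic), so (start | (end + 1)) is zero exactly in that case. A quick self-check of the encoding (sketch):

#include <assert.h>

static void check_fullmm_encoding(void)
{
	unsigned long start = 0, end = ~0UL;

	assert(!(start | (end + 1)) == 1);	/* full-mm teardown */

	start = 0x8000; end = 0x9000;
	assert(!(start | (end + 1)) == 0);	/* bounded range flush */
}
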
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84adcd2..983fa7c153a2 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -26,11 +26,45 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
+
+#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
+/* Common values for CPUs */
+#ifndef SD_CPU_INIT
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ \
+ .flags = 0*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+}
+#endif
+#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
#else
static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { }
+static inline int cluster_to_logical_mask(unsigned int socket_id,
+ cpumask_t *cluster_mask) { return -EINVAL; }
#endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f76027f66..20e1c994669e 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -164,8 +164,9 @@ extern int __put_user_8(void *, unsigned long long);
#define __put_user_check(x,p) \
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ const typeof(*(p)) __user *__tmp_p = (p); \
register const typeof(*(p)) __r2 asm("r2") = (x); \
- register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
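
The `__tmp_p` temporary forces the user-pointer expression to be evaluated exactly once, before any asm-bound register variables are live; otherwise a complex `(p)` expression (say, one containing a function call) could clobber r0/r2 between their bindings. A reduced GNU-C sketch of the fixed ordering (the actual trap to the __put_user helpers is elided):

#define put_user_sketch(x, p)						\
({									\
	const __typeof__(*(p)) *__tmp_p = (p);	/* one early eval of p */ \
	register const __typeof__(*(p)) __r2 asm("r2") = (x);		\
	register const __typeof__(*(p)) *__p asm("r0") = __tmp_p;	\
	/* ...branch to the __put_user_N helper with r0/r2 loaded... */	\
	asm volatile("" :: "r" (__r2), "r" (__p));			\
	0;								\
})
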
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 141baa3f9a72..43876245fc57 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -15,7 +15,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls (380)
+#define __NR_syscalls (384)
#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
#define __ARCH_WANT_STAT64
@@ -48,6 +48,5 @@
*/
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
-#define __IGNORE_kcmp
#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 47bcb2d254af..18d76fd5a2af 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,7 +1,6 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += a.out.h
header-y += byteorder.h
header-y += fcntl.h
header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h
deleted file mode 100644
index 083894b2e3bc..000000000000
--- a/arch/arm/include/uapi/asm/a.out.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ARM_A_OUT_H__
-#define __ARM_A_OUT_H__
-
-#include <linux/personality.h>
-#include <linux/types.h>
-
-struct exec
-{
- __u32 a_info; /* Use macros N_MAGIC, etc for access */
- __u32 a_text; /* length of text, in bytes */
- __u32 a_data; /* length of data, in bytes */
- __u32 a_bss; /* length of uninitialized data area for file, in bytes */
- __u32 a_syms; /* length of symbol table data in file, in bytes */
- __u32 a_entry; /* start address */
- __u32 a_trsize; /* length of relocation info for text, in bytes */
- __u32 a_drsize; /* length of relocation info for data, in bytes */
-};
-
-/*
- * This is always the same
- */
-#define N_TXTADDR(a) (0x00008000)
-
-#define N_TRSIZE(a) ((a).a_trsize)
-#define N_DRSIZE(a) ((a).a_drsize)
-#define N_SYMSIZE(a) ((a).a_syms)
-
-#define M_ARM 103
-
-#ifndef LIBRARY_START_TEXT
-#define LIBRARY_START_TEXT (0x00c00000)
-#endif
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h
index 3688fd15a32d..7dcc10d67253 100644
--- a/arch/arm/include/uapi/asm/hwcap.h
+++ b/arch/arm/include/uapi/asm/hwcap.h
@@ -25,6 +25,7 @@
#define HWCAP_IDIVT (1 << 18)
#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
-
+#define HWCAP_LPAE (1 << 20)
+#define HWCAP_EVTSTRM (1 << 21)
#endif /* _UAPI__ASMARM_HWCAP_H */
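
Userspace sees these bits through the ELF auxiliary vector. A minimal check using glibc's getauxval() (the bit values are copied from the header above, in case the uapi headers on a build box lag behind):

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_LPAE    (1 << 20)
#define HWCAP_EVTSTRM (1 << 21)

int main(void)
{
    unsigned long hwcap = getauxval(AT_HWCAP);

    printf("LPAE:    %s\n", (hwcap & HWCAP_LPAE) ? "yes" : "no");
    printf("EVTSTRM: %s\n", (hwcap & HWCAP_EVTSTRM) ? "yes" : "no");
    return 0;
}

The matching "lpae" and "evtstrm" strings are added to /proc/cpuinfo by the setup.c hunk later in this patch.
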
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index af33b44990ed..bbe80a7cba0c 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -406,6 +406,12 @@
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
#define __NR_kcmp (__NR_SYSCALL_BASE+378)
#define __NR_finit_module (__NR_SYSCALL_BASE+379)
+/* Backporting seccomp, skip a few ...
+ * #define __NR_sched_setattr (__NR_SYSCALL_BASE+380)
+ * #define __NR_sched_getattr (__NR_SYSCALL_BASE+381)
+ * #define __NR_renameat2 (__NR_SYSCALL_BASE+382)
+ */
+#define __NR_seccomp (__NR_SYSCALL_BASE+383)
/*
* This may need to be greater than __NR_last_syscall+1 in order to
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5f3338eacad2..aa775438388c 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,7 +17,8 @@ CFLAGS_REMOVE_return_address.o = -pg
obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
process.o ptrace.o return_address.o sched_clock.o \
- setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
+ setup.o signal.o sigreturn_codes.o \
+ stacktrace.o sys_arm.o time.o traps.o
obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
@@ -82,6 +83,9 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
-obj-$(CONFIG_ARM_PSCI) += psci.o
+ifeq ($(CONFIG_ARM_PSCI),y)
+obj-y += psci.o
+obj-$(CONFIG_SMP) += psci_smp.o
+endif
extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index c6ca7e376773..1a2e529a1340 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -389,6 +389,10 @@
CALL(sys_process_vm_writev)
CALL(sys_kcmp)
CALL(sys_finit_module)
+/* 380 */ CALL(sys_ni_syscall) /* CALL(sys_sched_setattr) */
+ CALL(sys_ni_syscall) /* CALL(sys_sched_getattr) */
+ CALL(sys_ni_syscall) /* CALL(sys_renameat2) */
+ CALL(sys_seccomp)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
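
calls.S pads the table to a multiple of four entries via syscalls_padding; with sys_seccomp at index 383 the table holds 384 entries, matching the __NR_syscalls bump to 384 seen earlier in unistd.h. The arithmetic, spelled out (sketch):

#include <assert.h>

static void check_syscall_padding(void)
{
	unsigned nr = 384;			/* entries incl. sys_seccomp */

	/* padding = ((NR + 3) & ~3) - NR; already 4-aligned here */
	assert((((nr + 3) & ~3u) - nr) == 0);
}
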
diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
index 90c50d4b43f7..5d1286d51154 100644
--- a/arch/arm/kernel/crash_dump.c
+++ b/arch/arm/kernel/crash_dump.c
@@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!csize)
return 0;
- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
if (!vaddr)
return -ENOMEM;
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5859c8bc727c..a44e7d11ab02 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -212,7 +212,7 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
}
if (!mdesc_best) {
const char *prop;
- long size;
+ int size;
early_print("\nError: unrecognized/unsupported "
"device tree compatible list:\n[ ");
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 582b405befc5..45a68d6bb2a3 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -192,6 +192,7 @@ __dabt_svc:
svc_entry
mov r2, sp
dabt_helper
+ THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__dabt_svc)
@@ -415,9 +416,8 @@ __und_usr:
bne __und_usr_thumb
sub r4, r2, #4 @ ARM instr at LR - 4
1: ldrt r0, [r4]
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r0, r0 @ little endian instruction
-#endif
+ ARM_BE8(rev r0, r0) @ little endian instruction
+
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
@@ -741,6 +741,18 @@ ENDPROC(__switch_to)
#endif
.endm
+ .macro kuser_pad, sym, size
+ .if (. - \sym) & 3
+ .rept 4 - (. - \sym) & 3
+ .byte 0
+ .endr
+ .endif
+ .rept (\size - (. - \sym)) / 4
+ .word 0xe7fddef1
+ .endr
+ .endm
+
+#ifdef CONFIG_KUSER_HELPERS
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -831,18 +843,13 @@ kuser_cmpxchg64_fixup:
#error "incoherent kernel configuration"
#endif
- /* pad to next slot */
- .rept (16 - (. - __kuser_cmpxchg64)/4)
- .word 0
- .endr
-
- .align 5
+ kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier: @ 0xffff0fa0
smp_dmb arm
usr_ret lr
- .align 5
+ kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg: @ 0xffff0fc0
@@ -915,13 +922,14 @@ kuser_cmpxchg32_fixup:
#endif
- .align 5
+ kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls: @ 0xffff0fe0
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
usr_ret lr
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
- .rep 4
+ kuser_pad __kuser_get_tls, 16
+ .rep 3
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
@@ -931,14 +939,16 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end
__kuser_helper_end:
+#endif
+
THUMB( .thumb )
/*
* Vector stubs.
*
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's. Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's. Note that this code must not exceed
+ * a page size.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -985,8 +995,17 @@ ENDPROC(vector_\name)
1:
.endm
- .globl __stubs_start
+ .section .stubs, "ax", %progbits
__stubs_start:
+ @ This must be the first word
+ .word vector_swi
+
+vector_rst:
+ ARM( swi SYS_ERROR0 )
+ THUMB( svc #0 )
+ THUMB( nop )
+ b vector_und
+
/*
* Interrupt dispatcher
*/
@@ -1081,6 +1100,16 @@ __stubs_start:
.align 5
/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+ b vector_addrexcptn
+
+/*=============================================================================
* Undefined FIQs
*-----------------------------------------------------------------------------
* Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1093,45 +1122,19 @@ __stubs_start:
vector_fiq:
subs pc, lr, #4
-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
+ .globl vector_fiq_offset
+ .equ vector_fiq_offset, vector_fiq
-vector_addrexcptn:
- b vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
- .align 5
-
-.LCvswi:
- .word vector_swi
-
- .globl __stubs_end
-__stubs_end:
-
- .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
-
- .globl __vectors_start
+ .section .vectors, "ax", %progbits
__vectors_start:
- ARM( swi SYS_ERROR0 )
- THUMB( svc #0 )
- THUMB( nop )
- W(b) vector_und + stubs_offset
- W(ldr) pc, .LCvswi + stubs_offset
- W(b) vector_pabt + stubs_offset
- W(b) vector_dabt + stubs_offset
- W(b) vector_addrexcptn + stubs_offset
- W(b) vector_irq + stubs_offset
- W(b) vector_fiq + stubs_offset
-
- .globl __vectors_end
-__vectors_end:
+ W(b) vector_rst
+ W(b) vector_und
+ W(ldr) pc, __vectors_start + 0x1000
+ W(b) vector_pabt
+ W(b) vector_dabt
+ W(b) vector_addrexcptn
+ W(b) vector_irq
+ W(b) vector_fiq
.data
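
The new kuser_pad macro replaces the hand-counted `.rept` padding: it first byte-aligns to a word boundary, then fills the rest of the helper's fixed slot with 0xe7fddef1, an architecturally undefined instruction, so an errant branch into the padding faults instead of sliding into the next helper. The same arithmetic in C (a sketch for checking slot sizes, not kernel code):

/* Given the bytes emitted since `sym`, compute the zero-byte
 * alignment pad and the number of 0xe7fddef1 filler words needed
 * so the helper occupies exactly `slot_size` bytes, e.g.
 * kuser_pad __kuser_memory_barrier, 32 above. */
static void kuser_pad_counts(unsigned emitted, unsigned slot_size,
			     unsigned *pad_bytes, unsigned *fill_words)
{
	*pad_bytes  = (emitted & 3) ? 4 - (emitted & 3) : 0;
	*fill_words = (slot_size - (emitted + *pad_bytes)) / 4;
}
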
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc5bc0a97131..8c79344552d5 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -379,9 +379,7 @@ ENTRY(vector_swi)
#else
ldr r10, [lr, #-4] @ get SWI instruction
#endif
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r10, r10 @ little endian instruction
-#endif
+ ARM_BE8(rev r10, r10) @ little endian instruction
#elif defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..25442f451148 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
#include <asm/irq.h>
#include <asm/traps.h>
+#define FIQ_OFFSET ({ \
+ extern void *vector_fiq_offset; \
+ (unsigned)&vector_fiq_offset; \
+ })
+
static unsigned long no_fiq_insn;
/* Default reacquire function
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec)
void set_fiq_handler(void *start, unsigned int length)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
- memcpy((void *)0xffff001c, start, length);
+ void *base = (void *)0xffff0000;
#else
- memcpy(vectors_page + 0x1c, start, length);
+ void *base = vectors_page;
#endif
- flush_icache_range(0xffff001c, 0xffff001c + length);
+ unsigned offset = FIQ_OFFSET;
+
+ memcpy(base + offset, start, length);
+ flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
if (!vectors_high())
- flush_icache_range(0x1c, 0x1c + length);
+ flush_icache_range(offset, offset + length);
}
int claim_fiq(struct fiq_handler *f)
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
void __init init_FIQ(int start)
{
- no_fiq_insn = *(unsigned long *)0xffff001c;
+ unsigned offset = FIQ_OFFSET;
+ no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
fiq_start = start;
}
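
FIQ_OFFSET relies on a linker-level trick: entry-armv.S defines vector_fiq_offset (via the .equ above) so that the symbol's *address* is the FIQ vector's offset within the stubs. The idiom, isolated (a sketch; the symbol name matches the one exported above):

extern char vector_fiq_offset;		/* no storage: a pure symbol */

static unsigned long example_fiq_offset(void)
{
	/* the "address" is the number the assembler assigned */
	return (unsigned long)&vector_fiq_offset;
}
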
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 8bac553fe213..11284e744c80 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -77,6 +77,7 @@
__HEAD
ENTRY(stext)
+ ARM_BE8(setend be ) @ ensure we are in BE8 mode
THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
@@ -342,7 +343,6 @@ __turn_mmu_on_loc:
.long __turn_mmu_on_end
#if defined(CONFIG_SMP)
- __CPUINIT
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
@@ -351,6 +351,9 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
+
+ ARM_BE8(setend be) @ ensure we are in BE8 mode
+
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
@@ -584,8 +587,10 @@ __fixup_a_pv_table:
b 2f
1: add r7, r3
ldrh ip, [r7, #2]
+ARM_BE8(rev16 ip, ip)
and ip, 0x8f00
orr ip, r6 @ mask in offset bits 31-24
+ARM_BE8(rev16 ip, ip)
strh ip, [r7, #2]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
@@ -594,8 +599,14 @@ __fixup_a_pv_table:
#else
b 2f
1: ldr ip, [r7, r3]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ @ in BE8, we load data in BE, but instructions still in LE
+ bic ip, ip, #0xff000000
+ orr ip, ip, r6, lsl#24
+#else
bic ip, ip, #0x000000ff
orr ip, ip, r6 @ mask in offset bits 31-24
+#endif
str ip, [r7, r3]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1b..1b803117ed91 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1049,7 +1049,8 @@ static struct notifier_block dbg_cpu_pm_nb = {
static void __init pm_init(void)
{
- cpu_pm_register_notifier(&dbg_cpu_pm_nb);
+ if (has_ossr)
+ cpu_pm_register_notifier(&dbg_cpu_pm_nb);
}
#else
static inline void pm_init(void)
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 1315c4ccfa56..dbe21107945a 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -153,6 +153,8 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL
+ mov r7, #0
+ mcrr p15, 4, r7, r7, c14 @ CNTVOFF
1:
#endif
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..c3ef920823b6 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -73,6 +73,7 @@ void machine_crash_nonpanic_core(void *unused)
crash_save_cpu(&regs, smp_processor_id());
flush_cache_all();
+ set_cpu_online(smp_processor_id(), false);
atomic_dec(&waiting_for_crash_ipi);
while (1)
cpu_relax();
@@ -168,3 +169,10 @@ void machine_kexec(struct kimage *image)
soft_restart(reboot_code_buffer_phys);
}
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_ARM_LPAE
+ VMCOREINFO_CONFIG(ARM_LPAE);
+#endif
+}
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 1e9be5d25e56..7e137873083d 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -24,6 +24,7 @@
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
+#include <asm/opcodes.h>
#ifdef CONFIG_XIP_KERNEL
/*
@@ -60,6 +61,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
Elf32_Sym *sym;
const char *symname;
s32 offset;
+ u32 tmp;
#ifdef CONFIG_THUMB2_KERNEL
u32 upper, lower, sign, j1, j2;
#endif
@@ -95,7 +97,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
- offset = (*(u32 *)loc & 0x00ffffff) << 2;
+ offset = __mem_to_opcode_arm(*(u32 *)loc);
+ offset = (offset & 0x00ffffff) << 2;
if (offset & 0x02000000)
offset -= 0x04000000;
@@ -111,9 +114,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
}
offset >>= 2;
+ offset &= 0x00ffffff;
- *(u32 *)loc &= 0xff000000;
- *(u32 *)loc |= offset & 0x00ffffff;
+ *(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
+ *(u32 *)loc |= __opcode_to_mem_arm(offset);
break;
case R_ARM_V4BX:
@@ -121,8 +125,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
* other bits to re-code instruction as
* MOV PC,Rm.
*/
- *(u32 *)loc &= 0xf000000f;
- *(u32 *)loc |= 0x01a0f000;
+ *(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
+ *(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
break;
case R_ARM_PREL31:
@@ -132,7 +136,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVT_ABS:
- offset = *(u32 *)loc;
+ offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
offset = (offset ^ 0x8000) - 0x8000;
@@ -140,16 +144,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
offset >>= 16;
- *(u32 *)loc &= 0xfff0f000;
- *(u32 *)loc |= ((offset & 0xf000) << 4) |
- (offset & 0x0fff);
+ tmp &= 0xfff0f000;
+ tmp |= ((offset & 0xf000) << 4) |
+ (offset & 0x0fff);
+
+ *(u32 *)loc = __opcode_to_mem_arm(tmp);
break;
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
- upper = *(u16 *)loc;
- lower = *(u16 *)(loc + 2);
+ upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+ lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
/*
* 25 bit signed address range (Thumb-2 BL and B.W
@@ -198,17 +204,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
sign = (offset >> 24) & 1;
j1 = sign ^ (~(offset >> 23) & 1);
j2 = sign ^ (~(offset >> 22) & 1);
- *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+ upper = (u16)((upper & 0xf800) | (sign << 10) |
((offset >> 12) & 0x03ff));
- *(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
- (j1 << 13) | (j2 << 11) |
- ((offset >> 1) & 0x07ff));
+ lower = (u16)((lower & 0xd000) |
+ (j1 << 13) | (j2 << 11) |
+ ((offset >> 1) & 0x07ff));
+
+ *(u16 *)loc = __opcode_to_mem_thumb16(upper);
+ *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
break;
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
- upper = *(u16 *)loc;
- lower = *(u16 *)(loc + 2);
+ upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+ lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
/*
* MOVT/MOVW instructions encoding in Thumb-2:
@@ -229,12 +238,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
offset >>= 16;
- *(u16 *)loc = (u16)((upper & 0xfbf0) |
- ((offset & 0xf000) >> 12) |
- ((offset & 0x0800) >> 1));
- *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
- ((offset & 0x0700) << 4) |
- (offset & 0x00ff));
+ upper = (u16)((upper & 0xfbf0) |
+ ((offset & 0xf000) >> 12) |
+ ((offset & 0x0800) >> 1));
+ lower = (u16)((lower & 0x8f00) |
+ ((offset & 0x0700) << 4) |
+ (offset & 0x00ff));
+ *(u16 *)loc = __opcode_to_mem_thumb16(upper);
+ *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
break;
#endif
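
On BE8 kernels, data accesses are big-endian while instructions stay little-endian in memory, so every instruction read-modify-write in apply_relocate() now goes through the <asm/opcodes.h> converters. The R_ARM_CALL/R_ARM_JUMP24 fixup reduced to its endian-safe shape (sketch):

/* Always convert memory -> canonical opcode, patch in opcode space,
 * then convert back before storing; on LE kernels the converters
 * compile away to nothing. */
static void patch_arm_call(u32 *loc, s32 offset_words)
{
	u32 insn = __mem_to_opcode_arm(*loc);

	insn = (insn & 0xff000000) | (offset_words & 0x00ffffff);
	*loc = __opcode_to_mem_arm(insn);
}
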
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8c3094d0f7b7..b41749fe56dc 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -12,6 +12,7 @@
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -53,7 +54,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -ENOENT;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -81,6 +87,9 @@ armpmu_map_event(struct perf_event *event,
return armpmu_map_cache_event(cache_map, config);
case PERF_TYPE_RAW:
return armpmu_map_raw_event(raw_event_mask, config);
+ default:
+ if (event->attr.type >= PERF_TYPE_MAX)
+ return armpmu_map_raw_event(raw_event_mask, config);
}
return -ENOENT;
@@ -158,6 +167,8 @@ armpmu_stop(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
/*
* ARM pmu always has to update the counter, so ignore
* PERF_EF_UPDATE, see comments in armpmu_start().
@@ -174,6 +185,8 @@ static void armpmu_start(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
/*
* ARM pmu always has to reprogram the period, so ignore
* PERF_EF_RELOAD, see the comment below.
@@ -201,6 +214,9 @@ armpmu_del(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
+
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);
@@ -217,6 +233,10 @@ armpmu_add(struct perf_event *event, int flags)
int idx;
int err = 0;
+ /* An event following a process won't be stopped earlier */
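+ /* it was never started on this CPU, so there is nothing to stop */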
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return 0;
+
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
@@ -253,6 +273,9 @@ validate_event(struct pmu_hw_events *hw_events,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
@@ -295,11 +318,18 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
struct arm_pmu *armpmu = (struct arm_pmu *) dev;
struct platform_device *plat_device = armpmu->plat_device;
struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
+ int ret;
+ u64 start_clock, finish_clock;
+ start_clock = sched_clock();
if (plat && plat->handle_irq)
- return plat->handle_irq(irq, dev, armpmu->handle_irq);
+ ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
else
- return armpmu->handle_irq(irq, dev);
+ ret = armpmu->handle_irq(irq, dev);
+ finish_clock = sched_clock();
+
+ perf_sample_event_took(finish_clock - start_clock);
+ return ret;
}
static void
@@ -416,6 +446,10 @@ static int armpmu_event_init(struct perf_event *event)
int err = 0;
atomic_t *active_events = &armpmu->active_events;
+ if (event->cpu != -1 &&
+ !cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
+ return -ENOENT;
+
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
@@ -569,6 +603,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;
}
+ perf_callchain_store(entry, regs->ARM_pc);
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
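
The dispatch hunk above brackets the PMU interrupt handler with sched_clock() reads and feeds the delta to perf_sample_event_took(), which lets the perf core lower the sampling rate when handlers run long. The pattern, reduced (sketch):

static irqreturn_t timed_dispatch(int irq, void *dev,
				  irqreturn_t (*handler)(int, void *))
{
	u64 start = sched_clock();
	irqreturn_t ret = handler(irq, dev);

	perf_sample_event_took(sched_clock() - start);
	return ret;
}
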
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e3dbc0..0b48a38e3cf4 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) "CPU PMU: " fmt
#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
@@ -31,33 +32,36 @@
#include <asm/pmu.h>
/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *cpu_pmu;
+static DEFINE_PER_CPU(struct arm_pmu *, cpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);
+
/*
* Despite the names, these two functions are CPU-specific and are used
* by the OProfile/perf code.
*/
const char *perf_pmu_name(void)
{
- if (!cpu_pmu)
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+ if (!pmu)
return NULL;
- return cpu_pmu->name;
+ return pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);
int perf_num_counters(void)
{
- int max_events = 0;
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
- if (cpu_pmu != NULL)
- max_events = cpu_pmu->num_events;
+ if (!pmu)
+ return 0;
- return max_events;
+ return pmu->num_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
@@ -75,11 +79,13 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
int i, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
+ int cpu = -1;
irqs = min(pmu_device->num_resources, num_possible_cpus());
for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+ if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq >= 0)
@@ -91,6 +97,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
int i, err, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
+ int cpu = -1;
if (!pmu_device)
return -ENODEV;
@@ -103,6 +110,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
for (i = 0; i < irqs; ++i) {
err = 0;
+ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
irq = platform_get_irq(pmu_device, i);
if (irq < 0)
continue;
@@ -112,7 +120,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
* assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt.
*/
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
irq, i);
continue;
@@ -126,7 +134,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return err;
}
- cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+ cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
}
return 0;
@@ -135,7 +143,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_cpu_mask(cpu, cpu_pmu->valid_cpus) {
struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
events->events = per_cpu(hw_events, cpu);
events->used_mask = per_cpu(used_mask, cpu);
@@ -148,7 +156,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
/* Ensure the PMU has sane values out of reset. */
if (cpu_pmu->reset)
- on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
+ on_each_cpu_mask(&cpu_pmu->valid_cpus, cpu_pmu->reset, cpu_pmu, 1);
}
/*
@@ -160,21 +168,46 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
unsigned long action, void *hcpu)
{
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, (long)hcpu);
+
if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
return NOTIFY_DONE;
- if (cpu_pmu && cpu_pmu->reset)
- cpu_pmu->reset(cpu_pmu);
+ if (pmu && pmu->reset)
+ pmu->reset(pmu);
else
return NOTIFY_DONE;
return NOTIFY_OK;
}
+static int cpu_pmu_pm_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ int cpu = smp_processor_id();
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, cpu);
+ struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
+
+ if (!pmu)
+ return NOTIFY_DONE;
+
+ if (action == CPU_PM_ENTER && pmu->save_regs) {
+ pmu->save_regs(pmu, pmuregs);
+ } else if (action == CPU_PM_EXIT && pmu->restore_regs) {
+ pmu->restore_regs(pmu, pmuregs);
+ }
+
+ return NOTIFY_OK;
+}
+
static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
.notifier_call = cpu_pmu_notify,
};
+static struct notifier_block __cpuinitdata cpu_pmu_pm_notifier = {
+ .notifier_call = cpu_pmu_pm_notify,
+};
+
/*
* PMU platform driver and devicetree bindings.
*/
@@ -246,6 +279,9 @@ static int probe_current_pmu(struct arm_pmu *pmu)
}
}
+ /* assume the PMU supports all the CPUs in this case */
+ cpumask_setall(&pmu->valid_cpus);
+
put_cpu();
return ret;
}
@@ -253,15 +289,10 @@ static int probe_current_pmu(struct arm_pmu *pmu)
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
- int (*init_fn)(struct arm_pmu *);
struct device_node *node = pdev->dev.of_node;
struct arm_pmu *pmu;
- int ret = -ENODEV;
-
- if (cpu_pmu) {
- pr_info("attempt to register multiple PMU devices!");
- return -ENOSPC;
- }
+ int ret = 0;
+ int cpu;
pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
if (!pmu) {
@@ -270,8 +301,28 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
}
if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
- init_fn = of_id->data;
- ret = init_fn(pmu);
+ smp_call_func_t init_fn = (smp_call_func_t)of_id->data;
+ struct device_node *ncluster;
+ int cluster = -1;
+ cpumask_t sibling_mask;
+
+ ncluster = of_parse_phandle(node, "cluster", 0);
+ if (ncluster) {
+ int len;
+ const u32 *hwid;
+ hwid = of_get_property(ncluster, "reg", &len);
+ if (hwid && len == 4)
+ cluster = be32_to_cpup(hwid);
+ }
+ /* default the sibling mask to all CPUs if no socket is specified */
+ if (cluster == -1 ||
+ cluster_to_logical_mask(cluster, &sibling_mask))
+ cpumask_setall(&sibling_mask);
+
+ smp_call_function_any(&sibling_mask, init_fn, pmu, 1);
+
+ /* now set the valid_cpus after init */
+ cpumask_copy(&pmu->valid_cpus, &sibling_mask);
} else {
ret = probe_current_pmu(pmu);
}
@@ -281,10 +332,12 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
goto out_free;
}
- cpu_pmu = pmu;
- cpu_pmu->plat_device = pdev;
- cpu_pmu_init(cpu_pmu);
- ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
+ for_each_cpu_mask(cpu, pmu->valid_cpus)
+ per_cpu(cpu_pmu, cpu) = pmu;
+
+ pmu->plat_device = pdev;
+ cpu_pmu_init(pmu);
+ ret = armpmu_register(pmu, -1);
if (!ret)
return 0;
@@ -313,9 +366,17 @@ static int __init register_pmu_driver(void)
if (err)
return err;
+ err = cpu_pm_register_notifier(&cpu_pmu_pm_notifier);
+ if (err) {
+ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ return err;
+ }
+
err = platform_driver_register(&cpu_pmu_driver);
- if (err)
+ if (err) {
+ cpu_pm_unregister_notifier(&cpu_pmu_pm_notifier);
unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ }
return err;
}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 039cffb053a7..654db5030c31 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -950,6 +950,51 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
}
#endif
+static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
+ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
+ asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mrc p15, 0, %0, c9, c13, 1"
+ : "=r"(regs->pmxevttype[cnt]));
+ asm volatile("mrc p15, 0, %0, c9, c13, 2"
+ : "=r"(regs->pmxevtcnt[cnt]));
+ }
+ return;
+}
+
+static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
+ asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mcr p15, 0, %0, c9, c13, 1"
+ : : "r"(regs->pmxevttype[cnt]));
+ asm volatile("mcr p15, 0, %0, c9, c13, 2"
+ : : "r"(regs->pmxevtcnt[cnt]));
+ }
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
+}
+
static void armv7pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
@@ -1223,6 +1268,8 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->start = armv7pmu_start;
cpu_pmu->stop = armv7pmu_stop;
cpu_pmu->reset = armv7pmu_reset;
+ cpu_pmu->save_regs = armv7pmu_save_regs;
+ cpu_pmu->restore_regs = armv7pmu_restore_regs;
cpu_pmu->max_period = (1LLU << 32) - 1;
};
@@ -1240,7 +1287,7 @@ static u32 armv7_read_num_pmnc_events(void)
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A8";
+ cpu_pmu->name = "ARMv7_Cortex_A8";
cpu_pmu->map_event = armv7_a8_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1249,7 +1296,7 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A9";
+ cpu_pmu->name = "ARMv7_Cortex_A9";
cpu_pmu->map_event = armv7_a9_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1258,7 +1305,7 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A5";
+ cpu_pmu->name = "ARMv7_Cortex_A5";
cpu_pmu->map_event = armv7_a5_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1267,7 +1314,7 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A15";
+ cpu_pmu->name = "ARMv7_Cortex_A15";
cpu_pmu->map_event = armv7_a15_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
@@ -1277,7 +1324,7 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A7";
+ cpu_pmu->name = "ARMv7_Cortex_A7";
cpu_pmu->map_event = armv7_a7_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d12f92e011d4..978002e5b406 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -535,6 +535,7 @@ EXPORT_SYMBOL(dump_fpu);
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -543,9 +544,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.sp = thread_saved_sp(p);
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame) < 0)
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
@@ -560,10 +563,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}
#ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Insert it into the
- * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers. Insert it into the gate_vma so that it is visible
+ * through ptrace and /proc/<pid>/mem.
*/
static struct vm_area_struct gate_vma = {
.vm_start = 0xffff0000,
@@ -592,9 +596,48 @@ int in_gate_area_no_mm(unsigned long addr)
{
return in_gate_area(NULL, addr);
}
+#define is_gate_vma(vma) ((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma) 0
+#endif
const char *arch_vma_name(struct vm_area_struct *vma)
{
- return (vma == &gate_vma) ? "[vectors]" : NULL;
+ return is_gate_vma(vma) ? "[vectors]" :
+ (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+ "[sigpage]" : NULL;
+}
+
+static struct page *signal_page;
+extern struct page *get_signal_page(void);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret;
+
+ if (!signal_page)
+ signal_page = get_signal_page();
+ if (!signal_page)
+ return -ENOMEM;
+
+ down_write(&mm->mmap_sem);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &signal_page);
+
+ if (ret == 0)
+ mm->context.sigpage = addr;
+
+ up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
}
#endif
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index 36531643cc2c..0daf4f252284 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/string.h>
#include <asm/compiler.h>
#include <asm/errno.h>
@@ -26,6 +27,11 @@
struct psci_operations psci_ops;
+/* Type of psci support. Currently can only be enabled or disabled */
+#define PSCI_SUP_DISABLED 0
+#define PSCI_SUP_ENABLED 1
+
+static unsigned int psci;
static int (*invoke_psci_fn)(u32, u32, u32, u32);
enum psci_function {
@@ -42,6 +48,7 @@ static u32 psci_function_id[PSCI_FN_MAX];
#define PSCI_RET_EOPNOTSUPP -1
#define PSCI_RET_EINVAL -2
#define PSCI_RET_EPERM -3
+#define PSCI_RET_EALREADYON -4
static int psci_to_linux_errno(int errno)
{
@@ -54,6 +61,8 @@ static int psci_to_linux_errno(int errno)
return -EINVAL;
case PSCI_RET_EPERM:
return -EPERM;
+ case PSCI_RET_EALREADYON:
+ return -EAGAIN;
};
return -EINVAL;
@@ -158,15 +167,18 @@ static const struct of_device_id psci_of_match[] __initconst = {
{},
};
-static int __init psci_init(void)
+void __init psci_init(void)
{
struct device_node *np;
const char *method;
u32 id;
+ if (psci == PSCI_SUP_DISABLED)
+ return;
+
np = of_find_matching_node(NULL, psci_of_match);
if (!np)
- return 0;
+ return;
pr_info("probing function IDs from device-tree\n");
@@ -206,6 +218,35 @@ static int __init psci_init(void)
out_put_node:
of_node_put(np);
- return 0;
+ return;
+}
+
+int __init psci_probe(void)
+{
+ struct device_node *np = NULL; /* NULL-safe for of_node_put() below */
+ int ret = -ENODEV;
+
+ if (psci == PSCI_SUP_ENABLED) {
+ np = of_find_matching_node(NULL, psci_of_match);
+ if (np)
+ ret = 0;
+ }
+
+ of_node_put(np);
+ return ret;
+}
+
+static int __init early_psci(char *val)
+{
+ int ret = 0;
+
+ if (strcmp(val, "enable") == 0)
+ psci = PSCI_SUP_ENABLED;
+ else if (strcmp(val, "disable") == 0)
+ psci = PSCI_SUP_DISABLED;
+ else
+ ret = -EINVAL;
+
+ return ret;
}
-early_initcall(psci_init);
+early_param("psci", early_psci);
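
Since probing is now gated on the static `psci` variable rather than an early_initcall, the behaviour is chosen from the kernel command line. Illustrative bootargs (not part of the patch):

/*
 *   ... psci=enable   -> psci_init() probes the DT "psci" node and
 *                        fills in psci_ops
 *   ... psci=disable  -> PSCI stays off; this is also the default,
 *                        since the static 'psci' variable starts as
 *                        PSCI_SUP_DISABLED (0)
 */
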
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
new file mode 100644
index 000000000000..23a11424c568
--- /dev/null
+++ b/arch/arm/kernel/psci_smp.c
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/psci.h>
+#include <asm/smp_plat.h>
+
+/*
+ * psci_smp assumes that the following is true about PSCI:
+ *
+ * cpu_suspend Suspend the execution on a CPU
+ * @state we don't currently describe affinity levels, so just pass 0.
+ * @entry_point the first instruction to be executed on return
+ * returns 0 success, < 0 on failure
+ *
+ * cpu_off Power down a CPU
+ * @state we don't currently describe affinity levels, so just pass 0.
+ * no return on successful call
+ *
+ * cpu_on Power up a CPU
+ * @cpuid cpuid of target CPU, as from MPIDR
+ * @entry_point the first instruction to be executed on return
+ * returns 0 success, < 0 on failure
+ *
+ * migrate Migrate the context to a different CPU
+ * @cpuid cpuid of target CPU, as from MPIDR
+ * returns 0 success, < 0 on failure
+ *
+ */
+
+extern void secondary_startup(void);
+
+static int __cpuinit psci_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ if (psci_ops.cpu_on)
+ return psci_ops.cpu_on(cpu_logical_map(cpu),
+ __pa(secondary_startup));
+ return -ENODEV;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __ref psci_cpu_die(unsigned int cpu)
+{
+ const struct psci_power_state ps = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+
+ if (psci_ops.cpu_off)
+ psci_ops.cpu_off(ps);
+
+ /* We should never return */
+ panic("psci: cpu %d failed to shutdown\n", cpu);
+}
+#else
+#define psci_cpu_die NULL
+#endif
+
+bool __init psci_smp_available(void)
+{
+ /* is cpu_on available at least? */
+ return (psci_ops.cpu_on != NULL);
+}
+
+struct smp_operations __initdata psci_smp_ops = {
+ .smp_boot_secondary = psci_boot_secondary,
+ .cpu_die = psci_cpu_die,
+};
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b4b1d397592b..29beb8c76560 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
+#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -261,6 +262,19 @@ static int cpu_has_aliasing_icache(unsigned int arch)
int aliasing_icache;
unsigned int id_reg, num_sets, line_size;
+#ifdef CONFIG_BIG_LITTLE
+ /*
+ * We expect a combination of Cortex-A15 and Cortex-A7 cores.
+ * A7 = VIPT aliasing I-cache
+ * A15 = PIPT (non-aliasing) I-cache
+ * To cater for this discrepancy, let's assume aliasing I-cache
+ * all the time. This means unneeded extra work on the A15 but
+ * only ptrace is affected which is not performance critical.
+ */
+ if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc0f0)
+ return 1;
+#endif
+
/* PIPT caches never alias. */
if (icache_is_pipt())
return 0;
@@ -530,6 +544,7 @@ void __init dump_machine_table(void)
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+ u64 aligned_start;
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
@@ -542,10 +557,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
* Size is appropriately rounded down, start is rounded up.
*/
size -= start & ~PAGE_MASK;
- bank->start = PAGE_ALIGN(start);
+ aligned_start = PAGE_ALIGN(start);
+
+#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+ if (aligned_start > ULONG_MAX) {
+ printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
+ "32-bit physical address space\n", (long long)start);
+ return -EINVAL;
+ }
-#ifndef CONFIG_ARM_LPAE
- if (bank->start + size < bank->start) {
+ if (aligned_start + size > ULONG_MAX) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
/*
@@ -553,10 +574,25 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
* 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
* This means we lose a page after masking.
*/
- size = ULONG_MAX - bank->start;
+ size = ULONG_MAX - aligned_start;
}
#endif
+ if (aligned_start < PHYS_OFFSET) {
+ if (aligned_start + size <= PHYS_OFFSET) {
+ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+ aligned_start, aligned_start + size);
+ return -EINVAL;
+ }
+
+ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+ aligned_start, (u64)PHYS_OFFSET);
+
+ size -= PHYS_OFFSET - aligned_start;
+ aligned_start = PHYS_OFFSET;
+ }
+
+ bank->start = aligned_start;
bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
/*
@@ -796,9 +832,15 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree();
arm_dt_init_cpu_maps();
+ psci_init();
#ifdef CONFIG_SMP
if (is_smp()) {
- smp_set_ops(mdesc->smp);
+ if (!mdesc->smp_init || !mdesc->smp_init()) {
+ if (psci_smp_available())
+ smp_set_ops(&psci_smp_ops);
+ else if (mdesc->smp)
+ smp_set_ops(mdesc->smp);
+ }
smp_init_cpus();
}
#endif
@@ -872,6 +914,9 @@ static const char *hwcap_str[] = {
"vfpv4",
"idiva",
"idivt",
+ "vfpd32",
+ "lpae",
+ "evtstrm",
NULL
};
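
The arm_add_memory() hunk above clamps banks against both the 32-bit limit and PHYS_OFFSET. A worked example with invented numbers (PHYS_OFFSET assumed to be 0x80000000 here):

/* A bank passed as [0x7ff00000, +0x200000) straddles PHYS_OFFSET,
 * so the low 0x100000 bytes are dropped and the bank becomes
 * [0x80000000, +0x100000); a bank entirely below PHYS_OFFSET is
 * rejected with -EINVAL instead. */
static void check_bank_clamp(void)
{
	unsigned long long phys_offset = 0x80000000ULL;	/* assumption */
	unsigned long long start = 0x7ff00000ULL, size = 0x200000ULL;

	if (start < phys_offset) {
		size -= phys_offset - start;
		start = phys_offset;
	}
	/* start == 0x80000000, size == 0x100000 */
}
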
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 296786bdbb73..3c23086dc8e2 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
+#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
@@ -15,35 +16,14 @@
#include <asm/elf.h>
#include <asm/cacheflush.h>
+#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
-#include "signal.h"
+extern const unsigned long sigreturn_codes[7];
-/*
- * For ARM syscalls, we encode the syscall number into the instruction.
- */
-#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-
-/*
- * With EABI, the syscall number has to be loaded into r7.
- */
-#define MOV_R7_NR_SIGRETURN (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-/*
- * For Thumb syscalls, we pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-const unsigned long sigreturn_codes[7] = {
- MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
- MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
-};
+static unsigned long signal_return_offset;
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
@@ -396,13 +376,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
__put_user(sigreturn_codes[idx+1], rc+1))
return 1;
+#ifdef CONFIG_MMU
if (cpsr & MODE32_BIT) {
+ struct mm_struct *mm = current->mm;
/*
- * 32-bit code can use the new high-page
- * signal return code support.
+ * 32-bit code can use the signal return page
+ * except when the MPU has protected the vectors
+ * page from PL0
*/
- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
- } else {
+ retcode = mm->context.sigpage + signal_return_offset +
+ (idx << 2) + thumb;
+ } else
+#endif
+ {
/*
* Ensure that the instruction cache sees
* the return code written onto the stack.
@@ -603,3 +589,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
+
+struct page *get_signal_page(void)
+{
+ unsigned long ptr;
+ unsigned offset;
+ struct page *page;
+ void *addr;
+
+ page = alloc_pages(GFP_KERNEL, 0);
+
+ if (!page)
+ return NULL;
+
+ addr = page_address(page);
+
+ /* Give the signal return code some randomness */
+ offset = 0x200 + (get_random_int() & 0x7fc);
+ signal_return_offset = offset;
+
+ /*
+ * Copy the signal return handlers into the signal page, and
+ * set sigreturn to be a pointer to these.
+ */
+ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+ ptr = (unsigned long)addr + offset;
+ flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+
+ return page;
+}
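A quick sanity check on the randomization above: the 0x7fc mask keeps the offset word aligned and at most 0x9fc, so the copied codes always fit in one page. A sketch:

	/*
	 * offset lies in [0x200, 0x9fc], always a multiple of 4.
	 * Worst case: 0x9fc + sizeof(sigreturn_codes) (7 words, 28
	 * bytes) = 0xa18, comfortably below PAGE_SIZE (0x1000).
	 */
	BUG_ON(0x200 + 0x7fc + sizeof(sigreturn_codes) > PAGE_SIZE);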
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
deleted file mode 100644
index 5ff067b7c752..000000000000
--- a/arch/arm/kernel/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * linux/arch/arm/kernel/signal.h
- *
- * Copyright (C) 2005-2009 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
-
-extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
new file mode 100644
index 000000000000..3c5d0f2170fd
--- /dev/null
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -0,0 +1,80 @@
+/*
+ * sigreturn_codes.S - code snippets for sigreturn syscalls
+ *
+ * Created by: Victor Kamensky, 2013-08-13
+ * Copyright: (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/unistd.h>
+
+/*
+ * For ARM syscalls, we encode the syscall number into the instruction.
+ * With EABI, the syscall number has to be loaded into r7. As a result,
+ * the ARM syscall sequence snippet consists of a mov and an svc in .arm
+ * encoding.
+ *
+ * For Thumb syscalls, we pass the syscall number via r7. We therefore
+ * need two 16-bit instructions in .thumb encoding.
+ *
+ * Please note these sigreturn_codes snippets are not executed in place.
+ * Instead they are copied by the kernel into appropriate places. Code in
+ * arch/arm/kernel/signal.c is very sensitive to the layout of these
+ * snippets.
+ */
+
+#if __LINUX_ARM_ARCH__ <= 4
+ /*
+ * Note that we manually set the minimal architecture that
+ * supports the required Thumb opcodes on early arch versions.
+ * It is OK for this file to be combined with code built for
+ * lower arch variants, since these code snippets are only
+ * used as input data.
+ */
+ .arch armv4t
+#endif
+
+ .section .rodata
+ .global sigreturn_codes
+ .type sigreturn_codes, #object
+
+ .arm
+
+sigreturn_codes:
+
+ /* ARM sigreturn syscall code snippet */
+ mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+ swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+ /* Thumb sigreturn syscall code snippet */
+ .thumb
+ movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+ swi #0
+
+ /* ARM sigreturn_rt syscall code snippet */
+ .arm
+ mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+ swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+ /* Thumb sigreturn_rt syscall code snippet */
+ .thumb
+ movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+ swi #0
+
+ /*
+ * Note on the additional space: the setup_return algorithm in
+ * signal.c always copies two words, regardless of whether it is
+ * the Thumb case or not, so we need an additional word after the
+ * real last entry.
+ */
+ .arm
+ .space 4
+
+ .size sigreturn_codes, . - sigreturn_codes
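How signal.c indexes this table, sketched from the layout of the old C array this file replaces (the idx arithmetic is inferred from the two-word copy visible in setup_return()):

	/*
	 *   idx = thumb << 1;       word 0 = ARM, word 2 = Thumb
	 *   if (rt signal)
	 *       idx += 3;           rt variants live in words 3..5
	 *
	 * setup_return() copies words idx and idx+1, which is why the
	 * trailing .space word above must exist: the Thumb rt case
	 * (idx == 5) still copies two words.
	 */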
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf33415c..baf4d28213a5 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -4,6 +4,7 @@
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
+#include "entry-header.S"
.text
/*
@@ -30,9 +31,8 @@ ENTRY(__cpu_suspend)
mov r2, r5 @ virtual SP
ldr r3, =sleep_save_sp
#ifdef CONFIG_SMP
- ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
- ALT_UP(mov lr, #0)
- and lr, lr, #15
+ get_thread_info r5
+ ldr lr, [r5, #TI_CPU] @ cpu logical index
add r3, r3, lr, lsl #2
#endif
bl __cpu_suspend_save
@@ -81,11 +81,15 @@ ENDPROC(cpu_resume_after_mmu)
.data
.align
ENTRY(cpu_resume)
+ARM_BE8(setend be) @ ensure we are in BE mode
#ifdef CONFIG_SMP
+ mov r1, #0 @ fall-back logical index for UP
+ ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
+ ALT_UP_B(1f)
+ bic r0, #0xff000000
+ bl cpu_logical_index @ return logical index in r1
+1:
adr r0, sleep_save_sp
- ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
- ALT_UP(mov r1, #0)
- and r1, r1, #15
ldr r0, [r0, r1, lsl #2] @ stack phys addr
#else
ldr r0, sleep_save_sp @ stack phys addr
@@ -102,3 +106,20 @@ sleep_save_sp:
.rept CONFIG_NR_CPUS
.long 0 @ preserve stack phys ptr here
.endr
+
+#ifdef CONFIG_SMP
+cpu_logical_index:
+ adr r3, cpu_map_ptr
+ ldr r2, [r3]
+ add r3, r3, r2 @ virt_to_phys(__cpu_logical_map)
+ mov r1, #0
+1:
+ ldr r2, [r3, r1, lsl #2]
+ cmp r2, r0
+ moveq pc, lr
+ add r1, r1, #1
+ b 1b
+
+cpu_map_ptr:
+ .long __cpu_logical_map - .
+#endif
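A C rendering of the cpu_logical_index loop above (a sketch only; the assembly runs with the MMU off, which is why it computes the physical address of __cpu_logical_map PC-relatively through cpu_map_ptr):

	int cpu_logical_index(u32 mpidr_aff)		/* r0 on entry */
	{
		int i;

		/* linear scan; like the asm, assumes a match exists */
		for (i = 0; ; i++)
			if (__cpu_logical_map[i] == mpidr_aff)
				return i;		/* r1 on return */
	}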
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 0c96b2d343ed..ed3243bb6c07 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -46,6 +46,9 @@
#include <asm/virt.h>
#include <asm/mach/arch.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/arm-ipi.h>
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -57,7 +60,7 @@ struct secondary_data secondary_data;
* control for which core is the next to come out of the secondary
* boot "holding pen"
*/
-volatile int __cpuinitdata pen_release = -1;
+volatile int pen_release = -1;
enum ipi_msg_type {
IPI_WAKEUP,
@@ -66,6 +69,7 @@ enum ipi_msg_type {
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_COMPLETION,
IPI_CPU_BACKTRACE,
};
@@ -464,6 +468,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_COMPLETION, "completion interrupts"),
S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
@@ -590,6 +595,19 @@ static void ipi_cpu_stop(unsigned int cpu)
cpu_relax();
}
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+ per_cpu(cpu_completion, cpu) = completion;
+ return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+ complete(per_cpu(cpu_completion, cpu));
+}
+
static cpumask_t backtrace_mask;
static DEFINE_RAW_SPINLOCK(backtrace_lock);
@@ -658,6 +676,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
if (ipinr < NR_IPI)
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ trace_arm_ipi_entry(ipinr);
switch (ipinr) {
case IPI_WAKEUP:
break;
@@ -692,6 +711,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+ case IPI_COMPLETION:
+ irq_enter();
+ ipi_complete(cpu);
+ irq_exit();
+ break;
+
case IPI_CPU_BACKTRACE:
ipi_cpu_backtrace(cpu, regs);
break;
@@ -701,6 +726,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
cpu, ipinr);
break;
}
+ trace_arm_ipi_exit(ipinr);
set_irq_regs(old_regs);
}
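Intended usage of the new completion IPI, as a sketch (the big.LITTLE switcher added elsewhere in this series is the expected caller):

	struct completion done;
	int ipi_nr;

	init_completion(&done);
	ipi_nr = register_ipi_completion(&done, cpu);
	/* ... arrange for IPI number 'ipi_nr' to fire on 'cpu' ... */
	wait_for_completion(&done);	/* released by ipi_complete() */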
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 5bc1a63284e3..1aafa0d785eb 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -28,7 +28,7 @@
*/
unsigned int __init scu_get_core_count(void __iomem *scu_base)
{
- unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+ unsigned int ncores = readl_relaxed(scu_base + SCU_CONFIG);
return (ncores & 0x03) + 1;
}
@@ -42,19 +42,19 @@ void scu_enable(void __iomem *scu_base)
#ifdef CONFIG_ARM_ERRATA_764369
/* Cortex-A9 only */
if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
- scu_ctrl = __raw_readl(scu_base + 0x30);
+ scu_ctrl = readl_relaxed(scu_base + 0x30);
if (!(scu_ctrl & 1))
- __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+ writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30);
}
#endif
- scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+ scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
/* already enabled? */
if (scu_ctrl & 1)
return;
scu_ctrl |= 1;
- __raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+ writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
/*
* Ensure that the data accessed by CPU0 before the SCU was
@@ -80,9 +80,9 @@ int scu_power_mode(void __iomem *scu_base, unsigned int mode)
if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;
- val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+ val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
val |= mode;
- __raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+ writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
return 0;
}
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9a52a07aa40e..a98b62dca2fa 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void)
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
- int cpu, this_cpu;
+ int this_cpu;
cpumask_t mask = { CPU_BITS_NONE };
if (!erratum_a15_798181())
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
dummy_flush_tlb_a15_erratum();
this_cpu = get_cpu();
- for_each_online_cpu(cpu) {
- if (cpu == this_cpu)
- continue;
- /*
- * We only need to send an IPI if the other CPUs are running
- * the same ASID as the one being invalidated. There is no
- * need for locking around the active_asids check since the
- * switch_mm() function has at least one dmb() (as required by
- * this workaround) in case a context switch happens on
- * another CPU after the condition below.
- */
- if (atomic64_read(&mm->context.id) ==
- atomic64_read(&per_cpu(active_asids, cpu)))
- cpumask_set_cpu(cpu, &mask);
- }
+ a15_erratum_get_cpumask(this_cpu, mm, &mask);
smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
put_cpu();
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 90525d9d290b..4971ccf012ca 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -45,7 +45,7 @@ static void twd_set_mode(enum clock_event_mode mode,
case CLOCK_EVT_MODE_PERIODIC:
ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
| TWD_TIMER_CONTROL_PERIODIC;
- __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+ writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
twd_base + TWD_TIMER_LOAD);
break;
case CLOCK_EVT_MODE_ONESHOT:
@@ -58,18 +58,18 @@ static void twd_set_mode(enum clock_event_mode mode,
ctrl = 0;
}
- __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
}
static int twd_set_next_event(unsigned long evt,
struct clock_event_device *unused)
{
- unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+ unsigned long ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);
ctrl |= TWD_TIMER_CONTROL_ENABLE;
- __raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
- __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER);
+ writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
return 0;
}
@@ -82,8 +82,8 @@ static int twd_set_next_event(unsigned long evt,
*/
static int twd_timer_ack(void)
{
- if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
- __raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+ if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) {
+ writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
return 1;
}
@@ -120,7 +120,7 @@ static int twd_rate_change(struct notifier_block *nb,
* changing cpu.
*/
if (flags == POST_RATE_CHANGE)
- smp_call_function(twd_update_frequency,
+ on_each_cpu(twd_update_frequency,
(void *)&cnd->new_rate, 1);
return NOTIFY_OK;
@@ -209,15 +209,15 @@ static void __cpuinit twd_calibrate_rate(void)
waitjiffies += 5;
/* enable, no interrupt or reload */
- __raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL);
/* maximum value */
- __raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+ writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
while (get_jiffies_64() < waitjiffies)
udelay(10);
- count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+ count = readl_relaxed(twd_base + TWD_TIMER_COUNTER);
twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
@@ -275,7 +275,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
* bother with the below.
*/
if (per_cpu(percpu_setup_called, cpu)) {
- __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
clockevents_register_device(*__this_cpu_ptr(twd_evt));
enable_percpu_irq(clk->irq, 0);
return 0;
@@ -288,7 +288,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
* The following is done once per CPU the first time .setup() is
* called.
*/
- __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
clk->name = "local_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 00f79e59985b..6582c4adc182 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
high = ALIGN(low, THREAD_SIZE);
/* check current frame pointer is within bounds */
- if (fp < (low + 12) || fp + 4 >= high)
+ if (fp < low + 12 || fp > high - 4)
return -EINVAL;
/* restore the registers from the stack frame */
@@ -83,13 +83,16 @@ static int save_trace(struct stackframe *frame, void *d)
return trace->nr_entries >= trace->max_entries;
}
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+/* This must be noinline so that our skip calculation works correctly */
+static noinline void __save_stack_trace(struct task_struct *tsk,
+ struct stack_trace *trace, unsigned int nosched)
{
struct stack_trace_data data;
struct stackframe frame;
data.trace = trace;
data.skip = trace->skip;
+ data.no_sched_functions = nosched;
if (tsk != current) {
#ifdef CONFIG_SMP
@@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX;
return;
#else
- data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.lr = 0; /* recovered from the stack */
@@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
} else {
register unsigned long current_sp asm ("sp");
- data.no_sched_functions = 0;
+ /* We don't want this function or its caller */
+ data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)save_stack_trace_tsk;
+ frame.pc = (unsigned long)__save_stack_trace;
}
walk_stackframe(&frame, save_trace, &data);
@@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ __save_stack_trace(tsk, trace, 1);
+}
+
void save_stack_trace(struct stack_trace *trace)
{
- save_stack_trace_tsk(current, trace);
+ __save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
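Why data.skip is bumped by exactly two in the current-task path:

	/*
	 * With frame.pc seeded to __save_stack_trace, the first frames
	 * recorded for 'current' would always be:
	 *
	 *   [0] __save_stack_trace
	 *   [1] save_stack_trace / save_stack_trace_tsk
	 *
	 * skip += 2 drops exactly those two, and __save_stack_trace is
	 * noinline so inlining cannot change the frame count that the
	 * calculation relies on.
	 */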
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index c5a59546a256..677da58d9e88 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <asm/cputype.h>
+#include <asm/smp_plat.h>
#include <asm/topology.h>
/*
@@ -289,6 +290,140 @@ void store_cpu_topology(unsigned int cpuid)
cpu_topology[cpuid].socket_id, mpidr);
}
+
+#ifdef CONFIG_SCHED_HMP
+
+static const char * const little_cores[] = {
+ "arm,cortex-a7",
+ NULL,
+};
+
+static bool is_little_cpu(struct device_node *cn)
+{
+ const char * const *lc;
+ for (lc = little_cores; *lc; lc++)
+ if (of_device_is_compatible(cn, *lc))
+ return true;
+ return false;
+}
+
+void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+ struct cpumask *slow)
+{
+ struct device_node *cn = NULL;
+ int cpu;
+
+ cpumask_clear(fast);
+ cpumask_clear(slow);
+
+ /*
+ * Use the config options if they are given. This helps testing
+ * HMP scheduling on systems without a big.LITTLE architecture.
+ */
+ if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+ if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
+ WARN(1, "Failed to parse HMP fast cpu mask!\n");
+ if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
+ WARN(1, "Failed to parse HMP slow cpu mask!\n");
+ return;
+ }
+
+ /*
+ * Else, parse device tree for little cores.
+ */
+ while ((cn = of_find_node_by_type(cn, "cpu"))) {
+
+ const u32 *mpidr;
+ int len;
+
+ mpidr = of_get_property(cn, "reg", &len);
+ if (!mpidr || len != 4) {
+ pr_err("* %s missing reg property\n", cn->full_name);
+ continue;
+ }
+
+ cpu = get_logical_index(be32_to_cpup(mpidr));
+ if (cpu == -EINVAL) {
+ pr_err("couldn't get logical index for mpidr %x\n",
+ be32_to_cpup(mpidr));
+ break;
+ }
+
+ if (is_little_cpu(cn))
+ cpumask_set_cpu(cpu, slow);
+ else
+ cpumask_set_cpu(cpu, fast);
+ }
+
+ if (!cpumask_empty(fast) && !cpumask_empty(slow))
+ return;
+
+ /*
+ * We didn't find both big and little cores so let's call all cores
+ * fast as this will keep the system running, with all cores being
+ * treated equal.
+ */
+ cpumask_setall(fast);
+ cpumask_clear(slow);
+}
+
+struct cpumask hmp_slow_cpu_mask;
+
+void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
+{
+ struct cpumask hmp_fast_cpu_mask;
+ struct hmp_domain *domain;
+
+ arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
+
+ /*
+ * Initialize hmp_domains
+ * Must be ordered with respect to compute capacity.
+ * Fastest domain at head of list.
+ */
+	if (!cpumask_empty(&hmp_slow_cpu_mask)) {
+		domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
+ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+ }
+	domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
+ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+}
+#endif /* CONFIG_SCHED_HMP */
+
+
+/*
+ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
+ * @socket_id: cluster HW identifier
+ * @cluster_mask: the cpumask location to be initialized, modified by the
+ * function only if return value == 0
+ *
+ * Return:
+ *
+ * 0 on success
+ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
+ */
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
+{
+ int cpu;
+
+ if (!cluster_mask)
+ return -EINVAL;
+
+ for_each_online_cpu(cpu)
+ if (socket_id == topology_physical_package_id(cpu)) {
+ cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
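Example use of cluster_to_logical_mask(), a hypothetical caller for illustration:

	int cpu;
	cpumask_t cluster;

	if (cluster_to_logical_mask(0, &cluster) == 0)
		for_each_cpu(cpu, &cluster)
			pr_info("cluster 0 contains cpu%d\n", cpu);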
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 18b32e8e4497..b4fd850c34b2 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -34,10 +34,15 @@
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
-
-#include "signal.h"
-
-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+#include <asm/opcodes.h>
+
+static const char *handler[] = {
+ "prefetch abort",
+ "data abort",
+ "address exception",
+ "interrupt",
+ "undefined instruction",
+};
void *vectors_page;
@@ -343,15 +348,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
- unsigned short bkpt;
+ u16 bkpt;
+ u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
- unsigned long bkpt;
+ u32 bkpt;
+ u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif
if (probe_kernel_address((unsigned *)pc, bkpt))
return 0;
- return bkpt == BUG_INSTR_VALUE;
+ return bkpt == insn;
}
#endif
@@ -404,25 +411,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
if (thumb_mode(regs)) {
- instr = ((u16 *)pc)[0];
+ instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
if (is_wide_instruction(instr)) {
- instr <<= 16;
- instr |= ((u16 *)pc)[1];
+ u16 inst2;
+ inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
+ instr = __opcode_thumb32_compose(instr, inst2);
}
} else
#endif
- instr = *(u32 *) pc;
+ instr = __mem_to_opcode_arm(*(u32 *) pc);
} else if (thumb_mode(regs)) {
if (get_user(instr, (u16 __user *)pc))
goto die_sig;
+ instr = __mem_to_opcode_thumb16(instr);
if (is_wide_instruction(instr)) {
unsigned int instr2;
if (get_user(instr2, (u16 __user *)pc+1))
goto die_sig;
- instr <<= 16;
- instr |= instr2;
+ instr2 = __mem_to_opcode_thumb16(instr2);
+ instr = __opcode_thumb32_compose(instr, instr2);
}
-	} else if (get_user(instr, (u32 __user *)pc)) {
-		goto die_sig;
+	} else {
+		if (get_user(instr, (u32 __user *)pc))
+			goto die_sig;
+		instr = __mem_to_opcode_arm(instr);
}
@@ -800,47 +810,55 @@ void __init trap_init(void)
return;
}
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
{
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+ memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
/*
* vectors + 0xfe0 = __kuser_get_tls
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
*/
if (tls_emu || has_tls_reg)
- memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+ memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif
void __init early_trap_init(void *vectors_base)
{
unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
- extern char __kuser_helper_start[], __kuser_helper_end[];
- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned i;
vectors_page = vectors_base;
/*
+ * Poison the vectors page with an undefined instruction. This
+ * instruction is chosen to be undefined for both ARM and Thumb
+ * ISAs. The Thumb version is an undefined instruction with a
+ * branch back to the undefined instruction.
+ */
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ ((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+ /*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
- memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
- /*
- * Do processor specific fixups for the kuser helpers
- */
- kuser_get_tls_init(vectors);
-
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
- sigreturn_codes, sizeof(sigreturn_codes));
+ kuser_init(vectors_base);
- flush_icache_range(vectors, vectors + PAGE_SIZE);
+ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
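Why 0xe7fddef1 works as poison for both instruction sets (decoding sketch, little-endian layout):

	/*
	 * As an ARM word, 0xe7fddef1 sits in the permanently undefined
	 * 0xe7fxxxfx space.
	 *
	 * Read as two Thumb halfwords it is:
	 *   0xdef1   udf #0xf1       permanently undefined
	 *   0xe7fd   b . - 2         branch back to the udf
	 *
	 * so a stray jump anywhere into the poisoned page traps in
	 * either instruction set.
	 */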
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index a871b8e00fca..33f2ea32f5a0 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -152,6 +152,23 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
#endif
+ /*
+ * The vectors and stubs are relocatable code, and the
+ * only thing that matters is their relative offsets
+ */
+ __vectors_start = .;
+ .vectors 0 : AT(__vectors_start) {
+ *(.vectors)
+ }
+ . = __vectors_start + SIZEOF(.vectors);
+ __vectors_end = .;
+
+ __stubs_start = .;
+ .stubs 0x1000 : AT(__stubs_start) {
+ *(.stubs)
+ }
+ . = __stubs_start + SIZEOF(.stubs);
+ __stubs_end = .;
INIT_TEXT_SECTION(8)
.exit.text : {
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index ef1703b9587b..1d55afe7fd4b 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -17,6 +17,7 @@
*/
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
@@ -835,6 +836,33 @@ static struct notifier_block hyp_init_cpu_nb = {
.notifier_call = hyp_init_cpu_notify,
};
+#ifdef CONFIG_CPU_PM
+static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd,
+ void *v)
+{
+ if (cmd == CPU_PM_EXIT) {
+ cpu_init_hyp_mode(NULL);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hyp_init_cpu_pm_nb = {
+ .notifier_call = hyp_init_cpu_pm_notifier,
+};
+
+static void __init hyp_cpu_pm_init(void)
+{
+ cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
+}
+#else
+static inline void hyp_cpu_pm_init(void)
+{
+}
+#endif
+
/**
* Inits Hyp-mode on all online CPUs
*/
@@ -995,6 +1023,8 @@ int kvm_arch_init(void *opaque)
goto out_err;
}
+ hyp_cpu_pm_init();
+
kvm_coproc_table_init();
return 0;
out_err:
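Rationale for hooking CPU_PM_EXIT (a sketch of the reasoning, not part of the patch):

	/*
	 * A deep idle state may power the core down, losing the banked
	 * Hyp-mode state (HVBAR and friends) that cpu_init_hyp_mode()
	 * programmed at boot. CPU_PM_EXIT runs on the resume path, so
	 * re-running the init there restores Hyp mode before a guest
	 * can be entered on that core again.
	 */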
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 8eea97be1ed5..db9cf692d4dd 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
#define access_pmintenclr pm_fake
/* Architected CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
*/
static const struct coproc_reg cp15_regs[] = {
/* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
NULL, reset_unknown, c0_CSSELR },
/* TTBR0/TTBR1: swapped by interrupt.S. */
- { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
- { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+ { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+ { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
/* TTBCR: swapped by interrupt.S. */
{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -180,6 +184,10 @@ static const struct coproc_reg cp15_regs[] = {
NULL, reset_unknown, c6_DFAR },
{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
NULL, reset_unknown, c6_IFAR },
+
+ /* PAR swapped by interrupt.S */
+ { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+
/*
* DC{C,I,CI}SW operations:
*/
@@ -395,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
| KVM_REG_ARM_OPC1_MASK))
return false;
params->is_64bit = true;
- params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+ /* CRm to CRn: see cp15_to_index for details */
+ params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
>> KVM_REG_ARM_CRM_SHIFT);
params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
>> KVM_REG_ARM_OPC1_SHIFT);
params->Op2 = 0;
- params->CRn = 0;
+ params->CRm = 0;
return true;
default:
return false;
@@ -894,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
if (reg->is_64) {
val |= KVM_REG_SIZE_U64;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
- val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+ /*
+ * CRn always denotes the primary coproc. reg. nr. for the
+ * in-kernel representation, but the user space API uses the
+ * CRm for the encoding, because it is modelled after the
+ * MRRC/MCRR instructions: see the ARM ARM rev. c page
+ * B3-1445
+ */
+ val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
} else {
val |= KVM_REG_SIZE_U32;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
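A worked example of the 64-bit encoding, using only the masks visible in this patch (remaining base bits elided):

	/*
	 * TTBR0 is { CRm64(2), Op1(0) }, i.e. CRn = 2 in the kernel
	 * table. cp15_to_index() places that CRn in the *CRm* field of
	 * the user space id:
	 *
	 *   id = ... | KVM_REG_SIZE_U64
	 *            | (0 << KVM_REG_ARM_OPC1_SHIFT)
	 *            | (2 << KVM_REG_ARM_CRM_SHIFT);
	 *
	 * index_to_params() undoes the swap, copying the CRm field back
	 * into params->CRn so the table lookup still sorts on CRn.
	 */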
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index b7301d3e4799..0461d5c8d3de 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
return -1;
if (i1->CRn != i2->CRn)
return i1->CRn - i2->CRn;
+ if (i1->is_64 != i2->is_64)
+ return i2->is_64 - i1->is_64;
if (i1->CRm != i2->CRm)
return i1->CRm - i2->CRm;
if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
#define CRn(_x) .CRn = _x
#define CRm(_x) .CRm = _x
+#define CRm64(_x) .CRn = _x, .CRm = 0
#define Op1(_x) .Op1 = _x
#define Op2(_x) .Op2 = _x
#define is64 .is_64 = true
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index 685063a6d0cf..cf93472b9dd6 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
/*
* A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
*/
static const struct coproc_reg a15_regs[] = {
/* MPIDR: we use VMPIDR for guest access. */
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index f7793df62f58..16cd4ba5d7fd 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -49,6 +49,7 @@ __kvm_hyp_code_start:
ENTRY(__kvm_tlb_flush_vmid_ipa)
push {r2, r3}
+ dsb ishst
add r0, r0, #KVM_VTTBR
ldrd r2, r3, [r0]
mcrr p15, 6, r2, r3, c2 @ Write VTTBR
@@ -291,6 +292,7 @@ THUMB( orr r2, r2, #PSR_T_BIT )
ldr r2, =BSYM(panic)
msr ELR_hyp, r2
ldr r0, =\panic_str
+ clrex @ Clear exclusive monitor
eret
.endm
@@ -414,6 +416,10 @@ guest_trap:
mrcne p15, 4, r2, c6, c0, 4 @ HPFAR
bne 3f
+ /* Preserve PAR */
+ mrrc p15, 0, r0, r1, c7 @ PAR
+ push {r0, r1}
+
/* Resolve IPA using the xFAR */
mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR
isb
@@ -424,13 +430,20 @@ guest_trap:
lsl r2, r2, #4
orr r2, r2, r1, lsl #24
+ /* Restore PAR */
+ pop {r0, r1}
+ mcrr p15, 0, r0, r1, c7 @ PAR
+
3: load_vcpu @ Load VCPU pointer to r0
str r2, [r0, #VCPU_HPFAR]
1: mov r1, #ARM_EXCEPTION_HVC
b __kvm_vcpu_return
-4: pop {r0, r1, r2} @ Failed translation, return to guest
+4: pop {r0, r1} @ Failed translation, return to guest
+ mcrr p15, 0, r0, r1, c7 @ PAR
+ clrex
+ pop {r0, r1, r2}
eret
/*
@@ -456,6 +469,7 @@ switch_to_guest_vfp:
pop {r3-r7}
pop {r0-r2}
+ clrex
eret
#endif
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 3c8f2f0b4c5e..6f18695a09cb 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -302,11 +302,14 @@ vcpu .req r0 @ vcpu pointer always in r0
.endif
mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL
+ mrrc p15, 0, r4, r5, c7 @ PAR
.if \store_to_vcpu == 0
- push {r2}
+ push {r2,r4-r5}
.else
str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+ add r12, vcpu, #CP15_OFFSET(c7_PAR)
+ strd r4, r5, [r12]
.endif
.endm
@@ -319,12 +322,15 @@ vcpu .req r0 @ vcpu pointer always in r0
*/
.macro write_cp15_state read_from_vcpu
.if \read_from_vcpu == 0
- pop {r2}
+ pop {r2,r4-r5}
.else
ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+ add r12, vcpu, #CP15_OFFSET(c7_PAR)
+ ldrd r4, r5, [r12]
.endif
mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL
+ mcrr p15, 0, r4, r5, c7 @ PAR
.if \read_from_vcpu == 0
pop {r2-r12}
@@ -497,6 +503,10 @@ vcpu .req r0 @ vcpu pointer always in r0
add r5, vcpu, r4
strd r2, r3, [r5]
+ @ Ensure host CNTVCT == CNTPCT
+ mov r2, #0
+ mcrr p15, 4, r2, r2, c14 @ CNTVOFF
+
1:
#endif
@ Allow physical timer/counter access for the host
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 84ba67b982c0..e04613906f1b 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -313,6 +313,17 @@ out:
return err;
}
+static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
+{
+ if (!is_vmalloc_addr(kaddr)) {
+ BUG_ON(!virt_addr_valid(kaddr));
+ return __pa(kaddr);
+ } else {
+ return page_to_phys(vmalloc_to_page(kaddr)) +
+ offset_in_page(kaddr);
+ }
+}
+
/**
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
* @from: The virtual kernel start address of the range
@@ -324,16 +335,27 @@ out:
*/
int create_hyp_mappings(void *from, void *to)
{
- unsigned long phys_addr = virt_to_phys(from);
+ phys_addr_t phys_addr;
+ unsigned long virt_addr;
unsigned long start = KERN_TO_HYP((unsigned long)from);
unsigned long end = KERN_TO_HYP((unsigned long)to);
- /* Check for a valid kernel memory mapping */
- if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
- return -EINVAL;
+ start = start & PAGE_MASK;
+ end = PAGE_ALIGN(end);
- return __create_hyp_mappings(hyp_pgd, start, end,
- __phys_to_pfn(phys_addr), PAGE_HYP);
+ for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
+ int err;
+
+ phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
+ err = __create_hyp_mappings(hyp_pgd, virt_addr,
+ virt_addr + PAGE_SIZE,
+ __phys_to_pfn(phys_addr),
+ PAGE_HYP);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
/**
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 788562dccb43..c336efdf8f13 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-obj-y := irq.o gpio.o setup.o
+obj-y := irq.o gpio.o setup.o sysirq_mask.o
obj-m :=
obj-n :=
obj- :=
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index a8ce24538da6..cdb1fb695edf 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -351,6 +351,8 @@ static void __init at91sam9260_initialize(void)
at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1)
| (1 << AT91SAM9260_ID_IRQ2);
+ at91_sysirq_mask_rtt(AT91SAM9260_BASE_RTT);
+
/* Register GPIO subsystem */
at91_gpio_init(at91sam9260_gpio, 3);
}
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 25efb5ac30f1..7c9d2ea5bb88 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -293,6 +293,8 @@ static void __init at91sam9261_initialize(void)
at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1)
| (1 << AT91SAM9261_ID_IRQ2);
+ at91_sysirq_mask_rtt(AT91SAM9261_BASE_RTT);
+
/* Register GPIO subsystem */
at91_gpio_init(at91sam9261_gpio, 3);
}
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index f44ffd2105a7..c6b2f477c71f 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -329,6 +329,9 @@ static void __init at91sam9263_initialize(void)
arm_pm_restart = at91sam9_alt_restart;
at91_extern_irq = (1 << AT91SAM9263_ID_IRQ0) | (1 << AT91SAM9263_ID_IRQ1);
+ at91_sysirq_mask_rtt(AT91SAM9263_BASE_RTT0);
+ at91_sysirq_mask_rtt(AT91SAM9263_BASE_RTT1);
+
/* Register GPIO subsystem */
at91_gpio_init(at91sam9263_gpio, 5);
}
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 8b7fce067652..e381fa125011 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -376,6 +376,9 @@ static void __init at91sam9g45_initialize(void)
arm_pm_restart = at91sam9g45_restart;
at91_extern_irq = (1 << AT91SAM9G45_ID_IRQ0);
+ at91_sysirq_mask_rtc(AT91SAM9G45_BASE_RTC);
+ at91_sysirq_mask_rtt(AT91SAM9G45_BASE_RTT);
+
/* Register GPIO subsystem */
at91_gpio_init(at91sam9g45_gpio, 5);
}
diff --git a/arch/arm/mach-at91/at91sam9n12.c b/arch/arm/mach-at91/at91sam9n12.c
index c7d670d11802..4d6001c355f5 100644
--- a/arch/arm/mach-at91/at91sam9n12.c
+++ b/arch/arm/mach-at91/at91sam9n12.c
@@ -223,7 +223,13 @@ static void __init at91sam9n12_map_io(void)
at91_init_sram(0, AT91SAM9N12_SRAM_BASE, AT91SAM9N12_SRAM_SIZE);
}
+static void __init at91sam9n12_initialize(void)
+{
+ at91_sysirq_mask_rtc(AT91SAM9N12_BASE_RTC);
+}
+
AT91_SOC_START(at91sam9n12)
.map_io = at91sam9n12_map_io,
.register_clocks = at91sam9n12_register_clocks,
+ .init = at91sam9n12_initialize,
AT91_SOC_END
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index f77fae5591bc..5615d28a5b96 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -295,6 +295,9 @@ static void __init at91sam9rl_initialize(void)
arm_pm_restart = at91sam9_alt_restart;
at91_extern_irq = (1 << AT91SAM9RL_ID_IRQ0);
+ at91_sysirq_mask_rtc(AT91SAM9RL_BASE_RTC);
+ at91_sysirq_mask_rtt(AT91SAM9RL_BASE_RTT);
+
/* Register GPIO subsystem */
at91_gpio_init(at91sam9rl_gpio, 4);
}
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index e631fec040ce..7b4f848312eb 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -318,6 +318,11 @@ static void __init at91sam9x5_map_io(void)
at91_init_sram(0, AT91SAM9X5_SRAM_BASE, AT91SAM9X5_SRAM_SIZE);
}
+static void __init at91sam9x5_initialize(void)
+{
+ at91_sysirq_mask_rtc(AT91SAM9X5_BASE_RTC);
+}
+
/* --------------------------------------------------------------------
* Interrupt initialization
* -------------------------------------------------------------------- */
@@ -325,4 +330,5 @@ static void __init at91sam9x5_map_io(void)
AT91_SOC_START(at91sam9x5)
.map_io = at91sam9x5_map_io,
.register_clocks = at91sam9x5_register_clocks,
+ .init = at91sam9x5_initialize,
AT91_SOC_END
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 78ab06548658..d949ab4e26fa 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -33,6 +33,8 @@ extern int __init at91_aic_of_init(struct device_node *node,
struct device_node *parent);
extern int __init at91_aic5_of_init(struct device_node *node,
struct device_node *parent);
+extern void __init at91_sysirq_mask_rtc(u32 rtc_base);
+extern void __init at91_sysirq_mask_rtt(u32 rtt_base);
/* Timer */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9n12.h b/arch/arm/mach-at91/include/mach/at91sam9n12.h
index d374b87c0459..0151bcf6163c 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9n12.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9n12.h
@@ -49,6 +49,11 @@
#define AT91SAM9N12_BASE_USART3 0xf8028000
/*
+ * System Peripherals
+ */
+#define AT91SAM9N12_BASE_RTC 0xfffffeb0
+
+/*
* Internal Memory.
*/
#define AT91SAM9N12_SRAM_BASE 0x00300000 /* Internal SRAM base address */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9x5.h b/arch/arm/mach-at91/include/mach/at91sam9x5.h
index c75ee19b58d3..2fc76c49e97c 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9x5.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9x5.h
@@ -55,6 +55,11 @@
#define AT91SAM9X5_BASE_USART2 0xf8024000
/*
+ * System Peripherals
+ */
+#define AT91SAM9X5_BASE_RTC 0xfffffeb0
+
+/*
* Internal Memory.
*/
#define AT91SAM9X5_SRAM_BASE 0x00300000 /* Internal SRAM base address */
diff --git a/arch/arm/mach-at91/include/mach/sama5d3.h b/arch/arm/mach-at91/include/mach/sama5d3.h
index 6dc81ee38048..3abbc4286875 100644
--- a/arch/arm/mach-at91/include/mach/sama5d3.h
+++ b/arch/arm/mach-at91/include/mach/sama5d3.h
@@ -65,6 +65,11 @@
#define SAMA5D3_ID_IRQ0 47 /* Advanced Interrupt Controller (IRQ0) */
/*
+ * System Peripherals
+ */
+#define SAMA5D3_BASE_RTC 0xfffffeb0
+
+/*
* Internal Memory
*/
#define SAMA5D3_SRAM_BASE 0x00300000 /* Internal SRAM base address */
diff --git a/arch/arm/mach-at91/sam9_smc.c b/arch/arm/mach-at91/sam9_smc.c
index 99a0a1d2b7dc..b26156bf15db 100644
--- a/arch/arm/mach-at91/sam9_smc.c
+++ b/arch/arm/mach-at91/sam9_smc.c
@@ -101,7 +101,7 @@ static void sam9_smc_cs_read(void __iomem *base,
/* Pulse register */
val = __raw_readl(base + AT91_SMC_PULSE);
- config->nwe_setup = val & AT91_SMC_NWEPULSE;
+ config->nwe_pulse = val & AT91_SMC_NWEPULSE;
config->ncs_write_pulse = (val & AT91_SMC_NCS_WRPULSE) >> 8;
config->nrd_pulse = (val & AT91_SMC_NRDPULSE) >> 16;
config->ncs_read_pulse = (val & AT91_SMC_NCS_RDPULSE) >> 24;
diff --git a/arch/arm/mach-at91/sama5d3.c b/arch/arm/mach-at91/sama5d3.c
index 401279715ab1..a28873fe3049 100644
--- a/arch/arm/mach-at91/sama5d3.c
+++ b/arch/arm/mach-at91/sama5d3.c
@@ -95,19 +95,19 @@ static struct clk twi0_clk = {
.name = "twi0_clk",
.pid = SAMA5D3_ID_TWI0,
.type = CLK_TYPE_PERIPHERAL,
- .div = AT91_PMC_PCR_DIV2,
+ .div = AT91_PMC_PCR_DIV8,
};
static struct clk twi1_clk = {
.name = "twi1_clk",
.pid = SAMA5D3_ID_TWI1,
.type = CLK_TYPE_PERIPHERAL,
- .div = AT91_PMC_PCR_DIV2,
+ .div = AT91_PMC_PCR_DIV8,
};
static struct clk twi2_clk = {
.name = "twi2_clk",
.pid = SAMA5D3_ID_TWI2,
.type = CLK_TYPE_PERIPHERAL,
- .div = AT91_PMC_PCR_DIV2,
+ .div = AT91_PMC_PCR_DIV8,
};
static struct clk mmc0_clk = {
.name = "mci0_clk",
@@ -371,7 +371,13 @@ static void __init sama5d3_map_io(void)
at91_init_sram(0, SAMA5D3_SRAM_BASE, SAMA5D3_SRAM_SIZE);
}
+static void __init sama5d3_initialize(void)
+{
+ at91_sysirq_mask_rtc(SAMA5D3_BASE_RTC);
+}
+
AT91_SOC_START(sama5d3)
.map_io = sama5d3_map_io,
.register_clocks = sama5d3_register_clocks,
+ .init = sama5d3_initialize,
AT91_SOC_END
diff --git a/arch/arm/mach-at91/sysirq_mask.c b/arch/arm/mach-at91/sysirq_mask.c
new file mode 100644
index 000000000000..f8bc3511a8c8
--- /dev/null
+++ b/arch/arm/mach-at91/sysirq_mask.c
@@ -0,0 +1,75 @@
+/*
+ * sysirq_mask.c - System-interrupt masking
+ *
+ * Copyright (C) 2013 Johan Hovold <jhovold@gmail.com>
+ *
+ * Functions to disable system interrupts from backup-powered peripherals.
+ *
+ * The RTC and RTT peripherals are generally powered by backup power (VDDBU)
+ * and are not reset on wake-up, user, watchdog or software reset. This means
+ * that their interrupts may be enabled during early boot (e.g. after a user
+ * reset).
+ *
+ * As the RTC and RTT share the system-interrupt line with the PIT, an
+ * interrupt occurring before a handler has been installed would lead to the
+ * system interrupt being disabled and prevent the system from booting.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <mach/at91_rtt.h>
+
+#include "generic.h"
+
+#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
+#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
+#define AT91_RTC_IRQ_MASK 0x1f /* Available IRQs mask */
+
+void __init at91_sysirq_mask_rtc(u32 rtc_base)
+{
+ void __iomem *base;
+
+ base = ioremap(rtc_base, 64);
+ if (!base)
+ return;
+
+ /*
+ * sam9x5 SoCs have the following errata:
+ * "RTC: Interrupt Mask Register cannot be used
+ * Interrupt Mask Register read always returns 0."
+ *
+ * Hence we're not relying on IMR values to disable
+ * interrupts.
+ */
+ writel_relaxed(AT91_RTC_IRQ_MASK, base + AT91_RTC_IDR);
+ (void)readl_relaxed(base + AT91_RTC_IMR); /* flush */
+
+ iounmap(base);
+}
+
+void __init at91_sysirq_mask_rtt(u32 rtt_base)
+{
+ void __iomem *base;
+ void __iomem *reg;
+ u32 mode;
+
+ base = ioremap(rtt_base, 16);
+ if (!base)
+ return;
+
+ reg = base + AT91_RTT_MR;
+
+ mode = readl_relaxed(reg);
+ if (mode & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)) {
+ pr_info("AT91: Disabling rtt irq\n");
+ mode &= ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
+ writel_relaxed(mode, reg);
+ (void)readl_relaxed(reg); /* flush */
+ }
+
+ iounmap(base);
+}
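The dummy read after each disabling write is a posted-write flush: it guarantees the disable has reached the device before the mapping is torn down. IMR is read even though its value is unreliable on sam9x5, since any read back from the block will do. The pattern, sketched:

	writel_relaxed(AT91_RTC_IRQ_MASK, base + AT91_RTC_IDR);
	(void)readl_relaxed(base + AT91_RTC_IMR);	/* flush the write */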
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index dff4ddc5ef81..139e42da25f0 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW_SYNDROME,
+ .ecc_bits = 4,
.bbt_options = NAND_BBT_USE_FLASH,
};
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a33686a6fbb2..fa4bfaf952d8 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
.parts = davinci_evm_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.bbt_options = NAND_BBT_USE_FLASH,
.timing = &davinci_evm_nandflash_timing,
};
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index fbb8e5ab1dc1..0c005e876cac 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.options = 0,
};
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 2bc112adf565..808233b60e3d 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
.parts = davinci_ntosd2_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.bbt_options = NAND_BBT_USE_FLASH,
};
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index b13cc74114db..8a53f346cdb3 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -116,7 +116,7 @@ static void __init ebsa110_map_io(void)
iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc));
}
-static void __iomem *ebsa110_ioremap_caller(unsigned long cookie, size_t size,
+static void __iomem *ebsa110_ioremap_caller(phys_addr_t cookie, size_t size,
unsigned int flags, void *caller)
{
return (void __iomem *)cookie;
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 753b94f3fca7..d88234e14f96 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -14,6 +14,7 @@
#include <linux/memblock.h>
#include <linux/io.h>
#include <linux/clocksource.h>
+#include <linux/dma-mapping.h>
#include <asm/mach/arch.h>
#include <mach/regs-pmu.h>
@@ -23,11 +24,31 @@
#include "common.h"
+static u64 dma_mask64 = DMA_BIT_MASK(64);
+
static void __init exynos5_dt_map_io(void)
{
exynos_init_io(NULL, 0);
}
+static int exynos5250_platform_notifier(struct notifier_block *nb,
+ unsigned long event, void *__dev)
+{
+ struct device *dev = __dev;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ dev->dma_mask = &dma_mask64;
+ dev->coherent_dma_mask = DMA_BIT_MASK(64);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos5250_platform_nb = {
+ .notifier_call = exynos5250_platform_notifier,
+};
+
static void __init exynos5_dt_machine_init(void)
{
struct device_node *i2c_np;
@@ -52,6 +73,11 @@ static void __init exynos5_dt_machine_init(void)
}
}
+ if (config_enabled(CONFIG_ARM_LPAE) &&
+ of_machine_is_compatible("samsung,exynos5250"))
+ bus_register_notifier(&platform_bus_type,
+ &exynos5250_platform_nb);
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index a42b369bc439..95d0b6e6438b 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <video/vga.h>
#include <asm/pgtable.h>
#include <asm/page.h>
@@ -196,6 +197,8 @@ void __init footbridge_map_io(void)
iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
pci_map_io_early(__phys_to_pfn(DC21285_PCI_IO));
}
+
+ vga_base = PCIMEM_BASE;
}
void footbridge_restart(char mode, const char *cmd)
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 9ee78f7b4990..782f6c71fa0a 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
void __init footbridge_timer_init(void)
{
struct clock_event_device *ce = &ckevt_dc21285;
+ unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
- clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
+ clocksource_register_hz(&cksrc_dc21285, rate);
setup_irq(ce->irq, &footbridge_timer_irq);
ce->cpumask = cpumask_of(smp_processor_id());
- clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
+ clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
}
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index a7cd2cf5e08d..7c2fdae9a38b 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -18,7 +18,6 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/spinlock.h>
-#include <video/vga.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
@@ -276,8 +275,6 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys)
sys->mem_offset = DC21285_PCI_MEM;
- pci_ioremap_io(0, DC21285_PCI_IO);
-
pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset);
pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);
@@ -293,7 +290,6 @@ void __init dc21285_preinit(void)
int cfn_mode;
pcibios_min_mem = 0x81000000;
- vga_base = PCIMEM_BASE;
mem_size = (unsigned int)high_memory - PAGE_OFFSET;
for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1)
diff --git a/arch/arm/mach-footbridge/ebsa285.c b/arch/arm/mach-footbridge/ebsa285.c
index b08243500e2e..1a7235fb52ac 100644
--- a/arch/arm/mach-footbridge/ebsa285.c
+++ b/arch/arm/mach-footbridge/ebsa285.c
@@ -30,21 +30,24 @@ static const struct {
const char *name;
const char *trigger;
} ebsa285_leds[] = {
- { "ebsa285:amber", "heartbeat", },
- { "ebsa285:green", "cpu0", },
+ { "ebsa285:amber", "cpu0", },
+ { "ebsa285:green", "heartbeat", },
{ "ebsa285:red",},
};
+static unsigned char hw_led_state;
+
static void ebsa285_led_set(struct led_classdev *cdev,
enum led_brightness b)
{
struct ebsa285_led *led = container_of(cdev,
struct ebsa285_led, cdev);
- if (b != LED_OFF)
- *XBUS_LEDS |= led->mask;
+ if (b == LED_OFF)
+ hw_led_state |= led->mask;
else
- *XBUS_LEDS &= ~led->mask;
+ hw_led_state &= ~led->mask;
+ *XBUS_LEDS = hw_led_state;
}
static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
@@ -52,18 +55,19 @@ static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
struct ebsa285_led *led = container_of(cdev,
struct ebsa285_led, cdev);
- return (*XBUS_LEDS & led->mask) ? LED_FULL : LED_OFF;
+ return hw_led_state & led->mask ? LED_OFF : LED_FULL;
}
static int __init ebsa285_leds_init(void)
{
int i;
- if (machine_is_ebsa285())
+ if (!machine_is_ebsa285())
return -ENODEV;
- /* 3 LEDS All ON */
- *XBUS_LEDS |= XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
+ /* 3 LEDS all off */
+ hw_led_state = XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
+ *XBUS_LEDS = hw_led_state;
for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) {
struct ebsa285_led *led;
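Summary of the LED fixes above, as a sketch (the shadow variable exists because, by all appearances, the XBUS LED register cannot be reliably read back; treat that as an assumption):

	/* active-low register: set bit = LED off, clear bit = LED on */
	hw_led_state |= led->mask;	/* b == LED_OFF */
	hw_led_state &= ~led->mask;	/* any other brightness */
	*XBUS_LEDS = hw_led_state;	/* single write from the shadow */

The init-path polarity was also inverted (the probe previously bailed out on the EBSA285 and ran everywhere else), and the amber/green default triggers were swapped.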
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index cd9fcb1cd7ab..b8466fb00f55 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -2,6 +2,7 @@ config ARCH_HIGHBANK
bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
select ARCH_HAS_CPUFREQ
select ARCH_HAS_OPP
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_AMBA
select ARM_GIC
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index e7df2dd43a40..c37d31e15a06 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -21,6 +21,7 @@
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
+#include <linux/pl320-ipc.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -65,13 +66,12 @@ void highbank_set_cpu_jump(int cpu, void *jump_addr)
HB_JUMP_TABLE_PHYS(cpu) + 15);
}
-#ifdef CONFIG_CACHE_L2X0
static void highbank_l2x0_disable(void)
{
+ outer_flush_all();
/* Disable PL310 L2 Cache controller */
highbank_smc1(0x102, 0x0);
}
-#endif
static void __init highbank_init_irq(void)
{
@@ -80,12 +80,13 @@ static void __init highbank_init_irq(void)
if (of_find_compatible_node(NULL, NULL, "arm,cortex-a9"))
highbank_scu_map_io();
-#ifdef CONFIG_CACHE_L2X0
/* Enable PL310 L2 Cache controller */
- highbank_smc1(0x102, 0x1);
- l2x0_of_init(0, ~0UL);
- outer_cache.disable = highbank_l2x0_disable;
-#endif
+ if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
+ of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) {
+ highbank_smc1(0x102, 0x1);
+ l2x0_of_init(0, ~0UL);
+ outer_cache.disable = highbank_l2x0_disable;
+ }
}
static void __init highbank_timer_init(void)
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 4e3148ce852d..0b9e437719bd 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -424,7 +424,7 @@ int __init mx6q_clocks_init(void)
clk[asrc_podf] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3);
clk[spdif_pred] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
clk[spdif_podf] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
- clk[can_root] = imx_clk_divider("can_root", "pll3_usb_otg", base + 0x20, 2, 6);
+ clk[can_root] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
clk[ecspi_root] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
clk[gpu2d_core_podf] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
clk[gpu3d_core_podf] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3);
diff --git a/arch/arm/mach-imx/devices/platform-ipu-core.c b/arch/arm/mach-imx/devices/platform-ipu-core.c
index fc4dd7cedc11..6bd7c3f37ac0 100644
--- a/arch/arm/mach-imx/devices/platform-ipu-core.c
+++ b/arch/arm/mach-imx/devices/platform-ipu-core.c
@@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(
pdev = platform_device_alloc("mx3-camera", 0);
if (!pdev)
- goto err;
+ return ERR_PTR(-ENOMEM);
pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
if (!pdev->dev.dma_mask)
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index e0e69a682174..eed32ca0b8ab 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -65,7 +65,7 @@ static void imx3_idle(void)
: "=r" (reg));
}
-static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size,
+static void __iomem *imx3_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller)
{
if (mtype == MT_DEVICE) {
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 8c60fcb08a98..2f4c92486cfa 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -199,7 +199,8 @@ static struct mmci_platform_data mmc_data = {
static void cp_clcd_enable(struct clcd_fb *fb)
{
struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2;
+ u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2
+ | CM_CTRL_LCDEN0 | CM_CTRL_LCDEN1;
if (var->bits_per_pixel <= 8 ||
(var->bits_per_pixel == 16 && var->green.length == 5))
diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c
index 183dc8b5511b..faaf7d4482c5 100644
--- a/arch/arm/mach-iop13xx/io.c
+++ b/arch/arm/mach-iop13xx/io.c
@@ -23,7 +23,7 @@
#include "pci.h"
-static void __iomem *__iop13xx_ioremap_caller(unsigned long cookie,
+static void __iomem *__iop13xx_ioremap_caller(phys_addr_t cookie,
size_t size, unsigned int mtype, void *caller)
{
void __iomem * retval;
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 73a2d905af8a..72de05f09cb8 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -1,9 +1,5 @@
if ARCH_IXP4XX
-config ARCH_SUPPORTS_BIG_ENDIAN
- bool
- default y
-
menu "Intel IXP4xx Implementation Options"
comment "IXP4xx Platforms"
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 6600cff6bd92..d7223b3b81f3 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -559,7 +559,7 @@ void ixp4xx_restart(char mode, const char *cmd)
* fallback to the default.
*/
-static void __iomem *ixp4xx_ioremap_caller(unsigned long addr, size_t size,
+static void __iomem *ixp4xx_ioremap_caller(phys_addr_t addr, size_t size,
unsigned int mtype, void *caller)
{
if (!is_pci_memory(addr))
diff --git a/arch/arm/mach-msm/common.h b/arch/arm/mach-msm/common.h
index ce8215a269e5..421cf7751a80 100644
--- a/arch/arm/mach-msm/common.h
+++ b/arch/arm/mach-msm/common.h
@@ -23,7 +23,7 @@ extern void msm_map_msm8x60_io(void);
extern void msm_map_msm8960_io(void);
extern void msm_map_qsd8x50_io(void);
-extern void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size,
+extern void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller);
extern struct smp_operations msm_smp_ops;
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 123ef9cbce1b..fd65b6d42cde 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -172,7 +172,7 @@ void __init msm_map_msm7x30_io(void)
}
#endif /* CONFIG_ARCH_MSM7X30 */
-void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size,
+void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller)
{
if (mtype == MT_DEVICE) {
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 80a8bcacd9d5..317cdb800099 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -1,5 +1,6 @@
config ARCH_MVEBU
bool "Marvell SOCs with Device Tree support" if ARCH_MULTI_V7
+ select ARCH_SUPPORTS_BIG_ENDIAN
select CLKSRC_MMIO
select COMMON_CLK
select GENERIC_CLOCKEVENTS
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 5476669ba905..ee7598fe75db 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -20,6 +20,8 @@
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
+#include <asm/assembler.h>
+
.text
/*
* r0: Coherency fabric base register address
@@ -29,6 +31,7 @@ ENTRY(ll_set_cpu_coherent)
/* Create bit by cpu index */
mov r3, #(1 << 24)
lsl r1, r3, r1
+ARM_BE8(rev r1, r1)
/* Add CPU to SMP group - Atomic */
add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
index a06e0ede8c08..458ed3fb2626 100644
--- a/arch/arm/mach-mvebu/headsmp.S
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -21,6 +21,8 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
+
/*
* At this stage the secondary CPUs don't have access yet to the MMU, so
* we have to provide physical addresses
@@ -35,6 +37,7 @@
* startup
*/
ENTRY(armada_xp_secondary_startup)
+ ARM_BE8(setend be ) @ go BE8 if entered LE
/* Read CPU id */
mrc p15, 0, r1, c0, c0, 5
diff --git a/arch/arm/mach-mxs/pm.h b/arch/arm/mach-mxs/pm.h
index f57e7cdece2e..09d77b00a96b 100644
--- a/arch/arm/mach-mxs/pm.h
+++ b/arch/arm/mach-mxs/pm.h
@@ -9,6 +9,10 @@
#ifndef __ARCH_MXS_PM_H
#define __ARCH_MXS_PM_H
+#ifdef CONFIG_PM
void mxs_pm_init(void);
+#else
+#define mxs_pm_init NULL
+#endif
#endif
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 0dac3d239e32..d712c5172237 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -379,7 +379,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
/* usb1 has a Mini-AB port and external isp1301 transceiver */
.otg = 2,
-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
.hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
/* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 816ecd13f81e..bfed4f928663 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -366,7 +366,7 @@ static struct omap_usb_config h3_usb_config __initdata = {
/* usb1 has a Mini-AB port and external isp1301 transceiver */
.otg = 2,
-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
.hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
/* NONSTANDARD CABLE NEEDED (B-to-Mini-B) */
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index bd5f02e9c354..c49ce83cc1eb 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -312,7 +312,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
/* usb1 has a Mini-AB port and external isp1301 transceiver */
.otg = 2,
-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
.hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
/* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index a7ce69286688..006fbb5f9654 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -280,7 +280,7 @@ static struct omap_usb_config osk_usb_config __initdata = {
* be used, with a NONSTANDARD gender-bending cable/dongle, as
* a peripheral.
*/
-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
.register_dev = 1,
.hmc_mode = 0,
#else
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 45cd26430d1f..da6d407c21cd 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -418,7 +418,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
.clkdm_name = "dpll4_clkdm",
};
-DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
+DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
+ dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
static struct clk dpll4_m5x2_ck_3630 = {
.name = "dpll4_m5x2_ck",
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index 2adb2683f074..6124da1a07d4 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -323,7 +323,8 @@ void omap3_save_scratchpad_contents(void)
scratchpad_contents.public_restore_ptr =
virt_to_phys(omap3_restore_3630);
else if (omap_rev() != OMAP3430_REV_ES3_0 &&
- omap_rev() != OMAP3430_REV_ES3_1)
+ omap_rev() != OMAP3430_REV_ES3_1 &&
+ omap_rev() != OMAP3430_REV_ES3_1_2)
scratchpad_contents.public_restore_ptr =
virt_to_phys(omap3_restore);
else
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index c443f2e97e10..f98410a257e3 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -14,6 +14,7 @@
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
+#include <linux/clockchips.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
@@ -80,6 +81,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
int index)
{
struct idle_statedata *cx = state_ptr + index;
+ int cpu_id = smp_processor_id();
/*
* CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -104,6 +106,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
}
}
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+
/*
* Call idle CPU PM enter notifier chain so that
* VFP and per CPU interrupt context is saved.
@@ -147,6 +151,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
(cx->mpu_logic_state == PWRDM_POWER_OFF))
cpu_cluster_pm_exit();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
fail:
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
cpu_done[dev->cpu] = false;
@@ -154,6 +160,16 @@ fail:
return index;
}
+/*
+ * For each cpu, set up the broadcast timer because the local timers
+ * stop in the states deeper than C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+ int cpu = smp_processor_id();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
static struct cpuidle_driver omap4_idle_driver = {
.name = "omap4_idle",
.owner = THIS_MODULE,
@@ -171,8 +187,7 @@ static struct cpuidle_driver omap4_idle_driver = {
/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
.exit_latency = 328 + 440,
.target_residency = 960,
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
- CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
.enter = omap_enter_idle_coupled,
.name = "C2",
.desc = "CPUx OFF, MPUSS CSWR",
@@ -181,8 +196,7 @@ static struct cpuidle_driver omap4_idle_driver = {
/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
.exit_latency = 460 + 518,
.target_residency = 1100,
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
- CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
.enter = omap_enter_idle_coupled,
.name = "C3",
.desc = "CPUx OFF, MPUSS OSWR",
@@ -213,5 +227,8 @@ int __init omap4_idle_init(void)
if (!cpu_clkdm[0] || !cpu_clkdm[1])
return -ENODEV;
+ /* Configure the broadcast timer on each cpu */
+ on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
+
return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
}
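These cpuidle hunks move the broadcast-timer handoff out of the core (CPUIDLE_FLAG_TIMER_STOP is dropped) and into the driver: each CPU registers with the broadcast device once at init via CLOCK_EVT_NOTIFY_BROADCAST_ON, then hands its timer duty over and back around each coupled-idle entry. A hedged sketch of the calling pattern; the notifier reasons are the real clockchips constants of this kernel era, while the wrapper function is purely illustrative:

#include <linux/clockchips.h>
#include <linux/smp.h>

/* Illustrative idle wrapper, not the driver's actual entry hook. */
static void example_enter_deep_idle(void)
{
	int cpu = smp_processor_id();

	/* Local timer is about to stop: broadcast device takes over. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	/* ... program the power domain and execute wfi here ... */

	/* Local timer runs again: reclaim timekeeping from broadcast. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}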
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 6c4da1254f53..55bcb77c623e 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1335,7 +1335,7 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_bool(np, "gpmc,time-para-granularity");
}
-#ifdef CONFIG_MTD_NAND
+#if IS_ENABLED(CONFIG_MTD_NAND)
static const char * const nand_ecc_opts[] = {
[OMAP_ECC_HAMMING_CODE_DEFAULT] = "sw",
@@ -1391,7 +1391,7 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
}
#endif
-#ifdef CONFIG_MTD_ONENAND
+#if IS_ENABLED(CONFIG_MTD_ONENAND)
static int gpmc_probe_onenand_child(struct platform_device *pdev,
struct device_node *child)
{
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 3926f370448f..6037a9a01ed5 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -222,6 +222,7 @@ void __init ti81xx_init_irq(void)
static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs *regs)
{
u32 irqnr;
+ int handled_irq = 0;
do {
irqnr = readl_relaxed(base_addr + 0x98);
@@ -233,7 +234,7 @@ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs
goto out;
irqnr = readl_relaxed(base_addr + 0xd8);
-#ifdef CONFIG_SOC_TI81XX
+#if IS_ENABLED(CONFIG_SOC_TI81XX) || IS_ENABLED(CONFIG_SOC_AM33XX)
if (irqnr)
goto out;
irqnr = readl_relaxed(base_addr + 0xf8);
@@ -249,8 +250,15 @@ out:
if (irqnr) {
irqnr = irq_find_mapping(domain, irqnr);
handle_IRQ(irqnr, regs);
+ handled_irq = 1;
}
} while (irqnr);
+
+ /* If an irq is masked or deasserted while active, we will
+ * keep ending up here with no irq handled. So remove it from
* the INTC with an ack. */
+ if (!handled_irq)
+ omap_ack_irq(NULL);
}
asmlinkage void __exception_irq_entry omap2_intc_handle_irq(struct pt_regs *regs)
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index f82cf878d6af..94c2f6d17dae 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -183,8 +183,10 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
m0_entry = mux->muxnames[0];
/* First check for full name in mode0.muxmode format */
- if (mode0_len && strncmp(muxname, m0_entry, mode0_len))
- continue;
+ if (mode0_len)
+ if (strncmp(muxname, m0_entry, mode0_len) ||
+ (strlen(m0_entry) != mode0_len))
+ continue;
/* Then check for muxmode only */
for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
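The old test above only compared the first mode0_len characters, so looking up a short signal name could silently match a longer table entry that merely shares the prefix; the added strlen() check demands an exact-length match. A standalone illustration of the two predicates, using made-up signal names:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *muxname = "uart1_rx";	/* name being looked up */
	const char *m0_entry = "uart1_rx_dup";	/* longer table entry */
	size_t mode0_len = strlen(muxname);

	/* Old predicate: prefix comparison only -- spurious match. */
	int old_match = strncmp(muxname, m0_entry, mode0_len) == 0;

	/* New predicate: prefix matches AND lengths are equal. */
	int new_match = old_match && strlen(m0_entry) == mode0_len;

	printf("old=%d new=%d\n", old_match, new_match); /* old=1 new=0 */
	return 0;
}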
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 13b27ffaf45e..ab99ab8fce8a 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void)
static void omap4_l2x0_disable(void)
{
+ outer_flush_all();
/* Disable PL310 L2 Cache controller */
omap_smc1(0x102, 0x0);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 7341eff63f56..62e40a9fffa9 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v)
}
/**
- * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v
+ * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v
* @oh: struct omap_hwmod *
* @v: pointer to register contents to modify
*
@@ -427,6 +427,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
}
/**
+ * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v
+ * @oh: struct omap_hwmod *
+ * @v: pointer to register contents to modify
+ *
+ * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon
+ * error or 0 upon success.
+ */
+static int _clear_softreset(struct omap_hwmod *oh, u32 *v)
+{
+ u32 softrst_mask;
+
+ if (!oh->class->sysc ||
+ !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
+ return -EINVAL;
+
+ if (!oh->class->sysc->sysc_fields) {
+ WARN(1,
+ "omap_hwmod: %s: sysc_fields absent for sysconfig class\n",
+ oh->name);
+ return -EINVAL;
+ }
+
+ softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
+
+ *v &= ~softrst_mask;
+
+ return 0;
+}
+
+/**
* _wait_softreset_complete - wait for an OCP softreset to complete
* @oh: struct omap_hwmod * to wait on
*
@@ -1909,6 +1939,12 @@ static int _ocp_softreset(struct omap_hwmod *oh)
ret = _set_softreset(oh, &v);
if (ret)
goto dis_opt_clks;
+
+ _write_sysconfig(v, oh);
+ ret = _clear_softreset(oh, &v);
+ if (ret)
+ goto dis_opt_clks;
+
_write_sysconfig(v, oh);
if (oh->class->sysc->srst_udelay)
@@ -2141,6 +2177,8 @@ static int _enable(struct omap_hwmod *oh)
oh->mux->pads_dynamic))) {
omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
_reconfigure_io_chain();
+ } else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+ _reconfigure_io_chain();
}
_add_initiator_dep(oh, mpu_oh);
@@ -2247,6 +2285,8 @@ static int _idle(struct omap_hwmod *oh)
if (oh->mux && oh->mux->pads_dynamic) {
omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
_reconfigure_io_chain();
+ } else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+ _reconfigure_io_chain();
}
oh->_state = _HWMOD_STATE_IDLE;
@@ -3148,6 +3188,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh)
goto error;
_write_sysconfig(v, oh);
+ ret = _clear_softreset(oh, &v);
+ if (ret)
+ goto error;
+ _write_sysconfig(v, oh);
+
error:
return ret;
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index d05fc7b54567..83735b72895d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
/* gpmc */
static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
- { .irq = 20 },
+ { .irq = 20 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
};
static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
- { .irq = 52 },
+ { .irq = 52 + OMAP_INTC_START, },
{ .irq = -1 }
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 31c7126eb3bb..8691c8cbe2c7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1930,7 +1930,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+ SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
@@ -1954,7 +1955,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = {
static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
.name = "usb_host_hs",
.class = &omap3xxx_usb_host_hs_hwmod_class,
- .clkdm_name = "l3_init_clkdm",
+ .clkdm_name = "usbhost_clkdm",
.mpu_irqs = omap3xxx_usb_host_hs_irqs,
.main_clk = "usbhost_48m_fck",
.prcm = {
@@ -2008,15 +2009,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
* hence HWMOD_SWSUP_MSTANDBY
*/
- /*
- * During system boot; If the hwmod framework resets the module
- * the module will have smart idle settings; which can lead to deadlock
- * (above Errata Id:i660); so, dont reset the module during boot;
- * Use HWMOD_INIT_NO_RESET.
- */
-
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
- HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
};
/*
@@ -2047,7 +2040,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = {
static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
.name = "usb_tll_hs",
.class = &omap3xxx_usb_tll_hs_hwmod_class,
- .clkdm_name = "l3_init_clkdm",
+ .clkdm_name = "core_l4_clkdm",
.mpu_irqs = omap3xxx_usb_tll_hs_irqs,
.main_clk = "usbtll_fck",
.prcm = {
@@ -2159,7 +2152,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
};
static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
- { .irq = 20 },
+ { .irq = 20 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -2993,7 +2986,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
- { .irq = 24 },
+ { .irq = 24 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -3035,7 +3028,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
- { .irq = 28 },
+ { .irq = 28 + OMAP_INTC_START, },
{ .irq = -1 }
};
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 7bdd22afce69..d4d0fce325c7 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -103,7 +103,7 @@ static inline void enable_omap3630_toggle_l2_on_restore(void) { }
#define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD (1 << 0)
-#if defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
extern u16 pm44xx_errata;
#define IS_PM44XX_ERRATUM(id) (pm44xx_errata & (id))
#else
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index 3fab583755d4..0616390b857f 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -13,6 +13,7 @@
#include <mach/regs-ost.h>
#include <mach/reset.h>
+#include <mach/smemc.h>
unsigned int reset_status;
EXPORT_SYMBOL(reset_status);
@@ -81,6 +82,12 @@ static void do_hw_reset(void)
writel_relaxed(OSSR_M3, OSSR);
/* ... in 100 ms */
writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3);
+ /*
+ * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71),
+ * so we put SDRAM into self-refresh to prevent that.
+ */
+ while (1)
+ writel_relaxed(MDREFR_SLFRSH, MDREFR);
}
void pxa_restart(char mode, const char *cmd)
@@ -104,4 +111,3 @@ void pxa_restart(char mode, const char *cmd)
break;
}
}
-
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 3d91d2e5bf3a..d91fcf403cee 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -424,57 +424,57 @@ static struct platform_device tosa_power_device = {
* Tosa Keyboard
*/
static const uint32_t tosakbd_keymap[] = {
- KEY(0, 2, KEY_W),
- KEY(0, 6, KEY_K),
- KEY(0, 7, KEY_BACKSPACE),
- KEY(0, 8, KEY_P),
- KEY(1, 1, KEY_Q),
- KEY(1, 2, KEY_E),
- KEY(1, 3, KEY_T),
- KEY(1, 4, KEY_Y),
- KEY(1, 6, KEY_O),
- KEY(1, 7, KEY_I),
- KEY(1, 8, KEY_COMMA),
- KEY(2, 1, KEY_A),
- KEY(2, 2, KEY_D),
- KEY(2, 3, KEY_G),
- KEY(2, 4, KEY_U),
- KEY(2, 6, KEY_L),
- KEY(2, 7, KEY_ENTER),
- KEY(2, 8, KEY_DOT),
- KEY(3, 1, KEY_Z),
- KEY(3, 2, KEY_C),
- KEY(3, 3, KEY_V),
- KEY(3, 4, KEY_J),
- KEY(3, 5, TOSA_KEY_ADDRESSBOOK),
- KEY(3, 6, TOSA_KEY_CANCEL),
- KEY(3, 7, TOSA_KEY_CENTER),
- KEY(3, 8, TOSA_KEY_OK),
- KEY(3, 9, KEY_LEFTSHIFT),
- KEY(4, 1, KEY_S),
- KEY(4, 2, KEY_R),
- KEY(4, 3, KEY_B),
- KEY(4, 4, KEY_N),
- KEY(4, 5, TOSA_KEY_CALENDAR),
- KEY(4, 6, TOSA_KEY_HOMEPAGE),
- KEY(4, 7, KEY_LEFTCTRL),
- KEY(4, 8, TOSA_KEY_LIGHT),
- KEY(4, 10, KEY_RIGHTSHIFT),
- KEY(5, 1, KEY_TAB),
- KEY(5, 2, KEY_SLASH),
- KEY(5, 3, KEY_H),
- KEY(5, 4, KEY_M),
- KEY(5, 5, TOSA_KEY_MENU),
- KEY(5, 7, KEY_UP),
- KEY(5, 11, TOSA_KEY_FN),
- KEY(6, 1, KEY_X),
- KEY(6, 2, KEY_F),
- KEY(6, 3, KEY_SPACE),
- KEY(6, 4, KEY_APOSTROPHE),
- KEY(6, 5, TOSA_KEY_MAIL),
- KEY(6, 6, KEY_LEFT),
- KEY(6, 7, KEY_DOWN),
- KEY(6, 8, KEY_RIGHT),
+ KEY(0, 1, KEY_W),
+ KEY(0, 5, KEY_K),
+ KEY(0, 6, KEY_BACKSPACE),
+ KEY(0, 7, KEY_P),
+ KEY(1, 0, KEY_Q),
+ KEY(1, 1, KEY_E),
+ KEY(1, 2, KEY_T),
+ KEY(1, 3, KEY_Y),
+ KEY(1, 5, KEY_O),
+ KEY(1, 6, KEY_I),
+ KEY(1, 7, KEY_COMMA),
+ KEY(2, 0, KEY_A),
+ KEY(2, 1, KEY_D),
+ KEY(2, 2, KEY_G),
+ KEY(2, 3, KEY_U),
+ KEY(2, 5, KEY_L),
+ KEY(2, 6, KEY_ENTER),
+ KEY(2, 7, KEY_DOT),
+ KEY(3, 0, KEY_Z),
+ KEY(3, 1, KEY_C),
+ KEY(3, 2, KEY_V),
+ KEY(3, 3, KEY_J),
+ KEY(3, 4, TOSA_KEY_ADDRESSBOOK),
+ KEY(3, 5, TOSA_KEY_CANCEL),
+ KEY(3, 6, TOSA_KEY_CENTER),
+ KEY(3, 7, TOSA_KEY_OK),
+ KEY(3, 8, KEY_LEFTSHIFT),
+ KEY(4, 0, KEY_S),
+ KEY(4, 1, KEY_R),
+ KEY(4, 2, KEY_B),
+ KEY(4, 3, KEY_N),
+ KEY(4, 4, TOSA_KEY_CALENDAR),
+ KEY(4, 5, TOSA_KEY_HOMEPAGE),
+ KEY(4, 6, KEY_LEFTCTRL),
+ KEY(4, 7, TOSA_KEY_LIGHT),
+ KEY(4, 9, KEY_RIGHTSHIFT),
+ KEY(5, 0, KEY_TAB),
+ KEY(5, 1, KEY_SLASH),
+ KEY(5, 2, KEY_H),
+ KEY(5, 3, KEY_M),
+ KEY(5, 4, TOSA_KEY_MENU),
+ KEY(5, 6, KEY_UP),
+ KEY(5, 10, TOSA_KEY_FN),
+ KEY(6, 0, KEY_X),
+ KEY(6, 1, KEY_F),
+ KEY(6, 2, KEY_SPACE),
+ KEY(6, 3, KEY_APOSTROPHE),
+ KEY(6, 4, TOSA_KEY_MAIL),
+ KEY(6, 5, KEY_LEFT),
+ KEY(6, 6, KEY_DOWN),
+ KEY(6, 7, KEY_RIGHT),
};
static struct matrix_keymap_data tosakbd_keymap_data = {
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2410.c b/arch/arm/mach-s3c24xx/clock-s3c2410.c
index 34fffdf6fc1d..564553694b54 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2410.c
@@ -119,66 +119,101 @@ static struct clk init_clocks_off[] = {
}
};
-static struct clk init_clocks[] = {
- {
- .name = "lcd",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_LCDC,
- }, {
- .name = "gpio",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_GPIO,
- }, {
- .name = "usb-host",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBH,
- }, {
- .name = "usb-device",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBD,
- }, {
- .name = "timers",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_PWMT,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.0",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART0,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.1",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART1,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.2",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART2,
- }, {
- .name = "rtc",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_RTC,
- }, {
- .name = "watchdog",
- .parent = &clk_p,
- .ctrlbit = 0,
- }, {
- .name = "usb-bus-host",
- .parent = &clk_usb_bus,
- }, {
- .name = "usb-bus-gadget",
- .parent = &clk_usb_bus,
- },
+static struct clk clk_lcd = {
+ .name = "lcd",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_LCDC,
+};
+
+static struct clk clk_gpio = {
+ .name = "gpio",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_GPIO,
+};
+
+static struct clk clk_usb_host = {
+ .name = "usb-host",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBH,
+};
+
+static struct clk clk_usb_device = {
+ .name = "usb-device",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBD,
+};
+
+static struct clk clk_timers = {
+ .name = "timers",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_PWMT,
+};
+
+struct clk s3c24xx_clk_uart0 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.0",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART0,
+};
+
+struct clk s3c24xx_clk_uart1 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.1",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART1,
+};
+
+struct clk s3c24xx_clk_uart2 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.2",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART2,
+};
+
+static struct clk clk_rtc = {
+ .name = "rtc",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_RTC,
+};
+
+static struct clk clk_watchdog = {
+ .name = "watchdog",
+ .parent = &clk_p,
+ .ctrlbit = 0,
+};
+
+static struct clk clk_usb_bus_host = {
+ .name = "usb-bus-host",
+ .parent = &clk_usb_bus,
+};
+
+static struct clk clk_usb_bus_gadget = {
+ .name = "usb-bus-gadget",
+ .parent = &clk_usb_bus,
+};
+
+static struct clk *init_clocks[] = {
+ &clk_lcd,
+ &clk_gpio,
+ &clk_usb_host,
+ &clk_usb_device,
+ &clk_timers,
+ &s3c24xx_clk_uart0,
+ &s3c24xx_clk_uart1,
+ &s3c24xx_clk_uart2,
+ &clk_rtc,
+ &clk_watchdog,
+ &clk_usb_bus_host,
+ &clk_usb_bus_gadget,
};
/* s3c2410_baseclk_add()
@@ -195,7 +230,6 @@ int __init s3c2410_baseclk_add(void)
{
unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
unsigned long clkcon = __raw_readl(S3C2410_CLKCON);
- struct clk *clkp;
struct clk *xtal;
int ret;
int ptr;
@@ -207,8 +241,9 @@ int __init s3c2410_baseclk_add(void)
/* register clocks from clock array */
- clkp = init_clocks;
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
+ for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++) {
+ struct clk *clkp = init_clocks[ptr];
+
/* ensure that we note the clock state */
clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0;
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 1069b5680826..aaf006d1d6dc 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -166,6 +166,9 @@ static struct clk_lookup s3c2440_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
+ CLKDEV_INIT("s3c2440-uart.0", "uart", &s3c24xx_clk_uart0),
+ CLKDEV_INIT("s3c2440-uart.1", "uart", &s3c24xx_clk_uart1),
+ CLKDEV_INIT("s3c2440-uart.2", "uart", &s3c24xx_clk_uart2),
CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll),
};
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index e838ba27e443..c9808c684152 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -512,6 +512,9 @@ static void __init assabet_map_io(void)
* It's called GPCLKR0 in my SA1110 manual.
*/
Ser1SDCR0 |= SDCR0_SUS;
+ MSC1 = (MSC1 & ~0xffff) |
+ MSC_NonBrst | MSC_32BitStMem |
+ MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0);
if (!machine_has_neponset())
sa1100_register_uart_fns(&assabet_port_fns);
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
index f33679d2d3ee..50e1d850ee2e 100644
--- a/arch/arm/mach-sa1100/include/mach/collie.h
+++ b/arch/arm/mach-sa1100/include/mach/collie.h
@@ -13,6 +13,8 @@
#ifndef __ASM_ARCH_COLLIE_H
#define __ASM_ARCH_COLLIE_H
+#include "hardware.h" /* Gives GPIO_MAX */
+
extern void locomolcd_power(int on);
#define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index b85b2882dbd0..803c9fcfb359 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -437,7 +437,7 @@ static struct platform_device lcdc0_device = {
.id = 0,
.dev = {
.platform_data = &lcdc0_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -534,7 +534,7 @@ static struct platform_device hdmi_lcdc_device = {
.id = 1,
.dev = {
.platform_data = &hdmi_lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index e6b775a10aad..4d610e6ea987 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -332,7 +332,7 @@ static struct platform_device lcdc_device = {
.resource = lcdc_resources,
.dev = {
.platform_data = &lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index fa3407da682a..3b917bc76a49 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -421,7 +421,7 @@ static struct platform_device lcdc_device = {
.resource = lcdc_resources,
.dev = {
.platform_data = &lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -497,7 +497,7 @@ static struct platform_device hdmi_lcdc_device = {
.id = 1,
.dev = {
.platform_data = &hdmi_lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c
index 899a86c31ec9..1ccddd228112 100644
--- a/arch/arm/mach-shmobile/setup-emev2.c
+++ b/arch/arm/mach-shmobile/setup-emev2.c
@@ -287,14 +287,14 @@ static struct gpio_em_config gio3_config = {
static struct resource gio3_resources[] = {
[0] = {
.name = "GIO_096",
- .start = 0xe0050100,
- .end = 0xe005012b,
+ .start = 0xe0050180,
+ .end = 0xe00501ab,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "GIO_096",
- .start = 0xe0050140,
- .end = 0xe005015f,
+ .start = 0xe00501c0,
+ .end = 0xe00501df,
.flags = IORESOURCE_MEM,
},
[2] = {
diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c
index c5a75a7a508f..7f45c2edbca9 100644
--- a/arch/arm/mach-shmobile/setup-r8a73a4.c
+++ b/arch/arm/mach-shmobile/setup-r8a73a4.c
@@ -62,7 +62,7 @@ enum { SCIFA0, SCIFA1, SCIFB0, SCIFB1, SCIFB2, SCIFB3 };
static const struct plat_sci_port scif[] = {
SCIFA_DATA(SCIFA0, 0xe6c40000, gic_spi(144)), /* SCIFA0 */
SCIFA_DATA(SCIFA1, 0xe6c50000, gic_spi(145)), /* SCIFA1 */
- SCIFB_DATA(SCIFB0, 0xe6c50000, gic_spi(145)), /* SCIFB0 */
+ SCIFB_DATA(SCIFB0, 0xe6c20000, gic_spi(148)), /* SCIFB0 */
SCIFB_DATA(SCIFB1, 0xe6c30000, gic_spi(149)), /* SCIFB1 */
SCIFB_DATA(SCIFB2, 0xe6ce0000, gic_spi(150)), /* SCIFB2 */
SCIFB_DATA(SCIFB3, 0xe6cf0000, gic_spi(151)), /* SCIFB3 */
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 9f852c6fe5b9..d5ebcd0bb622 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/irqchip.h>
#include <linux/clk/tegra.h>
@@ -80,10 +81,20 @@ void tegra_assert_system_reset(char mode, const char *cmd)
static void __init tegra_init_cache(void)
{
#ifdef CONFIG_CACHE_L2X0
+ static const struct of_device_id pl310_ids[] __initconst = {
+ { .compatible = "arm,pl310-cache", },
+ {}
+ };
+
+ struct device_node *np;
int ret;
void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
u32 aux_ctrl, cache_type;
+ np = of_find_matching_node(NULL, pl310_ids);
+ if (!np)
+ return;
+
cache_type = readl(p + L2X0_CACHE_TYPE);
aux_ctrl = (cache_type & 0x700) << (17-8);
aux_ctrl |= 0x7C400001;
diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h
index ec087407b163..6f938ccb0c54 100644
--- a/arch/arm/mach-versatile/include/mach/platform.h
+++ b/arch/arm/mach-versatile/include/mach/platform.h
@@ -231,12 +231,14 @@
/* PCI space */
#define VERSATILE_PCI_BASE 0x41000000 /* PCI Interface */
#define VERSATILE_PCI_CFG_BASE 0x42000000
+#define VERSATILE_PCI_IO_BASE 0x43000000
#define VERSATILE_PCI_MEM_BASE0 0x44000000
#define VERSATILE_PCI_MEM_BASE1 0x50000000
#define VERSATILE_PCI_MEM_BASE2 0x60000000
/* Sizes of above maps */
#define VERSATILE_PCI_BASE_SIZE 0x01000000
#define VERSATILE_PCI_CFG_BASE_SIZE 0x02000000
+#define VERSATILE_PCI_IO_BASE_SIZE 0x01000000
#define VERSATILE_PCI_MEM_BASE0_SIZE 0x0c000000 /* 32Mb */
#define VERSATILE_PCI_MEM_BASE1_SIZE 0x10000000 /* 256Mb */
#define VERSATILE_PCI_MEM_BASE2_SIZE 0x10000000 /* 256Mb */
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index e92e5e0705bc..c97be4ea76d2 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -43,9 +43,9 @@
#define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
#define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
#define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
-#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
-#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
-#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
+#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
+#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
+#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
#define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
#define DEVICE_ID_OFFSET 0x00
@@ -170,8 +170,8 @@ static struct pci_ops pci_versatile_ops = {
.write = versatile_write_config,
};
-static struct resource io_mem = {
- .name = "PCI I/O space",
+static struct resource unused_mem = {
+ .name = "PCI unused",
.start = VERSATILE_PCI_MEM_BASE0,
.end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
.flags = IORESOURCE_MEM,
@@ -195,9 +195,9 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
{
int ret = 0;
- ret = request_resource(&iomem_resource, &io_mem);
+ ret = request_resource(&iomem_resource, &unused_mem);
if (ret) {
- printk(KERN_ERR "PCI: unable to allocate I/O "
+ printk(KERN_ERR "PCI: unable to allocate unused "
"memory region (%d)\n", ret);
goto out;
}
@@ -205,7 +205,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
if (ret) {
printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
"memory region (%d)\n", ret);
- goto release_io_mem;
+ goto release_unused_mem;
}
ret = request_resource(&iomem_resource, &pre_mem);
if (ret) {
@@ -225,8 +225,8 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
release_non_mem:
release_resource(&non_mem);
- release_io_mem:
- release_resource(&io_mem);
+ release_unused_mem:
+ release_resource(&unused_mem);
out:
return ret;
}
@@ -246,7 +246,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
goto out;
}
- ret = pci_ioremap_io(0, VERSATILE_PCI_MEM_BASE0);
+ ret = pci_ioremap_io(0, VERSATILE_PCI_IO_BASE);
if (ret)
goto out;
@@ -295,6 +295,19 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
__raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
/*
+ * For many years the kernel and QEMU were symbiotically buggy
+ * in that they both assumed the same broken IRQ mapping.
+ * QEMU therefore attempts to auto-detect old broken kernels
+ * so that they still work on newer QEMU as they did on old
+ * QEMU. Since we now use the correct (ie matching-hardware)
+ * IRQ mapping, we write a definitely different value to a
+ * PCI_INTERRUPT_LINE register to tell QEMU that we expect
+ * real hardware behaviour and it need not be backwards
+ * compatible for us. This write is harmless on real hardware.
+ */
+ __raw_writel(0, VERSATILE_PCI_VIRT_BASE+PCI_INTERRUPT_LINE);
+
+ /*
* Do not map the Versatile FPGA PCI device into memory space
*/
pci_slot_ignore |= (1 << myslot);
@@ -327,13 +340,13 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
- /* slot, pin, irq
- * 24 1 IRQ_SIC_PCI0
- * 25 1 IRQ_SIC_PCI1
- * 26 1 IRQ_SIC_PCI2
- * 27 1 IRQ_SIC_PCI3
+ /*
+ * Slot INTA INTB INTC INTD
+ * 31 PCI1 PCI2 PCI3 PCI0
+ * 30 PCI0 PCI1 PCI2 PCI3
+ * 29 PCI3 PCI0 PCI1 PCI2
*/
- irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
+ irq = IRQ_SIC_PCI0 + ((slot + 2 + pin - 1) & 3);
return irq;
}
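The corrected mapping can be sanity-checked against the table in the comment: with IRQ_SIC_PCI0..PCI3 contiguous and pin numbered 1 (INTA) to 4 (INTD), slot 31/INTA gives (31 + 2 + 0) & 3 = 1 (PCI1), slot 30/INTA gives 0 (PCI0), and slot 29/INTA gives 3 (PCI3), matching every row. A tiny host-side check of the arithmetic; the IRQ base value here is a placeholder, not the board's real number:

#include <stdio.h>

#define IRQ_SIC_PCI0 0	/* placeholder base; the real value is board-specific */

static int map_irq(int slot, int pin)
{
	return IRQ_SIC_PCI0 + ((slot + 2 + pin - 1) & 3);
}

int main(void)
{
	int slot, pin;

	for (slot = 29; slot <= 31; slot++)
		for (pin = 1; pin <= 4; pin++)	/* 1 = INTA .. 4 = INTD */
			printf("slot %d INT%c -> PCI%d\n", slot,
			       'A' + pin - 1, map_irq(slot, pin));
	return 0;
}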
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 5907e10c37fd..39858ba03084 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -1,6 +1,9 @@
config ARCH_VEXPRESS
bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
+ select ARCH_HAS_CPUFREQ
+ select ARCH_HAS_OPP
select ARCH_REQUIRE_GPIOLIB
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
select ARM_TIMER_SP804
@@ -56,5 +59,23 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
config ARCH_VEXPRESS_CA9X4
bool "Versatile Express Cortex-A9x4 tile"
+ select ARM_ERRATA_643719
+
+config ARCH_VEXPRESS_DCSCB
+ bool "Dual Cluster System Control Block (DCSCB) support"
+ depends on MCPM
+ select ARM_CCI
+ help
+ Support for the Dual Cluster System Configuration Block (DCSCB).
+ This is needed to provide CPU and cluster power management
+ on RTSM implementing big.LITTLE.
+
+config ARCH_VEXPRESS_TC2
+ bool "TC2 cluster management"
+ depends on MCPM
+ select VEXPRESS_SPC
+ select ARM_CCI
+ help
+ Support for CPU and cluster power management on TC2.
endmenu
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 42703e8b4d3b..14193dc7e6e8 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -6,5 +6,13 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
obj-y := v2m.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
+obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
+CFLAGS_REMOVE_dcscb.o = -pg
+obj-$(CONFIG_ARCH_VEXPRESS_TC2) += tc2_pm.o tc2_pm_setup.o
+CFLAGS_REMOVE_tc2_pm.o = -pg
+ifeq ($(CONFIG_ARCH_VEXPRESS_TC2),y)
+obj-$(CONFIG_ARM_PSCI) += tc2_pm_psci.o
+CFLAGS_REMOVE_tc2_pm_psci.o = -pg
+endif
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-vexpress/core.h b/arch/arm/mach-vexpress/core.h
index f134cd4a85f1..bde4374ab6d5 100644
--- a/arch/arm/mach-vexpress/core.h
+++ b/arch/arm/mach-vexpress/core.h
@@ -6,6 +6,8 @@
void vexpress_dt_smp_map_io(void);
+bool vexpress_smp_init_ops(void);
+
extern struct smp_operations vexpress_smp_ops;
extern void vexpress_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
new file mode 100644
index 000000000000..b35700f8e01f
--- /dev/null
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -0,0 +1,236 @@
+/*
+ * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
+ *
+ * Created by: Nicolas Pitre, May 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/of_address.h>
+#include <linux/vexpress.h>
+#include <linux/arm-cci.h>
+
+#include <asm/mcpm.h>
+#include <asm/proc-fns.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+#include <asm/psci.h>
+
+
+#define RST_HOLD0 0x0
+#define RST_HOLD1 0x4
+#define SYS_SWRESET 0x8
+#define RST_STAT0 0xc
+#define RST_STAT1 0x10
+#define EAG_CFG_R 0x20
+#define EAG_CFG_W 0x24
+#define KFC_CFG_R 0x28
+#define KFC_CFG_W 0x2c
+#define DCS_CFG_R 0x30
+
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() while its inbound counterpart
+ * is already live using the same logical CPU number, which trips lockdep
+ * debugging.
+ */
+static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+static void __iomem *dcscb_base;
+static int dcscb_use_count[4][2];
+static int dcscb_allcpus_mask[2];
+
+static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int rst_hold, cpumask = (1 << cpu);
+ unsigned int all_mask = dcscb_allcpus_mask[cluster];
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cpu >= 4 || cluster >= 2)
+ return -EINVAL;
+
+ /*
+ * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+ * variant exists, we need to disable IRQs manually here.
+ */
+ local_irq_disable();
+ arch_spin_lock(&dcscb_lock);
+
+ dcscb_use_count[cpu][cluster]++;
+ if (dcscb_use_count[cpu][cluster] == 1) {
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ if (rst_hold & (1 << 8)) {
+ /* remove cluster reset and add individual CPU's reset */
+ rst_hold &= ~(1 << 8);
+ rst_hold |= all_mask;
+ }
+ rst_hold &= ~(cpumask | (cpumask << 4));
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+ } else if (dcscb_use_count[cpu][cluster] != 2) {
+ /*
+ * The only possible values are:
+ * 0 = CPU down
+ * 1 = CPU (still) up
+ * 2 = CPU requested to be up before it had a chance
+ * to actually power itself down.
+ * Any other value is a bug.
+ */
+ BUG();
+ }
+
+ arch_spin_unlock(&dcscb_lock);
+ local_irq_enable();
+
+ return 0;
+}
+
+static void dcscb_power_down(void)
+{
+ unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask;
+ bool last_man = false, skip_wfi = false;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpumask = (1 << cpu);
+ all_mask = dcscb_allcpus_mask[cluster];
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cpu >= 4 || cluster >= 2);
+
+ __mcpm_cpu_going_down(cpu, cluster);
+
+ arch_spin_lock(&dcscb_lock);
+ BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+ dcscb_use_count[cpu][cluster]--;
+ if (dcscb_use_count[cpu][cluster] == 0) {
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold |= cpumask;
+ if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
+ rst_hold |= (1 << 8);
+ last_man = true;
+ }
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+ } else if (dcscb_use_count[cpu][cluster] == 1) {
+ /*
+ * A power_up request went ahead of us.
+ * Even if we do not want to shut this CPU down,
+ * the caller expects a certain state as if the WFI
+ * was aborted. So let's continue with cache cleaning.
+ */
+ skip_wfi = true;
+ } else
+ BUG();
+
+ if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+ arch_spin_unlock(&dcscb_lock);
+
+ /* Flush all cache levels for this cluster. */
+ v7_exit_coherency_flush(all);
+
+ /*
+ * This is a harmless no-op. On platforms with a real
+ * outer cache this might either be needed or not,
+ * depending on where the outer cache sits.
+ */
+ outer_flush_all();
+
+ /*
+ * Disable cluster-level coherency by masking
+ * incoming snoops and DVM messages:
+ */
+ cci_disable_port_by_cpu(mpidr);
+
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ } else {
+ arch_spin_unlock(&dcscb_lock);
+
+ /* Disable and flush the local CPU cache. */
+ v7_exit_coherency_flush(louis);
+ }
+
+ __mcpm_cpu_down(cpu, cluster);
+
+ /* Now we are prepared for power-down, do it: */
+ dsb();
+ if (!skip_wfi)
+ wfi();
+
+ /* Not dead at this point? Let our caller cope. */
+}
+
+static const struct mcpm_platform_ops dcscb_power_ops = {
+ .power_up = dcscb_power_up,
+ .power_down = dcscb_power_down,
+};
+
+static void __init dcscb_usage_count_init(void)
+{
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cpu >= 4 || cluster >= 2);
+ dcscb_use_count[cpu][cluster] = 1;
+}
+
+extern void dcscb_power_up_setup(unsigned int affinity_level);
+
+static int __init dcscb_init(void)
+{
+ struct device_node *node;
+ unsigned int cfg;
+ int ret;
+
+ ret = psci_probe();
+ if (!ret) {
+ pr_debug("psci found. Aborting native init\n");
+ return -ENODEV;
+ }
+
+ if (!cci_probed())
+ return -ENODEV;
+
+ node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
+ if (!node)
+ return -ENODEV;
+ dcscb_base = of_iomap(node, 0);
+ if (!dcscb_base)
+ return -EADDRNOTAVAIL;
+ cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
+ dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
+ dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
+ dcscb_usage_count_init();
+
+ ret = mcpm_platform_register(&dcscb_power_ops);
+ if (!ret)
+ ret = mcpm_sync_init(dcscb_power_up_setup);
+ if (ret) {
+ iounmap(dcscb_base);
+ return ret;
+ }
+
+ pr_info("VExpress DCSCB support installed\n");
+
+ /*
+ * Future entries into the kernel can now go
+ * through the cluster entry vectors.
+ */
+ vexpress_flags_set(virt_to_phys(mcpm_entry_point));
+
+ return 0;
+}
+
+early_initcall(dcscb_init);
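dcscb_init() turns the DCS_CFG_R register into per-cluster CPU masks: reading the code, bits [19:16] are taken as the CPU count of cluster 0 and bits [23:20] as that of cluster 1 (the (cluster << 2) term selects the 4-bit field), and (1 << n) - 1 converts a count of n CPUs into a mask with the n low bits set. A worked decode under that assumed register layout, with a hypothetical cfg value:

#include <stdio.h>

/* Decode mirroring the dcscb_allcpus_mask[] computation above. */
static unsigned int allcpus_mask(unsigned int cfg, unsigned int cluster)
{
	unsigned int ncpus = ((cfg >> 16) >> (cluster << 2)) & 0xf;

	return (1u << ncpus) - 1;	/* n CPUs -> n low bits set */
}

int main(void)
{
	unsigned int cfg = 0x00340000;	/* hypothetical: 4 and 3 CPUs */

	printf("cluster 0 mask %#x\n", allcpus_mask(cfg, 0));	/* 0xf */
	printf("cluster 1 mask %#x\n", allcpus_mask(cfg, 1));	/* 0x7 */
	return 0;
}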
diff --git a/arch/arm/mach-vexpress/dcscb_setup.S b/arch/arm/mach-vexpress/dcscb_setup.S
new file mode 100644
index 000000000000..4bb7fbe0f621
--- /dev/null
+++ b/arch/arm/mach-vexpress/dcscb_setup.S
@@ -0,0 +1,38 @@
+/*
+ * arch/arm/include/asm/dcscb_setup.S
+ *
+ * Created by: Dave Martin, 2012-06-22
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+
+ENTRY(dcscb_power_up_setup)
+
+ cmp r0, #0 @ check affinity level
+ beq 2f
+
+/*
+ * Enable cluster-level coherency, in preparation for turning on the MMU.
+ * The ACTLR SMP bit does not need to be set here, because cpu_resume()
+ * already restores that.
+ *
+ * A15/A7 may not require explicit L2 invalidation on reset, depending
+ * on hardware integration decisions.
+ * For now, this code assumes that L2 is either already invalidated,
+ * or invalidation is not required.
+ */
+
+ b cci_enable_port_for_self
+
+2: @ Implementation-specific local CPU setup operations should go here,
+ @ if any. In this case, there is nothing to do.
+
+ bx lr
+
+ENDPROC(dcscb_power_up_setup)
diff --git a/arch/arm/mach-vexpress/include/mach/tc2.h b/arch/arm/mach-vexpress/include/mach/tc2.h
new file mode 100644
index 000000000000..d3b5a2225a0e
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/tc2.h
@@ -0,0 +1,10 @@
+#ifndef __MACH_TC2_H
+#define __MACH_TC2_H
+
+/*
+ * cpu and cluster limits
+ */
+#define TC2_MAX_CPUS 3
+#define TC2_MAX_CLUSTERS 2
+
+#endif
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
index dc1ace55d557..b4a5f0d8390d 100644
--- a/arch/arm/mach-vexpress/platsmp.c
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -12,9 +12,11 @@
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/vexpress.h>
+#include <asm/mcpm.h>
#include <asm/smp_scu.h>
#include <asm/mach/map.h>
@@ -51,7 +53,7 @@ static int __init vexpress_dt_find_scu(unsigned long node,
{
if (of_flat_dt_match(node, vexpress_dt_cortex_a9_match)) {
phys_addr_t phys_addr;
- __be32 *reg = of_get_flat_dt_prop(node, "reg", NULL);
+ const __be32 *reg = of_get_flat_dt_prop(node, "reg", NULL);
if (WARN_ON(!reg))
return -EINVAL;
@@ -203,3 +205,21 @@ struct smp_operations __initdata vexpress_smp_ops = {
.cpu_die = vexpress_cpu_die,
#endif
};
+
+bool __init vexpress_smp_init_ops(void)
+{
+#ifdef CONFIG_MCPM
+ /*
+ * The best way to detect a multi-cluster configuration at the moment
+ * is to look for the presence of a CCI in the system.
+ * Override the default vexpress_smp_ops if so.
+ */
+ struct device_node *node;
+ node = of_find_compatible_node(NULL, NULL, "arm,cci-400");
+ if (node && of_device_is_available(node)) {
+ mcpm_smp_set_ops();
+ return true;
+ }
+#endif
+ return false;
+}
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
new file mode 100644
index 000000000000..9fc264a3bade
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -0,0 +1,277 @@
+/*
+ * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
+ *
+ * Created by: Nicolas Pitre, October 2012
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * Some portions of this file were originally written by Achin Gupta
+ * Copyright: (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/mcpm.h>
+#include <asm/proc-fns.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+#include <asm/psci.h>
+
+#include <mach/motherboard.h>
+#include <mach/tc2.h>
+
+#include <linux/vexpress.h>
+#include <linux/arm-cci.h>
+
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() after its inbound counterpart
+ * is already live using the same logical CPU number, which trips lockdep
+ * debugging.
+ */
+static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+static int tc2_pm_use_count[TC2_MAX_CPUS][TC2_MAX_CLUSTERS];
+
+static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
+{
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster))
+ return -EINVAL;
+
+ /*
+ * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+ * variant exists, we need to disable IRQs manually here.
+ */
+ local_irq_disable();
+ arch_spin_lock(&tc2_pm_lock);
+
+ if (!tc2_pm_use_count[0][cluster] &&
+ !tc2_pm_use_count[1][cluster] &&
+ !tc2_pm_use_count[2][cluster])
+ vexpress_spc_powerdown_enable(cluster, 0);
+
+ tc2_pm_use_count[cpu][cluster]++;
+ if (tc2_pm_use_count[cpu][cluster] == 1) {
+ vexpress_spc_write_resume_reg(cluster, cpu,
+ virt_to_phys(mcpm_entry_point));
+ vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
+ } else if (tc2_pm_use_count[cpu][cluster] != 2) {
+ /*
+ * The only possible values are:
+ * 0 = CPU down
+ * 1 = CPU (still) up
+ * 2 = CPU requested to be up before it had a chance
+ * to actually power itself down.
+ * Any other value is a bug.
+ */
+ BUG();
+ }
+
+ arch_spin_unlock(&tc2_pm_lock);
+ local_irq_enable();
+
+ return 0;
+}
+
+static void tc2_pm_down(u64 residency)
+{
+ unsigned int mpidr, cpu, cluster;
+ bool last_man = false, skip_wfi = false;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
+
+ __mcpm_cpu_going_down(cpu, cluster);
+
+ arch_spin_lock(&tc2_pm_lock);
+ BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+ tc2_pm_use_count[cpu][cluster]--;
+ if (tc2_pm_use_count[cpu][cluster] == 0) {
+ vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
+ if (!tc2_pm_use_count[0][cluster] &&
+ !tc2_pm_use_count[1][cluster] &&
+ !tc2_pm_use_count[2][cluster] &&
+ (!residency || residency > 5000)) {
+ vexpress_spc_powerdown_enable(cluster, 1);
+ vexpress_spc_set_global_wakeup_intr(1);
+ last_man = true;
+ }
+ } else if (tc2_pm_use_count[cpu][cluster] == 1) {
+ /*
+ * A power_up request went ahead of us.
+ * Even if we do not want to shut this CPU down,
+ * the caller expects a certain state as if the WFI
+ * was aborted. So let's continue with cache cleaning.
+ */
+ skip_wfi = true;
+ } else
+ BUG();
+
+ /*
+ * If the CPU is committed to power down, make sure
+ * the power controller will be in charge of waking it
+ * up upon IRQ: the IRQ lines are cut from the GIC CPU IF
+ * to the CPU by disabling the GIC CPU IF, to prevent
+ * wfi from completing execution behind the power
+ * controller's back.
+ */
+ if (!skip_wfi)
+ gic_cpu_if_down();
+
+ if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+ arch_spin_unlock(&tc2_pm_lock);
+
+ if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+ /*
+ * On the Cortex-A15 we need to disable
+ * L2 prefetching before flushing the cache.
+ */
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3 \n\t"
+ "isb \n\t"
+ "dsb "
+ : : "r" (0x400) );
+ }
+
+ v7_exit_coherency_flush(all);
+
+ cci_disable_port_by_cpu(mpidr);
+
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ } else {
+ /*
+ * If last man then undo any setup done previously.
+ */
+ if (last_man) {
+ vexpress_spc_powerdown_enable(cluster, 0);
+ vexpress_spc_set_global_wakeup_intr(0);
+ }
+
+ arch_spin_unlock(&tc2_pm_lock);
+
+ v7_exit_coherency_flush(louis);
+ }
+
+ __mcpm_cpu_down(cpu, cluster);
+
+ /* Now we are prepared for power-down, do it: */
+ if (!skip_wfi)
+ wfi();
+
+ /* Not dead at this point? Let our caller cope. */
+}
+
+static void tc2_pm_power_down(void)
+{
+ tc2_pm_down(0);
+}
+
+static void tc2_pm_suspend(u64 residency)
+{
+ extern void tc2_resume(void);
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ vexpress_spc_write_resume_reg(cluster, cpu,
+ virt_to_phys(tc2_resume));
+
+ tc2_pm_down(residency);
+}
+
+static void tc2_pm_powered_up(void)
+{
+ unsigned int mpidr, cpu, cluster;
+ unsigned long flags;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
+
+ local_irq_save(flags);
+ arch_spin_lock(&tc2_pm_lock);
+
+ if (!tc2_pm_use_count[0][cluster] &&
+ !tc2_pm_use_count[1][cluster] &&
+ !tc2_pm_use_count[2][cluster]) {
+ vexpress_spc_powerdown_enable(cluster, 0);
+ vexpress_spc_set_global_wakeup_intr(0);
+ }
+
+ if (!tc2_pm_use_count[cpu][cluster])
+ tc2_pm_use_count[cpu][cluster] = 1;
+
+ vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 0);
+ vexpress_spc_write_resume_reg(cluster, cpu, 0);
+
+ arch_spin_unlock(&tc2_pm_lock);
+ local_irq_restore(flags);
+}
+
+static const struct mcpm_platform_ops tc2_pm_power_ops = {
+ .power_up = tc2_pm_power_up,
+ .power_down = tc2_pm_power_down,
+ .suspend = tc2_pm_suspend,
+ .powered_up = tc2_pm_powered_up,
+};
+
+static void __init tc2_pm_usage_count_init(void)
+{
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
+
+ tc2_pm_use_count[cpu][cluster] = 1;
+}
+
+extern void tc2_pm_power_up_setup(unsigned int affinity_level);
+
+static int __init tc2_pm_init(void)
+{
+ int ret;
+
+ ret = psci_probe();
+ if (!ret) {
+ pr_debug("psci found. Aborting native init\n");
+ return -ENODEV;
+ }
+
+ if (!vexpress_spc_check_loaded())
+ return -ENODEV;
+
+ tc2_pm_usage_count_init();
+
+ ret = mcpm_platform_register(&tc2_pm_power_ops);
+ if (!ret)
+ ret = mcpm_sync_init(tc2_pm_power_up_setup);
+ if (!ret)
+ pr_info("TC2 power management initialized\n");
+ return ret;
+}
+
+early_initcall(tc2_pm_init);
diff --git a/arch/arm/mach-vexpress/tc2_pm_psci.c b/arch/arm/mach-vexpress/tc2_pm_psci.c
new file mode 100644
index 000000000000..c2fdc22e4c06
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2_pm_psci.c
@@ -0,0 +1,173 @@
+/*
+ * arch/arm/mach-vexpress/tc2_pm_psci.c - TC2 PSCI support
+ *
+ * Created by: Achin Gupta, December 2012
+ * Copyright: (C) 2012 ARM Limited
+ *
+ * Some portions of this file were originally written by Nicolas Pitre
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+
+#include <asm/mcpm.h>
+#include <asm/proc-fns.h>
+#include <asm/cacheflush.h>
+#include <asm/psci.h>
+#include <asm/atomic.h>
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+
+#include <mach/motherboard.h>
+#include <mach/tc2.h>
+
+#include <linux/vexpress.h>
+
+/*
+ * Platform specific state id understood by the firmware and used to
+ * program the power controller
+ */
+#define PSCI_POWER_STATE_ID 0
+
+static atomic_t tc2_pm_use_count[TC2_MAX_CPUS][TC2_MAX_CLUSTERS];
+
+static int tc2_pm_psci_power_up(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int mpidr = (cluster << 8) | cpu;
+ int ret = 0;
+
+ BUG_ON(!psci_ops.cpu_on);
+
+ switch (atomic_inc_return(&tc2_pm_use_count[cpu][cluster])) {
+ case 1:
+ /*
+		 * This is a request to power up a CPU that Linux thinks has
+		 * been powered down. Retries are needed while the firmware
+		 * has not yet processed the earlier power-down request.
+ */
+ do
+ ret = psci_ops.cpu_on(mpidr,
+ virt_to_phys(mcpm_entry_point));
+ while (ret == -EAGAIN);
+
+ return ret;
+ case 2:
+ /* This power up request has overtaken a power down request */
+ return ret;
+ default:
+ /* Any other value is a bug */
+ BUG();
+ }
+}
+
+static void tc2_pm_psci_power_down(void)
+{
+ struct psci_power_state power_state;
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ BUG_ON(!psci_ops.cpu_off);
+
+ switch (atomic_dec_return(&tc2_pm_use_count[cpu][cluster])) {
+ case 1:
+ /*
+ * Overtaken by a power up. Flush caches, exit coherency,
+ * return & fake a reset
+ */
+ set_cr(get_cr() & ~CR_C);
+
+ flush_cache_louis();
+
+ asm volatile ("clrex");
+ set_auxcr(get_auxcr() & ~(1 << 6));
+
+ return;
+ case 0:
+ /* A normal request to possibly power down the cluster */
+ power_state.id = PSCI_POWER_STATE_ID;
+ power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
+ power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;
+
+ psci_ops.cpu_off(power_state);
+
+		/* On success this function never returns; fall through */
+ default:
+ /* Any other value is a bug */
+ BUG();
+ }
+}
+
+static void tc2_pm_psci_suspend(u64 unused)
+{
+ struct psci_power_state power_state;
+
+ BUG_ON(!psci_ops.cpu_suspend);
+
+ /* On TC2 always attempt to power down the cluster */
+ power_state.id = PSCI_POWER_STATE_ID;
+ power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
+ power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;
+
+ psci_ops.cpu_suspend(power_state, virt_to_phys(mcpm_entry_point));
+
+ /* On success this function never returns */
+ BUG();
+}
+
+static const struct mcpm_platform_ops tc2_pm_power_ops = {
+ .power_up = tc2_pm_psci_power_up,
+ .power_down = tc2_pm_psci_power_down,
+ .suspend = tc2_pm_psci_suspend,
+};
+
+static void __init tc2_pm_usage_count_init(void)
+{
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
+
+ atomic_set(&tc2_pm_use_count[cpu][cluster], 1);
+}
+
+static int __init tc2_pm_psci_init(void)
+{
+ int ret;
+
+ ret = psci_probe();
+ if (ret) {
+ pr_debug("psci not found. Aborting psci init\n");
+ return -ENODEV;
+ }
+
+ if (!vexpress_spc_check_loaded()) {
+ pr_debug("spc not found. Aborting psci init\n");
+ return -ENODEV;
+ }
+
+ tc2_pm_usage_count_init();
+
+ ret = mcpm_platform_register(&tc2_pm_power_ops);
+ if (!ret)
+ ret = mcpm_sync_init(NULL);
+ if (!ret)
+ pr_info("TC2 power management initialized\n");
+ return ret;
+}
+
+early_initcall(tc2_pm_psci_init);
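Both back ends pair power_up with power_down through a per-CPU use count so that racing requests resolve deterministically: 0->1 really powers the CPU on, 1->2 records that a power-up overtook a pending power-down, 1->0 really powers off, and 2->1 means the power-down was overtaken and must clean caches and fake a reset instead. A toy model of those transitions, in plain C rather than kernel code:

	#include <stdatomic.h>
	#include <assert.h>

	static atomic_int use_count;		/* one per CPU in the real code */

	static void model_power_up(void)
	{
		switch (atomic_fetch_add(&use_count, 1) + 1) {
		case 1: break;	/* really power on (retrying while firmware is busy) */
		case 2: break;	/* overtook a pending power-down: nothing to do */
		default: assert(0);
		}
	}

	static void model_power_down(void)
	{
		switch (atomic_fetch_sub(&use_count, 1) - 1) {
		case 0: break;	/* really power off */
		case 1: break;	/* overtaken by a power-up: fake a reset and return */
		default: assert(0);
		}
	}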
diff --git a/arch/arm/mach-vexpress/tc2_pm_setup.S b/arch/arm/mach-vexpress/tc2_pm_setup.S
new file mode 100644
index 000000000000..a18dafeeb0ee
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2_pm_setup.S
@@ -0,0 +1,68 @@
+/*
+ * arch/arm/mach-vexpress/tc2_pm_setup.S
+ *
+ * Created by: Nicolas Pitre, October 2012
+ * (based on dcscb_setup.S by Dave Martin)
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/mcpm.h>
+
+
+#define SPC_PHYS_BASE 0x7FFF0000
+#define SPC_WAKE_INT_STAT 0xb2c
+
+#define SNOOP_CTL_A15 0x404
+#define SNOOP_CTL_A7 0x504
+
+#define A15_SNOOP_MASK (0x3 << 7)
+#define A7_SNOOP_MASK (0x1 << 13)
+
+#define A15_BX_ADDR0 0xB68
+
+
+ENTRY(tc2_resume)
+ mrc p15, 0, r0, c0, c0, 5
+ ubfx r1, r0, #0, #4 @ r1 = cpu
+ ubfx r2, r0, #8, #4 @ r2 = cluster
+ add r1, r1, r2, lsl #2 @ r1 = index of CPU in WAKE_INT_STAT
+ ldr r3, =SPC_PHYS_BASE + SPC_WAKE_INT_STAT
+ ldr r3, [r3]
+ lsr r3, r1
+ tst r3, #1
+	wfieq				@ no pending IRQ: re-enter WFI
+ b mcpm_entry_point
+ENDPROC(tc2_resume)
+
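tc2_resume computes this CPU's bit position in WAKE_INT_STAT as cpu + cluster * 4 and re-enters WFI when that bit is clear. The same test expressed in C, assuming the four-slots-per-cluster packing implied by the 'lsl #2':

	static inline int tc2_wake_irq_pending(u32 wake_int_stat,
					       unsigned int cpu, unsigned int cluster)
	{
		unsigned int bit = cpu + (cluster << 2);	/* add r1, r1, r2, lsl #2 */

		return (wake_int_stat >> bit) & 1;		/* lsr + tst above */
	}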
+/*
+ * Enable cluster-level coherency, in preparation for turning on the MMU.
+ * The ACTLR SMP bit does not need to be set here, because cpu_resume()
+ * already restores that.
+ */
+
+ENTRY(tc2_pm_power_up_setup)
+
+ cmp r0, #0
+ beq 2f
+
+ b cci_enable_port_for_self
+
+2: @ Clear the BX addr register
+ ldr r3, =SPC_PHYS_BASE + A15_BX_ADDR0
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ ubfx r1, r0, #8, #4 @ cluster
+ ubfx r0, r0, #0, #4 @ cpu
+ add r3, r3, r1, lsl #4
+ mov r1, #0
+ str r1, [r3, r0, lsl #2]
+ dsb
+
+ bx lr
+
+ENDPROC(tc2_pm_power_up_setup)
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 8802030df98d..057f99b62eaf 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -10,6 +10,7 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/irqchip.h>
+#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
@@ -373,6 +374,31 @@ MACHINE_START(VEXPRESS, "ARM-Versatile Express")
.init_machine = v2m_init,
MACHINE_END
+static void __init v2m_dt_hdlcd_init(void)
+{
+ struct device_node *node;
+ int len, na, ns;
+ const __be32 *prop;
+ phys_addr_t fb_base, fb_size;
+
+ node = of_find_compatible_node(NULL, NULL, "arm,hdlcd");
+ if (!node)
+ return;
+
+ na = of_n_addr_cells(node);
+ ns = of_n_size_cells(node);
+
+ prop = of_get_property(node, "framebuffer", &len);
+ if (WARN_ON(!prop || len < (na + ns) * sizeof(*prop)))
+ return;
+
+ fb_base = of_read_number(prop, na);
+ fb_size = of_read_number(prop + na, ns);
+
+ if (WARN_ON(memblock_remove(fb_base, fb_size)))
+ return;
+}
+
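of_read_number() folds 'na' address cells and 'ns' size cells of the "framebuffer" property into 64-bit values; each cell is one big-endian 32-bit word, most significant first. Its effect is equivalent to this sketch (the helper name is illustrative):

	static u64 read_cells(const __be32 *cells, int count)
	{
		u64 value = 0;

		while (count--)
			value = (value << 32) | be32_to_cpup(cells++);
		return value;
	}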
static struct map_desc v2m_rs1_io_desc __initdata = {
.virtual = V2M_PERIPH,
.pfn = __phys_to_pfn(0x1c000000),
@@ -423,6 +449,8 @@ void __init v2m_dt_init_early(void)
pr_warning("vexpress: DT HBI (%x) is not matching "
"hardware (%x)!\n", dt_hbi, hbi);
}
+
+ v2m_dt_hdlcd_init();
}
static void __init v2m_dt_timer_init(void)
@@ -456,6 +484,7 @@ static const char * const v2m_dt_match[] __initconst = {
DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express")
.dt_compat = v2m_dt_match,
.smp = smp_ops(vexpress_smp_ops),
+ .smp_init = smp_init_ops(vexpress_smp_init_ops),
.map_io = v2m_dt_map_io,
.init_early = v2m_dt_init_early,
.init_irq = irqchip_init,
diff --git a/arch/arm/mach-virt/Makefile b/arch/arm/mach-virt/Makefile
index 042afc1f8c44..7ddbfa60227f 100644
--- a/arch/arm/mach-virt/Makefile
+++ b/arch/arm/mach-virt/Makefile
@@ -3,4 +3,3 @@
#
obj-y := virt.o
-obj-$(CONFIG_SMP) += platsmp.o
diff --git a/arch/arm/mach-virt/platsmp.c b/arch/arm/mach-virt/platsmp.c
deleted file mode 100644
index f4143f5bfa5b..000000000000
--- a/arch/arm/mach-virt/platsmp.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Dummy Virtual Machine - does what it says on the tin.
- *
- * Copyright (C) 2012 ARM Ltd
- * Author: Will Deacon <will.deacon@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/of.h>
-
-#include <asm/psci.h>
-#include <asm/smp_plat.h>
-
-extern void secondary_startup(void);
-
-static void __init virt_smp_init_cpus(void)
-{
-}
-
-static void __init virt_smp_prepare_cpus(unsigned int max_cpus)
-{
-}
-
-static int __cpuinit virt_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
-{
- if (psci_ops.cpu_on)
- return psci_ops.cpu_on(cpu_logical_map(cpu),
- __pa(secondary_startup));
- return -ENODEV;
-}
-
-struct smp_operations __initdata virt_smp_ops = {
- .smp_init_cpus = virt_smp_init_cpus,
- .smp_prepare_cpus = virt_smp_prepare_cpus,
- .smp_boot_secondary = virt_boot_secondary,
-};
diff --git a/arch/arm/mach-virt/virt.c b/arch/arm/mach-virt/virt.c
index 061f283f579e..a67d2dd5bb60 100644
--- a/arch/arm/mach-virt/virt.c
+++ b/arch/arm/mach-virt/virt.c
@@ -36,11 +36,8 @@ static const char *virt_dt_match[] = {
NULL
};
-extern struct smp_operations virt_smp_ops;
-
DT_MACHINE_START(VIRT, "Dummy Virtual Machine")
.init_irq = irqchip_init,
.init_machine = virt_init,
- .smp = smp_ops(virt_smp_ops),
.dt_compat = virt_dt_match,
MACHINE_END
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 35955b54944c..36e9f24e03b0 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -411,28 +411,31 @@ config CPU_32v3
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v4
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v4T
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v5
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v6
bool
- select CPU_USE_DOMAINS if CPU_V6 && MMU
select TLS_REG_EMUL if !CPU_32v6K && !MMU
config CPU_32v6K
@@ -647,7 +650,7 @@ config ARM_VIRT_EXT
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
- depends on !CPU_USE_DOMAINS && CPU_V7
+ depends on CPU_V7
default y if SMP
select HAVE_PROC_CPU if PROC_FS
help
@@ -756,6 +759,7 @@ config CPU_BPREDICT_DISABLE
config TLS_REG_EMUL
bool
+ select NEED_KUSER_HELPERS
help
An SMP system using a pre-ARMv6 processor (there are apparently
a few prototypes like that in existence) and therefore access to
@@ -763,11 +767,43 @@ config TLS_REG_EMUL
config NEEDS_SYSCALL_FOR_CMPXCHG
bool
+ select NEED_KUSER_HELPERS
help
SMP on a pre-ARMv6 processor? Well OK then.
Forget about fast user space cmpxchg support.
It is just not possible.
+config NEED_KUSER_HELPERS
+ bool
+
+config KUSER_HELPERS
+ bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+ default y
+ help
+ Warning: disabling this option may break user programs.
+
+ Provide kuser helpers in the vector page. The kernel provides
+ helper code to userspace in read only form at a fixed location
+ in the high vector page to allow userspace to be independent of
+ the CPU type fitted to the system. This permits binaries to be
+ run on ARMv4 through to ARMv7 without modification.
+
+ See Documentation/arm/kernel_user_helpers.txt for details.
+
+ However, the fixed address nature of these helpers can be used
+ by ROP (return orientated programming) authors when creating
+ exploits.
+
+ If all of the binaries and libraries which run on your platform
+ are built specifically for your platform, and make no use of
+ these helpers, then you can turn this option off to hinder
+ such exploits. However, in that case, if a binary or library
+ relying on those helpers is run, it will receive a SIGILL signal,
+ which will terminate the program.
+
+ Say N here only if you are absolutely certain that you do not
+ need these helpers; otherwise, the safe option is to say Y.
+
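For reference, user space reaches these helpers through fixed, ABI-stable addresses near the top of the vector page; the cmpxchg helper documented in Documentation/arm/kernel_user_helpers.txt sits at 0xffff0fc0 and returns zero on success. A minimal sketch of a caller (the typedef and wrapper below are illustrative):

	typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
	#define kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

	static int atomic_increment(volatile int *v)
	{
		int old;

		do {
			old = *v;
		} while (kuser_cmpxchg(old, old + 1, v));	/* non-zero: raced, retry */

		return old + 1;
	}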
config DMA_CACHE_RWFO
bool "Enable read/write for ownership DMA cache maintenance"
depends on CPU_V6K && SMP
@@ -895,3 +931,9 @@ config ARCH_HAS_BARRIERS
help
This option allows the use of custom mandatory barriers
included via the mach/barriers.h file.
+
+config ARCH_SUPPORTS_BIG_ENDIAN
+ bool
+ help
+	  This option specifies that the architecture can support big endian
+	  operation.
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 80741992a9fc..3815a8262af0 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -38,9 +38,8 @@ ENTRY(v6_early_abort)
bne do_DataAbort
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r3, r3
-#endif
+ ARM_BE8(rev r3, r3)
+
do_ldrd_abort tmp=ip, insn=r3
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f4585b89078..924036473b16 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -25,6 +25,7 @@
#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>
+#include <asm/opcodes.h>
#include "fault.h"
@@ -762,21 +763,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (thumb_mode(regs)) {
u16 *ptr = (u16 *)(instrptr & ~1);
fault = probe_kernel_address(ptr, tinstr);
+ tinstr = __mem_to_opcode_thumb16(tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2 = 0;
fault = probe_kernel_address(ptr + 1, tinst2);
- instr = (tinstr << 16) | tinst2;
+ tinst2 = __mem_to_opcode_thumb16(tinst2);
+ instr = __opcode_thumb32_compose(tinstr, tinst2);
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
}
- } else
+ } else {
fault = probe_kernel_address(instrptr, instr);
+ instr = __mem_to_opcode_arm(instr);
+ }
if (fault) {
type = TYPE_FAULT;
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 515b00064da8..a84e0536ce74 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -146,18 +146,18 @@ flush_levels:
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 @ extract max number of the index size
loop1:
- mov r9, r4 @ create working copy of max way size
+ mov r9, r7 @ create working copy of max index
loop2:
- ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
- THUMB( lsl r6, r9, r5 )
+ ARM( orr r11, r10, r4, lsl r5 ) @ factor way and cache number into r11
+ THUMB( lsl r6, r4, r5 )
THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
- ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
- THUMB( lsl r6, r7, r2 )
+ ARM( orr r11, r11, r9, lsl r2 ) @ factor index number into r11
+ THUMB( lsl r6, r9, r2 )
THUMB( orr r11, r11, r6 ) @ factor index number into r11
mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
- subs r9, r9, #1 @ decrement the way
+ subs r9, r9, #1 @ decrement the index
bge loop2
- subs r7, r7, #1 @ decrement the index
+ subs r4, r4, #1 @ decrement the way
bge loop1
skip:
add r10, r10, #2 @ increment cache number
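The register shuffle above fixes the nesting of the set/way flush: the way number (r4) now drives the outer loop and the set index (r9, reloaded from the maximum in r7) the inner one, where previously the two were crossed. The intended structure, sketched in C with dccisw() as an illustrative wrapper for the 'mcr p15, 0, rX, c7, c14, 2' clean+invalidate operation:

	static void flush_cache_level(u32 level_field, int max_way, int max_index,
				      unsigned int way_shift, unsigned int set_shift)
	{
		int way, index;

		for (way = max_way; way >= 0; way--)			/* outer: ways */
			for (index = max_index; index >= 0; index--)	/* inner: sets */
				dccisw(level_field |
				       ((u32)way << way_shift) |
				       ((u32)index << set_shift));
	}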
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 2ac37372ef52..eeab06ebd06e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -39,19 +39,43 @@
* non 64-bit operations.
*/
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1)
-
-#define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS ASID_FIRST_VERSION
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask)
+{
+ int cpu;
+ unsigned long flags;
+ u64 context_id, asid;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ context_id = mm->context.id.counter;
+ for_each_online_cpu(cpu) {
+ if (cpu == this_cpu)
+ continue;
+ /*
+ * We only need to send an IPI if the other CPUs are
+ * running the same ASID as the one being invalidated.
+ */
+ asid = per_cpu(active_asids, cpu).counter;
+ if (asid == 0)
+ asid = per_cpu(reserved_asids, cpu);
+ if (context_id == asid)
+ cpumask_set_cpu(cpu, mask);
+ }
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
@@ -128,7 +152,16 @@ static void flush_context(unsigned int cpu)
asid = 0;
} else {
asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
- __set_bit(ASID_TO_IDX(asid), asid_map);
+ /*
+ * If this CPU has already been through a
+ * rollover, but hasn't run another task in
+ * the meantime, we must preserve its reserved
+ * ASID, as this is the only trace we have of
+ * the process it is still running.
+ */
+ if (asid == 0)
+ asid = per_cpu(reserved_asids, i);
+ __set_bit(asid & ~ASID_MASK, asid_map);
}
per_cpu(reserved_asids, i) = asid;
}
@@ -167,17 +200,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
/*
* Allocate a free ASID. If we can't find one, take a
* note of the currently active ASIDs and mark the TLBs
- * as requiring flushes.
+ * as requiring flushes. We always count from ASID #1,
+ * as we reserve ASID #0 to switch via TTBR0 and indicate
+ * rollover events.
*/
- asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
if (asid == NUM_USER_ASIDS) {
generation = atomic64_add_return(ASID_FIRST_VERSION,
&asid_generation);
flush_context(cpu);
- asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
}
__set_bit(asid, asid_map);
- asid = generation | IDX_TO_ASID(asid);
+ asid |= generation;
cpumask_clear(mm_cpumask(mm));
}
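With the ASID_TO_IDX/IDX_TO_ASID translation removed, the allocator works on raw ASID values and simply never hands out ASID #0, which stays reserved for signalling rollovers and the TTBR0 switch. A condensed model of the allocation step, with flush_context_model() standing in for the flush_context() logic above:

	static u64 alloc_asid(u64 generation)
	{
		unsigned long asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

		if (asid == NUM_USER_ASIDS) {		/* space exhausted: roll over */
			generation += ASID_FIRST_VERSION;
			flush_context_model();		/* hypothetical stand-in */
			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		}
		__set_bit(asid, asid_map);
		return generation | asid;		/* bit 0 is never set here */
	}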
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ef3e0f3aac96..051e904a5379 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -358,7 +358,7 @@ static int __init atomic_pool_init(void)
if (!pages)
goto no_pages;
- if (IS_ENABLED(CONFIG_CMA))
+ if (IS_ENABLED(CONFIG_DMA_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
atomic_pool_init);
else
@@ -670,7 +670,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
- else if (!IS_ENABLED(CONFIG_CMA))
+ else if (!IS_ENABLED(CONFIG_DMA_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
else
addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
@@ -759,7 +759,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
__dma_free_buffer(page, size);
} else if (__free_from_pool(cpu_addr, size)) {
return;
- } else if (!IS_ENABLED(CONFIG_CMA)) {
+ } else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
@@ -1311,7 +1311,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- if (gfp & GFP_ATOMIC)
+ if (!(gfp & __GFP_WAIT))
return __iommu_alloc_atomic(dev, size, handle);
pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
diff --git a/arch/arm/mm/extable.c b/arch/arm/mm/extable.c
index 9d285626bc7d..312e15e6d00b 100644
--- a/arch/arm/mm/extable.c
+++ b/arch/arm/mm/extable.c
@@ -9,8 +9,13 @@ int fixup_exception(struct pt_regs *regs)
const struct exception_table_entry *fixup;
fixup = search_exception_tables(instruction_pointer(regs));
- if (fixup)
+ if (fixup) {
regs->ARM_pc = fixup->fixup;
+#ifdef CONFIG_THUMB2_KERNEL
+ /* Clear the IT state to avoid nasty surprises in the fixup */
+ regs->ARM_cpsr &= ~PSR_IT_MASK;
+#endif
+ }
return fixup != NULL;
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index b835c9e3b770..56059a5be9a8 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -446,8 +446,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (pud_none(*pud_k))
goto bad_area;
- if (!pud_present(*pud))
+ if (!pud_present(*pud)) {
set_pud(pud, *pud_k);
+ /*
+ * There is a small window during free_pgtables() where the
+ * user *pud entry is 0 but the TLB has not been invalidated
+ * and we get a level 2 (pmd) translation fault caused by the
+ * intermediate TLB caching of the old level 1 (pud) entry.
+ */
+ flush_tlb_kernel_page(addr);
+ }
pmd = pmd_offset(pud, addr);
pmd_k = pmd_offset(pud_k, addr);
@@ -470,8 +478,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
#endif
if (pmd_none(pmd_k[index]))
goto bad_area;
+ if (!pmd_present(pmd[index]))
+ copy_pmd(pmd, pmd_k);
- copy_pmd(pmd, pmd_k);
return 0;
bad_area:
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 83cb3ac27095..c61d2373408c 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -24,6 +24,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pr_warning("Failed to allocate identity pmd.\n");
return;
}
+ /*
+ * Copy the original PMD to ensure that the PMD entries for
+ * the kernel image are preserved.
+ */
+ if (!pud_none(*pud))
+ memcpy(pmd, pmd_offset(pud, 0),
+ PTRS_PER_PMD * sizeof(pmd_t));
pud_populate(&init_mm, pud, pmd);
pmd += pmd_index(addr);
} else
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9a5cdc01fcdf..c12ae661d4ab 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -76,7 +76,7 @@ static int __init parse_tag_initrd2(const struct tag *tag)
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
phys_initrd_start = start;
phys_initrd_size = end - start;
@@ -600,7 +600,7 @@ void __init mem_init(void)
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
- free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
+ free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, 0, NULL);
#endif
free_highpages();
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 04d9006eab1f..f123d6eb074b 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -331,10 +331,10 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
return (void __iomem *) (offset + addr);
}
-void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller)
{
- unsigned long last_addr;
+ phys_addr_t last_addr;
unsigned long offset = phys_addr & ~PAGE_MASK;
unsigned long pfn = __phys_to_pfn(phys_addr);
@@ -367,12 +367,12 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
-void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *) =
__arm_ioremap_caller;
void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
{
return arch_ioremap_caller(phys_addr, size, mtype,
__builtin_return_address(0));
@@ -387,7 +387,7 @@ EXPORT_SYMBOL(__arm_ioremap);
* CONFIG_GENERIC_ALLOCATOR for allocating external memory.
*/
void __iomem *
-__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
+__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
unsigned int mtype;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 10062ceadd1c..5ef506c6f492 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -146,7 +146,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
- info.low_limit = PAGE_SIZE;
+ info.low_limit = FIRST_USER_ADDRESS;
info.high_limit = mm->mmap_base;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
@@ -204,13 +204,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
}
/*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
- return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+ return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
#ifdef CONFIG_STRICT_DEVMEM
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 394342d0482e..95f7a1c90326 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -459,6 +459,16 @@ static void __init build_mem_type_table(void)
hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
/*
+ * We don't use domains on ARMv6 (since this causes problems with
+ * v6/v7 kernels), so we must use a separate memory type for user
+ * r/o, kernel r/w to map the vectors page.
+ */
+#ifndef CONFIG_ARM_LPAE
+ if (cpu_arch == CPU_ARCH_ARMv6)
+ vecs_pgprot |= L_PTE_MT_VECTORS;
+#endif
+
+ /*
* ARMv6 and above have extended page tables.
*/
if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
@@ -1220,7 +1230,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
/*
* Allocate the vector page early.
*/
- vectors = early_alloc(PAGE_SIZE);
+ vectors = early_alloc(PAGE_SIZE * 2);
early_trap_init(vectors);
@@ -1265,15 +1275,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
map.pfn = __phys_to_pfn(virt_to_phys(vectors));
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
map.type = MT_HIGH_VECTORS;
+#else
+ map.type = MT_LOW_VECTORS;
+#endif
create_mapping(&map, false);
if (!vectors_high()) {
map.virtual = 0;
+ map.length = PAGE_SIZE * 2;
map.type = MT_LOW_VECTORS;
create_mapping(&map, false);
}
+ /* Now create a kernel read-only mapping */
+ map.pfn += 1;
+ map.virtual = 0xffff0000 + PAGE_SIZE;
+ map.length = PAGE_SIZE;
+ map.type = MT_LOW_VECTORS;
+ create_mapping(&map, false);
+
/*
* Ask the machine support to map in the statically mapped devices.
*/
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index eb5293a69a84..7fe0524a5449 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -87,16 +87,16 @@ void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
-void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
+void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
unsigned int mtype)
{
return (void __iomem *)phys_addr;
}
EXPORT_SYMBOL(__arm_ioremap);
-void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *);
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
-void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller)
{
return __arm_ioremap(phys_addr, size, mtype);
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 0acb089d0f70..1046b373d1ae 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -87,7 +87,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
init_pud = pud_offset(init_pgd, 0);
init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
- set_pte_ext(new_pte, *init_pte, 0);
+ set_pte_ext(new_pte + 0, init_pte[0], 0);
+ set_pte_ext(new_pte + 1, init_pte[1], 0);
pte_unmap(init_pte);
pte_unmap(new_pte);
}
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index e3c48a3fe063..ee1d80593958 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -112,13 +112,9 @@
* 100x 1 0 1 r/o no acc
* 10x0 1 0 1 r/o no acc
* 1011 0 0 1 r/w no acc
- * 110x 0 1 0 r/w r/o
- * 11x0 0 1 0 r/w r/o
- * 1111 0 1 1 r/w r/w
- *
- * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
* 110x 1 1 1 r/o r/o
* 11x0 1 1 1 r/o r/o
+ * 1111 0 1 1 r/w r/w
*/
.macro armv6_mt_table pfx
\pfx\()_mt_table:
@@ -137,7 +133,7 @@
.long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
.long 0x00 @ unused
.long 0x00 @ unused
- .long 0x00 @ unused
+ .long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS
.endm
.macro armv6_set_pte_ext pfx
@@ -158,24 +154,21 @@
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
-#ifdef CONFIG_CPU_USE_DOMAINS
- @ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-#endif
+
+ @ user read-only -> kernel read-only
+ bicne r3, r3, #PTE_EXT_AP0
tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN
- orr r3, r3, r2
+ eor r3, r3, r2
tst r1, #L_PTE_YOUNG
tstne r1, #L_PTE_PRESENT
moveq r3, #0
-#ifndef CONFIG_CPU_USE_DOMAINS
tstne r1, #L_PTE_NONE
movne r3, #0
-#endif
str r3, [r0]
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 919405e20b80..b96c6e64943e 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -206,7 +206,6 @@ __v6_setup:
mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
@@ -216,11 +215,11 @@ __v6_setup:
ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
mcr p15, 0, r8, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
+ @ complete invalidations
adr r5, v6_crval
ldmia r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r6, r6, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear requested bits
orr r0, r0, r6 @ set them
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 9704097c450e..bb20ba0f7bc7 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -90,27 +90,20 @@ ENTRY(cpu_v7_set_pte_ext)
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
-#ifdef CONFIG_CPU_USE_DOMAINS
- @ allow kernel read/write access to read-only user pages
- tstne r3, #PTE_EXT_APX
- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-#endif
tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN
tst r1, #L_PTE_YOUNG
tstne r1, #L_PTE_VALID
-#ifndef CONFIG_CPU_USE_DOMAINS
eorne r1, r1, #L_PTE_NONE
tstne r1, #L_PTE_NONE
-#endif
moveq r3, #0
ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] )
- ALT_SMP(mov pc,lr)
+ ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
mov pc, lr
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 363027e811d6..6f3b0476b729 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -56,6 +56,14 @@ ENTRY(cpu_v7_switch_mm)
mov pc, lr
ENDPROC(cpu_v7_switch_mm)
+#ifdef __ARMEB__
+#define rl r3
+#define rh r2
+#else
+#define rl r2
+#define rh r3
+#endif
+
/*
* cpu_v7_set_pte_ext(ptep, pte)
*
@@ -65,15 +73,15 @@ ENDPROC(cpu_v7_switch_mm)
*/
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
- tst r2, #L_PTE_VALID
+ tst rl, #L_PTE_VALID
beq 1f
- tst r3, #1 << (57 - 32) @ L_PTE_NONE
- bicne r2, #L_PTE_VALID
+ tst rh, #1 << (57 - 32) @ L_PTE_NONE
+ bicne rl, #L_PTE_VALID
bne 1f
- tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
- orreq r2, #L_PTE_RDONLY
+ tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
+ orreq rl, #L_PTE_RDONLY
1: strd r2, r3, [r0]
- ALT_SMP(mov pc, lr)
+ ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
mov pc, lr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index e35fec34453e..769496e6e8e9 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
ENDPROC(cpu_v7_do_idle)
ENTRY(cpu_v7_dcache_clean_area)
- ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW
- ALT_UP(W(nop))
- dcache_line_size r2, r3
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
+ ALT_UP_B(1f)
+ mov pc, lr
+1: dcache_line_size r2, r3
+2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, r2
subs r1, r1, r2
- bhi 1b
+ bhi 2b
dsb
mov pc, lr
ENDPROC(cpu_v7_dcache_clean_area)
@@ -328,7 +329,6 @@ __v7_setup:
3: mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
- dsb
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
@@ -337,6 +337,7 @@ __v7_setup:
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
+ dsb @ Complete invalidations
#ifndef CONFIG_ARM_THUMBEE
mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
and r0, r0, #(0xf << 12) @ ThumbEE enabled field
@@ -351,9 +352,7 @@ __v7_setup:
#endif
adr r5, v7_crval
ldmia r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r6, r6, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
#ifdef CONFIG_SWP_EMULATE
orr r5, r5, #(1 << 10) @ set SW bit in "clear"
bic r6, r6, #(1 << 10) @ clear it in "mmuset"
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 1a643ee8e082..78351ca8d51e 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -19,6 +19,7 @@
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
+#include <asm/opcodes.h>
#include "bpf_jit_32.h"
@@ -113,8 +114,11 @@ static u32 jit_udiv(u32 dividend, u32 divisor)
static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
+ inst |= (cond << 28);
+ inst = __opcode_to_mem_arm(inst);
+
if (ctx->target != NULL)
- ctx->target[ctx->idx] = inst | (cond << 28);
+ ctx->target[ctx->idx] = inst;
ctx->idx++;
}
@@ -637,10 +641,10 @@ load_ind:
emit(ARM_MUL(r_A, r_A, r_X), ctx);
break;
case BPF_S_ALU_DIV_K:
- /* current k == reciprocal_value(userspace k) */
+ if (k == 1)
+ break;
emit_mov_i(r_scratch, k, ctx);
- /* A = top 32 bits of the product */
- emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
+ emit_udiv(r_A, r_A, r_scratch, ctx);
break;
case BPF_S_ALU_DIV_X:
update_on_xread(ctx);
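The UMULL sequence removed above implemented constant division via a pre-computed reciprocal: user space's k had already been replaced by reciprocal_value(k), so A / k became the top half of a 64-bit product. Once k is the raw divisor again, the JIT has to emit a real UDIV, folding k == 1 into a no-op. The old trick, for reference:

	/* divide by the original k via its 32-bit fixed-point reciprocal */
	static u32 div_by_reciprocal(u32 a, u32 recip_k)
	{
		return (u32)(((u64)a * recip_k) >> 32);	/* top half of the product */
	}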
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index a62753dc15ba..df45d6edc98d 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -83,6 +83,11 @@ extern struct clk clk_ext;
extern struct clksrc_clk clk_epllref;
extern struct clksrc_clk clk_esysclk;
+/* S3C24XX UART clocks */
+extern struct clk s3c24xx_clk_uart0;
+extern struct clk s3c24xx_clk_uart1;
+extern struct clk s3c24xx_clk_uart2;
+
/* S3C64XX specific clocks */
extern struct clk clk_h2;
extern struct clk clk_27m;
diff --git a/arch/arm/plat-samsung/s5p-dev-mfc.c b/arch/arm/plat-samsung/s5p-dev-mfc.c
index a93fb6fb6606..586ca73d1059 100644
--- a/arch/arm/plat-samsung/s5p-dev-mfc.c
+++ b/arch/arm/plat-samsung/s5p-dev-mfc.c
@@ -116,8 +116,8 @@ device_initcall(s5p_mfc_memory_init);
int __init s5p_fdt_find_mfc_mem(unsigned long node, const char *uname,
int depth, void *data)
{
- __be32 *prop;
- unsigned long len;
+ const __be32 *prop;
+ int len;
struct s5p_mfc_dt_meminfo *mfc_mem = data;
if (!data)
diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S
index b178d44e9eaa..40f27e52de75 100644
--- a/arch/arm/plat-versatile/headsmp.S
+++ b/arch/arm/plat-versatile/headsmp.S
@@ -10,8 +10,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
-
- __INIT
+#include <asm/assembler.h>
/*
* Realview/Versatile Express specific entry point for secondary CPUs.
@@ -19,6 +18,7 @@
* until we're ready for them to initialise.
*/
ENTRY(versatile_secondary_startup)
+ ARM_BE8(setend be)
mrc p15, 0, r0, c0, c0, 5
bic r0, #0xff000000
adr r4, 1f
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 13609e01f4b7..81edd31bb4ac 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused)
per_cpu(xen_vcpu, cpu) = vcpup;
enable_percpu_irq(xen_events_irq, 0);
+ put_cpu();
}
static void xen_restart(char str, const char *cmd)
@@ -272,12 +273,15 @@ core_initcall(xen_guest_init);
static int __init xen_pm_init(void)
{
+ if (!xen_domain())
+ return -ENODEV;
+
pm_power_off = xen_power_off;
arm_pm_restart = xen_restart;
return 0;
}
-subsys_initcall(xen_pm_init);
+late_initcall(xen_pm_init);
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 56b3f6d447ae..bce2a34ee041 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,36 +1,62 @@
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_HAS_OPP
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
select ARM_AMBA
select ARM_ARCH_TIMER
select ARM_GIC
+ select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
+ select CPU_PM if (SUSPEND || CPU_IDLE)
+ select DCACHE_WORD_ACCESS
select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select GENERIC_CPU_AUTOPROBE
+ select GENERIC_EARLY_IOREMAP
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
+ select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
+ select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GENERIC_DMA_COHERENT
select HAVE_GENERIC_HARDIRQS
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_MEMBLOCK
+ select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
+ select OF_RESERVED_MEM
select PERF_USE_VMALLOC
select POWER_RESET
select POWER_SUPPLY
@@ -61,11 +87,7 @@ config LOCKDEP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config GENERIC_LOCKBREAK
- def_bool y
- depends on SMP && PREEMPT
-
-config RWSEM_GENERIC_SPINLOCK
+config RWSEM_XCHGADD_ALGORITHM
def_bool y
config GENERIC_HWEIGHT
@@ -77,7 +99,7 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
-config ZONE_DMA32
+config ZONE_DMA
def_bool y
config ARCH_DMA_ADDR_T_64BIT
@@ -95,6 +117,9 @@ config SWIOTLB
config IOMMU_HELPER
def_bool SWIOTLB
+config FIX_EARLYCON_MEM
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -111,6 +136,11 @@ config ARCH_VEXPRESS
This enables support for the ARMv8 software model (Versatile
Express).
+config ARCH_XGENE
+ bool "AppliedMicro X-Gene SOC Family"
+ help
+	  This enables support for the AppliedMicro X-Gene SOC Family.
+
endmenu
menu "Bus support"
@@ -130,6 +160,11 @@ config ARM64_64K_PAGES
look-up. AArch32 emulation is not available when this feature
is enabled.
+config CPU_BIG_ENDIAN
+ bool "Build big-endian kernel"
+ help
+ Say Y if you plan on running a kernel in big-endian mode.
+
config SMP
bool "Symmetric Multi-Processing"
select USE_GENERIC_SMP_HELPERS
@@ -144,11 +179,131 @@ config SMP
If you don't know what to do here, say N.
+config SCHED_MC
+ bool "Multi-core scheduler support"
+ depends on SMP
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places. If unsure say N here.
+
+config SCHED_SMT
+ bool "SMT scheduler support"
+ depends on SMP
+ help
+ Improves the CPU scheduler's decision making when dealing with
+ MultiThreading at a cost of slightly increased overhead in some
+ places. If unsure say N here.
+
+config DISABLE_CPU_SCHED_DOMAIN_BALANCE
+ bool "(EXPERIMENTAL) Disable CPU level scheduler load-balancing"
+ help
+ Disables scheduler load-balancing at CPU sched domain level.
+
+config SCHED_HMP
+	bool "(EXPERIMENTAL) Heterogeneous multiprocessor scheduling"
+ depends on DISABLE_CPU_SCHED_DOMAIN_BALANCE && SCHED_MC && FAIR_GROUP_SCHED && !SCHED_AUTOGROUP
+ help
+ Experimental scheduler optimizations for heterogeneous platforms.
+ Attempts to introspectively select task affinity to optimize power
+ and performance. Basic support for multiple (>2) cpu types is in place,
+ but it has only been tested with two types of cpus.
+ There is currently no support for migration of task groups, hence
+ !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled
+ between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE).
+
+config SCHED_HMP_PRIO_FILTER
+ bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
+ depends on SCHED_HMP
+ help
+ Enables task priority based HMP migration filter. Any task with
+ a NICE value above the threshold will always be on low-power cpus
+ with less compute capacity.
+
+config SCHED_HMP_PRIO_FILTER_VAL
+ int "NICE priority threshold"
+ default 5
+ depends on SCHED_HMP_PRIO_FILTER
+
+config HMP_FAST_CPU_MASK
+ string "HMP scheduler fast CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the fast CPUs in the system as a list string,
+	  e.g. CPUs 0 and 1 should be specified as 0-1.
+
+config HMP_SLOW_CPU_MASK
+ string "HMP scheduler slow CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the slow CPUs in the system as a list string,
+	  e.g. CPUs 0 and 1 should be specified as 0-1.
+
+config HMP_VARIABLE_SCALE
+ bool "Allows changing the load tracking scale through sysfs"
+ depends on SCHED_HMP
+ help
+ When turned on, this option exports the thresholds and load average
+ period value for the load tracking patches through sysfs.
+ The values can be modified to change the rate of load accumulation
+ and the thresholds used for HMP migration.
+	  The load_avg_period_ms is the time in ms it takes a previously idle
+	  task (load average ratio 0) that starts a busy loop to reach a load
+	  average of 0.5.
+	  The up_threshold and down_threshold are the values used to decide
+	  whether to migrate to a faster CPU or back to a slower one.
+	  The {up,down}_threshold are divided by 1024 before being compared
+	  to the load average.
+	  For example, with load_avg_period_ms = 128 and up_threshold = 512,
+	  a running task with a load of 0 will be migrated to a bigger CPU after
+	  128ms, because after 128ms its load_avg_ratio is 0.5 and the real
+	  up_threshold is 0.5.
+	  This option has the same behavior as changing the Y of the load
+	  average computation to
+	  (1002/1024)^(LOAD_AVG_PERIOD/load_avg_period_ms)
+	  but it removes intermediate overflows in the computation.
+
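The numbers in the help text above can be checked with a few lines of C; LOAD_AVG_PERIOD = 32 is the scheduler constant assumed here, and load(t) = 1 - Y^t is the illustrative approximation for an idle task that starts a busy loop:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		const double LOAD_AVG_PERIOD = 32.0;	/* scheduler constant (assumed) */
		double period_ms = 128.0;		/* load_avg_period_ms */
		double y = pow(1002.0 / 1024.0, LOAD_AVG_PERIOD / period_ms);

		/* prints Y ~= 0.99459 and a load of ~0.5 after 128ms */
		printf("Y = %.5f, load after %.0fms = %.3f\n",
		       y, period_ms, 1.0 - pow(y, period_ms));
		return 0;
	}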
+config HMP_FREQUENCY_INVARIANT_SCALE
+ bool "(EXPERIMENTAL) Frequency-Invariant Tracked Load for HMP"
+ depends on HMP_VARIABLE_SCALE && CPU_FREQ
+ help
+ Scales the current load contribution in line with the frequency
+ of the CPU that the task was executed on.
+ In this version, we use a simple linear scale derived from the
+ maximum frequency reported by CPUFreq.
+ Restricting tracked load to be scaled by the CPU's frequency
+ represents the consumption of possible compute capacity
+ (rather than consumption of actual instantaneous capacity as
+ normal) and allows the HMP migration's simple threshold
+ migration strategy to interact more predictably with CPUFreq's
+ asynchronous compute capacity changes.
+
+config SCHED_HMP_LITTLE_PACKING
+ bool "Small task packing for HMP"
+ depends on SCHED_HMP
+ default n
+ help
+ Allows the HMP Scheduler to pack small tasks into CPUs in the
+ smallest HMP domain.
+ Controlled by two sysfs files in sys/kernel/hmp.
+ packing_enable: 1 to enable, 0 to disable packing. Default 1.
+ packing_limit: runqueue load ratio where a RQ is considered
+ to be full. Default is NICE_0_LOAD * 9/8.
+
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
- default "4"
+ # These have to remain sorted largest to smallest
+ default "8"
+
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP
+ help
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
source kernel/Kconfig.preempt
@@ -180,8 +335,25 @@ config HW_PERF_EVENTS
Enable hardware performance counter support for perf events. If
disabled, perf events will use software events only.
+config SYS_SUPPORTS_HUGETLBFS
+ def_bool y
+
+config ARCH_WANT_GENERAL_HUGETLB
+ def_bool y
+
+config ARCH_WANT_HUGE_PMD_SHARE
+ def_bool y if !ARM64_64K_PAGES
+
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ def_bool y
+
source "mm/Kconfig"
+config FORCE_MAX_ZONEORDER
+ int
+ default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
+ default "11"
+
endmenu
menu "Boot options"
@@ -194,6 +366,23 @@ config CMDLINE
entering them here. As a minimum, you should specify the
root device (e.g. root=/dev/nfs).
+choice
+ prompt "Kernel command line type" if CMDLINE != ""
+ default CMDLINE_FROM_BOOTLOADER
+
+config CMDLINE_FROM_BOOTLOADER
+ bool "Use bootloader kernel arguments if available"
+ help
+ Uses the command-line options passed by the boot loader. If
+ the boot loader doesn't provide any, the default kernel command
+ string provided in CMDLINE will be used.
+
+config CMDLINE_EXTEND
+ bool "Extend bootloader kernel arguments"
+ help
+ The command-line arguments provided by the boot loader will be
+ appended to the default kernel command string.
+
config CMDLINE_FORCE
bool "Always use the default kernel command string"
help
@@ -201,6 +390,36 @@ config CMDLINE_FORCE
loader passes other arguments to the kernel.
This is useful if you cannot or don't want to change the
command-line options your boot loader passes to the kernel.
+endchoice
+
+config BUILD_ARM64_APPENDED_DTB_IMAGE
+ bool "Build a concatenated Image.gz/dtb by default"
+ depends on OF
+ help
+	  Enabling this option will cause a concatenated Image.gz and list of
+	  DTBs to be built by default (instead of a standalone Image.gz).
+	  The image will be built in arch/arm64/boot/Image.gz-dtb.
+
+config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
+ string "Default dtb names"
+ depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+ help
+ Space separated list of names of dtbs to append when
+ building a concatenated Image.gz-dtb.
+
+config EFI
+ bool "UEFI runtime support"
+ depends on OF && !CPU_BIG_ENDIAN
+ select LIBFDT
+ select UCS2_STRING
+ select EFI_PARAMS_FROM_FDT
+ default y
+ help
+ This option provides support for runtime services provided
+ by UEFI firmware (such as non-volatile variables, realtime
+ clock, and platform reset). A UEFI stub is also provided to
+ allow the kernel to be booted as an EFI application. This
+ is only useful on systems that have UEFI firmware.
endmenu
@@ -229,10 +448,31 @@ config SYSVIPC_COMPAT
endmenu
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+source "drivers/cpufreq/Kconfig"
+config ARCH_SUSPEND_POSSIBLE
+ def_bool y
+
+config ARM64_CPU_SUSPEND
+ def_bool PM_SLEEP
+
+endmenu
+
+menu "CPU Power Management"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
source "net/Kconfig"
source "drivers/Kconfig"
+source "drivers/firmware/Kconfig"
+
source "fs/Kconfig"
source "arch/arm64/Kconfig.debug"
@@ -240,5 +480,8 @@ source "arch/arm64/Kconfig.debug"
source "security/Kconfig"
source "crypto/Kconfig"
+if CRYPTO
+source "arch/arm64/crypto/Kconfig"
+endif
source "lib/Kconfig"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 1a6bfe954d49..e1b0c4601b3e 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -13,6 +13,20 @@ config DEBUG_STACK_USAGE
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T output.
+config STRICT_DEVMEM
+ bool "Filter access to /dev/mem"
+ depends on MMU
+ help
+ If this option is disabled, you allow userspace (root) access to all
+ of memory, including kernel and userspace memory. Accidental
+ access to this is obviously disastrous, but specific access can
+ be used by people debugging the kernel.
+
+ If this option is switched on, the /dev/mem file only allows
+ userspace access to memory mapped peripherals.
+
+ If in doubt, say Y.
+
config EARLY_PRINTK
bool "Early printk support"
default y
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index c95c5cb212fd..28750a191dd8 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -20,9 +20,15 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_DEFCONFIG := defconfig
KBUILD_CFLAGS += -mgeneral-regs-only
+ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+KBUILD_CPPFLAGS += -mbig-endian
+AS += -EB
+LD += -EB
+else
KBUILD_CPPFLAGS += -mlittle-endian
AS += -EL
LD += -EL
+endif
comma = ,
@@ -37,11 +43,17 @@ TEXT_OFFSET := 0x00080000
export TEXT_OFFSET GZFLAGS
core-y += arch/arm64/kernel/ arch/arm64/mm/
+core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y := arch/arm64/lib/ $(libs-y)
libs-y += $(LIBGCC)
# Default target when executing plain make
+ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE := Image.gz-dtb
+else
KBUILD_IMAGE := Image.gz
+endif
+
KBUILD_DTBS := dtbs
all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
@@ -60,6 +72,13 @@ zinstall install: vmlinux
dtbs: scripts
$(Q)$(MAKE) $(build)=$(boot)/dts dtbs
+Image.gz-dtb: vmlinux scripts dtbs
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+PHONY += vdso_install
+vdso_install:
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore
index 8dab0bb6ae66..eb3551131b1e 100644
--- a/arch/arm64/boot/.gitignore
+++ b/arch/arm64/boot/.gitignore
@@ -1,2 +1,3 @@
Image
Image.gz
+Image.gz-dtb
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 5a0e3ab854a5..df519849fa00 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -14,14 +14,27 @@
# Based on the ia64 boot/Makefile.
#
+include $(srctree)/arch/arm64/boot/dts/Makefile
+
targets := Image Image.gz
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
$(obj)/Image.gz: $(obj)/Image FORCE
$(call if_changed,gzip)
+$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE
+ $(call if_changed,cat)
+
install: $(obj)/Image
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/Image System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 68457e9e0975..661015fd7748 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,8 +1,18 @@
-dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
+dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb \
+ fvp-base-gicv2-psci.dtb
+dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb
+dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
targets += dtbs
-targets += $(dtb-y)
-dtbs: $(addprefix $(obj)/, $(dtb-y))
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+targets += $(DTB_LIST)
-clean-files := *.dtb
+dtbs: $(addprefix $(obj)/, $(DTB_LIST))
+
+clean-files := dts/*.dtb *.dtb
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
new file mode 100644
index 000000000000..6541962f5d70
--- /dev/null
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -0,0 +1,30 @@
+/*
+ * dts file for AppliedMicro (APM) Mustang Board
+ *
+ * Copyright (C) 2013, Applied Micro Circuits Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/dts-v1/;
+
+/include/ "apm-storm.dtsi"
+
+/ {
+ model = "APM X-Gene Mustang board";
+ compatible = "apm,mustang", "apm,xgene-storm";
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
+ };
+};
+
+&serial0 {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
new file mode 100644
index 000000000000..42edf193d084
--- /dev/null
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -0,0 +1,398 @@
+/*
+ * dts file for AppliedMicro (APM) X-Gene Storm SOC
+ *
+ * Copyright (C) 2013, Applied Micro Circuits Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/ {
+ compatible = "apm,xgene-storm";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@000 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x000>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@001 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x001>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@100 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x100>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@101 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x101>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@200 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x200>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@201 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x201>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@300 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x300>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@301 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x301>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ };
+
+ gic: interrupt-controller@78010000 {
+ compatible = "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x0 0x78010000 0x0 0x1000>, /* GIC Dist */
+ <0x0 0x78020000 0x0 0x1000>, /* GIC CPU */
+ <0x0 0x78040000 0x0 0x2000>, /* GIC VCPU Control */
+ <0x0 0x78060000 0x0 0x2000>; /* GIC VCPU */
+ interrupts = <1 9 0xf04>; /* GIC Maintenance IRQ */
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 0 0xff01>, /* Secure Phys IRQ */
+ <1 13 0xff01>, /* Non-secure Phys IRQ */
+ <1 14 0xff01>, /* Virt IRQ */
+ <1 15 0xff01>; /* Hyp IRQ */
+ clock-frequency = <50000000>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ refclk: refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <1>;
+ clock-frequency = <100000000>;
+ clock-output-names = "refclk";
+ };
+
+ pcppll: pcppll@17000100 {
+ compatible = "apm,xgene-pcppll-clock";
+ #clock-cells = <1>;
+ clocks = <&refclk 0>;
+ clock-names = "pcppll";
+ reg = <0x0 0x17000100 0x0 0x1000>;
+ clock-output-names = "pcppll";
+ type = <0>;
+ };
+
+ socpll: socpll@17000120 {
+ compatible = "apm,xgene-socpll-clock";
+ #clock-cells = <1>;
+ clocks = <&refclk 0>;
+ clock-names = "socpll";
+ reg = <0x0 0x17000120 0x0 0x1000>;
+ clock-output-names = "socpll";
+ type = <1>;
+ };
+
+ socplldiv2: socplldiv2 {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <1>;
+ clocks = <&socpll 0>;
+ clock-names = "socplldiv2";
+ clock-mult = <1>;
+ clock-div = <2>;
+ clock-output-names = "socplldiv2";
+ };
+
+ qmlclk: qmlclk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ clock-names = "qmlclk";
+ reg = <0x0 0x1703C000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "qmlclk";
+ };
+
+ ethclk: ethclk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ clock-names = "ethclk";
+ reg = <0x0 0x17000000 0x0 0x1000>;
+ reg-names = "div-reg";
+ divider-offset = <0x238>;
+ divider-width = <0x9>;
+ divider-shift = <0x0>;
+ clock-output-names = "ethclk";
+ };
+
+ eth8clk: eth8clk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&ethclk 0>;
+ clock-names = "eth8clk";
+ reg = <0x0 0x1702C000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "eth8clk";
+ };
+
+ sataphy1clk: sataphy1clk@1f21c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f21c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sataphy1clk";
+ status = "disabled";
+ csr-offset = <0x4>;
+ csr-mask = <0x00>;
+ enable-offset = <0x0>;
+ enable-mask = <0x06>;
+ };
+
+ sataphy2clk: sataphy2clk@1f22c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f22c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sataphy2clk";
+ status = "ok";
+ csr-offset = <0x4>;
+ csr-mask = <0x3a>;
+ enable-offset = <0x0>;
+ enable-mask = <0x06>;
+ };
+
+ sataphy3clk: sataphy3clk@1f23c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f23c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sataphy3clk";
+ status = "ok";
+ csr-offset = <0x4>;
+ csr-mask = <0x3a>;
+ enable-offset = <0x0>;
+ enable-mask = <0x06>;
+ };
+
+ sata01clk: sata01clk@1f21c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f21c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sata01clk";
+ csr-offset = <0x4>;
+ csr-mask = <0x05>;
+ enable-offset = <0x0>;
+ enable-mask = <0x39>;
+ };
+
+ sata23clk: sata23clk@1f22c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f22c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sata23clk";
+ csr-offset = <0x4>;
+ csr-mask = <0x05>;
+ enable-offset = <0x0>;
+ enable-mask = <0x39>;
+ };
+
+ sata45clk: sata45clk@1f23c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f23c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sata45clk";
+ csr-offset = <0x4>;
+ csr-mask = <0x05>;
+ enable-offset = <0x0>;
+ enable-mask = <0x39>;
+ };
+
+ rtcclk: rtcclk@17000000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x17000000 0x0 0x2000>;
+ reg-names = "csr-reg";
+ csr-offset = <0xc>;
+ csr-mask = <0x2>;
+ enable-offset = <0x10>;
+ enable-mask = <0x2>;
+ clock-output-names = "rtcclk";
+ };
+ };
+
+ serial0: serial@1c020000 {
+ status = "disabled";
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0 0x1c020000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4c 0x4>;
+ };
+
+ serial1: serial@1c021000 {
+ status = "disabled";
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0 0x1c021000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4d 0x4>;
+ };
+
+ serial2: serial@1c022000 {
+ status = "disabled";
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0 0x1c022000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4e 0x4>;
+ };
+
+ serial3: serial@1c023000 {
+ status = "disabled";
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0 0x1c023000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4f 0x4>;
+ };
+
+ phy1: phy@1f21a000 {
+ compatible = "apm,xgene-phy";
+ reg = <0x0 0x1f21a000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&sataphy1clk 0>;
+ status = "disabled";
+ apm,tx-boost-gain = <30 30 30 30 30 30>;
+ apm,tx-eye-tuning = <2 10 10 2 10 10>;
+ };
+
+ phy2: phy@1f22a000 {
+ compatible = "apm,xgene-phy";
+ reg = <0x0 0x1f22a000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&sataphy2clk 0>;
+ status = "ok";
+ apm,tx-boost-gain = <30 30 30 30 30 30>;
+ apm,tx-eye-tuning = <1 10 10 2 10 10>;
+ };
+
+ phy3: phy@1f23a000 {
+ compatible = "apm,xgene-phy";
+ reg = <0x0 0x1f23a000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&sataphy3clk 0>;
+ status = "ok";
+ apm,tx-boost-gain = <31 31 31 31 31 31>;
+ apm,tx-eye-tuning = <2 10 10 2 10 10>;
+ };
+
+ sata1: sata@1a000000 {
+ compatible = "apm,xgene-ahci";
+ reg = <0x0 0x1a000000 0x0 0x1000>,
+ <0x0 0x1f210000 0x0 0x1000>,
+ <0x0 0x1f21d000 0x0 0x1000>,
+ <0x0 0x1f21e000 0x0 0x1000>,
+ <0x0 0x1f217000 0x0 0x1000>;
+ interrupts = <0x0 0x86 0x4>;
+ status = "disabled";
+ clocks = <&sata01clk 0>;
+ phys = <&phy1 0>;
+ phy-names = "sata-phy";
+ };
+
+ sata2: sata@1a400000 {
+ compatible = "apm,xgene-ahci";
+ reg = <0x0 0x1a400000 0x0 0x1000>,
+ <0x0 0x1f220000 0x0 0x1000>,
+ <0x0 0x1f22d000 0x0 0x1000>,
+ <0x0 0x1f22e000 0x0 0x1000>,
+ <0x0 0x1f227000 0x0 0x1000>;
+ interrupts = <0x0 0x87 0x4>;
+ status = "ok";
+ clocks = <&sata23clk 0>;
+ phys = <&phy2 0>;
+ phy-names = "sata-phy";
+ };
+
+ sata3: sata@1a800000 {
+ compatible = "apm,xgene-ahci";
+ reg = <0x0 0x1a800000 0x0 0x1000>,
+ <0x0 0x1f230000 0x0 0x1000>,
+ <0x0 0x1f23d000 0x0 0x1000>,
+ <0x0 0x1f23e000 0x0 0x1000>;
+ interrupts = <0x0 0x88 0x4>;
+ status = "ok";
+ clocks = <&sata45clk 0>;
+ phys = <&phy3 0>;
+ phy-names = "sata-phy";
+ };
+
+ rtc: rtc@10510000 {
+ compatible = "apm,xgene-rtc";
+ reg = <0x0 0x10510000 0x0 0x400>;
+ interrupts = <0x0 0x46 0x4>;
+ #clock-cells = <1>;
+ clocks = <&rtcclk 0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/clcd-panels.dtsi b/arch/arm64/boot/dts/clcd-panels.dtsi
new file mode 100644
index 000000000000..0b0ff6ead4b2
--- /dev/null
+++ b/arch/arm64/boot/dts/clcd-panels.dtsi
@@ -0,0 +1,52 @@
+/*
+ * ARM Ltd. Versatile Express
+ *
+ */
+
+/ {
+ panels {
+ panel@0 {
+ compatible = "panel";
+ mode = "VGA";
+ refresh = <60>;
+ xres = <640>;
+ yres = <480>;
+ pixclock = <39721>;
+ left_margin = <40>;
+ right_margin = <24>;
+ upper_margin = <32>;
+ lower_margin = <11>;
+ hsync_len = <96>;
+ vsync_len = <2>;
+ sync = <0>;
+ vmode = "FB_VMODE_NONINTERLACED";
+
+ tim2 = "TIM2_BCD", "TIM2_IPC";
+ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+ bpp = <16>;
+ };
+
+ panel@1 {
+ compatible = "panel";
+ mode = "XVGA";
+ refresh = <60>;
+ xres = <1024>;
+ yres = <768>;
+ pixclock = <15748>;
+ left_margin = <152>;
+ right_margin = <48>;
+ upper_margin = <23>;
+ lower_margin = <3>;
+ hsync_len = <104>;
+ vsync_len = <4>;
+ sync = <0>;
+ vmode = "FB_VMODE_NONINTERLACED";
+
+ tim2 = "TIM2_BCD", "TIM2_IPC";
+ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+ bpp = <16>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 84fcc5018284..519c4b2c0687 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -6,6 +6,8 @@
/dts-v1/;
+/memreserve/ 0x80000000 0x00010000;
+
/ {
model = "Foundation-v8A";
compatible = "arm,foundation-aarch64", "arm,vexpress";
diff --git a/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts b/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts
new file mode 100644
index 000000000000..ed55571e06dd
--- /dev/null
+++ b/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+/memreserve/ 0x80000000 0x00010000;
+
+/ {
+};
+
+/ {
+ model = "FVP Base";
+ compatible = "arm,vfp-base", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_suspend = <0x84000001>;
+ cpu_off = <0x84000002>;
+ cpu_on = <0xc4000003>;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ idle-states {
+ entry-method = "arm,psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ entry-method-param = <0x0010000>;
+ entry-latency-us = <40>;
+ exit-latency-us = <100>;
+ min-residency-us = <150>;
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ entry-method-param = <0x1010000>;
+ entry-latency-us = <500>;
+ exit-latency-us = <1000>;
+ min-residency-us = <2500>;
+ };
+ };
+
+ big0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ big1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ big2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ big3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ little0: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ little1: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ little2: cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x102>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+ little3: cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x103>;
+ enable-method = "psci";
+ clock-frequency = <1000000>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&big0>;
+ };
+ core1 {
+ cpu = <&big1>;
+ };
+ core2 {
+ cpu = <&big2>;
+ };
+ core3 {
+ cpu = <&big3>;
+ };
+ };
+ cluster1 {
+ core0 {
+ cpu = <&little0>;
+ };
+ core1 {
+ cpu = <&little1>;
+ };
+ core2 {
+ cpu = <&little2>;
+ };
+ core3 {
+ cpu = <&little3>;
+ };
+ };
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0 0x80000000>,
+ <0x00000008 0x80000000 0 0x80000000>;
+ };
+
+ gic: interrupt-controller@2f000000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0x2f000000 0 0x10000>,
+ <0x0 0x2c000000 0 0x2000>,
+ <0x0 0x2c010000 0 0x2000>,
+ <0x0 0x2c02F000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 0xff01>,
+ <1 14 0xff01>,
+ <1 11 0xff01>,
+ <1 10 0xff01>;
+ clock-frequency = <100000000>;
+ };
+
+ timer@2a810000 {
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x0 0x2a810000 0x0 0x10000>;
+ clock-frequency = <100000000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ frame@2a820000 {
+ frame-number = <0>;
+ interrupts = <0 25 4>;
+ reg = <0x0 0x2a820000 0x0 0x10000>;
+ };
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <0 60 4>,
+ <0 61 4>,
+ <0 62 4>,
+ <0 63 4>;
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm64/boot/dts/juno.dts b/arch/arm64/boot/dts/juno.dts
new file mode 100644
index 000000000000..f260d702041c
--- /dev/null
+++ b/arch/arm64/boot/dts/juno.dts
@@ -0,0 +1,498 @@
+/*
+ * ARM Ltd. Juno Platform
+ *
+ * Fast Models FVP v2 support
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ model = "Juno";
+ compatible = "arm,juno", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &soc_uart0;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ };
+
+ cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ };
+
+ cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x102>;
+ enable-method = "psci";
+ };
+
+ cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x103>;
+ enable-method = "psci";
+ };
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0x0 0x7f000000>,
+ <0x00000008 0x80000000 0x1 0x80000000>;
+ };
+
+ /* memory@14000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x14000000 0x0 0x02000000>;
+ }; */
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0x2c010000 0 0x1000>,
+ <0x0 0x2c02f000 0 0x1000>,
+ <0x0 0x2c04f000 0 0x2000>,
+ <0x0 0x2c06f000 0 0x2000>;
+ interrupts = <GIC_PPI 9 0xf04>;
+ };
+
+ msi0: msi@2c1c0000 {
+ compatible = "arm,gic-msi";
+ reg = <0x0 0x2c1c0000 0 0x10000
+ 0x0 0x2c1d0000 0 0x10000
+ 0x0 0x2c1e0000 0 0x10000
+ 0x0 0x2c1f0000 0 0x10000>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 0xff01>,
+ <GIC_PPI 14 0xff01>,
+ <GIC_PPI 11 0xff01>,
+ <GIC_PPI 10 0xff01>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 60 4>,
+ <GIC_SPI 61 4>,
+ <GIC_SPI 62 4>,
+ <GIC_SPI 63 4>;
+ };
+
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_suspend = <0xC4000001>;
+ cpu_off = <0x84000002>;
+ cpu_on = <0xC4000003>;
+ migrate = <0xC4000005>;
+ };
+
+ pci0: pci@30000000 {
+ compatible = "arm,pcie-xr3";
+ device_type = "pci";
+ reg = <0 0x7ff30000 0 0x1000
+ 0 0x7ff20000 0 0x10000
+ 0 0x40000000 0 0x10000000>;
+ bus-range = <0 255>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x00000000 0x00 0x5ff00000 0x0 0x00100000
+ 0x02000000 0x0 0x00000000 0x40 0x00000000 0x0 0x80000000
+ 0x42000000 0x0 0x80000000 0x40 0x80000000 0x0 0x80000000>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &gic 0 136 4
+ 0 0 0 2 &gic 0 137 4
+ 0 0 0 3 &gic 0 138 4
+ 0 0 0 4 &gic 0 139 4>;
+ };
+
+ scpi: scpi@2b1f0000 {
+ compatible = "arm,scpi-mhu";
+ reg = <0x0 0x2b1f0000 0x0 0x10000>, /* MHU registers */
+ <0x0 0x2e000000 0x0 0x10000>; /* Payload area */
+ interrupts = <0 36 4>, /* low priority interrupt */
+ <0 35 4>, /* high priority interrupt */
+ <0 37 4>; /* secure channel interrupt */
+ #clock-cells = <1>;
+ clock-output-names = "a57", "a53", "gpu", "hdlcd0", "hdlcd1";
+ };
+
+ hdlcd0_osc: scpi_osc@3 {
+ compatible = "arm,scpi-osc";
+ #clock-cells = <0>;
+ clocks = <&scpi 3>;
+ frequency-range = <23000000 210000000>;
+ clock-output-names = "pxlclk0";
+ };
+
+ hdlcd1_osc: scpi_osc@4 {
+ compatible = "arm,scpi-osc";
+ #clock-cells = <0>;
+ clocks = <&scpi 4>;
+ frequency-range = <23000000 210000000>;
+ clock-output-names = "pxlclk1";
+ };
+
+ soc_uartclk: refclk72738khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <7273800>;
+ clock-output-names = "juno:uartclk";
+ };
+
+ soc_refclk24mhz: clk24mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "juno:clk24mhz";
+ };
+
+ mb_eth25mhz: clk25mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "ethclk25mhz";
+ };
+
+ soc_usb48mhz: clk48mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ clock-output-names = "clk48mhz";
+ };
+
+ soc_smc50mhz: clk50mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = "smc_clk";
+ };
+
+ soc_refclk100mhz: refclk100mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "apb_pclk";
+ };
+
+ soc_faxiclk: refclk533mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <533000000>;
+ clock-output-names = "faxi_clk";
+ };
+
+ soc_fixed_3v3: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ memory-controller@7ffd0000 {
+ compatible = "arm,pl354", "arm,primecell";
+ reg = <0 0x7ffd0000 0 0x1000>;
+ interrupts = <0 86 4>,
+ <0 87 4>;
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ chip5-memwidth = <16>;
+ };
+
+ dma0: dma@7ff00000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0x7ff00000 0 0x1000>;
+ interrupts = <0 95 4>,
+ <0 88 4>,
+ <0 89 4>,
+ <0 90 4>,
+ <0 91 4>,
+ <0 108 4>,
+ <0 109 4>,
+ <0 110 4>,
+ <0 111 4>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
+ clocks = <&soc_faxiclk>;
+ clock-names = "apb_pclk";
+ };
+
+ soc_uart0: uart@7ff80000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0x7ff80000 0x0 0x1000>;
+ interrupts = <0 83 4>;
+ clocks = <&soc_uartclk>, <&soc_refclk100mhz>;
+ clock-names = "uartclk", "apb_pclk";
+ dmas = <&dma0 1
+ &dma0 2>;
+ dma-names = "rx", "tx";
+ };
+
+ /* This UART is reserved for secure software.
+ soc_uart1: uart@7ff70000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0x7ff70000 0x0 0x1000>;
+ interrupts = <0 84 4>;
+ clocks = <&soc_uartclk>, <&soc_refclk100mhz>;
+ clock-names = "uartclk", "apb_pclk";
+ }; */
+
+ ulpi_phy: phy@0 {
+ compatible = "phy-ulpi-generic";
+ reg = <0x0 0x94 0x0 0x4>;
+ phy-id = <0>;
+ };
+
+ ehci@7ffc0000 {
+ compatible = "snps,ehci-h20ahb";
+ /* compatible = "arm,h20ahb-ehci"; */
+ reg = <0x0 0x7ffc0000 0x0 0x10000>;
+ interrupts = <0 117 4>;
+ clocks = <&soc_usb48mhz>;
+ clock-names = "otg";
+ phys = <&ulpi_phy>;
+ };
+
+ ohci@7ffb0000 {
+ compatible = "generic-ohci";
+ reg = <0x0 0x7ffb0000 0x0 0x10000>;
+ interrupts = <0 116 4>;
+ clocks = <&soc_usb48mhz>;
+ clock-names = "otg";
+ };
+
+ i2c@7ffa0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0x7ffa0000 0x0 0x1000>;
+ interrupts = <0 104 4>;
+ clock-frequency = <400000>;
+ i2c-sda-hold-time-ns = <500>;
+ clocks = <&soc_smc50mhz>;
+
+ dvi0: dvi-transmitter@70 {
+ compatible = "nxp,tda998x";
+ reg = <0x70>;
+ };
+
+ dvi1: dvi-transmitter@71 {
+ compatible = "nxp,tda998x";
+ reg = <0x71>;
+ };
+ };
+
+ /* mmci@1c050000 {
+ compatible = "arm,pl180", "arm,primecell";
+ reg = <0x0 0x1c050000 0x0 0x1000>;
+ interrupts = <0 73 4>,
+ <0 74 4>;
+ max-frequency = <12000000>;
+ vmmc-supply = <&soc_fixed_3v3>;
+ clocks = <&soc_refclk24mhz>, <&soc_refclk100mhz>;
+ clock-names = "mclk", "apb_pclk";
+ }; */
+
+ hdlcd@7ff60000 {
+ compatible = "arm,hdlcd";
+ reg = <0 0x7ff60000 0 0x1000>;
+ interrupts = <0 85 4>;
+ clocks = <&hdlcd0_osc>;
+ clock-names = "pxlclk";
+ i2c-slave = <&dvi0>;
+
+ /* display-timings {
+ native-mode = <&timing0>;
+ timing0: timing@0 {
+ /* 1024 x 768 framebuffer, standard VGA timings * /
+ clock-frequency = <65000>;
+ hactive = <1024>;
+ vactive = <768>;
+ hfront-porch = <24>;
+ hback-porch = <160>;
+ hsync-len = <136>;
+ vfront-porch = <3>;
+ vback-porch = <29>;
+ vsync-len = <6>;
+ };
+ }; */
+ };
+
+ hdlcd@7ff50000 {
+ compatible = "arm,hdlcd";
+ reg = <0 0x7ff50000 0 0x1000>;
+ interrupts = <0 93 4>;
+ clocks = <&hdlcd1_osc>;
+ clock-names = "pxlclk";
+ i2c-slave = <&dvi1>;
+
+ display-timings {
+ native-mode = <&timing1>;
+ timing1: timing@1 {
+ /* 1024 x 768 framebuffer, standard VGA timings */
+ clock-frequency = <65000>;
+ hactive = <1024>;
+ vactive = <768>;
+ hfront-porch = <24>;
+ hback-porch = <160>;
+ hsync-len = <136>;
+ vfront-porch = <3>;
+ vback-porch = <29>;
+ vsync-len = <6>;
+ };
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 15>;
+ interrupt-map = <0 0 0 &gic 0 68 4>,
+ <0 0 1 &gic 0 69 4>,
+ <0 0 2 &gic 0 70 4>,
+ <0 0 3 &gic 0 160 4>,
+ <0 0 4 &gic 0 161 4>,
+ <0 0 5 &gic 0 162 4>,
+ <0 0 6 &gic 0 163 4>,
+ <0 0 7 &gic 0 164 4>,
+ <0 0 8 &gic 0 165 4>,
+ <0 0 9 &gic 0 166 4>,
+ <0 0 10 &gic 0 167 4>,
+ <0 0 11 &gic 0 168 4>,
+ <0 0 12 &gic 0 169 4>;
+
+ motherboard {
+ model = "V2M-Juno";
+ arm,hbi = <0x252>;
+ arm,vexpress,site = <0>;
+ arm,v2m-memory-map = "rs1";
+ compatible = "arm,vexpress,v2p-p1", "simple-bus";
+ #address-cells = <2>; /* SMB chipselect number and offset */
+ #size-cells = <1>;
+ #interrupt-cells = <1>;
+ ranges;
+
+ usb@5,00000000 {
+ compatible = "nxp,usb-isp1763";
+ reg = <5 0x00000000 0x20000>;
+ bus-width = <16>;
+ interrupts = <4>;
+ };
+
+ ethernet@2,00000000 {
+ compatible = "smsc,lan9118", "smsc,lan9115";
+ reg = <2 0x00000000 0x10000>;
+ interrupts = <3>;
+ phy-mode = "mii";
+ reg-io-width = <4>;
+ smsc,irq-active-high;
+ smsc,irq-push-pull;
+ clocks = <&mb_eth25mhz>;
+ vdd33a-supply = <&soc_fixed_3v3>; /* change this */
+ vddvario-supply = <&soc_fixed_3v3>; /* and this */
+ };
+
+ iofpga@3,00000000 {
+ compatible = "arm,amba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 3 0 0x200000>;
+
+ kmi@060000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x060000 0x1000>;
+ interrupts = <8>;
+ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ kmi@070000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x070000 0x1000>;
+ interrupts = <8>;
+ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ wdt@0f0000 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0f0000 0x10000>;
+ interrupts = <7>;
+ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
+ clock-names = "wdogclk", "apb_pclk";
+ };
+
+ v2m_timer01: timer@110000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x110000 0x10000>;
+ interrupts = <9>;
+ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
+ clock-names = "timclken1", "apb_pclk";
+ };
+
+ v2m_timer23: timer@120000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x120000 0x10000>;
+ interrupts = <9>;
+ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
+ clock-names = "timclken1", "apb_pclk";
+ };
+
+ rtc@170000 {
+ compatible = "arm,pl031", "arm,primecell";
+ reg = <0x170000 0x10000>;
+ interrupts = <0>;
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts
index 572005ea2217..28ed4ba3391a 100644
--- a/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts
@@ -27,37 +27,70 @@
serial3 = &v2m_serial3;
};
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ /*
+ * Function IDs usage and compliancy with PSCI v0.2 still
+ * under discussion. Current IDs should be considered
+ * temporary for demonstration purposes.
+ */
+ cpu_suspend = <0x84000001>;
+ cpu_off = <0x84000002>;
+ cpu_on = <0x84000003>;
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
+ idle-states {
+ entry-method = "arm,psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ entry-method-param = <0x0010000>;
+ entry-latency-us = <40>;
+ exit-latency-us = <100>;
+ min-residency-us = <150>;
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ entry-method-param = <0x1010000>;
+ entry-latency-us = <500>;
+ exit-latency-us = <1000>;
+ min-residency-us = <2500>;
+ };
+ };
+
cpu@0 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x0>;
- enable-method = "spin-table";
- cpu-release-addr = <0x0 0x8000fff8>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu@1 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x1>;
- enable-method = "spin-table";
- cpu-release-addr = <0x0 0x8000fff8>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu@2 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x2>;
- enable-method = "spin-table";
- cpu-release-addr = <0x0 0x8000fff8>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
cpu@3 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x3>;
- enable-method = "spin-table";
- cpu-release-addr = <0x0 0x8000fff8>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
};
};
@@ -157,3 +190,5 @@
/include/ "rtsm_ve-motherboard.dtsi"
};
};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
index b45e5f39f577..b683d4703582 100644
--- a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
@@ -182,6 +182,15 @@
interrupts = <14>;
clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
clock-names = "clcdclk", "apb_pclk";
+ mode = "XVGA";
+ use_dma = <0>;
+ framebuffer = <0x18000000 0x00180000>;
+ };
+
+ virtio_block@130000 {
+ compatible = "virtio,mmio";
+ reg = <0x130000 0x200>;
+ interrupts = <42>;
};
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 8d9696adb440..8e323147c375 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -19,13 +18,17 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
CONFIG_SMP=y
+CONFIG_PREEMPT=y
CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_CMA=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -42,29 +45,42 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_BLK_DEV is not set
+CONFIG_DMA_CMA=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_MII=y
CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_FB=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
@@ -86,3 +102,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO=y
# CONFIG_FTRACE is not set
CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_DMA_CMA=y
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
new file mode 100644
index 000000000000..5562652c5316
--- /dev/null
+++ b/arch/arm64/crypto/Kconfig
@@ -0,0 +1,53 @@
+
+menuconfig ARM64_CRYPTO
+ bool "ARM64 Accelerated Cryptographic Algorithms"
+ depends on ARM64
+ help
+ Say Y here to choose from a selection of cryptographic algorithms
+ implemented using ARM64 specific CPU features or instructions.
+
+if ARM64_CRYPTO
+
+config CRYPTO_SHA1_ARM64_CE
+ tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_HASH
+
+config CRYPTO_SHA2_ARM64_CE
+ tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_HASH
+
+config CRYPTO_GHASH_ARM64_CE
+ tristate "GHASH (for GCM chaining mode) using ARMv8 Crypto Extensions"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_HASH
+
+config CRYPTO_AES_ARM64_CE
+ tristate "AES core cipher using ARMv8 Crypto Extensions"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+
+config CRYPTO_AES_ARM64_CE_CCM
+ tristate "AES in CCM mode using ARMv8 Crypto Extensions"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ select CRYPTO_AEAD
+
+config CRYPTO_AES_ARM64_CE_BLK
+ tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES
+ select CRYPTO_ABLK_HELPER
+
+config CRYPTO_AES_ARM64_NEON_BLK
+ tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES
+ select CRYPTO_ABLK_HELPER
+
+endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
new file mode 100644
index 000000000000..a3f935fde975
--- /dev/null
+++ b/arch/arm64/crypto/Makefile
@@ -0,0 +1,38 @@
+#
+# linux/arch/arm64/crypto/Makefile
+#
+# Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
+sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
+
+obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
+sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
+
+obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
+ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
+CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
+aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_CE_BLK) += aes-ce-blk.o
+aes-ce-blk-y := aes-glue-ce.o aes-ce.o
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
+aes-neon-blk-y := aes-glue-neon.o aes-neon.o
+
+AFLAGS_aes-ce.o := -DINTERLEAVE=2 -DINTERLEAVE_INLINE
+AFLAGS_aes-neon.o := -DINTERLEAVE=4
+
+CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
+
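+# Both aes-glue-ce.o and aes-glue-neon.o are generated from the single
+# aes-glue.c source via the pattern rule below; only the CE variant is
+# compiled with -DUSE_V8_CRYPTO_EXTENSIONS (set above).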
+$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
+ $(call if_changed_rule,cc_o_c)
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
new file mode 100644
index 000000000000..432e4841cd81
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -0,0 +1,222 @@
+/*
+ * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+ .text
+ .arch armv8-a+crypto
+
+ /*
+ * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
+ * u32 *macp, u8 const rk[], u32 rounds);
+ */
+ENTRY(ce_aes_ccm_auth_data)
+ ldr w8, [x3] /* leftover from prev round? */
+ ld1 {v0.2d}, [x0] /* load mac */
+ cbz w8, 1f
+ sub w8, w8, #16
+ eor v1.16b, v1.16b, v1.16b
+0: ldrb w7, [x1], #1 /* get 1 byte of input */
+ subs w2, w2, #1
+ add w8, w8, #1
+ ins v1.b[0], w7
+ ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */
+ beq 8f /* out of input? */
+ cbnz w8, 0b
+ eor v0.16b, v0.16b, v1.16b
+1: ld1 {v3.2d}, [x4] /* load first round key */
+ prfm pldl1strm, [x1]
+ cmp w5, #12 /* which key size? */
+ add x6, x4, #16
+ sub w7, w5, #2 /* modified # of rounds */
+ bmi 2f
+ bne 5f
+ mov v5.16b, v3.16b
+ b 4f
+2: mov v4.16b, v3.16b
+ ld1 {v5.2d}, [x6], #16 /* load 2nd round key */
+3: aese v0.16b, v4.16b
+ aesmc v0.16b, v0.16b
+4: ld1 {v3.2d}, [x6], #16 /* load next round key */
+ aese v0.16b, v5.16b
+ aesmc v0.16b, v0.16b
+5: ld1 {v4.2d}, [x6], #16 /* load next round key */
+ subs w7, w7, #3
+ aese v0.16b, v3.16b
+ aesmc v0.16b, v0.16b
+ ld1 {v5.2d}, [x6], #16 /* load next round key */
+ bpl 3b
+ aese v0.16b, v4.16b
+ subs w2, w2, #16 /* last data? */
+ eor v0.16b, v0.16b, v5.16b /* final round */
+ bmi 6f
+ ld1 {v1.16b}, [x1], #16 /* load next input block */
+ eor v0.16b, v0.16b, v1.16b /* xor with mac */
+ bne 1b
+6: st1 {v0.2d}, [x0] /* store mac */
+ beq 10f
+ adds w2, w2, #16
+ beq 10f
+ mov w8, w2
+7: ldrb w7, [x1], #1
+ umov w6, v0.b[0]
+ eor w6, w6, w7
+ strb w6, [x0], #1
+ subs w2, w2, #1
+ beq 10f
+ ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
+ b 7b
+8: mov w7, w8
+ add w8, w8, #16
+9: ext v1.16b, v1.16b, v1.16b, #1
+ adds w7, w7, #1
+ bne 9b
+ eor v0.16b, v0.16b, v1.16b
+ st1 {v0.2d}, [x0]
+10: str w8, [x3]
+ ret
+ENDPROC(ce_aes_ccm_auth_data)
+
+ /*
+ * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
+ * u32 rounds);
+ */
+ENTRY(ce_aes_ccm_final)
+ ld1 {v3.2d}, [x2], #16 /* load first round key */
+ ld1 {v0.2d}, [x0] /* load mac */
+ cmp w3, #12 /* which key size? */
+ sub w3, w3, #2 /* modified # of rounds */
+ ld1 {v1.2d}, [x1] /* load 1st ctriv */
+ bmi 0f
+ bne 3f
+ mov v5.16b, v3.16b
+ b 2f
+0: mov v4.16b, v3.16b
+1: ld1 {v5.2d}, [x2], #16 /* load next round key */
+ aese v0.16b, v4.16b
+ aese v1.16b, v4.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+2: ld1 {v3.2d}, [x2], #16 /* load next round key */
+ aese v0.16b, v5.16b
+ aese v1.16b, v5.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+3: ld1 {v4.2d}, [x2], #16 /* load next round key */
+ subs w3, w3, #3
+ aese v0.16b, v3.16b
+ aese v1.16b, v3.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+ bpl 1b
+ aese v0.16b, v4.16b
+ aese v1.16b, v4.16b
+ /* final round key cancels out */
+ eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
+ st1 {v0.2d}, [x0] /* store result */
+ ret
+ENDPROC(ce_aes_ccm_final)
+
+ .macro aes_ccm_do_crypt,enc
+ ldr x8, [x6, #8] /* load lower ctr */
+ ld1 {v0.2d}, [x5] /* load mac */
+ rev x8, x8 /* keep swabbed ctr in reg */
+0: /* outer loop */
+ ld1 {v1.1d}, [x6] /* load upper ctr */
+ prfm pldl1strm, [x1]
+ add x8, x8, #1
+ rev x9, x8
+ cmp w4, #12 /* which key size? */
+ sub w7, w4, #2 /* get modified # of rounds */
+ ins v1.d[1], x9 /* no carry in lower ctr */
+ ld1 {v3.2d}, [x3] /* load first round key */
+ add x10, x3, #16
+ bmi 1f
+ bne 4f
+ mov v5.16b, v3.16b
+ b 3f
+1: mov v4.16b, v3.16b
+ ld1 {v5.2d}, [x10], #16 /* load 2nd round key */
+2: /* inner loop: 3 rounds, 2x interleaved */
+ aese v0.16b, v4.16b
+ aese v1.16b, v4.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+3: ld1 {v3.2d}, [x10], #16 /* load next round key */
+ aese v0.16b, v5.16b
+ aese v1.16b, v5.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+4: ld1 {v4.2d}, [x10], #16 /* load next round key */
+ subs w7, w7, #3
+ aese v0.16b, v3.16b
+ aese v1.16b, v3.16b
+ aesmc v0.16b, v0.16b
+ aesmc v1.16b, v1.16b
+ ld1 {v5.2d}, [x10], #16 /* load next round key */
+ bpl 2b
+ aese v0.16b, v4.16b
+ aese v1.16b, v4.16b
+ subs w2, w2, #16
+ bmi 6f /* partial block? */
+ ld1 {v2.16b}, [x1], #16 /* load next input block */
+ .if \enc == 1
+ eor v2.16b, v2.16b, v5.16b /* final round enc+mac */
+ eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */
+ .else
+ eor v2.16b, v2.16b, v1.16b /* xor with crypted ctr */
+ eor v1.16b, v2.16b, v5.16b /* final round enc */
+ .endif
+ eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
+ st1 {v1.16b}, [x0], #16 /* write output block */
+ bne 0b
+ rev x8, x8
+ st1 {v0.2d}, [x5] /* store mac */
+ str x8, [x6, #8] /* store lsb end of ctr (BE) */
+5: ret
+
+6: eor v0.16b, v0.16b, v5.16b /* final round mac */
+ eor v1.16b, v1.16b, v5.16b /* final round enc */
+ st1 {v0.2d}, [x5] /* store mac */
+ add w2, w2, #16 /* process partial tail block */
+7: ldrb w9, [x1], #1 /* get 1 byte of input */
+ umov w6, v1.b[0] /* get top crypted ctr byte */
+ umov w7, v0.b[0] /* get top mac byte */
+ .if \enc == 1
+ eor w7, w7, w9
+ eor w9, w9, w6
+ .else
+ eor w9, w9, w6
+ eor w7, w7, w9
+ .endif
+ strb w9, [x0], #1 /* store out byte */
+ strb w7, [x5], #1 /* store mac byte */
+ subs w2, w2, #1
+ beq 5b
+ ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */
+ ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */
+ b 7b
+ .endm
+
+ /*
+ * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
+ * u8 const rk[], u32 rounds, u8 mac[],
+ * u8 ctr[]);
+ * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
+ * u8 const rk[], u32 rounds, u8 mac[],
+ * u8 ctr[]);
+ */
+ENTRY(ce_aes_ccm_encrypt)
+ aes_ccm_do_crypt 1
+ENDPROC(ce_aes_ccm_encrypt)
+
+ENTRY(ce_aes_ccm_decrypt)
+ aes_ccm_do_crypt 0
+ENDPROC(ce_aes_ccm_decrypt)
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
new file mode 100644
index 000000000000..9e6cdde9b43d
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -0,0 +1,297 @@
+/*
+ * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+static int num_rounds(struct crypto_aes_ctx *ctx)
+{
+ /*
+ * # of rounds specified by AES:
+ * 128 bit key 10 rounds
+ * 192 bit key 12 rounds
+ * 256 bit key 14 rounds
+ * => n byte key => 6 + (n/4) rounds
+ */
+ return 6 + ctx->key_length / 4;
+}
+
+asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
+ u32 *macp, u32 const rk[], u32 rounds);
+
+asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
+ u32 const rk[], u32 rounds, u8 mac[],
+ u8 ctr[]);
+
+asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
+ u32 const rk[], u32 rounds, u8 mac[],
+ u8 ctr[]);
+
+asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
+ u32 rounds);
+
+static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = crypto_aes_expand_key(ctx, in_key, key_len);
+ if (!ret)
+ return 0;
+
+ tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+}
+
+static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ if ((authsize & 1) || authsize < 4)
+ return -EINVAL;
+ return 0;
+}
+
+static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ __be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
+ u32 l = req->iv[0] + 1;
+
+ /* verify that CCM dimension 'L' is set correctly in the IV */
+ if (l < 2 || l > 8)
+ return -EINVAL;
+
+ /* verify that msglen can in fact be represented in L bytes */
+ if (l < 4 && msglen >> (8 * l))
+ return -EOVERFLOW;
+
+ /*
+ * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
+ * uses a u32 type to represent msglen so the top 4 bytes are always 0.
+ */
+ n[0] = 0;
+ n[1] = cpu_to_be32(msglen);
+
+ memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);
+
+ /*
+ * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
+ * - bits 0..2 : max # of bytes required to represent msglen, minus 1
+ * (already set by caller)
+ * - bits 3..5 : size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
+ * - bit 6 : indicates presence of authenticate-only data
+ */
+ maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
+ if (req->assoclen)
+ maciv[0] |= 0x40;
+
+ memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
+ return 0;
+}
+
+static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+ struct __packed { __be16 l; __be32 h; u16 len; } ltag;
+ struct scatter_walk walk;
+ u32 len = req->assoclen;
+ u32 macp = 0;
+
+ /* prepend the AAD with a length tag */
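+ /* (RFC 3610: AAD lengths below 0xff00 are encoded in two bytes;
+ * larger ones as the 0xfffe marker plus a 32-bit big-endian length) */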
+ if (len < 0xff00) {
+ ltag.l = cpu_to_be16(len);
+ ltag.len = 2;
+ } else {
+ ltag.l = cpu_to_be16(0xfffe);
+ put_unaligned_be32(len, &ltag.h);
+ ltag.len = 6;
+ }
+
+ ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
+ num_rounds(ctx));
+ scatterwalk_start(&walk, req->assoc);
+
+ do {
+ u32 n = scatterwalk_clamp(&walk, len);
+ u8 *p;
+
+ if (!n) {
+ scatterwalk_start(&walk, sg_next(walk.sg));
+ n = scatterwalk_clamp(&walk, len);
+ }
+ p = scatterwalk_map(&walk);
+ ce_aes_ccm_auth_data(mac, p, n, &macp, ctx->key_enc,
+ num_rounds(ctx));
+ len -= n;
+
+ scatterwalk_unmap(p);
+ scatterwalk_advance(&walk, n);
+ scatterwalk_done(&walk, 0, len);
+ } while (len);
+}
+
+static int ccm_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+ struct blkcipher_desc desc = { .info = req->iv };
+ struct blkcipher_walk walk;
+ u8 __aligned(8) mac[AES_BLOCK_SIZE];
+ u8 buf[AES_BLOCK_SIZE];
+ u32 len = req->cryptlen;
+ int err;
+
+ err = ccm_init_mac(req, mac, len);
+ if (err)
+ return err;
+
+ kernel_neon_begin_partial(6);
+
+ if (req->assoclen)
+ ccm_calculate_auth_mac(req, mac);
+
+ /* preserve the original iv for the final round */
+ memcpy(buf, req->iv, AES_BLOCK_SIZE);
+
+ blkcipher_walk_init(&walk, req->dst, req->src, len);
+ err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
+ AES_BLOCK_SIZE);
+
+ while (walk.nbytes) {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+
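+ /* hand a partial trailing block back to the walker unless
+ * this chunk is the last one */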
+ if (walk.nbytes == len)
+ tail = 0;
+
+ ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes - tail, ctx->key_enc,
+ num_rounds(ctx), mac, walk.iv);
+
+ len -= walk.nbytes - tail;
+ err = blkcipher_walk_done(&desc, &walk, tail);
+ }
+ if (!err)
+ ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+
+ kernel_neon_end();
+
+ if (err)
+ return err;
+
+ /* copy authtag to end of dst */
+ scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
+ crypto_aead_authsize(aead), 1);
+
+ return 0;
+}
+
+static int ccm_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ struct blkcipher_desc desc = { .info = req->iv };
+ struct blkcipher_walk walk;
+ u8 __aligned(8) mac[AES_BLOCK_SIZE];
+ u8 buf[AES_BLOCK_SIZE];
+ u32 len = req->cryptlen - authsize;
+ int err;
+
+ err = ccm_init_mac(req, mac, len);
+ if (err)
+ return err;
+
+ kernel_neon_begin_partial(6);
+
+ if (req->assoclen)
+ ccm_calculate_auth_mac(req, mac);
+
+ /* preserve the original iv for the final round */
+ memcpy(buf, req->iv, AES_BLOCK_SIZE);
+
+ blkcipher_walk_init(&walk, req->dst, req->src, len);
+ err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
+ AES_BLOCK_SIZE);
+
+ while (walk.nbytes) {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+
+ if (walk.nbytes == len)
+ tail = 0;
+
+ ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes - tail, ctx->key_enc,
+ num_rounds(ctx), mac, walk.iv);
+
+ len -= walk.nbytes - tail;
+ err = blkcipher_walk_done(&desc, &walk, tail);
+ }
+ if (!err)
+ ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+
+ kernel_neon_end();
+
+ if (err)
+ return err;
+
+ /* compare calculated auth tag with the stored one */
+ scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
+ authsize, 0);
+
+ if (memcmp(mac, buf, authsize))
+ return -EBADMSG;
+ return 0;
+}
+
+static struct crypto_alg ccm_aes_alg = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm_setkey,
+ .setauthsize = ccm_setauthsize,
+ .encrypt = ccm_encrypt,
+ .decrypt = ccm_decrypt,
+ }
+};
+
+static int __init aes_mod_init(void)
+{
+ if (!(elf_hwcap & HWCAP_AES))
+ return -ENODEV;
+ return crypto_register_alg(&ccm_aes_alg);
+}
+
+static void __exit aes_mod_exit(void)
+{
+ crypto_unregister_alg(&ccm_aes_alg);
+}
+
+module_init(aes_mod_init);
+module_exit(aes_mod_exit);
+
+MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ccm(aes)");
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
new file mode 100644
index 000000000000..2075e1acae6b
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -0,0 +1,155 @@
+/*
+ * aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+struct aes_block {
+ u8 b[AES_BLOCK_SIZE];
+};
+
+static int num_rounds(struct crypto_aes_ctx *ctx)
+{
+ /*
+ * # of rounds specified by AES:
+ * 128 bit key 10 rounds
+ * 192 bit key 12 rounds
+ * 256 bit key 14 rounds
+ * => n byte key => 6 + (n/4) rounds
+ */
+ return 6 + ctx->key_length / 4;
+}
+
+static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
+{
+ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aes_block *out = (struct aes_block *)dst;
+ struct aes_block const *in = (struct aes_block *)src;
+ void *dummy0;
+ int dummy1;
+
+ kernel_neon_begin_partial(4);
+
+ __asm__(" ld1 {v0.16b}, %[in] ;"
+ " ld1 {v1.2d}, [%[key]], #16 ;"
+ " cmp %w[rounds], #10 ;"
+ " bmi 0f ;"
+ " bne 3f ;"
+ " mov v3.16b, v1.16b ;"
+ " b 2f ;"
+ "0: mov v2.16b, v1.16b ;"
+ " ld1 {v3.2d}, [%[key]], #16 ;"
+ "1: aese v0.16b, v2.16b ;"
+ " aesmc v0.16b, v0.16b ;"
+ "2: ld1 {v1.2d}, [%[key]], #16 ;"
+ " aese v0.16b, v3.16b ;"
+ " aesmc v0.16b, v0.16b ;"
+ "3: ld1 {v2.2d}, [%[key]], #16 ;"
+ " subs %w[rounds], %w[rounds], #3 ;"
+ " aese v0.16b, v1.16b ;"
+ " aesmc v0.16b, v0.16b ;"
+ " ld1 {v3.2d}, [%[key]], #16 ;"
+ " bpl 1b ;"
+ " aese v0.16b, v2.16b ;"
+ " eor v0.16b, v0.16b, v3.16b ;"
+ " st1 {v0.16b}, %[out] ;"
+
+ : [out] "=Q"(*out),
+ [key] "=r"(dummy0),
+ [rounds] "=r"(dummy1)
+ : [in] "Q"(*in),
+ "1"(ctx->key_enc),
+ "2"(num_rounds(ctx) - 2)
+ : "cc");
+
+ kernel_neon_end();
+}
+
+static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
+{
+ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aes_block *out = (struct aes_block *)dst;
+ struct aes_block const *in = (struct aes_block *)src;
+ void *dummy0;
+ int dummy1;
+
+ kernel_neon_begin_partial(4);
+
+ __asm__(" ld1 {v0.16b}, %[in] ;"
+ " ld1 {v1.2d}, [%[key]], #16 ;"
+ " cmp %w[rounds], #10 ;"
+ " bmi 0f ;"
+ " bne 3f ;"
+ " mov v3.16b, v1.16b ;"
+ " b 2f ;"
+ "0: mov v2.16b, v1.16b ;"
+ " ld1 {v3.2d}, [%[key]], #16 ;"
+ "1: aesd v0.16b, v2.16b ;"
+ " aesimc v0.16b, v0.16b ;"
+ "2: ld1 {v1.2d}, [%[key]], #16 ;"
+ " aesd v0.16b, v3.16b ;"
+ " aesimc v0.16b, v0.16b ;"
+ "3: ld1 {v2.2d}, [%[key]], #16 ;"
+ " subs %w[rounds], %w[rounds], #3 ;"
+ " aesd v0.16b, v1.16b ;"
+ " aesimc v0.16b, v0.16b ;"
+ " ld1 {v3.2d}, [%[key]], #16 ;"
+ " bpl 1b ;"
+ " aesd v0.16b, v2.16b ;"
+ " eor v0.16b, v0.16b, v3.16b ;"
+ " st1 {v0.16b}, %[out] ;"
+
+ : [out] "=Q"(*out),
+ [key] "=r"(dummy0),
+ [rounds] "=r"(dummy1)
+ : [in] "Q"(*in),
+ "1"(ctx->key_dec),
+ "2"(num_rounds(ctx) - 2)
+ : "cc");
+
+ kernel_neon_end();
+}
+
+static struct crypto_alg aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = crypto_aes_set_key,
+ .cia_encrypt = aes_cipher_encrypt,
+ .cia_decrypt = aes_cipher_decrypt
+ }
+};
+
+static int __init aes_mod_init(void)
+{
+ return crypto_register_alg(&aes_alg);
+}
+
+static void __exit aes_mod_exit(void)
+{
+ crypto_unregister_alg(&aes_alg);
+}
+
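+/* register the cipher only when the CPU advertises the AES instructions */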
+module_cpu_feature_match(AES, aes_mod_init);
+module_exit(aes_mod_exit);
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
new file mode 100644
index 000000000000..685a18f731eb
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce.S
@@ -0,0 +1,133 @@
+/*
+ * linux/arch/arm64/crypto/aes-ce.S - AES cipher for ARMv8 with
+ * Crypto Extensions
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#define AES_ENTRY(func) ENTRY(ce_ ## func)
+#define AES_ENDPROC(func) ENDPROC(ce_ ## func)
+
+ .arch armv8-a+crypto
+
+ /* preload all round keys */
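+ /* (round keys land in v17-v31; the branches below skip the leading
+ * registers that 128-bit and 192-bit keys do not use) */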
+ .macro load_round_keys, rounds, rk
+ cmp \rounds, #12
+ blo 2222f /* 128 bits */
+ beq 1111f /* 192 bits */
+ ld1 {v17.16b-v18.16b}, [\rk], #32
+1111: ld1 {v19.16b-v20.16b}, [\rk], #32
+2222: ld1 {v21.16b-v24.16b}, [\rk], #64
+ ld1 {v25.16b-v28.16b}, [\rk], #64
+ ld1 {v29.16b-v31.16b}, [\rk]
+ .endm
+
+ /* prepare for encryption with key in rk[] */
+ .macro enc_prepare, rounds, rk, ignore
+ load_round_keys \rounds, \rk
+ .endm
+
+ /* prepare for encryption (again) but with new key in rk[] */
+ .macro enc_switch_key, rounds, rk, ignore
+ load_round_keys \rounds, \rk
+ .endm
+
+ /* prepare for decryption with key in rk[] */
+ .macro dec_prepare, rounds, rk, ignore
+ load_round_keys \rounds, \rk
+ .endm
+
+ .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3
+ aes\de \i0\().16b, \k\().16b
+ .ifnb \i1
+ aes\de \i1\().16b, \k\().16b
+ .ifnb \i3
+ aes\de \i2\().16b, \k\().16b
+ aes\de \i3\().16b, \k\().16b
+ .endif
+ .endif
+ aes\mc \i0\().16b, \i0\().16b
+ .ifnb \i1
+ aes\mc \i1\().16b, \i1\().16b
+ .ifnb \i3
+ aes\mc \i2\().16b, \i2\().16b
+ aes\mc \i3\().16b, \i3\().16b
+ .endif
+ .endif
+ .endm
+
+ /* up to 4 interleaved encryption rounds with the same round key */
+ .macro round_Nx, enc, k, i0, i1, i2, i3
+ .ifc \enc, e
+ do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3
+ .else
+ do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3
+ .endif
+ .endm
+
+ /* up to 4 interleaved final rounds */
+ .macro fin_round_Nx, de, k, k2, i0, i1, i2, i3
+ aes\de \i0\().16b, \k\().16b
+ .ifnb \i1
+ aes\de \i1\().16b, \k\().16b
+ .ifnb \i3
+ aes\de \i2\().16b, \k\().16b
+ aes\de \i3\().16b, \k\().16b
+ .endif
+ .endif
+ eor \i0\().16b, \i0\().16b, \k2\().16b
+ .ifnb \i1
+ eor \i1\().16b, \i1\().16b, \k2\().16b
+ .ifnb \i3
+ eor \i2\().16b, \i2\().16b, \k2\().16b
+ eor \i3\().16b, \i3\().16b, \k2\().16b
+ .endif
+ .endif
+ .endm
+
+ /* up to 4 interleaved blocks */
+ .macro do_block_Nx, enc, rounds, i0, i1, i2, i3
+ cmp \rounds, #12
+ blo 2222f /* 128 bits */
+ beq 1111f /* 192 bits */
+ round_Nx \enc, v17, \i0, \i1, \i2, \i3
+ round_Nx \enc, v18, \i0, \i1, \i2, \i3
+1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3
+ round_Nx \enc, v20, \i0, \i1, \i2, \i3
+2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29
+ round_Nx \enc, \key, \i0, \i1, \i2, \i3
+ .endr
+ fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3
+ .endm
+
+ .macro encrypt_block, in, rounds, t0, t1, t2
+ do_block_Nx e, \rounds, \in
+ .endm
+
+ .macro encrypt_block2x, i0, i1, rounds, t0, t1, t2
+ do_block_Nx e, \rounds, \i0, \i1
+ .endm
+
+ .macro encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
+ do_block_Nx e, \rounds, \i0, \i1, \i2, \i3
+ .endm
+
+ .macro decrypt_block, in, rounds, t0, t1, t2
+ do_block_Nx d, \rounds, \in
+ .endm
+
+ .macro decrypt_block2x, i0, i1, rounds, t0, t1, t2
+ do_block_Nx d, \rounds, \i0, \i1
+ .endm
+
+ .macro decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
+ do_block_Nx d, \rounds, \i0, \i1, \i2, \i3
+ .endm
+
+#include "aes-modes.S"
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
new file mode 100644
index 000000000000..79cd911ef88c
--- /dev/null
+++ b/arch/arm64/crypto/aes-glue.c
@@ -0,0 +1,446 @@
+/*
+ * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/hwcap.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+
+#ifdef USE_V8_CRYPTO_EXTENSIONS
+#define MODE "ce"
+#define PRIO 300
+#define aes_ecb_encrypt ce_aes_ecb_encrypt
+#define aes_ecb_decrypt ce_aes_ecb_decrypt
+#define aes_cbc_encrypt ce_aes_cbc_encrypt
+#define aes_cbc_decrypt ce_aes_cbc_decrypt
+#define aes_ctr_encrypt ce_aes_ctr_encrypt
+#define aes_xts_encrypt ce_aes_xts_encrypt
+#define aes_xts_decrypt ce_aes_xts_decrypt
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
+#else
+#define MODE "neon"
+#define PRIO 200
+#define aes_ecb_encrypt neon_aes_ecb_encrypt
+#define aes_ecb_decrypt neon_aes_ecb_decrypt
+#define aes_cbc_encrypt neon_aes_cbc_encrypt
+#define aes_cbc_decrypt neon_aes_cbc_decrypt
+#define aes_ctr_encrypt neon_aes_ctr_encrypt
+#define aes_xts_encrypt neon_aes_xts_encrypt
+#define aes_xts_decrypt neon_aes_xts_decrypt
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
+MODULE_ALIAS("ecb(aes)");
+MODULE_ALIAS("cbc(aes)");
+MODULE_ALIAS("ctr(aes)");
+MODULE_ALIAS("xts(aes)");
+#endif
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+/* defined in aes-modes.S */
+asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, int first);
+asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, int first);
+
+asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, u8 iv[], int first);
+asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, u8 iv[], int first);
+
+asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
+ int rounds, int blocks, u8 ctr[], int first);
+
+asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
+ int rounds, int blocks, u8 const rk2[], u8 iv[],
+ int first);
+asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
+ int rounds, int blocks, u8 const rk2[], u8 iv[],
+ int first);
+
+struct crypto_aes_xts_ctx {
+ struct crypto_aes_ctx key1;
+ struct crypto_aes_ctx __aligned(8) key2;
+};
+
+static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_aes_expand_key(&ctx->key1, in_key, key_len / 2);
+ if (!ret)
+ ret = crypto_aes_expand_key(&ctx->key2, &in_key[key_len / 2],
+ key_len / 2);
+ if (!ret)
+ return 0;
+
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+}
+
+static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_enc, rounds, blocks, first);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_dec, rounds, blocks, first);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
+ first);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
+ first);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+ return err;
+}
+
+static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key_length / 4;
+ struct blkcipher_walk walk;
+ int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+
+ first = 1;
+ kernel_neon_begin();
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
+ first);
+ first = 0;
+ nbytes -= blocks * AES_BLOCK_SIZE;
+ if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
+ break;
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (nbytes) {
+ u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 __aligned(8) tail[AES_BLOCK_SIZE];
+
+ /*
+ * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
+ * to tell aes_ctr_encrypt() to only read half a block.
+ */
+ blocks = (nbytes <= 8) ? -1 : 1;
+
+ aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
+ blocks, walk.iv, first);
+ memcpy(tdst, tail, nbytes);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_neon_end();
+
+ return err;
+}
+
+static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key1.key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key1.key_enc, rounds, blocks,
+ (u8 *)ctx->key2.key_enc, walk.iv, first);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+
+ return err;
+}
+
+static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int err, first, rounds = 6 + ctx->key1.key_length / 4;
+ struct blkcipher_walk walk;
+ unsigned int blocks;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ kernel_neon_begin();
+ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key1.key_dec, rounds, blocks,
+ (u8 *)ctx->key2.key_enc, walk.iv, first);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+
+ return err;
+}
+
+static struct crypto_alg aes_algs[] = { {
+ .cra_name = "__ecb-aes-" MODE,
+ .cra_driver_name = "__driver-ecb-aes-" MODE,
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = crypto_aes_set_key,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ },
+}, {
+ .cra_name = "__cbc-aes-" MODE,
+ .cra_driver_name = "__driver-cbc-aes-" MODE,
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = crypto_aes_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ },
+}, {
+ .cra_name = "__ctr-aes-" MODE,
+ .cra_driver_name = "__driver-ctr-aes-" MODE,
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = crypto_aes_set_key,
+ .encrypt = ctr_encrypt,
+ .decrypt = ctr_encrypt,
+ },
+}, {
+ .cra_name = "__xts-aes-" MODE,
+ .cra_driver_name = "__driver-xts-aes-" MODE,
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_set_key,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
+ },
+}, {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+} };
+
+static int __init aes_init(void)
+{
+ return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
+static void __exit aes_exit(void)
+{
+ crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
+#ifdef USE_V8_CRYPTO_EXTENSIONS
+module_cpu_feature_match(AES, aes_init);
+#else
+module_init(aes_init);
+#endif
+module_exit(aes_exit);
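
Every mode handler above derives the round count as rounds = 6 + ctx->key_length / 4, where key_length is the key size in bytes. A standalone sketch (plain C, no kernel headers) that makes the mapping to the standard AES round counts explicit:

    #include <assert.h>

    /* AES round count from key length in bytes: 16 -> 10, 24 -> 12, 32 -> 14 */
    static int aes_rounds(unsigned int key_len_bytes)
    {
            return 6 + key_len_bytes / 4;   /* same formula as the glue code above */
    }

    int main(void)
    {
            assert(aes_rounds(16) == 10);   /* AES-128 */
            assert(aes_rounds(24) == 12);   /* AES-192 */
            assert(aes_rounds(32) == 14);   /* AES-256 */
            return 0;
    }
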
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
new file mode 100644
index 000000000000..f6e372c528eb
--- /dev/null
+++ b/arch/arm64/crypto/aes-modes.S
@@ -0,0 +1,532 @@
+/*
+ * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* included by aes-ce.S and aes-neon.S */
+
+ .text
+ .align 4
+
+/*
+ * There are several ways to instantiate this code:
+ * - no interleave, all inline
+ * - 2-way interleave, 2x calls out of line (-DINTERLEAVE=2)
+ * - 2-way interleave, all inline (-DINTERLEAVE=2 -DINTERLEAVE_INLINE)
+ * - 4-way interleave, 4x calls out of line (-DINTERLEAVE=4)
+ * - 4-way interleave, all inline (-DINTERLEAVE=4 -DINTERLEAVE_INLINE)
+ *
+ * Macros imported by this code:
+ * - enc_prepare - setup NEON registers for encryption
+ * - dec_prepare - setup NEON registers for decryption
+ * - enc_switch_key - change to new key after having prepared for encryption
+ * - encrypt_block - encrypt a single block
+ * - decrypt_block - decrypt a single block
+ * - encrypt_block2x - encrypt 2 blocks in parallel (if INTERLEAVE == 2)
+ * - decrypt_block2x - decrypt 2 blocks in parallel (if INTERLEAVE == 2)
+ * - encrypt_block4x - encrypt 4 blocks in parallel (if INTERLEAVE == 4)
+ * - decrypt_block4x - decrypt 4 blocks in parallel (if INTERLEAVE == 4)
+ */
+
+#if defined(INTERLEAVE) && !defined(INTERLEAVE_INLINE)
+#define FRAME_PUSH stp x29, x30, [sp,#-16]! ; mov x29, sp
+#define FRAME_POP ldp x29, x30, [sp],#16
+
+#if INTERLEAVE == 2
+
+aes_encrypt_block2x:
+ encrypt_block2x v0, v1, w3, x2, x6, w7
+ ret
+ENDPROC(aes_encrypt_block2x)
+
+aes_decrypt_block2x:
+ decrypt_block2x v0, v1, w3, x2, x6, w7
+ ret
+ENDPROC(aes_decrypt_block2x)
+
+#elif INTERLEAVE == 4
+
+aes_encrypt_block4x:
+ encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
+ ret
+ENDPROC(aes_encrypt_block4x)
+
+aes_decrypt_block4x:
+ decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
+ ret
+ENDPROC(aes_decrypt_block4x)
+
+#else
+#error INTERLEAVE should equal 2 or 4
+#endif
+
+ .macro do_encrypt_block2x
+ bl aes_encrypt_block2x
+ .endm
+
+ .macro do_decrypt_block2x
+ bl aes_decrypt_block2x
+ .endm
+
+ .macro do_encrypt_block4x
+ bl aes_encrypt_block4x
+ .endm
+
+ .macro do_decrypt_block4x
+ bl aes_decrypt_block4x
+ .endm
+
+#else
+#define FRAME_PUSH
+#define FRAME_POP
+
+ .macro do_encrypt_block2x
+ encrypt_block2x v0, v1, w3, x2, x6, w7
+ .endm
+
+ .macro do_decrypt_block2x
+ decrypt_block2x v0, v1, w3, x2, x6, w7
+ .endm
+
+ .macro do_encrypt_block4x
+ encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
+ .endm
+
+ .macro do_decrypt_block4x
+ decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
+ .endm
+
+#endif
+
+ /*
+ * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, int first)
+ * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, int first)
+ */
+
+AES_ENTRY(aes_ecb_encrypt)
+ FRAME_PUSH
+ cbz w5, .LecbencloopNx
+
+ enc_prepare w3, x2, x5
+
+.LecbencloopNx:
+#if INTERLEAVE >= 2
+ subs w4, w4, #INTERLEAVE
+ bmi .Lecbenc1x
+#if INTERLEAVE == 2
+ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
+ do_encrypt_block2x
+ st1 {v0.16b-v1.16b}, [x0], #32
+#else
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
+ do_encrypt_block4x
+ st1 {v0.16b-v3.16b}, [x0], #64
+#endif
+ b .LecbencloopNx
+.Lecbenc1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lecbencout
+#endif
+.Lecbencloop:
+ ld1 {v0.16b}, [x1], #16 /* get next pt block */
+ encrypt_block v0, w3, x2, x5, w6
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lecbencloop
+.Lecbencout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_ecb_encrypt)
+
+
+AES_ENTRY(aes_ecb_decrypt)
+ FRAME_PUSH
+ cbz w5, .LecbdecloopNx
+
+ dec_prepare w3, x2, x5
+
+.LecbdecloopNx:
+#if INTERLEAVE >= 2
+ subs w4, w4, #INTERLEAVE
+ bmi .Lecbdec1x
+#if INTERLEAVE == 2
+ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
+ do_decrypt_block2x
+ st1 {v0.16b-v1.16b}, [x0], #32
+#else
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
+ do_decrypt_block4x
+ st1 {v0.16b-v3.16b}, [x0], #64
+#endif
+ b .LecbdecloopNx
+.Lecbdec1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lecbdecout
+#endif
+.Lecbdecloop:
+ ld1 {v0.16b}, [x1], #16 /* get next ct block */
+ decrypt_block v0, w3, x2, x5, w6
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lecbdecloop
+.Lecbdecout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_ecb_decrypt)
+
+
+ /*
+ * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, u8 iv[], int first)
+ * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, u8 iv[], int first)
+ */
+
+AES_ENTRY(aes_cbc_encrypt)
+ cbz w6, .Lcbcencloop
+
+ ld1 {v0.16b}, [x5] /* get iv */
+ enc_prepare w3, x2, x5
+
+.Lcbcencloop:
+ ld1 {v1.16b}, [x1], #16 /* get next pt block */
+ eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
+ encrypt_block v0, w3, x2, x5, w6
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lcbcencloop
+ ret
+AES_ENDPROC(aes_cbc_encrypt)
+
+
+AES_ENTRY(aes_cbc_decrypt)
+ FRAME_PUSH
+ cbz w6, .LcbcdecloopNx
+
+ ld1 {v7.16b}, [x5] /* get iv */
+ dec_prepare w3, x2, x5
+
+.LcbcdecloopNx:
+#if INTERLEAVE >= 2
+ subs w4, w4, #INTERLEAVE
+ bmi .Lcbcdec1x
+#if INTERLEAVE == 2
+ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
+ mov v2.16b, v0.16b
+ mov v3.16b, v1.16b
+ do_decrypt_block2x
+ eor v0.16b, v0.16b, v7.16b
+ eor v1.16b, v1.16b, v2.16b
+ mov v7.16b, v3.16b
+ st1 {v0.16b-v1.16b}, [x0], #32
+#else
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
+ mov v4.16b, v0.16b
+ mov v5.16b, v1.16b
+ mov v6.16b, v2.16b
+ do_decrypt_block4x
+ sub x1, x1, #16
+ eor v0.16b, v0.16b, v7.16b
+ eor v1.16b, v1.16b, v4.16b
+ ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */
+ eor v2.16b, v2.16b, v5.16b
+ eor v3.16b, v3.16b, v6.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+#endif
+ b .LcbcdecloopNx
+.Lcbcdec1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lcbcdecout
+#endif
+.Lcbcdecloop:
+ ld1 {v1.16b}, [x1], #16 /* get next ct block */
+ mov v0.16b, v1.16b /* ...and copy to v0 */
+ decrypt_block v0, w3, x2, x5, w6
+ eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
+ mov v7.16b, v1.16b /* ct is next iv */
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lcbcdecloop
+.Lcbcdecout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_cbc_decrypt)
+
+
+ /*
+ * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+ * int blocks, u8 ctr[], int first)
+ */
+
+AES_ENTRY(aes_ctr_encrypt)
+ FRAME_PUSH
+ cbnz w6, .Lctrfirst /* 1st time around? */
+ umov x5, v4.d[1] /* keep swabbed ctr in reg */
+ rev x5, x5
+#if INTERLEAVE >= 2
+ cmn w5, w4 /* 32 bit overflow? */
+ bcs .Lctrinc
+ add x5, x5, #1 /* increment BE ctr */
+ b .LctrincNx
+#else
+ b .Lctrinc
+#endif
+.Lctrfirst:
+ enc_prepare w3, x2, x6
+ ld1 {v4.16b}, [x5]
+ umov x5, v4.d[1] /* keep swabbed ctr in reg */
+ rev x5, x5
+#if INTERLEAVE >= 2
+ cmn w5, w4 /* 32 bit overflow? */
+ bcs .Lctrloop
+.LctrloopNx:
+ subs w4, w4, #INTERLEAVE
+ bmi .Lctr1x
+#if INTERLEAVE == 2
+ mov v0.8b, v4.8b
+ mov v1.8b, v4.8b
+ rev x7, x5
+ add x5, x5, #1
+ ins v0.d[1], x7
+ rev x7, x5
+ add x5, x5, #1
+ ins v1.d[1], x7
+ ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
+ do_encrypt_block2x
+ eor v0.16b, v0.16b, v2.16b
+ eor v1.16b, v1.16b, v3.16b
+ st1 {v0.16b-v1.16b}, [x0], #32
+#else
+ ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
+ dup v7.4s, w5
+ mov v0.16b, v4.16b
+ add v7.4s, v7.4s, v8.4s
+ mov v1.16b, v4.16b
+ rev32 v8.16b, v7.16b
+ mov v2.16b, v4.16b
+ mov v3.16b, v4.16b
+ mov v1.s[3], v8.s[0]
+ mov v2.s[3], v8.s[1]
+ mov v3.s[3], v8.s[2]
+ ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
+ do_encrypt_block4x
+ eor v0.16b, v5.16b, v0.16b
+ ld1 {v5.16b}, [x1], #16 /* get 1 input block */
+ eor v1.16b, v6.16b, v1.16b
+ eor v2.16b, v7.16b, v2.16b
+ eor v3.16b, v5.16b, v3.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+ add x5, x5, #INTERLEAVE
+#endif
+ cbz w4, .LctroutNx
+.LctrincNx:
+ rev x7, x5
+ ins v4.d[1], x7
+ b .LctrloopNx
+.LctroutNx:
+ sub x5, x5, #1
+ rev x7, x5
+ ins v4.d[1], x7
+ b .Lctrout
+.Lctr1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lctrout
+#endif
+.Lctrloop:
+ mov v0.16b, v4.16b
+ encrypt_block v0, w3, x2, x6, w7
+ subs w4, w4, #1
+ bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
+ ld1 {v3.16b}, [x1], #16
+ eor v3.16b, v0.16b, v3.16b
+ st1 {v3.16b}, [x0], #16
+ beq .Lctrout
+.Lctrinc:
+ adds x5, x5, #1 /* increment BE ctr */
+ rev x7, x5
+ ins v4.d[1], x7
+ bcc .Lctrloop /* no overflow? */
+ umov x7, v4.d[0] /* load upper word of ctr */
+ rev x7, x7 /* ... to handle the carry */
+ add x7, x7, #1
+ rev x7, x7
+ ins v4.d[0], x7
+ b .Lctrloop
+.Lctrhalfblock:
+ ld1 {v3.8b}, [x1]
+ eor v3.8b, v0.8b, v3.8b
+ st1 {v3.8b}, [x0]
+.Lctrout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_ctr_encrypt)
+ .ltorg
+
+
+ /*
+ * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
+ * int blocks, u8 const rk2[], u8 iv[], int first)
+ * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
+ * int blocks, u8 const rk2[], u8 iv[], int first)
+ */
+
+ .macro next_tweak, out, in, const, tmp
+ sshr \tmp\().2d, \in\().2d, #63
+ and \tmp\().16b, \tmp\().16b, \const\().16b
+ add \out\().2d, \in\().2d, \in\().2d
+ ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
+ eor \out\().16b, \out\().16b, \tmp\().16b
+ .endm
+
+.Lxts_mul_x:
+ .word 1, 0, 0x87, 0
+
+AES_ENTRY(aes_xts_encrypt)
+ FRAME_PUSH
+ cbz w7, .LxtsencloopNx
+
+ ld1 {v4.16b}, [x6]
+ enc_prepare w3, x5, x6
+ encrypt_block v4, w3, x5, x6, w7 /* first tweak */
+ enc_switch_key w3, x2, x6
+ ldr q7, .Lxts_mul_x
+ b .LxtsencNx
+
+.LxtsencloopNx:
+ ldr q7, .Lxts_mul_x
+ next_tweak v4, v4, v7, v8
+.LxtsencNx:
+#if INTERLEAVE >= 2
+ subs w4, w4, #INTERLEAVE
+ bmi .Lxtsenc1x
+#if INTERLEAVE == 2
+ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
+ next_tweak v5, v4, v7, v8
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ do_encrypt_block2x
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ st1 {v0.16b-v1.16b}, [x0], #32
+ cbz w4, .LxtsencoutNx
+ next_tweak v4, v5, v7, v8
+ b .LxtsencNx
+.LxtsencoutNx:
+ mov v4.16b, v5.16b
+ b .Lxtsencout
+#else
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
+ next_tweak v5, v4, v7, v8
+ eor v0.16b, v0.16b, v4.16b
+ next_tweak v6, v5, v7, v8
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ next_tweak v7, v6, v7, v8
+ eor v3.16b, v3.16b, v7.16b
+ do_encrypt_block4x
+ eor v3.16b, v3.16b, v7.16b
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+ mov v4.16b, v7.16b
+ cbz w4, .Lxtsencout
+ b .LxtsencloopNx
+#endif
+.Lxtsenc1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lxtsencout
+#endif
+.Lxtsencloop:
+ ld1 {v1.16b}, [x1], #16
+ eor v0.16b, v1.16b, v4.16b
+ encrypt_block v0, w3, x2, x6, w7
+ eor v0.16b, v0.16b, v4.16b
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ beq .Lxtsencout
+ next_tweak v4, v4, v7, v8
+ b .Lxtsencloop
+.Lxtsencout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_xts_encrypt)
+
+
+AES_ENTRY(aes_xts_decrypt)
+ FRAME_PUSH
+ cbz w7, .LxtsdecloopNx
+
+ ld1 {v4.16b}, [x6]
+ enc_prepare w3, x5, x6
+ encrypt_block v4, w3, x5, x6, w7 /* first tweak */
+ dec_prepare w3, x2, x6
+ ldr q7, .Lxts_mul_x
+ b .LxtsdecNx
+
+.LxtsdecloopNx:
+ ldr q7, .Lxts_mul_x
+ next_tweak v4, v4, v7, v8
+.LxtsdecNx:
+#if INTERLEAVE >= 2
+ subs w4, w4, #INTERLEAVE
+ bmi .Lxtsdec1x
+#if INTERLEAVE == 2
+ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
+ next_tweak v5, v4, v7, v8
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ do_decrypt_block2x
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ st1 {v0.16b-v1.16b}, [x0], #32
+ cbz w4, .LxtsdecoutNx
+ next_tweak v4, v5, v7, v8
+ b .LxtsdecNx
+.LxtsdecoutNx:
+ mov v4.16b, v5.16b
+ b .Lxtsdecout
+#else
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
+ next_tweak v5, v4, v7, v8
+ eor v0.16b, v0.16b, v4.16b
+ next_tweak v6, v5, v7, v8
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ next_tweak v7, v6, v7, v8
+ eor v3.16b, v3.16b, v7.16b
+ do_decrypt_block4x
+ eor v3.16b, v3.16b, v7.16b
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+ mov v4.16b, v7.16b
+ cbz w4, .Lxtsdecout
+ b .LxtsdecloopNx
+#endif
+.Lxtsdec1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lxtsdecout
+#endif
+.Lxtsdecloop:
+ ld1 {v1.16b}, [x1], #16
+ eor v0.16b, v1.16b, v4.16b
+ decrypt_block v0, w3, x2, x6, w7
+ eor v0.16b, v0.16b, v4.16b
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ beq .Lxtsdecout
+ next_tweak v4, v4, v7, v8
+ b .Lxtsdecloop
+.Lxtsdecout:
+ FRAME_POP
+ ret
+AES_ENDPROC(aes_xts_decrypt)
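
The next_tweak macro advances the XTS tweak by multiplying it by x in GF(2^128), with the 0x87 reduction constant coming from .Lxts_mul_x. A plain-C model of the same computation, assuming the tweak is viewed as two little-endian 64-bit lanes as in the NEON code:

    #include <stdint.h>

    /*
     * C model of the next_tweak macro: multiply the XTS tweak by x in
     * GF(2^128) modulo x^128 + x^7 + x^2 + x + 1.
     */
    static void next_tweak(uint64_t t[2])
    {
            /* arithmetic shift replicates the top bit, like sshr #63 above */
            uint64_t carry = (uint64_t)((int64_t)t[1] >> 63) & 0x87;

            t[1] = (t[1] << 1) | (t[0] >> 63);  /* 128-bit shift left by one */
            t[0] = (t[0] << 1) ^ carry;         /* fold the reduction back in */
    }
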
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
new file mode 100644
index 000000000000..b93170e1cc93
--- /dev/null
+++ b/arch/arm64/crypto/aes-neon.S
@@ -0,0 +1,382 @@
+/*
+ * linux/arch/arm64/crypto/aes-neon.S - AES cipher for ARMv8 NEON
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#define AES_ENTRY(func) ENTRY(neon_ ## func)
+#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
+
+ /* multiply by polynomial 'x' in GF(2^8) */
+ .macro mul_by_x, out, in, temp, const
+ sshr \temp, \in, #7
+ add \out, \in, \in
+ and \temp, \temp, \const
+ eor \out, \out, \temp
+ .endm
+
+ /* preload the entire Sbox */
+ .macro prepare, sbox, shiftrows, temp
+ adr \temp, \sbox
+ movi v12.16b, #0x40
+ ldr q13, \shiftrows
+ movi v14.16b, #0x1b
+ ld1 {v16.16b-v19.16b}, [\temp], #64
+ ld1 {v20.16b-v23.16b}, [\temp], #64
+ ld1 {v24.16b-v27.16b}, [\temp], #64
+ ld1 {v28.16b-v31.16b}, [\temp]
+ .endm
+
+ /* do preload for encryption */
+ .macro enc_prepare, ignore0, ignore1, temp
+ prepare .LForward_Sbox, .LForward_ShiftRows, \temp
+ .endm
+
+ .macro enc_switch_key, ignore0, ignore1, temp
+ /* do nothing */
+ .endm
+
+ /* do preload for decryption */
+ .macro dec_prepare, ignore0, ignore1, temp
+ prepare .LReverse_Sbox, .LReverse_ShiftRows, \temp
+ .endm
+
+ /* apply SubBytes transformation using the preloaded Sbox */
+ .macro sub_bytes, in
+ sub v9.16b, \in\().16b, v12.16b
+ tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b
+ sub v10.16b, v9.16b, v12.16b
+ tbx \in\().16b, {v20.16b-v23.16b}, v9.16b
+ sub v11.16b, v10.16b, v12.16b
+ tbx \in\().16b, {v24.16b-v27.16b}, v10.16b
+ tbx \in\().16b, {v28.16b-v31.16b}, v11.16b
+ .endm
+
+ /* apply MixColumns transformation */
+ .macro mix_columns, in
+ mul_by_x v10.16b, \in\().16b, v9.16b, v14.16b
+ rev32 v8.8h, \in\().8h
+ eor \in\().16b, v10.16b, \in\().16b
+ shl v9.4s, v8.4s, #24
+ shl v11.4s, \in\().4s, #24
+ sri v9.4s, v8.4s, #8
+ sri v11.4s, \in\().4s, #8
+ eor v9.16b, v9.16b, v8.16b
+ eor v10.16b, v10.16b, v9.16b
+ eor \in\().16b, v10.16b, v11.16b
+ .endm
+
+ /* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */
+ .macro inv_mix_columns, in
+ mul_by_x v11.16b, \in\().16b, v10.16b, v14.16b
+ mul_by_x v11.16b, v11.16b, v10.16b, v14.16b
+ eor \in\().16b, \in\().16b, v11.16b
+ rev32 v11.8h, v11.8h
+ eor \in\().16b, \in\().16b, v11.16b
+ mix_columns \in
+ .endm
+
+ .macro do_block, enc, in, rounds, rk, rkp, i
+ ld1 {v15.16b}, [\rk]
+ add \rkp, \rk, #16
+ mov \i, \rounds
+1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
+ tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
+ sub_bytes \in
+ ld1 {v15.16b}, [\rkp], #16
+ subs \i, \i, #1
+ beq 2222f
+ .if \enc == 1
+ mix_columns \in
+ .else
+ inv_mix_columns \in
+ .endif
+ b 1111b
+2222: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
+ .endm
+
+ .macro encrypt_block, in, rounds, rk, rkp, i
+ do_block 1, \in, \rounds, \rk, \rkp, \i
+ .endm
+
+ .macro decrypt_block, in, rounds, rk, rkp, i
+ do_block 0, \in, \rounds, \rk, \rkp, \i
+ .endm
+
+ /*
+ * Interleaved versions: functionally equivalent to the
+ * ones above, but applied to 2 or 4 AES states in parallel.
+ */
+
+ .macro sub_bytes_2x, in0, in1
+ sub v8.16b, \in0\().16b, v12.16b
+ sub v9.16b, \in1\().16b, v12.16b
+ tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
+ tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
+ sub v10.16b, v8.16b, v12.16b
+ sub v11.16b, v9.16b, v12.16b
+ tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b
+ tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b
+ sub v8.16b, v10.16b, v12.16b
+ sub v9.16b, v11.16b, v12.16b
+ tbx \in0\().16b, {v24.16b-v27.16b}, v10.16b
+ tbx \in1\().16b, {v24.16b-v27.16b}, v11.16b
+ tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b
+ tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b
+ .endm
+
+ .macro sub_bytes_4x, in0, in1, in2, in3
+ sub v8.16b, \in0\().16b, v12.16b
+ tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
+ sub v9.16b, \in1\().16b, v12.16b
+ tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
+ sub v10.16b, \in2\().16b, v12.16b
+ tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b
+ sub v11.16b, \in3\().16b, v12.16b
+ tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b
+ tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b
+ tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b
+ sub v8.16b, v8.16b, v12.16b
+ tbx \in2\().16b, {v20.16b-v23.16b}, v10.16b
+ sub v9.16b, v9.16b, v12.16b
+ tbx \in3\().16b, {v20.16b-v23.16b}, v11.16b
+ sub v10.16b, v10.16b, v12.16b
+ tbx \in0\().16b, {v24.16b-v27.16b}, v8.16b
+ sub v11.16b, v11.16b, v12.16b
+ tbx \in1\().16b, {v24.16b-v27.16b}, v9.16b
+ sub v8.16b, v8.16b, v12.16b
+ tbx \in2\().16b, {v24.16b-v27.16b}, v10.16b
+ sub v9.16b, v9.16b, v12.16b
+ tbx \in3\().16b, {v24.16b-v27.16b}, v11.16b
+ sub v10.16b, v10.16b, v12.16b
+ tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b
+ sub v11.16b, v11.16b, v12.16b
+ tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b
+ tbx \in2\().16b, {v28.16b-v31.16b}, v10.16b
+ tbx \in3\().16b, {v28.16b-v31.16b}, v11.16b
+ .endm
+
+ .macro mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const
+ sshr \tmp0\().16b, \in0\().16b, #7
+ add \out0\().16b, \in0\().16b, \in0\().16b
+ sshr \tmp1\().16b, \in1\().16b, #7
+ and \tmp0\().16b, \tmp0\().16b, \const\().16b
+ add \out1\().16b, \in1\().16b, \in1\().16b
+ and \tmp1\().16b, \tmp1\().16b, \const\().16b
+ eor \out0\().16b, \out0\().16b, \tmp0\().16b
+ eor \out1\().16b, \out1\().16b, \tmp1\().16b
+ .endm
+
+ .macro mix_columns_2x, in0, in1
+ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
+ rev32 v10.8h, \in0\().8h
+ rev32 v11.8h, \in1\().8h
+ eor \in0\().16b, v8.16b, \in0\().16b
+ eor \in1\().16b, v9.16b, \in1\().16b
+ shl v12.4s, v10.4s, #24
+ shl v13.4s, v11.4s, #24
+ eor v8.16b, v8.16b, v10.16b
+ sri v12.4s, v10.4s, #8
+ shl v10.4s, \in0\().4s, #24
+ eor v9.16b, v9.16b, v11.16b
+ sri v13.4s, v11.4s, #8
+ shl v11.4s, \in1\().4s, #24
+ sri v10.4s, \in0\().4s, #8
+ eor \in0\().16b, v8.16b, v12.16b
+ sri v11.4s, \in1\().4s, #8
+ eor \in1\().16b, v9.16b, v13.16b
+ eor \in0\().16b, v10.16b, \in0\().16b
+ eor \in1\().16b, v11.16b, \in1\().16b
+ .endm
+
+ .macro inv_mix_cols_2x, in0, in1
+ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
+ mul_by_x_2x v8, v9, v8, v9, v10, v11, v14
+ eor \in0\().16b, \in0\().16b, v8.16b
+ eor \in1\().16b, \in1\().16b, v9.16b
+ rev32 v8.8h, v8.8h
+ rev32 v9.8h, v9.8h
+ eor \in0\().16b, \in0\().16b, v8.16b
+ eor \in1\().16b, \in1\().16b, v9.16b
+ mix_columns_2x \in0, \in1
+ .endm
+
+ .macro inv_mix_cols_4x, in0, in1, in2, in3
+ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
+ mul_by_x_2x v10, v11, \in2, \in3, v12, v13, v14
+ mul_by_x_2x v8, v9, v8, v9, v12, v13, v14
+ mul_by_x_2x v10, v11, v10, v11, v12, v13, v14
+ eor \in0\().16b, \in0\().16b, v8.16b
+ eor \in1\().16b, \in1\().16b, v9.16b
+ eor \in2\().16b, \in2\().16b, v10.16b
+ eor \in3\().16b, \in3\().16b, v11.16b
+ rev32 v8.8h, v8.8h
+ rev32 v9.8h, v9.8h
+ rev32 v10.8h, v10.8h
+ rev32 v11.8h, v11.8h
+ eor \in0\().16b, \in0\().16b, v8.16b
+ eor \in1\().16b, \in1\().16b, v9.16b
+ eor \in2\().16b, \in2\().16b, v10.16b
+ eor \in3\().16b, \in3\().16b, v11.16b
+ mix_columns_2x \in0, \in1
+ mix_columns_2x \in2, \in3
+ .endm
+
+ .macro do_block_2x, enc, in0, in1, rounds, rk, rkp, i
+ ld1 {v15.16b}, [\rk]
+ add \rkp, \rk, #16
+ mov \i, \rounds
+1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
+ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
+ sub_bytes_2x \in0, \in1
+ tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
+ tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
+ ld1 {v15.16b}, [\rkp], #16
+ subs \i, \i, #1
+ beq 2222f
+ .if \enc == 1
+ mix_columns_2x \in0, \in1
+ ldr q13, .LForward_ShiftRows
+ .else
+ inv_mix_cols_2x \in0, \in1
+ ldr q13, .LReverse_ShiftRows
+ .endif
+ movi v12.16b, #0x40
+ b 1111b
+2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
+ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
+ .endm
+
+ .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
+ ld1 {v15.16b}, [\rk]
+ add \rkp, \rk, #16
+ mov \i, \rounds
+1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
+ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
+ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */
+ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
+ sub_bytes_4x \in0, \in1, \in2, \in3
+ tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
+ tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
+ tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */
+ tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */
+ ld1 {v15.16b}, [\rkp], #16
+ subs \i, \i, #1
+ beq 2222f
+ .if \enc == 1
+ mix_columns_2x \in0, \in1
+ mix_columns_2x \in2, \in3
+ ldr q13, .LForward_ShiftRows
+ .else
+ inv_mix_cols_4x \in0, \in1, \in2, \in3
+ ldr q13, .LReverse_ShiftRows
+ .endif
+ movi v12.16b, #0x40
+ b 1111b
+2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
+ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
+ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */
+ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
+ .endm
+
+ .macro encrypt_block2x, in0, in1, rounds, rk, rkp, i
+ do_block_2x 1, \in0, \in1, \rounds, \rk, \rkp, \i
+ .endm
+
+ .macro decrypt_block2x, in0, in1, rounds, rk, rkp, i
+ do_block_2x 0, \in0, \in1, \rounds, \rk, \rkp, \i
+ .endm
+
+ .macro encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
+ do_block_4x 1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
+ .endm
+
+ .macro decrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
+ do_block_4x 0, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
+ .endm
+
+#include "aes-modes.S"
+
+ .text
+ .align 4
+.LForward_ShiftRows:
+ .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
+ .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb
+
+.LReverse_ShiftRows:
+ .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
+ .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
+
+.LForward_Sbox:
+ .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+ .byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+ .byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+ .byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+ .byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+ .byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+ .byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+ .byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+ .byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+ .byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+ .byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+ .byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+ .byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+ .byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+ .byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+ .byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+ .byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+ .byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+ .byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+ .byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+ .byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+ .byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+ .byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+ .byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+ .byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+ .byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+ .byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+ .byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+ .byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+ .byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+ .byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+ .byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+
+.LReverse_Sbox:
+ .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+ .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+ .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+ .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+ .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+ .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+ .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+ .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+ .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+ .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+ .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+ .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+ .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+ .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+ .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+ .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+ .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+ .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+ .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+ .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+ .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+ .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+ .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+ .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+ .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+ .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+ .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+ .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+ .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+ .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+ .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+ .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
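
The mul_by_x macro above is the per-byte GF(2^8) doubling that MixColumns is built from; v14 holds the 0x1b reduction constant loaded in prepare. A byte-at-a-time C model:

    #include <stdint.h>

    /*
     * Multiply one byte by the polynomial 'x' in GF(2^8), reducing
     * modulo x^8 + x^4 + x^3 + x + 1 (0x1b). The NEON macro does the
     * same per byte lane: sshr #7 replicates the top bit, which then
     * selects the 0x1b reduction via the AND/EOR pair.
     */
    static uint8_t mul_by_x(uint8_t b)
    {
            return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0));
    }
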
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
new file mode 100644
index 000000000000..607928ad1fdf
--- /dev/null
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -0,0 +1,87 @@
+/*
+ * Accelerated GHASH implementation with ARMv8 PMULL instructions.
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Based on arch/x86/crypto/ghash-clmulni-intel_asm.S
+ *
+ * Copyright (c) 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ * Vinodh Gopal
+ * Erdinc Ozturk
+ * Deniz Karakoyunlu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ SHASH .req v0
+ SHASH2 .req v1
+ T1 .req v2
+ T2 .req v3
+ MASK .req v4
+ XL .req v5
+ XM .req v6
+ XH .req v7
+ IN1 .req v7
+
+ .text
+ .arch armv8-a+crypto
+
+ /*
+ * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
+ * struct ghash_key const *k, const char *head)
+ */
+ENTRY(pmull_ghash_update)
+ ld1 {SHASH.16b}, [x3]
+ ld1 {XL.16b}, [x1]
+ movi MASK.16b, #0xe1
+ ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
+ shl MASK.2d, MASK.2d, #57
+ eor SHASH2.16b, SHASH2.16b, SHASH.16b
+
+ /* do the head block first, if supplied */
+ cbz x4, 0f
+ ld1 {T1.2d}, [x4]
+ b 1f
+
+0: ld1 {T1.2d}, [x2], #16
+ sub w0, w0, #1
+
+1: /* multiply XL by SHASH in GF(2^128) */
+CPU_LE( rev64 T1.16b, T1.16b )
+
+ ext T2.16b, XL.16b, XL.16b, #8
+ ext IN1.16b, T1.16b, T1.16b, #8
+ eor T1.16b, T1.16b, T2.16b
+ eor XL.16b, XL.16b, IN1.16b
+
+ pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
+ eor T1.16b, T1.16b, XL.16b
+ pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
+ pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
+
+ ext T1.16b, XL.16b, XH.16b, #8
+ eor T2.16b, XL.16b, XH.16b
+ eor XM.16b, XM.16b, T1.16b
+ eor XM.16b, XM.16b, T2.16b
+ pmull T2.1q, XL.1d, MASK.1d
+
+ mov XH.d[0], XM.d[1]
+ mov XM.d[1], XL.d[0]
+
+ eor XL.16b, XM.16b, T2.16b
+ ext T2.16b, XL.16b, XL.16b, #8
+ pmull XL.1q, XL.1d, MASK.1d
+ eor T2.16b, T2.16b, XH.16b
+ eor XL.16b, XL.16b, T2.16b
+
+ cbnz w0, 0b
+
+ st1 {XL.16b}, [x1]
+ ret
+ENDPROC(pmull_ghash_update)
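
As the a1 * b1 / a0 * b0 / (a1 + a0)(b1 + b0) comments indicate, the three PMULL instructions implement a Karatsuba-style 128x128-bit carry-less multiply. A sketch of that structure in C; clmul_64x64() is a hypothetical helper standing in for a single PMULL instruction:

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } u128;

    /* hypothetical helper: 64x64 -> 128-bit carry-less multiply,
     * i.e. what one PMULL instruction computes */
    u128 clmul_64x64(uint64_t a, uint64_t b);

    /*
     * Karatsuba structure of the PMULL sequence above: three 64-bit
     * carry-less multiplies instead of four. "Addition" in GF(2) is
     * XOR, so the middle term needs no carry handling.
     */
    static void clmul_128x128(const uint64_t a[2], const uint64_t b[2],
                              uint64_t res[4])
    {
            u128 lo  = clmul_64x64(a[0], b[0]);             /* a0 * b0 */
            u128 hi  = clmul_64x64(a[1], b[1]);             /* a1 * b1 */
            u128 mid = clmul_64x64(a[0] ^ a[1], b[0] ^ b[1]);

            mid.lo ^= lo.lo ^ hi.lo;  /* (a1+a0)(b1+b0) + a1b1 + a0b0 */
            mid.hi ^= lo.hi ^ hi.hi;

            res[0] = lo.lo;
            res[1] = lo.hi ^ mid.lo;
            res[2] = hi.lo ^ mid.hi;
            res[3] = hi.hi;
    }
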
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
new file mode 100644
index 000000000000..833ec1e3f3e9
--- /dev/null
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -0,0 +1,156 @@
+/*
+ * Accelerated GHASH implementation with ARMv8 PMULL instructions.
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+struct ghash_key {
+ u64 a;
+ u64 b;
+};
+
+struct ghash_desc_ctx {
+ u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
+ u8 buf[GHASH_BLOCK_SIZE];
+ u32 count;
+};
+
+asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
+ struct ghash_key const *k, const char *head);
+
+static int ghash_init(struct shash_desc *desc)
+{
+ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ *ctx = (struct ghash_desc_ctx){};
+ return 0;
+}
+
+static int ghash_update(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
+{
+ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+
+ ctx->count += len;
+
+ if ((partial + len) >= GHASH_BLOCK_SIZE) {
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ int blocks;
+
+ if (partial) {
+ int p = GHASH_BLOCK_SIZE - partial;
+
+ memcpy(ctx->buf + partial, src, p);
+ src += p;
+ len -= p;
+ }
+
+ blocks = len / GHASH_BLOCK_SIZE;
+ len %= GHASH_BLOCK_SIZE;
+
+ kernel_neon_begin_partial(8);
+ pmull_ghash_update(blocks, ctx->digest, src, key,
+ partial ? ctx->buf : NULL);
+ kernel_neon_end();
+ src += blocks * GHASH_BLOCK_SIZE;
+ partial = 0;
+ }
+ if (len)
+ memcpy(ctx->buf + partial, src, len);
+ return 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+
+ if (partial) {
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+
+ memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
+
+ kernel_neon_begin_partial(8);
+ pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
+ kernel_neon_end();
+ }
+ put_unaligned_be64(ctx->digest[1], dst);
+ put_unaligned_be64(ctx->digest[0], dst + 8);
+
+ *ctx = (struct ghash_desc_ctx){};
+ return 0;
+}
+
+static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *inkey, unsigned int keylen)
+{
+ struct ghash_key *key = crypto_shash_ctx(tfm);
+ u64 a, b;
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* perform multiplication by 'x' in GF(2^128) */
+ b = get_unaligned_be64(inkey);
+ a = get_unaligned_be64(inkey + 8);
+
+ key->a = (a << 1) | (b >> 63);
+ key->b = (b << 1) | (a >> 63);
+
+ if (b >> 63)
+ key->b ^= 0xc200000000000000UL;
+
+ return 0;
+}
+
+static struct shash_alg ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update,
+ .final = ghash_final,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-ce",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ghash_key),
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int __init ghash_ce_mod_init(void)
+{
+ return crypto_register_shash(&ghash_alg);
+}
+
+static void __exit ghash_ce_mod_exit(void)
+{
+ crypto_unregister_shash(&ghash_alg);
+}
+
+module_cpu_feature_match(PMULL, ghash_ce_mod_init);
+module_exit(ghash_ce_mod_exit);
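
The hash registers as a synchronous shash ("ghash", driver "ghash-ce"). A sketch of a hypothetical caller driving it through the standard shash API; note the descriptor must reserve crypto_shash_descsize() bytes of per-request state (struct ghash_desc_ctx here):

    #include <crypto/hash.h>
    #include <linux/slab.h>

    /* Illustrative only: hash one buffer with the GHASH shash above. */
    static int demo_ghash(const u8 key[16], const u8 *data, unsigned int len,
                          u8 out[16])
    {
            struct crypto_shash *tfm;
            struct shash_desc *desc;
            int err;

            tfm = crypto_alloc_shash("ghash", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_shash_setkey(tfm, key, 16);
            if (err)
                    goto out_free_tfm;

            desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                           GFP_KERNEL);
            if (!desc) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }
            desc->tfm = tfm;

            err = crypto_shash_digest(desc, data, len, out);

            kfree(desc);
    out_free_tfm:
            crypto_free_shash(tfm);
            return err;
    }
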
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
new file mode 100644
index 000000000000..09d57d98609c
--- /dev/null
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -0,0 +1,153 @@
+/*
+ * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+ .arch armv8-a+crypto
+
+ k0 .req v0
+ k1 .req v1
+ k2 .req v2
+ k3 .req v3
+
+ t0 .req v4
+ t1 .req v5
+
+ dga .req q6
+ dgav .req v6
+ dgb .req s7
+ dgbv .req v7
+
+ dg0q .req q12
+ dg0s .req s12
+ dg0v .req v12
+ dg1s .req s13
+ dg1v .req v13
+ dg2s .req s14
+
+ .macro add_only, op, ev, rc, s0, dg1
+ .ifc \ev, ev
+ add t1.4s, v\s0\().4s, \rc\().4s
+ sha1h dg2s, dg0s
+ .ifnb \dg1
+ sha1\op dg0q, \dg1, t0.4s
+ .else
+ sha1\op dg0q, dg1s, t0.4s
+ .endif
+ .else
+ .ifnb \s0
+ add t0.4s, v\s0\().4s, \rc\().4s
+ .endif
+ sha1h dg1s, dg0s
+ sha1\op dg0q, dg2s, t1.4s
+ .endif
+ .endm
+
+ .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1
+ sha1su0 v\s0\().4s, v\s1\().4s, v\s2\().4s
+ add_only \op, \ev, \rc, \s1, \dg1
+ sha1su1 v\s0\().4s, v\s3\().4s
+ .endm
+
+ /*
+ * The SHA1 round constants
+ */
+ .align 4
+.Lsha1_rcon:
+ .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
+
+ /*
+ * void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
+ * u8 *head, long bytes)
+ */
+ENTRY(sha1_ce_transform)
+ /* load round constants */
+ adr x6, .Lsha1_rcon
+ ld1r {k0.4s}, [x6], #4
+ ld1r {k1.4s}, [x6], #4
+ ld1r {k2.4s}, [x6], #4
+ ld1r {k3.4s}, [x6]
+
+ /* load state */
+ ldr dga, [x2]
+ ldr dgb, [x2, #16]
+
+ /* load partial state (if supplied) */
+ cbz x3, 0f
+ ld1 {v8.4s-v11.4s}, [x3]
+ b 1f
+
+ /* load input */
+0: ld1 {v8.4s-v11.4s}, [x1], #64
+ sub w0, w0, #1
+
+1:
+CPU_LE( rev32 v8.16b, v8.16b )
+CPU_LE( rev32 v9.16b, v9.16b )
+CPU_LE( rev32 v10.16b, v10.16b )
+CPU_LE( rev32 v11.16b, v11.16b )
+
+2: add t0.4s, v8.4s, k0.4s
+ mov dg0v.16b, dgav.16b
+
+ add_update c, ev, k0, 8, 9, 10, 11, dgb
+ add_update c, od, k0, 9, 10, 11, 8
+ add_update c, ev, k0, 10, 11, 8, 9
+ add_update c, od, k0, 11, 8, 9, 10
+ add_update c, ev, k1, 8, 9, 10, 11
+
+ add_update p, od, k1, 9, 10, 11, 8
+ add_update p, ev, k1, 10, 11, 8, 9
+ add_update p, od, k1, 11, 8, 9, 10
+ add_update p, ev, k1, 8, 9, 10, 11
+ add_update p, od, k2, 9, 10, 11, 8
+
+ add_update m, ev, k2, 10, 11, 8, 9
+ add_update m, od, k2, 11, 8, 9, 10
+ add_update m, ev, k2, 8, 9, 10, 11
+ add_update m, od, k2, 9, 10, 11, 8
+ add_update m, ev, k3, 10, 11, 8, 9
+
+ add_update p, od, k3, 11, 8, 9, 10
+ add_only p, ev, k3, 9
+ add_only p, od, k3, 10
+ add_only p, ev, k3, 11
+ add_only p, od
+
+ /* update state */
+ add dgbv.2s, dgbv.2s, dg1v.2s
+ add dgav.4s, dgav.4s, dg0v.4s
+
+ cbnz w0, 0b
+
+ /*
+ * Final block: add padding and total bit count.
+ * Skip if we have no total byte count in x4. In that case, the input
+ * size was not a round multiple of the block size, and the padding is
+ * handled by the C code.
+ */
+ cbz x4, 3f
+ movi v9.2d, #0
+ mov x8, #0x80000000
+ movi v10.2d, #0
+ ror x7, x4, #29 // ror(lsl(x4, 3), 32)
+ fmov d8, x8
+ mov x4, #0
+ mov v11.d[0], xzr
+ mov v11.d[1], x7
+ b 2b
+
+ /* store new state */
+3: str dga, [x2]
+ str dgb, [x2, #16]
+ ret
+ENDPROC(sha1_ce_transform)
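
When x4 carries the total byte count, the code past the cbz x4 synthesizes the final padded block itself; the ror x7, x4, #29 trick yields the 64-bit bit count pre-rotated so it lands byte-swapped in the right lanes. A C model of the block being constructed, valid (as in the assembly) only when the message is a whole number of 64-byte blocks:

    #include <stdint.h>
    #include <string.h>

    /*
     * The final block the assembly synthesizes: the mandatory 0x80
     * padding byte, 55 zero bytes, then the message length in bits as
     * a 64-bit big-endian integer.
     */
    static void sha1_final_block(uint64_t total_bytes, uint8_t block[64])
    {
            uint64_t bits = total_bytes << 3;
            int i;

            memset(block, 0, 64);
            block[0] = 0x80;                   /* '1' padding bit */
            for (i = 0; i < 8; i++)            /* big-endian bit count */
                    block[56 + i] = (uint8_t)(bits >> (56 - 8 * i));
    }
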
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
new file mode 100644
index 000000000000..6fe83f37a750
--- /dev/null
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -0,0 +1,174 @@
+/*
+ * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
+ u8 *head, long bytes);
+
+static int sha1_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ *sctx = (struct sha1_state){
+ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+ };
+ return 0;
+}
+
+static int sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if ((partial + len) >= SHA1_BLOCK_SIZE) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA1_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buffer + partial, data, p);
+ data += p;
+ len -= p;
+ }
+
+ blocks = len / SHA1_BLOCK_SIZE;
+ len %= SHA1_BLOCK_SIZE;
+
+ kernel_neon_begin_partial(16);
+ sha1_ce_transform(blocks, data, sctx->state,
+ partial ? sctx->buffer : NULL, 0);
+ kernel_neon_end();
+
+ data += blocks * SHA1_BLOCK_SIZE;
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buffer + partial, data, len);
+ return 0;
+}
+
+static int sha1_final(struct shash_desc *desc, u8 *out)
+{
+ static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
+
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be64 bits = cpu_to_be64(sctx->count << 3);
+ __be32 *dst = (__be32 *)out;
+ int i;
+
+ u32 padlen = SHA1_BLOCK_SIZE
+ - ((sctx->count + sizeof(bits)) % SHA1_BLOCK_SIZE);
+
+ sha1_update(desc, padding, padlen);
+ sha1_update(desc, (const u8 *)&bits, sizeof(bits));
+
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha1_state){};
+ return 0;
+}
+
+static int sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ int blocks;
+ int i;
+
+ if (sctx->count || !len || (len % SHA1_BLOCK_SIZE)) {
+ sha1_update(desc, data, len);
+ return sha1_final(desc, out);
+ }
+
+ /*
+ * Use a fast path if the input is a multiple of 64 bytes. In
+ * this case, there is no need to copy data around, and we can
+ * perform the entire digest calculation in a single invocation
+ * of sha1_ce_transform()
+ */
+ blocks = len / SHA1_BLOCK_SIZE;
+
+ kernel_neon_begin_partial(16);
+ sha1_ce_transform(blocks, data, sctx->state, NULL, len);
+ kernel_neon_end();
+
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha1_state){};
+ return 0;
+}
+
+static int sha1_export(struct shash_desc *desc, void *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ struct sha1_state *dst = out;
+
+ *dst = *sctx;
+ return 0;
+}
+
+static int sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ struct sha1_state const *src = in;
+
+ *sctx = *src;
+ return 0;
+}
+
+static struct shash_alg alg = {
+ .init = sha1_init,
+ .update = sha1_update,
+ .final = sha1_final,
+ .finup = sha1_finup,
+ .export = sha1_export,
+ .import = sha1_import,
+ .descsize = sizeof(struct sha1_state),
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-ce",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sha1_ce_mod_init(void)
+{
+ return crypto_register_shash(&alg);
+}
+
+static void __exit sha1_ce_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_cpu_feature_match(SHA1, sha1_ce_mod_init);
+module_exit(sha1_ce_mod_fini);
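
sha1_update() uses the same buffering pattern as ghash_update() earlier in this series: top up any partial block, hand all whole blocks to the NEON transform in one call (with a buffered block passed first via the head argument), and stash the tail. A distilled sketch of the pattern, with process_blocks() standing in for the transform:

    #include <stdint.h>
    #include <string.h>

    #define BLK 64  /* SHA-1 block size; GHASH uses the same pattern with 16 */

    struct buf_ctx {
            uint8_t  buf[BLK];
            uint64_t count;         /* total bytes fed in so far */
    };

    /* stand-in for the NEON transform (sha1_ce_transform etc.) */
    void process_blocks(const uint8_t *src, int blocks, const uint8_t *head);

    static void buf_update(struct buf_ctx *ctx, const uint8_t *data, size_t len)
    {
            unsigned int partial = ctx->count % BLK;

            ctx->count += len;

            if (partial + len >= BLK) {
                    int blocks;

                    if (partial) {
                            unsigned int p = BLK - partial;

                            memcpy(ctx->buf + partial, data, p);
                            data += p;
                            len -= p;
                    }

                    blocks = len / BLK;
                    len %= BLK;

                    /* buffered block (if any) is consumed before 'data' */
                    process_blocks(data, blocks, partial ? ctx->buf : NULL);
                    data += (size_t)blocks * BLK;
                    partial = 0;
            }
            if (len)
                    memcpy(ctx->buf + partial, data, len);
    }
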
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
new file mode 100644
index 000000000000..7f29fc031ea8
--- /dev/null
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -0,0 +1,156 @@
+/*
+ * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+ .arch armv8-a+crypto
+
+ dga .req q20
+ dgav .req v20
+ dgb .req q21
+ dgbv .req v21
+
+ t0 .req v22
+ t1 .req v23
+
+ dg0q .req q24
+ dg0v .req v24
+ dg1q .req q25
+ dg1v .req v25
+ dg2q .req q26
+ dg2v .req v26
+
+ .macro add_only, ev, rc, s0
+ mov dg2v.16b, dg0v.16b
+ .ifeq \ev
+ add t1.4s, v\s0\().4s, \rc\().4s
+ sha256h dg0q, dg1q, t0.4s
+ sha256h2 dg1q, dg2q, t0.4s
+ .else
+ .ifnb \s0
+ add t0.4s, v\s0\().4s, \rc\().4s
+ .endif
+ sha256h dg0q, dg1q, t1.4s
+ sha256h2 dg1q, dg2q, t1.4s
+ .endif
+ .endm
+
+ .macro add_update, ev, rc, s0, s1, s2, s3
+ sha256su0 v\s0\().4s, v\s1\().4s
+ add_only \ev, \rc, \s1
+ sha256su1 v\s0\().4s, v\s2\().4s, v\s3\().4s
+ .endm
+
+ /*
+ * The SHA-256 round constants
+ */
+ .align 4
+.Lsha2_rcon:
+ .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
+ .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
+ .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
+ .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
+ .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
+ .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
+ .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
+ .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
+ .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
+ .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
+ .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
+ .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
+ .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
+ .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
+ .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
+ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+
+ /*
+ * void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
+ * u8 *head, long bytes)
+ */
+ENTRY(sha2_ce_transform)
+ /* load round constants */
+ adr x8, .Lsha2_rcon
+ ld1 { v0.4s- v3.4s}, [x8], #64
+ ld1 { v4.4s- v7.4s}, [x8], #64
+ ld1 { v8.4s-v11.4s}, [x8], #64
+ ld1 {v12.4s-v15.4s}, [x8]
+
+ /* load state */
+ ldp dga, dgb, [x2]
+
+ /* load partial input (if supplied) */
+ cbz x3, 0f
+ ld1 {v16.4s-v19.4s}, [x3]
+ b 1f
+
+ /* load input */
+0: ld1 {v16.4s-v19.4s}, [x1], #64
+ sub w0, w0, #1
+
+1:
+CPU_LE( rev32 v16.16b, v16.16b )
+CPU_LE( rev32 v17.16b, v17.16b )
+CPU_LE( rev32 v18.16b, v18.16b )
+CPU_LE( rev32 v19.16b, v19.16b )
+
+2: add t0.4s, v16.4s, v0.4s
+ mov dg0v.16b, dgav.16b
+ mov dg1v.16b, dgbv.16b
+
+ add_update 0, v1, 16, 17, 18, 19
+ add_update 1, v2, 17, 18, 19, 16
+ add_update 0, v3, 18, 19, 16, 17
+ add_update 1, v4, 19, 16, 17, 18
+
+ add_update 0, v5, 16, 17, 18, 19
+ add_update 1, v6, 17, 18, 19, 16
+ add_update 0, v7, 18, 19, 16, 17
+ add_update 1, v8, 19, 16, 17, 18
+
+ add_update 0, v9, 16, 17, 18, 19
+ add_update 1, v10, 17, 18, 19, 16
+ add_update 0, v11, 18, 19, 16, 17
+ add_update 1, v12, 19, 16, 17, 18
+
+ add_only 0, v13, 17
+ add_only 1, v14, 18
+ add_only 0, v15, 19
+ add_only 1
+
+ /* update state */
+ add dgav.4s, dgav.4s, dg0v.4s
+ add dgbv.4s, dgbv.4s, dg1v.4s
+
+ /* handled all input blocks? */
+ cbnz w0, 0b
+
+ /*
+ * Final block: add padding and total bit count.
+ * Skip if we have no total byte count in x4. In that case, the input
+ * size was not a round multiple of the block size, and the padding is
+ * handled by the C code.
+ */
+ cbz x4, 3f
+ movi v17.2d, #0
+ mov x8, #0x80000000
+ movi v18.2d, #0
+ ror x7, x4, #29 // ror(lsl(x4, 3), 32)
+ fmov d16, x8
+ mov x4, #0
+ mov v19.d[0], xzr
+ mov v19.d[1], x7
+ b 2b
+
+ /* store new state */
+3: stp dga, dgb, [x2]
+ ret
+ENDPROC(sha2_ce_transform)
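
The "ror x7, x4, #29" in the padding path leans on the identity ror(x, 29) == ror(x << 3, 32) for any 64-bit x whose top three bits are clear (always true of a realistic byte count), so a single instruction both scales the count to bits and swaps the 32-bit halves into big-endian word order. A quick C check of that identity (a sketch, not part of the patch):

  #include <assert.h>
  #include <stdint.h>

  static uint64_t rotr64(uint64_t x, unsigned int n)
  {
          return (x >> n) | (x << (64 - n));
  }

  int main(void)
  {
          uint64_t count = 0x0123456789abcdefULL;  /* byte count < 2^61 */

          /* ror(count, 29) == ror(count << 3, 32): the low 3 bits of
           * count << 3 are always zero, so nothing is lost either way. */
          assert(rotr64(count, 29) == rotr64(count << 3, 32));
          return 0;
  }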
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
new file mode 100644
index 000000000000..c294e67d3925
--- /dev/null
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -0,0 +1,255 @@
+/*
+ * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state,
+ u8 *head, long bytes);
+
+static int sha224_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ *sctx = (struct sha256_state){
+ .state = {
+ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+ }
+ };
+ return 0;
+}
+
+static int sha256_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ *sctx = (struct sha256_state){
+ .state = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+ }
+ };
+ return 0;
+}
+
+static int sha2_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if ((partial + len) >= SHA256_BLOCK_SIZE) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA256_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buf + partial, data, p);
+ data += p;
+ len -= p;
+ }
+
+ blocks = len / SHA256_BLOCK_SIZE;
+ len %= SHA256_BLOCK_SIZE;
+
+ kernel_neon_begin_partial(28);
+ sha2_ce_transform(blocks, data, sctx->state,
+ partial ? sctx->buf : NULL, 0);
+ kernel_neon_end();
+
+ data += blocks * SHA256_BLOCK_SIZE;
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buf + partial, data, len);
+ return 0;
+}
+
+static void sha2_final(struct shash_desc *desc)
+{
+ static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
+
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be64 bits = cpu_to_be64(sctx->count << 3);
+ u32 padlen = SHA256_BLOCK_SIZE
+ - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE);
+
+ sha2_update(desc, padding, padlen);
+ sha2_update(desc, (const u8 *)&bits, sizeof(bits));
+}
+
+static int sha224_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ int i;
+
+ sha2_final(desc);
+
+ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
+
+static int sha256_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ int i;
+
+ sha2_final(desc);
+
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
+
+static void sha2_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ int blocks;
+
+ if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) {
+ sha2_update(desc, data, len);
+ sha2_final(desc);
+ return;
+ }
+
+ /*
+ * Use a fast path if the input is a multiple of 64 bytes. In
+ * this case, there is no need to copy data around, and we can
+ * perform the entire digest calculation in a single invocation
+ * of sha2_ce_transform().
+ */
+ blocks = len / SHA256_BLOCK_SIZE;
+
+ kernel_neon_begin_partial(28);
+ sha2_ce_transform(blocks, data, sctx->state, NULL, len);
+ kernel_neon_end();
+ data += blocks * SHA256_BLOCK_SIZE;
+}
+
+static int sha224_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ int i;
+
+ sha2_finup(desc, data, len);
+
+ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
+
+static int sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ int i;
+
+ sha2_finup(desc, data, len);
+
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], dst++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
+
+static int sha2_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state *dst = out;
+
+ *dst = *sctx;
+ return 0;
+}
+
+static int sha2_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state const *src = in;
+
+ *sctx = *src;
+ return 0;
+}
+
+static struct shash_alg algs[] = { {
+ .init = sha224_init,
+ .update = sha2_update,
+ .final = sha224_final,
+ .finup = sha224_finup,
+ .export = sha2_export,
+ .import = sha2_import,
+ .descsize = sizeof(struct sha256_state),
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-ce",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .init = sha256_init,
+ .update = sha2_update,
+ .final = sha256_final,
+ .finup = sha256_finup,
+ .export = sha2_export,
+ .import = sha2_import,
+ .descsize = sizeof(struct sha256_state),
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-ce",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static int __init sha2_ce_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha2_ce_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_cpu_feature_match(SHA2, sha2_ce_mod_init);
+module_exit(sha2_ce_mod_fini);
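
Once registered, these drivers are reached through the normal shash API; nothing arm64-specific leaks into callers. A hedged sketch of a one-shot digest over the generic interface (sha256_digest_once is a hypothetical helper; the crypto API calls are standard):

  #include <crypto/hash.h>
  #include <linux/slab.h>

  /* Sketch: one-shot SHA-256; "sha256" resolves to the highest-priority
   * implementation, e.g. "sha256-ce" on Crypto Extensions hardware. */
  static int sha256_digest_once(const u8 *data, unsigned int len, u8 *out)
  {
          struct crypto_shash *tfm;
          struct shash_desc *desc;
          int ret;

          tfm = crypto_alloc_shash("sha256", 0, 0);
          if (IS_ERR(tfm))
                  return PTR_ERR(tfm);

          desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                         GFP_KERNEL);
          if (!desc) {
                  crypto_free_shash(tfm);
                  return -ENOMEM;
          }
          desc->tfm = tfm;

          ret = crypto_shash_digest(desc, data, len, out);

          kfree(desc);
          crypto_free_shash(tfm);
          return ret;
  }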
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 79a642d199f2..cfe9860b2076 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -10,6 +10,7 @@ generic-y += delay.h
generic-y += div64.h
generic-y += dma.h
generic-y += emergency-restart.h
+generic-y += early_ioremap.h
generic-y += errno.h
generic-y += ftrace.h
generic-y += hw_irq.h
@@ -26,16 +27,17 @@ generic-y += mman.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += pci.h
-generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += resource.h
+generic-y += rwsem.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
+generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index bf6ab242f047..be56d33c5dbf 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -26,7 +26,13 @@
#include <clocksource/arm_arch_timer.h>
-static inline void arch_timer_reg_write(int access, int reg, u32 val)
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code.
+ */
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
@@ -36,8 +42,6 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val)
case ARCH_TIMER_REG_TVAL:
asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
break;
- default:
- BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
@@ -47,17 +51,14 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val)
case ARCH_TIMER_REG_TVAL:
asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
break;
- default:
- BUILD_BUG();
}
- } else {
- BUILD_BUG();
}
isb();
}
-static inline u32 arch_timer_reg_read(int access, int reg)
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
u32 val;
@@ -69,8 +70,6 @@ static inline u32 arch_timer_reg_read(int access, int reg)
case ARCH_TIMER_REG_TVAL:
asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
break;
- default:
- BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
@@ -80,11 +79,7 @@ static inline u32 arch_timer_reg_read(int access, int reg)
case ARCH_TIMER_REG_TVAL:
asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
break;
- default:
- BUILD_BUG();
}
- } else {
- BUILD_BUG();
}
return val;
@@ -97,27 +92,47 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline void __cpuinit arch_counter_set_user_access(void)
+static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
-
- /* Disable user access to the timers and the physical counter. */
asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- cntkctl &= ~((3 << 8) | (1 << 0));
+ return cntkctl;
+}
- /* Enable user access to the virtual counter and frequency. */
- cntkctl |= (1 << 1);
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
-static inline u64 arch_counter_get_cntpct(void)
+static inline void __cpuinit arch_counter_set_user_access(void)
{
- u64 cval;
+ u32 cntkctl = arch_timer_get_cntkctl();
- isb();
- asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
+ /* Disable user access to the timers and the physical counter */
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
- return cval;
+ /* Enable user access to the virtual counter */
+ cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+ arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+ cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+ /* Set the divider and enable virtual event stream */
+ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+ | ARCH_TIMER_VIRT_EVT_EN;
+ arch_timer_set_cntkctl(cntkctl);
+ elf_hwcap |= HWCAP_EVTSTRM;
+#ifdef CONFIG_COMPAT
+ compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
+#endif
}
static inline u64 arch_counter_get_cntvct(void)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5aceb83b3f5c..fd3e3924041b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -115,3 +115,34 @@ lr .req x30 // link register
.align 7
b \label
.endm
+
+/*
+ * Select code when configured for BE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_BE(code...) code
+#else
+#define CPU_BE(code...)
+#endif
+
+/*
+ * Select code when configured for LE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_LE(code...)
+#else
+#define CPU_LE(code...) code
+#endif
+
+/*
+ * Define a macro that constructs a 64-bit value by concatenating two
+ * 32-bit registers. Note that on big endian systems the order of the
+ * registers is swapped.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ .macro regs_to_64, rd, lbits, hbits
+#else
+ .macro regs_to_64, rd, hbits, lbits
+#endif
+ orr \rd, \lbits, \hbits, lsl #32
+ .endm
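
regs_to_64 is the assembler-side twin of the usual C idiom for joining two 32-bit halves; swapping the operand order under big endian keeps callers identical on both configurations. For comparison, the C equivalent (a sketch):

  #include <stdint.h>

  /* Sketch: what regs_to_64 computes from the low and high words. */
  static inline uint64_t regs_to_64(uint32_t lo, uint32_t hi)
  {
          return (uint64_t)lo | ((uint64_t)hi << 32);
  }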
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 836364468571..a049bf7f5150 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_add_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_sub_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
unsigned long tmp;
int oldval;
+ smp_mb();
+
asm volatile("// atomic_cmpxchg\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" cmp %w1, %w3\n"
" b.ne 2f\n"
-" stlxr %w0, %w4, %2\n"
+" stxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
+ smp_mb();
return oldval;
}
@@ -173,7 +176,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
*/
#define ATOMIC64_INIT(i) { (i) }
-#define atomic64_read(v) (*(volatile long long *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
static inline void atomic64_add(u64 i, atomic64_t *v)
@@ -187,8 +190,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -197,14 +199,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_add_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" add %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -219,8 +222,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -229,14 +231,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_sub_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" sub %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -245,17 +248,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
long oldval;
unsigned long res;
+ smp_mb();
+
asm volatile("// atomic64_cmpxchg\n"
-"1: ldaxr %1, %2\n"
+"1: ldxr %1, %2\n"
" cmp %1, %3\n"
" b.ne 2f\n"
-" stlxr %w0, %4, %2\n"
+" stxr %w0, %4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
+ smp_mb();
return oldval;
}
@@ -267,11 +273,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
+" dmb ish\n"
"2:"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
:
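
Moving the value-returning atomics from acquire/release exclusives to plain ldxr/stlxr plus an explicit smp_mb() preserves their documented full-barrier semantics. Callers routinely depend on that ordering, as in the classic refcount-release pattern (a sketch of typical usage, not taken from this patch; obj_put is illustrative):

  #include <linux/atomic.h>
  #include <linux/slab.h>

  struct obj {
          atomic_t refcount;
          /* ... payload ... */
  };

  /* Sketch: the full barrier in atomic_sub_return() orders all prior
   * accesses to *obj before the decrement, so the thread that sees the
   * count reach zero also sees every earlier store to the object. */
  static void obj_put(struct obj *obj)
  {
          if (atomic_sub_return(1, &obj->refcount) == 0)
                  kfree(obj);
  }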
diff --git a/arch/arm64/include/asm/bL_switcher.h b/arch/arm64/include/asm/bL_switcher.h
new file mode 100644
index 000000000000..2bee500b7f54
--- /dev/null
+++ b/arch/arm64/include/asm/bL_switcher.h
@@ -0,0 +1,54 @@
+/*
+ * Based on the stubs for the ARM implementation which is:
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+static inline int bL_switch_request(unsigned int cpu,
+ unsigned int new_cluster_id)
+{
+ return -ENOTSUPP;
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+
+#endif
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index d4a63338a53c..c98d0a88916a 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,9 +25,10 @@
#define wfi() asm volatile("wfi" : : : "memory")
#define isb() asm volatile("isb" : : : "memory")
-#define dsb() asm volatile("dsb sy" : : : "memory")
+#define dmb(opt) asm volatile("dmb sy" : : : "memory")
+#define dsb(opt) asm volatile("dsb sy" : : : "memory")
-#define mb() dsb()
+#define mb() dsb(sy)
#define rmb() asm volatile("dsb ld" : : : "memory")
#define wmb() asm volatile("dsb st" : : : "memory")
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 3300cbd18a89..f2defe1c380c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -85,6 +85,13 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
}
/*
+ * Cache maintenance functions used by the DMA API. Not to be used directly.
+ */
+extern void __dma_map_area(const void *, size_t, int);
+extern void __dma_unmap_area(const void *, size_t, int);
+extern void __dma_flush_range(const void *, const void *);
+
+/*
* Copy user data from/to a page which is mapped into a different
 * process's address space. Really, we want to allow our "user
* space" model to handle this.
@@ -116,6 +123,7 @@ extern void flush_dcache_page(struct page *);
static inline void __flush_icache_all(void)
{
asm("ic ialluis");
+ dsb(ish);
}
#define flush_dcache_mmap_lock(mapping) \
@@ -123,9 +131,6 @@ static inline void __flush_icache_all(void)
#define flush_dcache_mmap_unlock(mapping) \
spin_unlock_irq(&(mapping)->tree_lock)
-#define flush_icache_user_range(vma,page,addr,len) \
- flush_dcache_page(page)
-
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
@@ -133,19 +138,10 @@ static inline void __flush_icache_all(void)
#define flush_icache_page(vma,page) do { } while (0)
/*
- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
- * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
- * caches, since the direct-mappings of these pages may contain cached
- * data, we need to do a full cache flush to ensure that writebacks
- * don't corrupt data placed into these pages via the new mappings.
+ * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
*/
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
- /*
- * set_pte_at() called from vmap_pte_range() does not
- * have a DSB after cleaning the cache line.
- */
- dsb();
}
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 8a8ce0e73a38..ddb9d7830558 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,49 +29,55 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) {
case 1:
asm volatile("// __xchg1\n"
- "1: ldaxrb %w0, %2\n"
+ "1: ldxrb %w0, %2\n"
" stlxrb %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 2:
asm volatile("// __xchg2\n"
- "1: ldaxrh %w0, %2\n"
+ "1: ldxrh %w0, %2\n"
" stlxrh %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 4:
asm volatile("// __xchg4\n"
- "1: ldaxr %w0, %2\n"
+ "1: ldxr %w0, %2\n"
" stlxr %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 8:
asm volatile("// __xchg8\n"
- "1: ldaxr %0, %2\n"
+ "1: ldxr %0, %2\n"
" stlxr %w1, %3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
default:
BUILD_BUG();
}
+ smp_mb();
return ret;
}
#define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+ __ret; \
+})
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
@@ -158,19 +164,27 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
return ret;
}
-#define cmpxchg(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
-
-#define cmpxchg_local(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
+ sizeof(*(ptr))); \
+ __ret; \
+})
+
+#define cmpxchg_local(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ __ret; \
+})
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+#define cmpxchg64_relaxed(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+
#endif /* __ASM_CMPXCHG_H */
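
Rewriting xchg()/cmpxchg() as statement expressions keeps the result typed like the pointee instead of funnelling everything through unsigned long. Typical usage, where the returned value needs no casts (a sketch; try_claim and the enum are illustrative):

  #include <linux/atomic.h>

  enum state { IDLE, CLAIMED };

  static enum state owner;

  /* Sketch: claim a resource exactly once; cmpxchg() returns the old
   * value with the type of *ptr, so the comparison is cast-free. */
  static bool try_claim(void)
  {
          return cmpxchg(&owner, IDLE, CLAIMED) == IDLE;
  }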
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 899af807ef0f..253e33bc94fb 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -26,7 +26,11 @@
#include <linux/ptrace.h>
#define COMPAT_USER_HZ 100
+#ifdef __AARCH64EB__
+#define COMPAT_UTS_MACHINE "armv8b\0\0"
+#else
#define COMPAT_UTS_MACHINE "armv8l\0\0"
+#endif
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -73,13 +77,23 @@ struct compat_timeval {
};
struct compat_stat {
+#ifdef __AARCH64EB__
+ short st_dev;
+ short __pad1;
+#else
compat_dev_t st_dev;
+#endif
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_ushort_t st_nlink;
__compat_uid16_t st_uid;
__compat_gid16_t st_gid;
+#ifdef __AARCH64EB__
+ short st_rdev;
+ short __pad2;
+#else
compat_dev_t st_rdev;
+#endif
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
@@ -214,7 +228,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
-#define compat_user_stack_pointer() (current_pt_regs()->compat_sp)
+#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
static inline void __user *arch_compat_alloc_user_space(long len)
{
@@ -291,11 +305,6 @@ static inline int is_compat_thread(struct thread_info *thread)
#else /* !CONFIG_COMPAT */
-static inline int is_compat_task(void)
-{
- return 0;
-}
-
static inline int is_compat_thread(struct thread_info *thread)
{
return 0;
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
new file mode 100644
index 000000000000..152413076503
--- /dev/null
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPU_OPS_H
+#define __ASM_CPU_OPS_H
+
+#include <linux/init.h>
+#include <linux/threads.h>
+
+struct device_node;
+
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name: Name of the property as it appears in a devicetree cpu node's
+ * enable-method property.
+ * @cpu_init: Reads any data necessary for a specific enable-method from the
+ * devicetree, for a given cpu node and proposed logical id.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
+ * mechanism for doing so, tests whether it is possible to boot
+ * the given CPU.
+ * @cpu_boot: Boots a cpu into the kernel.
+ * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
+ * synchronisation. Called from the cpu being booted.
+ * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
+ * reason, which will cause the hot unplug to be aborted. Called
+ * from the cpu to be killed.
+ * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
+ * cpu being killed.
+ * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
+ * to wrong parameters or error conditions. Called from the
+ * CPU being suspended. Must be called with IRQs disabled.
+ */
+struct cpu_operations {
+ const char *name;
+ int (*cpu_init)(struct device_node *, unsigned int);
+ int (*cpu_prepare)(unsigned int);
+ int (*cpu_boot)(unsigned int);
+ void (*cpu_postboot)(void);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_disable)(unsigned int cpu);
+ void (*cpu_die)(unsigned int cpu);
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+ int (*cpu_suspend)(unsigned long);
+#endif
+};
+
+extern const struct cpu_operations *cpu_ops[NR_CPUS];
+extern int __init cpu_read_ops(struct device_node *dn, int cpu);
+extern void __init cpu_read_bootcpu_ops(void);
+
+#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
new file mode 100644
index 000000000000..cd4ac0516488
--- /dev/null
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <asm/hwcap.h>
+
+/*
+ * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
+ * in the kernel and for user space to keep track of which optional features
+ * are supported by the current system. So let's map feature 'x' to HWCAP_x.
+ * Note that HWCAP_x constants are bit fields so we need to take the log.
+ */
+
+#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
+#define cpu_feature(x) ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ return elf_hwcap & (1UL << num);
+}
+
+#endif
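
The cpu_feature()/cpu_have_feature() pair is what module_cpu_feature_match(), used by the sha1-ce and sha2-ce modules earlier in this series, expands to underneath. Open-coded, the check looks like this (a sketch; my_ce_init is illustrative):

  #include <asm/cpufeature.h>

  static int __init my_ce_init(void)
  {
          /* cpu_feature(SHA2) == ilog2(HWCAP_SHA2): the bit index of
           * the hwcap, tested against the kernel's elf_hwcap mask. */
          if (!cpu_have_feature(cpu_feature(SHA2)))
                  return -ENODEV;
          /* ... register the accelerated implementation ... */
          return 0;
  }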
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index cf2749488cd4..c404fb0df3a6 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -16,32 +16,35 @@
#ifndef __ASM_CPUTYPE_H
#define __ASM_CPUTYPE_H
-#define ID_MIDR_EL1 "midr_el1"
-#define ID_MPIDR_EL1 "mpidr_el1"
-#define ID_CTR_EL0 "ctr_el0"
-
-#define ID_AA64PFR0_EL1 "id_aa64pfr0_el1"
-#define ID_AA64DFR0_EL1 "id_aa64dfr0_el1"
-#define ID_AA64AFR0_EL1 "id_aa64afr0_el1"
-#define ID_AA64ISAR0_EL1 "id_aa64isar0_el1"
-#define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1"
-
#define INVALID_HWID ULONG_MAX
#define MPIDR_HWID_BITMASK 0xff00ffffff
+#define MPIDR_LEVEL_BITS_SHIFT 3
+#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_LEVEL_SHIFT(level) \
+ (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+ ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
+
#define read_cpuid(reg) ({ \
u64 __val; \
- asm("mrs %0, " reg : "=r" (__val)); \
+ asm("mrs %0, " #reg : "=r" (__val)); \
__val; \
})
#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_PART_AEM_V8 0xD0F0
#define ARM_CPU_PART_FOUNDATION 0xD000
#define ARM_CPU_PART_CORTEX_A57 0xD070
+#define APM_CPU_PART_POTENZA 0x0000
+
#ifndef __ASSEMBLY__
/*
@@ -51,12 +54,12 @@
*/
static inline u32 __attribute_const__ read_cpuid_id(void)
{
- return read_cpuid(ID_MIDR_EL1);
+ return read_cpuid(MIDR_EL1);
}
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
- return read_cpuid(ID_MPIDR_EL1);
+ return read_cpuid(MPIDR_EL1);
}
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
@@ -71,7 +74,7 @@ static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
- return read_cpuid(ID_CTR_EL0);
+ return read_cpuid(CTR_EL0);
}
#endif /* __ASSEMBLY__ */
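
MPIDR_LEVEL_SHIFT(level) evaluates to 0, 8, 16 and 32 for levels 0 through 3, matching the Aff0/Aff1/Aff2 bytes and the detached Aff3 byte at bits [39:32]. A worked example, redefining the macros from the header so it runs standalone (a sketch):

  #include <assert.h>
  #include <stdint.h>

  #define MPIDR_LEVEL_BITS_SHIFT  3
  #define MPIDR_LEVEL_BITS        (1 << MPIDR_LEVEL_BITS_SHIFT)
  #define MPIDR_LEVEL_MASK        ((1 << MPIDR_LEVEL_BITS) - 1)

  #define MPIDR_LEVEL_SHIFT(level) \
          (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)

  #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
          ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)

  int main(void)
  {
          uint64_t mpidr = 0x0000000103;  /* cluster 1, core 3 */

          assert(MPIDR_AFFINITY_LEVEL(mpidr, 0) == 3);  /* Aff0 */
          assert(MPIDR_AFFINITY_LEVEL(mpidr, 1) == 1);  /* Aff1 */
          assert(MPIDR_AFFINITY_LEVEL(mpidr, 2) == 0);  /* Aff2 */
          return 0;
  }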
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7eaa0b302493..7c951a510b54 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -26,6 +26,53 @@
#define DBG_ESR_EVT_HWWP 0x2
#define DBG_ESR_EVT_BRK 0x6
+/*
+ * Breakpoint instruction encoding
+ */
+#define BREAK_INSTR_SIZE 4
+
+/*
+ * ESR values expected for dynamic and compile-time BRK instructions
+ */
+#define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xfffff))
+
+/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgdb are 0x400 - 0x7ff
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ */
+#define KGDB_DYN_DGB_BRK_IMM 0x400
+#define KDBG_COMPILED_DBG_BRK_IMM 0x401
+
+/*
+ * BRK instruction encoding
+ * The #imm16 value should be placed at bits[20:5] within the BRK instruction
+ */
+#define AARCH64_BREAK_MON 0xd4200000
+
+/*
+ * Extract byte from BRK instruction
+ */
+#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
+ ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
+
+/*
+ * Extract byte from BRK #imm16
+ */
+#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
+ (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
+
+#define KGDB_DYN_DGB_BRK_BYTE(x) \
+ (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
+
+#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0)
+#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1)
+#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2)
+#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3)
+
+#define CACHE_FLUSH_IS_SAFE 1
+
enum debug_el {
DBG_ACTIVE_EL0 = 0,
DBG_ACTIVE_EL1,
@@ -43,25 +90,29 @@ enum debug_el {
#ifndef __ASSEMBLY__
struct task_struct;
-#define local_dbg_save(flags) \
- do { \
- typecheck(unsigned long, flags); \
- asm volatile( \
- "mrs %0, daif // local_dbg_save\n" \
- "msr daifset, #8" \
- : "=r" (flags) : : "memory"); \
- } while (0)
-
-#define local_dbg_restore(flags) \
- do { \
- typecheck(unsigned long, flags); \
- asm volatile( \
- "msr daif, %0 // local_dbg_restore\n" \
- : : "r" (flags) : "memory"); \
- } while (0)
-
#define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
+#define DBG_HOOK_HANDLED 0
+#define DBG_HOOK_ERROR 1
+
+struct step_hook {
+ struct list_head node;
+ int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_step_hook(struct step_hook *hook);
+void unregister_step_hook(struct step_hook *hook);
+
+struct break_hook {
+ struct list_head node;
+ u32 esr_val;
+ u32 esr_mask;
+ int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_break_hook(struct break_hook *hook);
+void unregister_break_hook(struct break_hook *hook);
+
u8 debug_monitors_arch(void);
void enable_debug_monitors(enum debug_el el);
@@ -83,6 +134,15 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
}
#endif
+#ifdef CONFIG_COMPAT
+int aarch32_break_handler(struct pt_regs *regs);
+#else
+static inline int aarch32_break_handler(struct pt_regs *regs)
+{
+ return -EFAULT;
+}
+#endif
+
#endif /* __ASSEMBLY */
#endif /* __KERNEL__ */
#endif /* __ASM_DEBUG_MONITORS_H */
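
The hook lists give subsystems such as kgdb and kprobes a way to claim specific BRK immediates without patching the fault paths. A hedged sketch of a consumer (the immediate 0x123 and the handler name are hypothetical):

  #include <asm/debug-monitors.h>
  #include <linux/ptrace.h>

  static int my_brk_handler(struct pt_regs *regs, unsigned int esr)
  {
          /* Step over the BRK instruction we claimed. */
          regs->pc += BREAK_INSTR_SIZE;
          return DBG_HOOK_HANDLED;
  }

  static struct break_hook my_break_hook = {
          .esr_val  = DBG_ESR_VAL_BRK(0x123),  /* hypothetical imm16 */
          .esr_mask = 0xffffffff,
          .fn       = my_brk_handler,
  };

  /* register_break_hook(&my_break_hook) at init time, and
   * unregister_break_hook(&my_break_hook) on teardown. */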
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 0d8453c755a8..cf98b362094b 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -18,6 +18,9 @@
struct dev_archdata {
struct dma_map_ops *dma_ops;
+#ifdef CONFIG_IOMMU_API
+ void *iommu; /* private IOMMU data */
+#endif
};
struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
new file mode 100644
index 000000000000..14c4c0ca7f2a
--- /dev/null
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_DMA_CONTIGUOUS_H
+#define _ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_DMA_CMA
+
+#include <linux/types.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 994776894198..00a41aab4a37 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -25,7 +25,10 @@
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops coherent_swiotlb_dma_ops;
+extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
@@ -35,6 +38,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return dev->archdata.dma_ops;
}
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+ dev->archdata.dma_ops = ops;
+}
+
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
@@ -81,8 +89,12 @@ static inline void dma_mark_clean(void *addr, size_t size)
{
}
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+#define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *vaddr;
@@ -90,13 +102,14 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
return vaddr;
- vaddr = ops->alloc(dev, size, dma_handle, flags, NULL);
+ vaddr = ops->alloc(dev, size, dma_handle, flags, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
return vaddr;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dev_addr)
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dev_addr,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -104,7 +117,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
return;
debug_dma_free_coherent(dev, size, vaddr, dev_addr);
- ops->free(dev, size, vaddr, dev_addr, NULL);
+ ops->free(dev, size, vaddr, dev_addr, attrs);
}
/*
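
With dma_alloc_coherent()/dma_free_coherent() now defined in terms of dma_alloc_attrs()/dma_free_attrs(), existing callers keep compiling unchanged while attribute-aware paths become possible. The common pattern (a sketch; setup_ring is illustrative):

  #include <linux/dma-mapping.h>

  /* Sketch: a coherent buffer for a device; a NULL attrs pointer takes
   * the same path as before this change. */
  static int setup_ring(struct device *dev, size_t size)
  {
          dma_addr_t dma;
          void *ring;

          ring = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
          if (!ring)
                  return -ENOMEM;

          /* ... hand 'dma' to the hardware, use 'ring' from the CPU ... */

          dma_free_coherent(dev, size, ring, dma);
          return 0;
  }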
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
new file mode 100644
index 000000000000..5a46c4e7f539
--- /dev/null
+++ b/arch/arm64/include/asm/efi.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_EFI_H
+#define _ASM_EFI_H
+
+#include <asm/io.h>
+
+#ifdef CONFIG_EFI
+extern void efi_init(void);
+extern void efi_idmap_init(void);
+#else
+#define efi_init()
+#define efi_idmap_init()
+#endif
+
+#endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index fe32c0e4ac01..01d3aab64b79 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -33,8 +33,6 @@ typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fpsimd_state elf_fpregset_t;
-#define EM_AARCH64 183
-
/*
* AArch64 static relocation types.
*/
@@ -92,11 +90,24 @@ typedef struct user_fpsimd_state elf_fpregset_t;
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
+#ifdef __AARCH64EB__
+#define ELF_DATA ELFDATA2MSB
+#else
#define ELF_DATA ELFDATA2LSB
+#endif
#define ELF_ARCH EM_AARCH64
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
#define ELF_PLATFORM_SIZE 16
+#ifdef __AARCH64EB__
+#define ELF_PLATFORM ("aarch64_be")
+#else
#define ELF_PLATFORM ("aarch64")
+#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -151,8 +162,12 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#ifdef CONFIG_COMPAT
-#define EM_ARM 40
+
+#ifdef __AARCH64EB__
+#define COMPAT_ELF_PLATFORM ("v8b")
+#else
#define COMPAT_ELF_PLATFORM ("v8l")
+#endif
#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3))
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 78834123a32e..c4a7f940b387 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -42,7 +42,7 @@
#define ESR_EL1_EC_SP_ALIGN (0x26)
#define ESR_EL1_EC_FP_EXC32 (0x28)
#define ESR_EL1_EC_FP_EXC64 (0x2C)
-#define ESR_EL1_EC_SERRROR (0x2F)
+#define ESR_EL1_EC_SERROR (0x2F)
#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
new file mode 100644
index 000000000000..5f7bfe6df723
--- /dev/null
+++ b/arch/arm64/include/asm/fixmap.h
@@ -0,0 +1,67 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ * Copyright (C) 2013 Mark Salter <msalter@redhat.com>
+ *
+ * Adapted from arch/x86_64 version.
+ *
+ */
+
+#ifndef _ASM_ARM64_FIXMAP_H
+#define _ASM_ARM64_FIXMAP_H
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/page.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * page-sized. Use set_fixmap(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ */
+enum fixed_addresses {
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_permanent_fixed_addresses,
+
+ /*
+ * Temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define NR_FIX_BTMAPS 4
+#else
+#define NR_FIX_BTMAPS 64
+#endif
+#define FIX_BTMAPS_SLOTS 7
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
+
+extern void __early_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags);
+
+#define __set_fixmap __early_set_fixmap
+
+#include <asm-generic/fixmap.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_ARM64_FIXMAP_H */
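
Before ioremap() works, a boot-time mapping can be installed into one of these fixed slots; the virtual address is a compile-time constant derived from the index. A hedged sketch, assuming FIXADDR_TOP is supplied elsewhere by the arch and using fix_to_virt() from asm-generic/fixmap.h (map_earlycon is hypothetical):

  #include <asm/fixmap.h>

  /* Sketch: map an early console's registers before ioremap() is up. */
  static void __iomem *map_earlycon(phys_addr_t paddr)
  {
          __early_set_fixmap(FIX_EARLYCON_MEM_BASE, paddr, FIXMAP_PAGE_IO);
          return (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
  }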
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index c43b4ac13008..50f559f574fe 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -37,8 +37,21 @@ struct fpsimd_state {
u32 fpcr;
};
};
+ /* the id of the last cpu to have restored this state */
+ unsigned int cpu;
};
+/*
+ * Struct for stacking the bottom 'n' FP/SIMD registers.
+ */
+struct fpsimd_partial_state {
+ u32 fpsr;
+ u32 fpcr;
+ u32 num_regs;
+ __uint128_t vregs[32];
+};
+
+
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f
@@ -58,6 +71,16 @@ extern void fpsimd_load_state(struct fpsimd_state *state);
extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);
+extern void fpsimd_preserve_current_state(void);
+extern void fpsimd_restore_current_state(void);
+extern void fpsimd_update_current_state(struct fpsimd_state *state);
+
+extern void fpsimd_flush_task_state(struct task_struct *target);
+
+extern void fpsimd_save_partial_state(struct fpsimd_partial_state *state,
+ u32 num_regs);
+extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state);
+
#endif
#endif
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index bbec599c96bd..768414d55e64 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -62,3 +62,38 @@
ldr w\tmpnr, [\state, #16 * 2 + 4]
msr fpcr, x\tmpnr
.endm
+
+.altmacro
+.macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2
+ mrs x\tmpnr1, fpsr
+ str w\numnr, [\state, #8]
+ mrs x\tmpnr2, fpcr
+ stp w\tmpnr1, w\tmpnr2, [\state]
+ adr x\tmpnr1, 0f
+ add \state, \state, x\numnr, lsl #4
+ sub x\tmpnr1, x\tmpnr1, x\numnr, lsl #1
+ br x\tmpnr1
+ .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
+ .irp qb, %(qa + 1)
+ stp q\qa, q\qb, [\state, # -16 * \qa - 16]
+ .endr
+ .endr
+0:
+.endm
+
+.macro fpsimd_restore_partial state, tmpnr1, tmpnr2
+ ldp w\tmpnr1, w\tmpnr2, [\state]
+ msr fpsr, x\tmpnr1
+ msr fpcr, x\tmpnr2
+ adr x\tmpnr1, 0f
+ ldr w\tmpnr2, [\state, #8]
+ add \state, \state, x\tmpnr2, lsl #4
+ sub x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1
+ br x\tmpnr1
+ .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
+ .irp qb, %(qa + 1)
+ ldp q\qa, q\qb, [\state, # -16 * \qa - 16]
+ .endr
+ .endr
+0:
+.endm
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
new file mode 100644
index 000000000000..c5534facf941
--- /dev/null
+++ b/arch/arm64/include/asm/ftrace.h
@@ -0,0 +1,59 @@
+/*
+ * arch/arm64/include/asm/ftrace.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_FTRACE_H
+#define __ASM_FTRACE_H
+
+#include <asm/insn.h>
+
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
+
+#ifndef __ASSEMBLY__
+#include <linux/compat.h>
+
+extern void _mcount(unsigned long);
+extern void *return_address(unsigned int);
+
+struct dyn_arch_ftrace {
+ /* No extra data needed for arm64 */
+};
+
+extern unsigned long ftrace_graph_call;
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /*
+ * addr is the address of the mcount call instruction.
+ * recordmcount does the necessary offset calculation.
+ */
+ return addr;
+}
+
+#define ftrace_return_address(n) return_address(n)
+
+/*
+ * Because AArch32 mode does not share the same syscall table with AArch64,
+ * tracing compat syscalls may result in reporting bogus syscalls or even
+ * a hang, so just do not trace them.
+ * See kernel/trace/trace_syscalls.c
+ *
+ * x86 code says:
+ * If the user really wants these, then they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+ return is_compat_task();
+}
+#endif /* ifndef __ASSEMBLY__ */
+
+#endif /* __ASM_FTRACE_H */
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c582fa316366..5f750dc96e0f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -24,12 +24,14 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
asm volatile( \
-"1: ldaxr %w1, %2\n" \
+"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w3, %w0, %2\n" \
" cbnz %w3, 1b\n" \
+" dmb ish\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
+" .align 2\n" \
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection\n" \
@@ -39,7 +41,7 @@
" .popsection\n" \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "memory")
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -110,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
" cbnz %w3, 3f\n"
"2: stlxr %w3, %w5, %2\n"
" cbnz %w3, 1b\n"
+" dmb ish\n"
"3:\n"
" .pushsection .fixup,\"ax\"\n"
"4: mov %w0, %w6\n"
@@ -126,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .popsection\n"
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
- : "cc", "memory");
+ : "memory");
*uval = val;
return ret;
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 990c051e7829..ae4801d77514 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 4
+#define NR_IPI 5
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
new file mode 100644
index 000000000000..5b7ca8ace95f
--- /dev/null
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -0,0 +1,117 @@
+/*
+ * arch/arm64/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2013 Linaro Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_HUGETLB_H
+#define __ASM_HUGETLB_H
+
+#include <asm-generic/hugetlb.h>
+#include <asm/page.h>
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 6d4482fa35bc..024c46183c3c 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,13 @@
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_EVTSTRM (1 << 21)
+
+#define COMPAT_HWCAP2_AES (1 << 0)
+#define COMPAT_HWCAP2_PMULL (1 << 1)
+#define COMPAT_HWCAP2_SHA1 (1 << 2)
+#define COMPAT_HWCAP2_SHA2 (1 << 3)
+#define COMPAT_HWCAP2_CRC32 (1 << 4)
#ifndef __ASSEMBLY__
/*
@@ -37,12 +44,13 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-#define COMPAT_ELF_HWCAP (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
- COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
-extern unsigned int elf_hwcap;
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
+#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2)
+extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
+#endif
+
+extern unsigned long elf_hwcap;
#endif
#endif
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
new file mode 100644
index 000000000000..62e7b8bcd2dc
--- /dev/null
+++ b/arch/arm64/include/asm/insn.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_INSN_H
+#define __ASM_INSN_H
+
+#include <linux/types.h>
+
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE 4
+
+#ifndef __ASSEMBLY__
+
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section C3.1 "A64 instruction index by encoding":
+ * AArch64 main encoding table
+ * Bit position
+ * 28 27 26 25 Encoding Group
+ * 0 0 - - Unallocated
+ * 1 0 0 - Data processing, immediate
+ * 1 0 1 - Branch, exception generation and system instructions
+ * - 1 - 0 Loads and stores
+ * - 1 0 1 Data processing - register
+ * 0 1 1 1 Data processing - SIMD and floating point
+ * 1 1 1 1 Data processing - SIMD and floating point
+ * "-" means "don't care"
+ */
+enum aarch64_insn_encoding_class {
+ AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */
+ AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */
+ AARCH64_INSN_CLS_DP_REG, /* Data processing - register */
+ AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */
+ AARCH64_INSN_CLS_LDST, /* Loads and stores */
+ AARCH64_INSN_CLS_BR_SYS, /* Branch, exception generation and
+ * system instructions */
+};
+
+enum aarch64_insn_hint_op {
+ AARCH64_INSN_HINT_NOP = 0x0 << 5,
+ AARCH64_INSN_HINT_YIELD = 0x1 << 5,
+ AARCH64_INSN_HINT_WFE = 0x2 << 5,
+ AARCH64_INSN_HINT_WFI = 0x3 << 5,
+ AARCH64_INSN_HINT_SEV = 0x4 << 5,
+ AARCH64_INSN_HINT_SEVL = 0x5 << 5,
+};
+
+enum aarch64_insn_imm_type {
+ AARCH64_INSN_IMM_ADR,
+ AARCH64_INSN_IMM_26,
+ AARCH64_INSN_IMM_19,
+ AARCH64_INSN_IMM_16,
+ AARCH64_INSN_IMM_14,
+ AARCH64_INSN_IMM_12,
+ AARCH64_INSN_IMM_9,
+ AARCH64_INSN_IMM_MAX
+};
+
+enum aarch64_insn_branch_type {
+ AARCH64_INSN_BRANCH_NOLINK,
+ AARCH64_INSN_BRANCH_LINK,
+};
+
+#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
+static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
+{ return (code & (mask)) == (val); } \
+static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
+{ return (val); }
+
+__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000)
+__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
+__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
+__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
+__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
+__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
+
+#undef __AARCH64_INSN_FUNCS
+
+bool aarch64_insn_is_nop(u32 insn);
+
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
+enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm);
+u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
+u32 aarch64_insn_gen_nop(void);
+
+bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
+
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_INSN_H */
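
The encoding table above is exactly what aarch64_get_insn_class() has to implement: bits [28:25] of the instruction select one of sixteen rows. A sketch of a table-driven decoder consistent with that comment (the in-tree implementation lives in arch/arm64/kernel/insn.c; this illustrates the mapping rather than copying it):

/* Index by insn bits [28:25]; rows follow the encoding table above. */
static const enum aarch64_insn_encoding_class insn_class_table[] = {
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 0 0 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 0 1 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 1 0 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 1 1 */
	AARCH64_INSN_CLS_LDST,		/* 0 1 0 0 */
	AARCH64_INSN_CLS_DP_REG,	/* 0 1 0 1 */
	AARCH64_INSN_CLS_LDST,		/* 0 1 1 0 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0 1 1 1 */
	AARCH64_INSN_CLS_DP_IMM,	/* 1 0 0 0 */
	AARCH64_INSN_CLS_DP_IMM,	/* 1 0 0 1 */
	AARCH64_INSN_CLS_BR_SYS,	/* 1 0 1 0 */
	AARCH64_INSN_CLS_BR_SYS,	/* 1 0 1 1 */
	AARCH64_INSN_CLS_LDST,		/* 1 1 0 0 */
	AARCH64_INSN_CLS_DP_REG,	/* 1 1 0 1 */
	AARCH64_INSN_CLS_LDST,		/* 1 1 1 0 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 1 1 1 1 */
};

enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn)
{
	return insn_class_table[(insn >> 25) & 0xf];
}
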
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 2e12258aa7e4..732e3d51c2cb 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -26,6 +26,7 @@
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/pgtable.h>
+#include <asm/early_ioremap.h>
/*
* Generic IO read/write. These perform native-endian accesses.
@@ -118,7 +119,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* I/O port access primitives.
*/
#define IO_SPACE_LIMIT 0xffff
-#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M))
+#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
static inline u8 inb(unsigned long addr)
{
@@ -224,19 +225,13 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
*/
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
extern void __iounmap(volatile void __iomem *addr);
-
-#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
+extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define iounmap __iounmap
-#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
-#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
-
#define ARCH_HAS_IOREMAP_WC
#include <asm-generic/iomap.h>
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 0332fc077f6e..e1f7ecdde11f 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,6 +4,7 @@
#include <asm-generic/irq.h>
extern void (*handle_arch_irq)(struct pt_regs *);
+extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa11943b8502..0ed52c691868 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -87,5 +87,28 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
return flags & PSR_I_BIT;
}
+/*
+ * save and restore debug state
+ */
+#define local_dbg_save(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ asm volatile( \
+ "mrs %0, daif // local_dbg_save\n" \
+ "msr daifset, #8" \
+ : "=r" (flags) : : "memory"); \
+ } while (0)
+
+#define local_dbg_restore(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ asm volatile( \
+ "msr daif, %0 // local_dbg_restore\n" \
+ : : "r" (flags) : "memory"); \
+ } while (0)
+
+#define local_dbg_enable() asm("msr daifclr, #8" : : : "memory")
+#define local_dbg_disable() asm("msr daifset, #8" : : : "memory")
+
#endif
#endif
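
local_dbg_save() reads the current DAIF flags and then sets the D bit with `msr daifset, #8`, masking debug exceptions; local_dbg_restore() writes the saved flags back. A short usage sketch, assuming kernel context (update_debug_regs() is a hypothetical critical section):

	unsigned long flags;

	local_dbg_save(flags);		/* debug exceptions now masked */
	update_debug_regs();		/* hypothetical: rewrite breakpoint regs */
	local_dbg_restore(flags);	/* previous mask state restored */
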
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
new file mode 100644
index 000000000000..076a1c714049
--- /dev/null
+++ b/arch/arm64/include/asm/jump_label.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/include/asm/jump_label.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+#include <linux/types.h>
+#include <asm/insn.h>
+
+#ifdef __KERNEL__
+
+#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
+
+static __always_inline bool arch_static_branch(struct static_key *key)
+{
+ asm goto("1: nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 3\n\t"
+ ".quad 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i"(key) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#endif /* __KERNEL__ */
+
+typedef u64 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASM_JUMP_LABEL_H */
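
arch_static_branch() emits a NOP at the call site and records the (code, target, key) triple in the __jump_table section; flipping the key later patches that NOP into a branch to the l_yes label. A hedged usage sketch with the generic static-key API of this kernel generation:

#include <linux/jump_label.h>

static struct static_key tracing_key = STATIC_KEY_INIT_FALSE;

void fast_path(void)
{
	/* A single NOP on the hot path until the key is enabled. */
	if (static_key_false(&tracing_key))
		trace_slow_path();	/* hypothetical slow path */
}

void enable_tracing(void)
{
	static_key_slow_inc(&tracing_key);	/* patches NOP -> branch */
}
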
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
new file mode 100644
index 000000000000..3c8aafc1082f
--- /dev/null
+++ b/arch/arm64/include/asm/kgdb.h
@@ -0,0 +1,84 @@
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/include/kgdb.h
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KGDB_H
+#define __ARM_KGDB_H
+
+#include <linux/ptrace.h>
+#include <asm/debug-monitors.h>
+
+#ifndef __ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
+}
+
+extern void kgdb_handle_bus_error(void);
+extern int kgdb_fault_expected;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * gdb expects the following register layout.
+ *
+ * General purpose regs:
+ * r0-r30: 64 bit
+ * sp,pc : 64 bit
+ * pstate : 64 bit
+ * Total: 34
+ * FPU regs:
+ * f0-f31: 128 bit
+ * Total: 32
+ * Extra regs
+ * fpsr & fpcr: 32 bit
+ * Total: 2
+ *
+ */
+
+#define _GP_REGS 34
+#define _FP_REGS 32
+#define _EXTRA_REGS 2
+/*
+ * General purpose registers size in bytes.
+ * pstate is only 4 bytes, so 4 bytes may be subtracted for it.
+ */
+#define GP_REG_BYTES (_GP_REGS * 8)
+#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
+
+/*
+ * Size of the I/O buffer for a gdb packet, sized to hold
+ * the contents of all registers.
+ */
+
+#define BUFMAX 2048
+
+/*
+ * Number of bytes required for the gdb_regs buffer:
+ * _GP_REGS are 8 bytes, _FP_REGS are 16 bytes and _EXTRA_REGS are 4 bytes each.
+ * GDB fails to connect for any larger size with the error
+ * "'g' packet reply is too long".
+ */
+
+#define NUMREGBYTES ((_GP_REGS * 8) + (_FP_REGS * 16) + \
+ (_EXTRA_REGS * 4))
+
+#endif /* __ARM_KGDB_H */
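
Working the layout through: NUMREGBYTES = 34 * 8 + 32 * 16 + 2 * 4 = 792 bytes, and gdb's hex encoding doubles that to 1584 characters on the wire, still under the 2048-byte BUFMAX. A compile-time check of that relationship could look like this (BUILD_BUG_ON is the standard kernel helper; adding such a check is an illustration, not part of the patch):

#include <linux/bug.h>
#include <asm/kgdb.h>

static inline void kgdb_buf_size_check(void)
{
	/* Each register byte travels as two hex characters. */
	BUILD_BUG_ON(NUMREGBYTES * 2 >= BUFMAX);
}
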
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 381f556b664e..71cd41644279 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -33,24 +33,31 @@
#define UL(x) _AC(x, UL)
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image.
+ * PAGE_OFFSET - the virtual address of the start of the kernel image, which
+ * sits in the top 2^(VA_BITS - 1) bytes of the address space.
* VA_BITS - the maximum number of bits for virtual addresses.
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 128MB of the kernel text.
*/
-#define PAGE_OFFSET UL(0xffffffc000000000)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define VA_BITS (42)
+#else
+#define VA_BITS (39)
+#endif
+#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
-#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
-#define VA_BITS (39)
+#define FIXADDR_TOP (MODULES_VADDR - SZ_2M - PAGE_SIZE)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 UL(0x100000000)
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
#endif /* CONFIG_COMPAT */
@@ -90,6 +97,12 @@
#define MT_NORMAL_NC 3
#define MT_NORMAL 4
+/*
+ * Memory types for Stage-2 translation
+ */
+#define MT_S2_NORMAL 0xf
+#define MT_S2_DEVICE_nGnRE 0x1
+
#ifndef __ASSEMBLY__
extern phys_addr_t memstart_addr;
@@ -127,6 +140,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys(x))
/*
* virt_to_page(k) convert a _valid_ virtual address to struct page *
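
Plugging the two configurations into the new PAGE_OFFSET formula: with 4KB pages VA_BITS is 39 and ~0UL << 38 gives 0xffffffc000000000 (matching the constant this hunk removes), while with 64KB pages VA_BITS is 42 and ~0UL << 41 gives 0xfffffe0000000000. A quick host-side check of the arithmetic (plain C; assumes a 64-bit unsigned long):

#include <stdio.h>

int main(void)
{
	unsigned long va_bits[] = { 39, 42 };

	for (int i = 0; i < 2; i++) {
		unsigned long page_offset = ~0UL << (va_bits[i] - 1);
		printf("VA_BITS=%lu -> PAGE_OFFSET=0x%016lx\n",
		       va_bits[i], page_offset);
	}
	return 0;
}
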
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 2494fc01896a..c2f006c48bdb 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -22,10 +22,16 @@ typedef struct {
void *vdso;
} mm_context_t;
+#define INIT_MM_CONTEXT(name) \
+ .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+
#define ASID(mm) ((mm)->context.id & 0xffff)
extern void paging_init(void);
extern void setup_mm_for_reboot(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
+extern void init_mem_pgprot(void);
+/* create an identity mapping for memory (or io if map_io is true) */
+extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index e2bc385adb6b..a9eee33dfa62 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,12 +151,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
unsigned int cpu = smp_processor_id();
-#ifdef CONFIG_SMP
- /* check for possible thread migration */
- if (!cpumask_empty(mm_cpumask(next)) &&
- !cpumask_test_cpu(cpu, mm_cpumask(next)))
- __flush_icache_all();
-#endif
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
check_and_switch_context(next, tsk);
}
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
new file mode 100644
index 000000000000..13ce4cc18e26
--- /dev/null
+++ b/arch/arm64/include/asm/neon.h
@@ -0,0 +1,18 @@
+/*
+ * linux/arch/arm64/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+
+#define cpu_has_neon() (1)
+
+#define kernel_neon_begin() kernel_neon_begin_partial(32)
+
+void kernel_neon_begin_partial(u32 num_regs);
+void kernel_neon_end(void);
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 46bf66628b6a..a6331e6a92b5 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -31,6 +31,15 @@
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1
+/*
+ * The idmap and swapper page tables need some space reserved in the kernel
+ * image. The idmap only requires a pgd and a next level table to (section) map
+ * the kernel, while the swapper also maps the FDT and requires an additional
+ * table to map an early UART. See __create_page_tables for more information.
+ */
+#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
+#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
+
#ifndef __ASSEMBLY__
#ifdef CONFIG_ARM64_64K_PAGES
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
new file mode 100644
index 000000000000..453a179469a3
--- /dev/null
+++ b/arch/arm64/include/asm/percpu.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_PERCPU_H
+#define __ASM_PERCPU_H
+
+#ifdef CONFIG_SMP
+
+static inline void set_my_cpu_offset(unsigned long off)
+{
+ asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+ unsigned long off;
+ register unsigned long *sp asm ("sp");
+
+ /*
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+ asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
+
+ return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+
+#else /* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x) do { } while (0)
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_PERCPU_H */
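
The dummy "Q" (*sp) input in __my_cpu_offset() makes the read appear to depend on the stack, so the compiler may cache the TPIDR_EL1 value across ordinary code yet must re-read it after barrier(), which clobbers memory. The expected pairing during CPU bring-up would be along these lines (the exact call site is an assumption):

	/* Early in each CPU's boot path, before any per-cpu access: */
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
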
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
index 0a8ed3f94e93..2593b490c56a 100644
--- a/arch/arm64/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-2level-hwdef.h
@@ -21,10 +21,10 @@
* 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
* used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
* entry representing 512MB. The user and kernel address spaces are limited to
- * 512GB and therefore we only use 1024 entries in the PGD.
+ * 4TB in the 64KB page configuration.
*/
#define PTRS_PER_PTE 8192
-#define PTRS_PER_PGD 1024
+#define PTRS_PER_PGD 8192
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
diff --git a/arch/arm64/include/asm/pgtable-2level-types.h b/arch/arm64/include/asm/pgtable-2level-types.h
index 3c3ca7d361e4..5f101e63dfc1 100644
--- a/arch/arm64/include/asm/pgtable-2level-types.h
+++ b/arch/arm64/include/asm/pgtable-2level-types.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PGTABLE_2LEVEL_TYPES_H
#define __ASM_PGTABLE_2LEVEL_TYPES_H
+#include <asm/types.h>
+
typedef u64 pteval_t;
typedef u64 pgdval_t;
typedef pgdval_t pmdval_t;
diff --git a/arch/arm64/include/asm/pgtable-3level-types.h b/arch/arm64/include/asm/pgtable-3level-types.h
index 4489615f14a9..4e94424938a4 100644
--- a/arch/arm64/include/asm/pgtable-3level-types.h
+++ b/arch/arm64/include/asm/pgtable-3level-types.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PGTABLE_3LEVEL_TYPES_H
#define __ASM_PGTABLE_3LEVEL_TYPES_H
+#include <asm/types.h>
+
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pgdval_t;
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 75fd13d289b9..2e9d83673ef6 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -25,16 +25,27 @@
/*
* Hardware page table definitions.
*
+ * Level 1 descriptor (PUD).
+ */
+
+#define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1)
+
+/*
* Level 2 descriptor (PMD).
*/
#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
/*
* Section
*/
+#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
+#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
@@ -53,6 +64,7 @@
#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
#define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
@@ -68,6 +80,24 @@
#define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2)
/*
+ * 2nd stage PTE definitions
+ */
+#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
+#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+
+/*
+ * Memory Attribute override for Stage-2 (MemAttr[3:0])
+ */
+#define PTE_S2_MEMATTR(t) (_AT(pteval_t, (t)) << 2)
+#define PTE_S2_MEMATTR_MASK (_AT(pteval_t, 0xf) << 2)
+
+/*
+ * EL2/HYP PTE/PMD definitions
+ */
+#define PMD_HYP PMD_SECT_USER
+#define PTE_HYP PTE_USER
+
+/*
* 40-bit physical address supported.
*/
#define PHYS_MASK_SHIFT (40)
@@ -92,5 +122,6 @@
#define TCR_TG1_64K (UL(1) << 30)
#define TCR_IPS_40BIT (UL(2) << 32)
#define TCR_ASID16 (UL(1) << 36)
+#define TCR_TBI0 (UL(1) << 37)
#endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e333a243bfcc..fb4b26509276 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,15 +25,16 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */
#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+#define PTE_WRITE (_AT(pteval_t, 1) << 57)
+#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*/
-#define VMALLOC_START UL(0xffffff8000000000)
+#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS)
#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
@@ -51,60 +52,59 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#endif
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
-/*
- * The pgprot_* and protection_map entries will be fixed up at runtime to
- * include the cachable and bufferable bits based on memory policy, as well as
- * any architecture dependent bits like global/ASID and SMP shared mapping
- * bits.
- */
-#define _PAGE_DEFAULT PTE_TYPE_PAGE | PTE_AF
-
-extern pgprot_t pgprot_default;
-
-#define __pgprot_modify(prot,mask,bits) \
- __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+#ifdef CONFIG_SMP
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#else
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF)
+#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
+#endif
-#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b)
-
-#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
-#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
-
-#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
-#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-
-#endif /* __ASSEMBLY__ */
-
-#define __P000 __PAGE_NONE
-#define __P001 __PAGE_READONLY
-#define __P010 __PAGE_COPY
-#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY_EXEC
-#define __P101 __PAGE_READONLY_EXEC
-#define __P110 __PAGE_COPY_EXEC
-#define __P111 __PAGE_COPY_EXEC
-
-#define __S000 __PAGE_NONE
-#define __S001 __PAGE_READONLY
-#define __S010 __PAGE_SHARED
-#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY_EXEC
-#define __S101 __PAGE_READONLY_EXEC
-#define __S110 __PAGE_SHARED_EXEC
-#define __S111 __PAGE_SHARED_EXEC
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+
+#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+
+#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+
+#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+
+#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP)
+#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+
+#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
+
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
-#ifndef __ASSEMBLY__
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -119,7 +119,7 @@ extern struct page *empty_zero_page;
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
+#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
@@ -129,30 +129,72 @@ extern struct page *empty_zero_page;
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
-#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & PTE_AF)
-#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
+#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+#define pte_valid_not_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~PTE_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= PTE_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~PTE_DIRTY;
+ return pte;
+}
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= PTE_DIRTY;
+ return pte;
+}
-PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite, &= ~PTE_RDONLY);
-PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
-PTE_BIT_FUNC(mkold, &= ~PTE_AF);
-PTE_BIT_FUNC(mkyoung, |= PTE_AF);
-PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~PTE_AF;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= PTE_AF;
+ return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte_val(pte) |= PTE_SPECIAL;
+ return pte;
+}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
*ptep = pte;
+
+ /*
+ * Only needed if the new pte is valid and kernel-mapped; otherwise TLB
+ * maintenance or update_mmu_cache() provide the necessary barriers.
+ */
+ if (pte_valid_not_user(pte)) {
+ dsb(ishst);
+ isb();
+ }
}
extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
@@ -161,10 +203,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
if (pte_valid_user(pte)) {
- if (pte_exec(pte))
+ if (!pte_special(pte) && pte_exec(pte))
__sync_icache_dcache(pte, addr);
- if (!pte_dirty(pte))
- pte = pte_wrprotect(pte);
+ if (pte_dirty(pte) && pte_write(pte))
+ pte_val(pte) &= ~PTE_RDONLY;
+ else
+ pte_val(pte) |= PTE_RDONLY;
}
set_pte(ptep, pte);
@@ -173,20 +217,78 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
/*
* Huge pte definitions.
*/
-#define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
-#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
+#define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+/*
+ * Hugetlb definitions.
+ */
+#define HUGE_MAX_HSTATE 2
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define __HAVE_ARCH_PTE_SPECIAL
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+/*
+ * THP definitions.
+ */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+#define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd))
+#endif
+
+#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
+#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+
+#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+
+static inline int has_transparent_hugepage(void)
+{
+ return 1;
+}
+
+#define __pgprot_modify(prot,mask,bits) \
+ __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_dmacoherent(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -197,10 +299,17 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_TABLE)
+#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_SECT)
+
+
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
- dsb();
+ dsb(ishst);
+ isb();
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -230,7 +339,8 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
static inline void set_pud(pud_t *pudp, pud_t pud)
{
*pudp = pud;
- dsb();
+ dsb(ishst);
+ isb();
}
static inline void pud_clear(pud_t *pudp)
@@ -263,36 +373,40 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#endif
/* Find an entry in the third-level page table.. */
-#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_VALID;
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
+}
+
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
-
/*
* Encode and decode a swap entry:
* bits 0-1: present (must be zero)
* bit 2: PTE_FILE
* bits 3-8: swap type
- * bits 9-63: swap offset
+ * bits 9-57: swap offset
*/
#define __SWP_TYPE_SHIFT 3
#define __SWP_TYPE_BITS 6
+#define __SWP_OFFSET_BITS 49
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
-#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
@@ -300,7 +414,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Ensure that there are not more swap files than can be encoded in the kernel
- * the PTEs.
+ * PTEs.
*/
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
@@ -308,13 +422,13 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
* Encode and decode a file entry:
* bits 0-1: present (must be zero)
* bit 2: PTE_FILE
- * bits 3-63: file offset / PAGE_SIZE
+ * bits 3-57: file offset / PAGE_SIZE
*/
#define pte_file(pte) (pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 3)
#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
-#define PTE_FILE_MAX_BITS 61
+#define PTE_FILE_MAX_BITS 55
extern int kern_addr_valid(unsigned long addr);
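
The swap encoding described above packs the type into bits 3-8 and the offset into bits 9-57 of a non-present PTE, keeping bits 0-1 clear so the entry can never be mistaken for a present mapping. A user-space round-trip check of the bit arithmetic (local mirrors of the kernel macros; illustrative only):

#include <assert.h>
#include <stdio.h>

#define SWP_TYPE_SHIFT		3
#define SWP_TYPE_BITS		6
#define SWP_OFFSET_BITS		49
#define SWP_TYPE_MASK		((1UL << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT	(SWP_TYPE_BITS + SWP_TYPE_SHIFT)
#define SWP_OFFSET_MASK		((1UL << SWP_OFFSET_BITS) - 1)

int main(void)
{
	unsigned long type = 5, offset = 0x123456789abUL;
	unsigned long entry = (type << SWP_TYPE_SHIFT) |
			      (offset << SWP_OFFSET_SHIFT);

	assert((entry & 3) == 0);	/* never looks present */
	assert(((entry >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == type);
	assert(((entry >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == offset);
	printf("swp entry = 0x%016lx\n", entry);
	return 0;
}
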
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 7cdf466fd0c5..0c657bb54597 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -26,11 +26,14 @@
#include <asm/page.h>
struct mm_struct;
+struct cpu_suspend_ctx;
extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
+extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ab239b2c456f..001b764fffa1 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -107,6 +107,11 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = COMPAT_PSR_MODE_USR;
if (pc & 1)
regs->pstate |= COMPAT_PSR_T_BIT;
+
+#ifdef __AARCH64EB__
+ regs->pstate |= COMPAT_PSR_E_BIT;
+#endif
+
regs->compat_sp = sp;
}
#endif
@@ -131,8 +136,8 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev,
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
-#define KSTK_EIP(tsk) task_pt_regs(tsk)->pc
-#define KSTK_ESP(tsk) task_pt_regs(tsk)->sp
+#define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk))
/*
* Prefetching support
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index 0604237ecd99..9a4b663670ff 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,25 +14,10 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+struct cpuidle_driver;
+void psci_init(void);
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
-
-struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
- int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
- int (*migrate)(unsigned long cpuid);
-};
-
-extern struct psci_operations psci_ops;
-
-int psci_init(void);
+int __init psci_dt_register_idle_states(struct cpuidle_driver *,
+ struct device_node *[]);
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index f38a8aa58d4c..5db016e6d065 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -42,6 +42,7 @@
#define COMPAT_PSR_MODE_UND 0x0000001b
#define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020
+#define COMPAT_PSR_E_BIT 0x00000200
#define COMPAT_PSR_F_BIT 0x00000040
#define COMPAT_PSR_I_BIT 0x00000080
#define COMPAT_PSR_A_BIT 0x00000100
@@ -67,6 +68,7 @@
/* Architecturally defined mapping between AArch32 and AArch64 registers */
#define compat_usr(x) regs[(x)]
+#define compat_fp regs[11]
#define compat_sp regs[13]
#define compat_lr regs[14]
#define compat_sp_hyp regs[15]
@@ -131,7 +133,7 @@ struct pt_regs {
(!((regs)->pstate & PSR_F_BIT))
#define user_stack_pointer(regs) \
- ((regs)->sp)
+ (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
@@ -168,7 +170,7 @@ static inline int valid_user_regs(struct user_pt_regs *regs)
return 0;
}
-#define instruction_pointer(regs) (regs)->pc
+#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
@@ -176,7 +178,5 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc(regs) instruction_pointer(regs)
#endif
-extern int aarch32_break_trap(struct pt_regs *regs);
-
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/sigcontext.h b/arch/arm64/include/asm/sigcontext.h
deleted file mode 100644
index dca1094acc74..000000000000
--- a/arch/arm64/include/asm/sigcontext.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_SIGCONTEXT_H
-#define __ASM_SIGCONTEXT_H
-
-#include <uapi/asm/sigcontext.h>
-
-/*
- * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
- * user space as it will change with the addition of new context. User space
- * should check the magic/size information.
- */
-struct aux_context {
- struct fpsimd_context fpsimd;
- /* additional context to be added before "end" */
- struct _aarch64_ctx end;
-};
-#endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 4b8023c5d146..a498f2cd2c2a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,21 +60,14 @@ struct secondary_data {
void *stack;
};
extern struct secondary_data secondary_data;
-extern void secondary_holding_pen(void);
-extern volatile unsigned long secondary_holding_pen_release;
+extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-struct device_node;
+extern int __cpu_disable(void);
-struct smp_enable_ops {
- const char *name;
- int (*init_cpu)(struct device_node *, int);
- int (*prepare_cpu)(int);
-};
-
-extern const struct smp_enable_ops smp_spin_table_ops;
-extern const struct smp_enable_ops smp_psci_ops;
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index ed43a0d2b1b2..59e282311b58 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -21,6 +21,19 @@
#include <asm/types.h>
+struct mpidr_hash {
+ u64 mask;
+ u32 shift_aff[4];
+ u32 bits;
+};
+
+extern struct mpidr_hash mpidr_hash;
+
+static inline u32 mpidr_hash_size(void)
+{
+ return 1 << mpidr_hash.bits;
+}
+
/*
* Logical CPU mapping.
*/
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 7065e920149d..c45b7b1b7197 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -22,17 +22,10 @@
/*
* Spinlock implementation.
*
- * The old value is read exclusively and the new one, if unlocked, is written
- * exclusively. In case of failure, the loop is restarted.
- *
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
- *
- * Unlocked value: 0
- * Locked value: 1
*/
-#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
@@ -41,31 +34,51 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp;
+ arch_spinlock_t lockval, newval;
asm volatile(
- " sevl\n"
- "1: wfe\n"
- "2: ldaxr %w0, %1\n"
- " cbnz %w0, 1b\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 2b\n"
- : "=&r" (tmp), "+Q" (lock->lock)
- : "r" (1)
- : "cc", "memory");
+ /* Atomically increment the next ticket. */
+" prfm pstl1strm, %3\n"
+"1: ldaxr %w0, %3\n"
+" add %w1, %w0, %w5\n"
+" stxr %w2, %w1, %3\n"
+" cbnz %w2, 1b\n"
+ /* Did we get the lock? */
+" eor %w1, %w0, %w0, ror #16\n"
+" cbz %w1, 3f\n"
+ /*
+ * No: spin on the owner. Send a local event to avoid missing an
+ * unlock before the exclusive load.
+ */
+" sevl\n"
+"2: wfe\n"
+" ldaxrh %w2, %4\n"
+" eor %w1, %w2, %w0, lsr #16\n"
+" cbnz %w1, 2b\n"
+ /* We got the lock. Critical section starts here. */
+"3:"
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
+ : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
+ : "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int tmp;
+ arch_spinlock_t lockval;
asm volatile(
- " ldaxr %w0, %1\n"
- " cbnz %w0, 1f\n"
- " stxr %w0, %w2, %1\n"
- "1:\n"
- : "=&r" (tmp), "+Q" (lock->lock)
- : "r" (1)
- : "cc", "memory");
+" prfm pstl1strm, %2\n"
+"1: ldaxr %w0, %2\n"
+" eor %w1, %w0, %w0, ror #16\n"
+" cbnz %w1, 2f\n"
+" add %w0, %w0, %3\n"
+" stxr %w1, %w0, %2\n"
+" cbnz %w1, 1b\n"
+"2:"
+ : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+ : "I" (1 << TICKET_SHIFT)
+ : "memory");
return !tmp;
}
@@ -73,9 +86,28 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
- " stlr %w1, %0\n"
- : "=Q" (lock->lock) : "r" (0) : "memory");
+" stlrh %w1, %0\n"
+ : "=Q" (lock->owner)
+ : "r" (lock->owner + 1)
+ : "memory");
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.owner == lock.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+ return (lockval.next - lockval.owner) > 1;
}
+#define arch_spin_is_contended arch_spin_is_contended
/*
* Write lock implementation.
@@ -100,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
" cbnz %w0, 2b\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -114,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
return !tmp;
}
@@ -155,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
" cbnz %w1, 2b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -169,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
" cbnz %w1, 1b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -184,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
return !tmp2;
}
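
The rewritten lock is a ticket lock: arch_spin_lock() atomically takes the next ticket, then spins until owner reaches it; arch_spin_unlock() releases by bumping owner with a store-release, so handoff is strictly FIFO. A portable C11 sketch of the same algorithm (illustrative; the asm above additionally uses prfm/sevl/wfe so waiters sleep rather than spin hot):

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
	_Atomic uint16_t owner;	/* ticket currently being served */
	_Atomic uint16_t next;	/* next ticket to hand out */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	/* The ldaxr/add/stxr loop: take a ticket. */
	uint16_t ticket = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);
	/* The wfe loop: wait until our ticket is served. */
	while (atomic_load_explicit(&l->owner,
				    memory_order_acquire) != ticket)
		;
}

static void ticket_lock_release(struct ticket_lock *l)
{
	uint16_t served = atomic_load_explicit(&l->owner,
					       memory_order_relaxed);
	/* The stlrh store-release: hand the lock to the next ticket. */
	atomic_store_explicit(&l->owner, served + 1, memory_order_release);
}
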
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 9a494346efed..87692750ed94 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,14 +20,14 @@
# error "please don't include this file directly"
#endif
-/* We only require natural alignment for exclusive accesses. */
-#define __lock_aligned
+#define TICKET_SHIFT 16
typedef struct {
- volatile unsigned int lock;
-} arch_spinlock_t;
+ u16 owner;
+ u16 next;
+} __aligned(4) arch_spinlock_t;
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
typedef struct {
volatile unsigned int lock;
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
new file mode 100644
index 000000000000..e9c149c042e0
--- /dev/null
+++ b/arch/arm64/include/asm/suspend.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_SUSPEND_H
+#define __ASM_SUSPEND_H
+
+#define NR_CTX_REGS 11
+
+/*
+ * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
+ * the stack, which must be 16-byte aligned on v8
+ */
+struct cpu_suspend_ctx {
+ /*
+ * This struct must be kept in sync with
+ * cpu_do_{suspend/resume} in mm/proc.S
+ */
+ u64 ctx_regs[NR_CTX_REGS];
+ u64 sp;
+} __aligned(16);
+
+struct sleep_save_sp {
+ phys_addr_t *save_ptr_stash;
+ phys_addr_t save_ptr_stash_phys;
+};
+
+extern void cpu_resume(void);
+extern int cpu_suspend(unsigned long);
+
+#endif
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 89c047f9a971..383771eb0b87 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -18,6 +18,7 @@
#include <linux/err.h>
+extern const void *sys_call_table[];
static inline int syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
@@ -59,6 +60,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -82,6 +86,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
const unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 3659e460071d..215f935619e7 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -24,10 +24,10 @@
#include <linux/compiler.h>
#ifndef CONFIG_ARM64_64K_PAGES
-#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE_ORDER 2
#endif
-#define THREAD_SIZE 8192
+#define THREAD_SIZE 16384
#define THREAD_START_SP (THREAD_SIZE - 16)
#ifndef __ASSEMBLY__
@@ -97,6 +97,9 @@ static inline struct thread_info *current_thread_info(void)
/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
+ * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
+ * TIF_SYSCALL_AUDIT - syscall auditing
+ * TIF_SECCOMP - syscall secure computing
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
@@ -106,7 +109,11 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_SYSCALL_TRACE 8
+#define TIF_SYSCALL_AUDIT 9
+#define TIF_SYSCALL_TRACEPOINT 10
+#define TIF_SECCOMP 11
#define TIF_POLLING_NRFLAG 16
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19
@@ -118,10 +125,18 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME)
+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
#endif /* __KERNEL__ */
#endif /* __ASM_THREAD_INFO_H */
diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h
index b24a31a7e2c9..81a076eb37fa 100644
--- a/arch/arm64/include/asm/timex.h
+++ b/arch/arm64/include/asm/timex.h
@@ -16,14 +16,14 @@
#ifndef __ASM_TIMEX_H
#define __ASM_TIMEX_H
+#include <asm/arch_timer.h>
+
/*
* Use the current timer as a cycle counter since this is what we use for
* the delay loop.
*/
-#define get_cycles() ({ cycles_t c; read_current_timer(&c); c; })
+#define get_cycles() arch_counter_get_cntvct()
#include <asm-generic/timex.h>
-#define ARCH_HAS_READ_CURRENT_TIMER
-
#endif
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 654f0968030b..717031a762c2 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
+ unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
@@ -187,4 +190,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
#define tlb_migrate_finish(mm) do { } while (0)
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+ tlb_add_flush(tlb, addr);
+}
+
#endif
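
With the new signature, fullmm is derived from the range itself: `start | (end + 1)` is zero only when start == 0 and end == ~0UL, i.e. the caller is tearing down the entire address space. The two typical call patterns, as a sketch:

	/* Full address-space teardown (exit_mmap style): tlb->fullmm == 1 */
	tlb_gather_mmu(&tlb, mm, 0, -1);

	/* Unmapping a single VMA: tlb->fullmm == 0 */
	tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
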
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 122d6320f745..3796ea6bb734 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -72,9 +72,9 @@ extern struct cpu_tlb_fns cpu_tlb;
*/
static inline void flush_tlb_all(void)
{
- dsb();
+ dsb(ishst);
asm("tlbi vmalle1is");
- dsb();
+ dsb(ish);
isb();
}
@@ -82,9 +82,9 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
{
unsigned long asid = (unsigned long)ASID(mm) << 48;
- dsb();
+ dsb(ishst);
asm("tlbi aside1is, %0" : : "r" (asid));
- dsb();
+ dsb(ish);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -93,16 +93,37 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr = uaddr >> 12 |
((unsigned long)ASID(vma->vm_mm) << 48);
- dsb();
+ dsb(ishst);
asm("tlbi vae1is, %0" : : "r" (addr));
- dsb();
+ dsb(ish);
}
-/*
- * Convert calls to our calling convention.
- */
-#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
-#define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
+ unsigned long addr;
+ start = asid | (start >> 12);
+ end = asid | (end >> 12);
+
+ dsb(ishst);
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+ asm("tlbi vae1is, %0" : : "r"(addr));
+ dsb(ish);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+ start >>= 12;
+ end >>= 12;
+
+ dsb(ishst);
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+ asm("tlbi vaae1is, %0" : : "r"(addr));
+ dsb(ish);
+ isb();
+}
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
@@ -111,12 +132,14 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
/*
- * set_pte() does not have a DSB, so make sure that the page table
- * write is visible.
+ * set_pte() does not have a DSB for user mappings, so make sure that
+ * the page table write is visible.
*/
- dsb();
+ dsb(ishst);
}
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
#endif
#endif
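
The TLBI VAE1IS operand packs VA[55:12] into its low bits and the ASID into bits [63:48], which is why both loops shift addresses right by 12 and advance by 1 << (PAGE_SHIFT - 12): one step per page whether pages are 4KB (stride 1) or 64KB (stride 16). A worked example under those assumptions (kernel context; the addresses and ASID are chosen for illustration):

/* Flush user range [0x400000, 0x404000) for ASID 42 with 4KB pages. */
static inline void example_range_flush(void)
{
	unsigned long asid  = 42UL << 48;
	unsigned long start = asid | (0x400000UL >> 12);	/* ...0400 */
	unsigned long end   = asid | (0x404000UL >> 12);	/* ...0404 */
	unsigned long addr;

	dsb(ishst);				/* make prior PTE writes visible */
	for (addr = start; addr < end; addr++)	/* four pages, stride 1 */
		asm("tlbi vae1is, %0" : : "r" (addr));
	dsb(ish);				/* wait for broadcast completion */
}
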
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
new file mode 100644
index 000000000000..e0171b393a14
--- /dev/null
+++ b/arch/arm64/include/asm/topology.h
@@ -0,0 +1,70 @@
+#ifndef __ASM_TOPOLOGY_H
+#define __ASM_TOPOLOGY_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/cpumask.h>
+
+struct cpu_topology {
+ int thread_id;
+ int core_id;
+ int cluster_id;
+ cpumask_t thread_sibling;
+ cpumask_t core_sibling;
+};
+
+extern struct cpu_topology cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
+#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+
+#define mc_capable() (cpu_topology[0].cluster_id != -1)
+#define smt_capable() (cpu_topology[0].thread_id != -1)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+
+#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
+/* Common values for CPUs */
+#ifndef SD_CPU_INIT
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ \
+ .flags = 0*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+}
+#endif
+#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
+#include <asm-generic/topology.h>
+
+#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 008f8481da65..3bf8f4e99a51 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -83,7 +83,7 @@ static inline void set_fs(mm_segment_t fs)
* Returns 1 if the range is valid, 0 otherwise.
*
* This is equivalent to the following test:
- * (u65)addr + (u65)size < (u65)current->addr_limit
+ * (u65)addr + (u65)size <= current->addr_limit
*
* This needs 65-bit arithmetic.
*/
@@ -91,7 +91,7 @@ static inline void set_fs(mm_segment_t fs)
({ \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
- asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc" \
+ asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
: "=&r" (flag), "=&r" (roksum) \
: "1" (addr), "Ir" (size), \
"r" (current_thread_info()->addr_limit) \
@@ -100,6 +100,7 @@ static inline void set_fs(mm_segment_t fs)
})
#define access_ok(type, addr, size) __range_ok(addr, size)
+#define user_addr_max get_fs
/*
* The "__xxx" versions of the user access functions do not verify the address
@@ -166,9 +167,10 @@ do { \
#define get_user(x, ptr) \
({ \
- might_sleep(); \
- access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ? \
- __get_user((x), (ptr)) : \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
+ __get_user((x), __p) : \
((x) = 0, -EFAULT); \
})
@@ -227,9 +229,10 @@ do { \
#define put_user(x, ptr) \
({ \
- might_sleep(); \
- access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
- __put_user((x), (ptr)) : \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
+ __put_user((x), __p) : \
-EFAULT; \
})
@@ -238,9 +241,6 @@ extern unsigned long __must_check __copy_to_user(void __user *to, const void *fr
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
-
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
@@ -274,24 +274,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n;
}
-static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
- res = __strncpy_from_user(dst, src, count);
- return res;
-}
-
-#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
-static inline long __must_check strnlen_user(const char __user *s, long n)
-{
- unsigned long res = 0;
-
- if (__addr_ok(s))
- res = __strnlen_user(s, n);
-
- return res;
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_UACCESS_H */
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 82ce217e94cf..c335479c2638 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -28,3 +28,5 @@
#endif
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 439827271e3d..215ad4649dd7 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,7 +18,8 @@
#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H
-#define BOOT_CPU_MODE_EL2 (0x0e12b007)
+#define BOOT_CPU_MODE_EL1 (0xe11)
+#define BOOT_CPU_MODE_EL2 (0xe12)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..aab5bf09e9d9
--- /dev/null
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_WORD_AT_A_TIME_H
+#define __ASM_WORD_AT_A_TIME_H
+
+#ifndef __AARCH64EB__
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+ const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return fls64(mask) >> 3;
+}
+
+#define zero_bytemask(mask) (mask)
+
+#else /* __AARCH64EB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
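Taken together, the little-endian helpers above support the usual word-at-a-time scan: has_zero() flags a word containing a NUL byte, prep_zero_mask() is a no-op on this path, and create_zero_mask()/find_zero() locate the first zero byte within the word. A hedged sketch of the canonical consumer loop (assumes an 8-byte-aligned, NUL-terminated string in kernel memory):

	static size_t strlen_word_at_a_time(const char *s)
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		const unsigned long *p = (const unsigned long *)s;
		unsigned long val, bits;
		size_t len = 0;

		for (;;) {
			val = *p++;
			if (has_zero(val, &bits, &constants)) {
				bits = prep_zero_mask(val, bits, &constants);
				bits = create_zero_mask(bits);
				return len + find_zero(bits);
			}
			len += sizeof(unsigned long);
		}
	}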
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, offset;
+
+ /* Load word from unaligned pointer addr */
+ asm(
+ "1: ldr %0, %3\n"
+ "2:\n"
+ " .pushsection .fixup,\"ax\"\n"
+ " .align 2\n"
+ "3: and %1, %2, #0x7\n"
+ " bic %2, %2, #0x7\n"
+ " ldr %0, [%2]\n"
+ " lsl %1, %1, #0x3\n"
+#ifndef __AARCH64EB__
+ " lsr %0, %0, %1\n"
+#else
+ " lsl %0, %0, %1\n"
+#endif
+ " b 2b\n"
+ " .popsection\n"
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .quad 1b, 3b\n"
+ " .popsection"
+ : "=&r" (ret), "=&r" (offset)
+ : "r" (addr), "Q" (*(unsigned long *)addr));
+
+ return ret;
+}
+
+#endif /* __ASM_WORD_AT_A_TIME_H */
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index e4b78bdca19e..942376d37d22 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -9,6 +9,7 @@ header-y += byteorder.h
header-y += fcntl.h
header-y += hwcap.h
header-y += kvm_para.h
header-y += param.h
+header-y += perf_regs.h
header-y += ptrace.h
header-y += setup.h
diff --git a/arch/arm64/include/uapi/asm/byteorder.h b/arch/arm64/include/uapi/asm/byteorder.h
index 2b92046aafc5..dc19e9537f0d 100644
--- a/arch/arm64/include/uapi/asm/byteorder.h
+++ b/arch/arm64/include/uapi/asm/byteorder.h
@@ -16,6 +16,10 @@
#ifndef __ASM_BYTEORDER_H
#define __ASM_BYTEORDER_H
+#ifdef __AARCH64EB__
+#include <linux/byteorder/big_endian.h>
+#else
#include <linux/byteorder/little_endian.h>
+#endif
#endif /* __ASM_BYTEORDER_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index eea497578b87..73cf0f54d57c 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -21,6 +21,11 @@
*/
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
-
+#define HWCAP_EVTSTRM (1 << 2)
+#define HWCAP_AES (1 << 3)
+#define HWCAP_PMULL (1 << 4)
+#define HWCAP_SHA1 (1 << 5)
+#define HWCAP_SHA2 (1 << 6)
+#define HWCAP_CRC32 (1 << 7)
#endif /* _UAPI__ASM_HWCAP_H */
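These bits surface in userspace through the ELF auxiliary vector (AT_HWCAP); a minimal feature probe, assuming installed kernel headers and the glibc getauxval() interface:

	#include <stdio.h>
	#include <sys/auxv.h>
	#include <asm/hwcap.h>		/* the HWCAP_* definitions above */

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		if (hwcap & HWCAP_AES)
			printf("AES instructions available\n");
		if (hwcap & HWCAP_CRC32)
			printf("CRC32 instructions available\n");
		return 0;
	}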
diff --git a/arch/arm64/include/uapi/asm/perf_regs.h b/arch/arm64/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..172b8317ee49
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/perf_regs.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_ARM64_PERF_REGS_H
+#define _ASM_ARM64_PERF_REGS_H
+
+enum perf_event_arm_regs {
+ PERF_REG_ARM64_X0,
+ PERF_REG_ARM64_X1,
+ PERF_REG_ARM64_X2,
+ PERF_REG_ARM64_X3,
+ PERF_REG_ARM64_X4,
+ PERF_REG_ARM64_X5,
+ PERF_REG_ARM64_X6,
+ PERF_REG_ARM64_X7,
+ PERF_REG_ARM64_X8,
+ PERF_REG_ARM64_X9,
+ PERF_REG_ARM64_X10,
+ PERF_REG_ARM64_X11,
+ PERF_REG_ARM64_X12,
+ PERF_REG_ARM64_X13,
+ PERF_REG_ARM64_X14,
+ PERF_REG_ARM64_X15,
+ PERF_REG_ARM64_X16,
+ PERF_REG_ARM64_X17,
+ PERF_REG_ARM64_X18,
+ PERF_REG_ARM64_X19,
+ PERF_REG_ARM64_X20,
+ PERF_REG_ARM64_X21,
+ PERF_REG_ARM64_X22,
+ PERF_REG_ARM64_X23,
+ PERF_REG_ARM64_X24,
+ PERF_REG_ARM64_X25,
+ PERF_REG_ARM64_X26,
+ PERF_REG_ARM64_X27,
+ PERF_REG_ARM64_X28,
+ PERF_REG_ARM64_X29,
+ PERF_REG_ARM64_LR,
+ PERF_REG_ARM64_SP,
+ PERF_REG_ARM64_PC,
+ PERF_REG_ARM64_MAX,
+};
+#endif /* _ASM_ARM64_PERF_REGS_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7b4b564961d4..ac389d32ccde 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -4,20 +4,34 @@
CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) \
+ -I$(src)/../../../scripts/dtc/libfdt
+
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_insn.o = -pg
+CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o
+ hyp-stub.o psci.o cpu_ops.o insn.o return_address.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
+arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o
+arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
+arm64-obj-$(CONFIG_SMP) += topology.o
+arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
-arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
+arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+arm64-obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
+arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
+arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+arm64-obj-$(CONFIG_KGDB) += kgdb.o
+arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 41b4f626d554..7f0512feaa13 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -29,16 +29,14 @@
#include <asm/checksum.h>
- /* user mem (segment) */
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
+ /* user mem (segment) */
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
@@ -58,3 +56,7 @@ EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(test_and_clear_bit);
EXPORT_SYMBOL(change_bit);
EXPORT_SYMBOL(test_and_change_bit);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a2a4d810bea3..c481a119b98a 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -24,6 +24,8 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/cputable.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
#include <asm/vdso_datapage.h>
#include <linux/kbuild.h>
@@ -104,5 +106,47 @@ int main(void)
BLANK();
DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
+ BLANK();
+#ifdef CONFIG_KVM_ARM_HOST
+ DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
+ DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
+ DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
+ DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1));
+ DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1));
+ DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr));
+ DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs));
+ DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
+ DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
+ DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
+ DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
+ DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
+ DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
+ DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
+ DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
+ DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
+ DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
+ DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
+ DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
+ DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
+ DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
+ DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
+ DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
+ DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+ DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
+ DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
+ DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+ DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
+ DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
+ DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
+ DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
+ DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp));
+ DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
+ DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
+#endif
return 0;
}
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
new file mode 100644
index 000000000000..04efea8fe4bc
--- /dev/null
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -0,0 +1,99 @@
+/*
+ * CPU kernel entry/exit control
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/string.h>
+
+extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations cpu_psci_ops;
+
+const struct cpu_operations *cpu_ops[NR_CPUS];
+
+static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_SMP
+ &smp_spin_table_ops,
+ &cpu_psci_ops,
+#endif
+ NULL,
+};
+
+static const struct cpu_operations * __init cpu_get_ops(const char *name)
+{
+ const struct cpu_operations **ops = supported_cpu_ops;
+
+ while (*ops) {
+ if (!strcmp(name, (*ops)->name))
+ return *ops;
+
+ ops++;
+ }
+
+ return NULL;
+}
+
+/*
+ * Read a cpu's enable method from the device tree and record it in cpu_ops.
+ */
+int __init cpu_read_ops(struct device_node *dn, int cpu)
+{
+ const char *enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method) {
+ /*
+ * The boot CPU may not have an enable method (e.g. when
+ * spin-table is used for secondaries). Don't warn spuriously.
+ */
+ if (cpu != 0)
+ pr_err("%s: missing enable-method property\n",
+ dn->full_name);
+ return -ENOENT;
+ }
+
+ cpu_ops[cpu] = cpu_get_ops(enable_method);
+ if (!cpu_ops[cpu]) {
+ pr_warn("%s: unsupported enable-method property: %s\n",
+ dn->full_name, enable_method);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+void __init cpu_read_bootcpu_ops(void)
+{
+ struct device_node *dn = NULL;
+ u64 mpidr = cpu_logical_map(0);
+
+ while ((dn = of_find_node_by_type(dn, "cpu"))) {
+ u64 hwid;
+ const __be32 *prop;
+
+ prop = of_get_property(dn, "reg", NULL);
+ if (!prop)
+ continue;
+
+ hwid = of_read_number(prop, of_n_addr_cells(dn));
+ if (hwid == mpidr) {
+ cpu_read_ops(dn, 0);
+ of_node_put(dn);
+ return;
+ }
+ }
+}
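Once cpu_read_ops() has populated cpu_ops[], bringup code dereferences the selected method's callbacks. A hedged sketch of a caller; the cpu_boot() callback is assumed from the arm64 struct cpu_operations, which this hunk does not show:

	/* Sketch: boot one secondary CPU via its recorded enable method. */
	static int boot_secondary_sketch(unsigned int cpu)
	{
		if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_boot)
			return -EOPNOTSUPP;

		return cpu_ops[cpu]->cpu_boot(cpu);	/* spin-table or PSCI */
	}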
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
index 63cfc4a43f4e..fd3993cb060f 100644
--- a/arch/arm64/kernel/cputable.c
+++ b/arch/arm64/kernel/cputable.c
@@ -22,7 +22,7 @@
extern unsigned long __cpu_setup(void);
-struct cpu_info __initdata cpu_table[] = {
+struct cpu_info cpu_table[] = {
{
.cpu_id_val = 0x000f0000,
.cpu_id_mask = 0x000f0000,
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f4726dc054b3..7f66fe150265 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/stat.h>
+#include <linux/uaccess.h>
#include <asm/debug-monitors.h>
#include <asm/local.h>
@@ -137,7 +138,6 @@ void disable_debug_monitors(enum debug_el el)
static void clear_os_lock(void *unused)
{
asm volatile("msr oslar_el1, %0" : : "r" (0));
- isb();
}
static int __cpuinit os_lock_notify(struct notifier_block *self,
@@ -156,8 +156,9 @@ static struct notifier_block __cpuinitdata os_lock_nb = {
static int __cpuinit debug_monitors_init(void)
{
/* Clear the OS lock. */
- smp_call_function(clear_os_lock, NULL, 1);
- clear_os_lock(NULL);
+ on_each_cpu(clear_os_lock, NULL, 1);
+ isb();
+ local_dbg_enable();
/* Register hotplug handler. */
register_cpu_notifier(&os_lock_nb);
@@ -187,6 +188,48 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
regs->pstate = spsr;
}
+/* EL1 Single Step Handler hooks */
+static LIST_HEAD(step_hook);
+static DEFINE_RWLOCK(step_hook_lock);
+
+void register_step_hook(struct step_hook *hook)
+{
+ write_lock(&step_hook_lock);
+ list_add(&hook->node, &step_hook);
+ write_unlock(&step_hook_lock);
+}
+
+void unregister_step_hook(struct step_hook *hook)
+{
+ write_lock(&step_hook_lock);
+ list_del(&hook->node);
+ write_unlock(&step_hook_lock);
+}
+
+/*
+ * Call the registered single-step handlers.
+ * There is no syndrome information to identify the right handler, so we
+ * call each registered handler in turn until one of them returns
+ * DBG_HOOK_HANDLED (zero).
+ */
+static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct step_hook *hook;
+ int retval = DBG_HOOK_ERROR;
+
+ read_lock(&step_hook_lock);
+
+ list_for_each_entry(hook, &step_hook, node) {
+ retval = hook->fn(regs, esr);
+ if (retval == DBG_HOOK_HANDLED)
+ break;
+ }
+
+ read_unlock(&step_hook_lock);
+
+ return retval;
+}
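A client such as KGDB or kprobes registers a function that either claims or declines each step exception; a hedged sketch (my_debugger_wants() is hypothetical):

	static int my_step_fn(struct pt_regs *regs, unsigned int esr)
	{
		if (!my_debugger_wants(regs))	/* hypothetical predicate */
			return DBG_HOOK_ERROR;	/* not ours; try the next hook */

		/* ... consume the single-step event ... */
		return DBG_HOOK_HANDLED;
	}

	static struct step_hook my_step_hook = {
		.fn = my_step_fn,
	};

	/* register_step_hook(&my_step_hook) at init; unregister on teardown */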
+
static int single_step_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -214,7 +257,9 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
*/
user_rewind_single_step(current);
} else {
- /* TODO: route to KGDB */
+ if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
pr_warning("Unexpected kernel single-step exception at EL1\n");
/*
* Re-enable stepping since we know that we will be
@@ -226,13 +271,113 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
return 0;
}
-static int __init single_step_init(void)
+/*
+ * The breakpoint handler is re-entrant: another breakpoint can hit from
+ * within the handler itself, especially with kprobes, so use
+ * reader/writer locks instead of a plain spinlock.
+ */
+static LIST_HEAD(break_hook);
+static DEFINE_RWLOCK(break_hook_lock);
+
+void register_break_hook(struct break_hook *hook)
+{
+ write_lock(&break_hook_lock);
+ list_add(&hook->node, &break_hook);
+ write_unlock(&break_hook_lock);
+}
+
+void unregister_break_hook(struct break_hook *hook)
+{
+ write_lock(&break_hook_lock);
+ list_del(&hook->node);
+ write_unlock(&break_hook_lock);
+}
+
+static int call_break_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct break_hook *hook;
+ int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
+
+ read_lock(&break_hook_lock);
+ list_for_each_entry(hook, &break_hook, node)
+ if ((esr & hook->esr_mask) == hook->esr_val)
+ fn = hook->fn;
+ read_unlock(&break_hook_lock);
+
+ return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
+}
+
+static int brk_handler(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
+ if (!user_mode(regs))
+ return -EFAULT;
+
+ info = (siginfo_t) {
+ .si_signo = SIGTRAP,
+ .si_errno = 0,
+ .si_code = TRAP_BRKPT,
+ .si_addr = (void __user *)instruction_pointer(regs),
+ };
+
+ force_sig_info(SIGTRAP, &info, current);
+ return 0;
+}
+
+int aarch32_break_handler(struct pt_regs *regs)
+{
+ siginfo_t info;
+ unsigned int instr;
+ bool bp = false;
+ void __user *pc = (void __user *)instruction_pointer(regs);
+
+ if (!compat_user_mode(regs))
+ return -EFAULT;
+
+ if (compat_thumb_mode(regs)) {
+ /* get 16-bit Thumb instruction */
+ get_user(instr, (u16 __user *)pc);
+ if (instr == AARCH32_BREAK_THUMB2_LO) {
+ /* get second half of 32-bit Thumb-2 instruction */
+ get_user(instr, (u16 __user *)(pc + 2));
+ bp = instr == AARCH32_BREAK_THUMB2_HI;
+ } else {
+ bp = instr == AARCH32_BREAK_THUMB;
+ }
+ } else {
+ /* 32-bit ARM instruction */
+ get_user(instr, (u32 __user *)pc);
+ bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+ }
+
+ if (!bp)
+ return -EFAULT;
+
+ info = (siginfo_t) {
+ .si_signo = SIGTRAP,
+ .si_errno = 0,
+ .si_code = TRAP_BRKPT,
+ .si_addr = pc,
+ };
+
+ force_sig_info(SIGTRAP, &info, current);
+ return 0;
+}
+
+static int __init debug_traps_init(void)
{
hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
TRAP_HWBKPT, "single-step handler");
+ hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
+ TRAP_BRKPT, "ptrace BRK handler");
return 0;
}
-arch_initcall(single_step_init);
+arch_initcall(debug_traps_init);
/* Re-enable single step for syscall restarting. */
void user_rewind_single_step(struct task_struct *task)
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index fbb6e1843659..ffbbdde7aba1 100644
--- a/arch/arm64/kernel/early_printk.c
+++ b/arch/arm64/kernel/early_printk.c
@@ -26,6 +26,8 @@
#include <linux/amba/serial.h>
#include <linux/serial_reg.h>
+#include <asm/fixmap.h>
+
static void __iomem *early_base;
static void (*printch)(char ch);
@@ -141,8 +143,10 @@ static int __init setup_early_printk(char *buf)
}
/* no options parsing yet */
- if (paddr)
- early_base = early_io_map(paddr, EARLYCON_IOBASE);
+ if (paddr) {
+ set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
+ early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
+ }
printch = match->printch;
early_console = &early_console_dev;
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
new file mode 100644
index 000000000000..66716c9b9e5f
--- /dev/null
+++ b/arch/arm64/kernel/efi-entry.S
@@ -0,0 +1,109 @@
+/*
+ * EFI entry point.
+ *
+ * Copyright (C) 2013, 2014 Red Hat, Inc.
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+
+#define EFI_LOAD_ERROR 0x8000000000000001
+
+ __INIT
+
+ /*
+ * We arrive here from the EFI boot manager with:
+ *
+ * * CPU in little-endian mode
+ * * MMU on with identity-mapped RAM
+ * * Icache and Dcache on
+ *
+ * We will most likely be running from some place other than where
+ * we want to be. The kernel image wants to be placed at TEXT_OFFSET
+ * from start of RAM.
+ */
+ENTRY(efi_stub_entry)
+ /*
+ * Create a stack frame to save FP/LR with extra space
+ * for image_addr variable passed to efi_entry().
+ */
+ stp x29, x30, [sp, #-32]!
+
+ /*
+ * Call efi_entry to do the real work.
+ * x0 and x1 are already set up by firmware. Current runtime
+ * address of image is calculated and passed via *image_addr.
+ *
+ * unsigned long efi_entry(void *handle,
+ * efi_system_table_t *sys_table,
+ * unsigned long *image_addr);
+ */
+ adrp x8, _text
+ add x8, x8, #:lo12:_text
+ add x2, sp, 16
+ str x8, [x2]
+ bl efi_entry
+ cmn x0, #1
+ b.eq efi_load_fail
+
+ /*
+ * efi_entry() will have relocated the kernel image if necessary
+ * and we return here with device tree address in x0 and the kernel
+ * entry point stored at *image_addr. Save those values in registers
+ * which are callee preserved.
+ */
+ mov x20, x0 // DTB address
+ ldr x0, [sp, #16] // relocated _text address
+ mov x21, x0
+
+ /*
+ * Flush dcache covering current runtime addresses
+ * of kernel text/data. Then flush all of icache.
+ */
+ adrp x1, _text
+ add x1, x1, #:lo12:_text
+ adrp x2, _edata
+ add x2, x2, #:lo12:_edata
+ sub x1, x2, x1
+
+ bl __flush_dcache_area
+ ic ialluis
+
+ /* Turn off Dcache and MMU */
+ mrs x0, CurrentEL
+ cmp x0, #PSR_MODE_EL2t
+ ccmp x0, #PSR_MODE_EL2h, #0x4, ne
+ b.ne 1f
+ mrs x0, sctlr_el2
+ bic x0, x0, #1 << 0 // clear SCTLR.M
+ bic x0, x0, #1 << 2 // clear SCTLR.C
+ msr sctlr_el2, x0
+ isb
+ b 2f
+1:
+ mrs x0, sctlr_el1
+ bic x0, x0, #1 << 0 // clear SCTLR.M
+ bic x0, x0, #1 << 2 // clear SCTLR.C
+ msr sctlr_el1, x0
+ isb
+2:
+ /* Jump to kernel entry point */
+ mov x0, x20
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+ br x21
+
+efi_load_fail:
+ mov x0, #EFI_LOAD_ERROR
+ ldp x29, x30, [sp], #32
+ ret
+
+ENDPROC(efi_stub_entry)
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
new file mode 100644
index 000000000000..e786e6cdc400
--- /dev/null
+++ b/arch/arm64/kernel/efi-stub.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org>
+ *
+ * This file implements the EFI boot stub for the arm64 kernel.
+ * Adapted from ARM version by Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+#include <asm/sections.h>
+
+/*
+ * AArch64 requires the DTB to be 8-byte aligned and placed within the
+ * first 512 MiB from the start of the kernel, and it may not cross a
+ * 2 MiB boundary. We set the alignment to 2 MiB so we know it cannot
+ * cross one.
+ */
+#define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */
+#define MAX_FDT_OFFSET SZ_512M
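The alignment choice is easy to sanity-check: a region crosses a 2 MiB boundary iff its first and last bytes fall in different 2 MiB frames, which cannot happen when the base is 2 MiB aligned and the size is at most 2 MiB. An illustrative check (not part of the stub):

	static int crosses_2m_boundary(unsigned long long base,
				       unsigned long long size)
	{
		return (base >> 21) != ((base + size - 1) >> 21);
	}
	/* with base % SZ_2M == 0 and 0 < size <= SZ_2M this is always false */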
+
+#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+
+static void efi_char16_printk(efi_system_table_t *sys_table_arg,
+ efi_char16_t *str);
+
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table,
+ void *__image, void **__fh);
+static efi_status_t efi_file_close(void *handle);
+
+static efi_status_t
+efi_file_read(void *handle, unsigned long *size, void *addr);
+
+static efi_status_t
+efi_file_size(efi_system_table_t *sys_table, void *__fh,
+ efi_char16_t *filename_16, void **handle, u64 *file_sz);
+
+/* Include shared EFI stub code */
+#include "../../../drivers/firmware/efi/efi-stub-helper.c"
+#include "../../../drivers/firmware/efi/fdt.c"
+#include "../../../drivers/firmware/efi/arm-stub.c"
+
+
+static efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
+ unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long dram_base,
+ efi_loaded_image_t *image)
+{
+ efi_status_t status;
+ unsigned long kernel_size, kernel_memsize = 0;
+
+ /* Relocate the image, if required. */
+ kernel_size = _edata - _text;
+ if (*image_addr != (dram_base + TEXT_OFFSET)) {
+ kernel_memsize = kernel_size + (_end - _edata);
+ status = efi_relocate_kernel(sys_table, image_addr,
+ kernel_size, kernel_memsize,
+ dram_base + TEXT_OFFSET,
+ PAGE_SIZE);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Failed to relocate kernel\n");
+ return status;
+ }
+ if (*image_addr != (dram_base + TEXT_OFFSET)) {
+ pr_efi_err(sys_table, "Failed to alloc kernel memory\n");
+ efi_free(sys_table, kernel_memsize, *image_addr);
+ return EFI_ERROR;
+ }
+ *image_size = kernel_memsize;
+ }
+
+ return EFI_SUCCESS;
+}
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
new file mode 100644
index 000000000000..14db1f6e8d7f
--- /dev/null
+++ b/arch/arm64/kernel/efi.c
@@ -0,0 +1,469 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013, 2014 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+
+struct efi_memory_map memmap;
+
+static efi_runtime_services_t *runtime;
+
+static u64 efi_system_table;
+
+static int uefi_debug __initdata;
+static int __init uefi_debug_setup(char *str)
+{
+ uefi_debug = 1;
+
+ return 0;
+}
+early_param("uefi_debug", uefi_debug_setup);
+
+static int __init is_normal_ram(efi_memory_desc_t *md)
+{
+ if (md->attribute & EFI_MEMORY_WB)
+ return 1;
+ return 0;
+}
+
+static void __init efi_setup_idmap(void)
+{
+ struct memblock_region *r;
+ efi_memory_desc_t *md;
+ u64 paddr, npages, size;
+
+ for_each_memblock(memory, r)
+ create_id_mapping(r->base, r->size, 0);
+
+ /* map runtime io spaces */
+ for_each_efi_memory_desc(&memmap, md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
+ continue;
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+ create_id_mapping(paddr, size, 1);
+ }
+}
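memrange_efi_to_native(), used here and throughout this file, converts a 4 KiB-granule EFI range into whole kernel pages covering the same bytes. The helper itself lives outside this hunk, so treat the following as a sketch of its assumed behaviour:

	static void memrange_efi_to_native_sketch(u64 *addr, u64 *npages)
	{
		/* widen the length to whole kernel pages spanning the range... */
		*npages = PFN_UP(*addr + (*npages << EFI_PAGE_SHIFT)) -
			  PFN_DOWN(*addr);
		/* ...and round the base down to a kernel page boundary */
		*addr &= PAGE_MASK;
	}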
+
+static int __init uefi_init(void)
+{
+ efi_char16_t *c16;
+ char vendor[100] = "unknown";
+ int i, retval;
+
+ efi.systab = early_memremap(efi_system_table,
+ sizeof(efi_system_table_t));
+ if (efi.systab == NULL) {
+ pr_warn("Unable to map EFI system table.\n");
+ return -ENOMEM;
+ }
+
+ set_bit(EFI_BOOT, &efi.flags);
+ set_bit(EFI_64BIT, &efi.flags);
+
+ /*
+ * Verify the EFI Table
+ */
+ if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ pr_err("System table signature incorrect\n");
+ return -EINVAL;
+ }
+ if ((efi.systab->hdr.revision >> 16) < 2)
+ pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff);
+
+ /* Show what we know for posterity */
+ c16 = early_memremap(efi.systab->fw_vendor,
+ sizeof(vendor));
+ if (c16) {
+ for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
+ vendor[i] = c16[i];
+ vendor[i] = '\0';
+ }
+
+ pr_info("EFI v%u.%.02u by %s\n",
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff, vendor);
+
+ retval = efi_config_init(NULL);
+ if (retval == 0)
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
+ early_memunmap(c16, sizeof(vendor));
+ early_memunmap(efi.systab, sizeof(efi_system_table_t));
+
+ return retval;
+}
+
+static __initdata char memory_type_name[][32] = {
+ {"Reserved"},
+ {"Loader Code"},
+ {"Loader Data"},
+ {"Boot Code"},
+ {"Boot Data"},
+ {"Runtime Code"},
+ {"Runtime Data"},
+ {"Conventional Memory"},
+ {"Unusable Memory"},
+ {"ACPI Reclaim Memory"},
+ {"ACPI Memory NVS"},
+ {"Memory Mapped I/O"},
+ {"MMIO Port Space"},
+ {"PAL Code"},
+};
+
+/*
+ * Return true for RAM regions we want to permanently reserve.
+ */
+static __init int is_reserve_region(efi_memory_desc_t *md)
+{
+ if (!is_normal_ram(md))
+ return 0;
+
+ if (md->attribute & EFI_MEMORY_RUNTIME)
+ return 1;
+
+ if (md->type == EFI_ACPI_RECLAIM_MEMORY ||
+ md->type == EFI_RESERVED_TYPE)
+ return 1;
+
+ return 0;
+}
+
+static __init void reserve_regions(void)
+{
+ efi_memory_desc_t *md;
+ u64 paddr, npages, size;
+
+ if (uefi_debug)
+ pr_info("Processing EFI memory map:\n");
+
+ for_each_efi_memory_desc(&memmap, md) {
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+
+ if (uefi_debug)
+ pr_info(" 0x%012llx-0x%012llx [%s]",
+ paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
+ memory_type_name[md->type]);
+
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (is_normal_ram(md))
+ early_init_dt_add_memory_arch(paddr, size);
+
+ if (is_reserve_region(md) ||
+ md->type == EFI_BOOT_SERVICES_CODE ||
+ md->type == EFI_BOOT_SERVICES_DATA) {
+ memblock_reserve(paddr, size);
+ if (uefi_debug)
+ pr_cont("*");
+ }
+
+ if (uefi_debug)
+ pr_cont("\n");
+ }
+}
+
+static u64 __init free_one_region(u64 start, u64 end)
+{
+ u64 size = end - start;
+
+ if (uefi_debug)
+ pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);
+
+ free_bootmem_late(start, size);
+ return size;
+}
+
+static u64 __init free_region(u64 start, u64 end)
+{
+ u64 map_start, map_end, total = 0;
+
+ if (end <= start)
+ return total;
+
+ map_start = (u64)memmap.phys_map;
+ map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
+ map_start &= PAGE_MASK;
+
+ if (start < map_end && end > map_start) {
+ /* region overlaps UEFI memmap */
+ if (start < map_start)
+ total += free_one_region(start, map_start);
+
+ if (map_end < end)
+ total += free_one_region(map_end, end);
+ } else
+ total += free_one_region(start, end);
+
+ return total;
+}
+
+static void __init free_boot_services(void)
+{
+ u64 total_freed = 0;
+ u64 keep_end, free_start, free_end;
+ efi_memory_desc_t *md;
+
+ /*
+ * If the kernel uses larger pages than UEFI, we have to be careful
+ * not to inadvertently free memory we want to keep if there is
+ * overlap at the kernel page size alignment. We do not want to
+ * free is_reserve_region() memory, nor the UEFI memmap itself.
+ *
+ * The memory map is sorted, so we keep track of the end of
+ * any previous region we want to keep, remember any region
+ * we want to free and defer freeing it until we encounter
+ * the next region we want to keep. This way, before freeing
+ * it, we can clip it as needed to avoid freeing memory we
+ * want to keep for UEFI.
+ */
+
+ keep_end = 0;
+ free_start = 0;
+
+ for_each_efi_memory_desc(&memmap, md) {
+ u64 paddr, npages, size;
+
+ if (is_reserve_region(md)) {
+ /*
+ * We don't want to free any memory from this region.
+ */
+ if (free_start) {
+ /* adjust free_end then free region */
+ if (free_end > md->phys_addr)
+ free_end -= PAGE_SIZE;
+ total_freed += free_region(free_start, free_end);
+ free_start = 0;
+ }
+ keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+ continue;
+ }
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA) {
+ /* no need to free this region */
+ continue;
+ }
+
+ /*
+ * We want to free memory from this region.
+ */
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (free_start) {
+ if (paddr <= free_end)
+ free_end = paddr + size;
+ else {
+ total_freed += free_region(free_start, free_end);
+ free_start = paddr;
+ free_end = paddr + size;
+ }
+ } else {
+ free_start = paddr;
+ free_end = paddr + size;
+ }
+ if (free_start < keep_end) {
+ free_start += PAGE_SIZE;
+ if (free_start >= free_end)
+ free_start = 0;
+ }
+ }
+ if (free_start)
+ total_freed += free_region(free_start, free_end);
+
+ if (total_freed)
+ pr_info("Freed 0x%llx bytes of EFI boot services memory",
+ total_freed);
+}
+
+void __init efi_init(void)
+{
+ struct efi_fdt_params params;
+
+ /* Grab UEFI information placed in FDT by stub */
+ if (!efi_get_fdt_params(&params, uefi_debug))
+ return;
+
+ efi_system_table = params.system_table;
+
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
+ memmap.phys_map = (void *)params.mmap;
+ memmap.map = early_memremap(params.mmap, params.mmap_size);
+ memmap.map_end = memmap.map + params.mmap_size;
+ memmap.desc_size = params.desc_size;
+ memmap.desc_version = params.desc_ver;
+
+ if (uefi_init() < 0)
+ return;
+
+ reserve_regions();
+}
+
+void __init efi_idmap_init(void)
+{
+ if (!efi_enabled(EFI_BOOT))
+ return;
+
+ /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
+ efi_setup_idmap();
+}
+
+static int __init remap_region(efi_memory_desc_t *md, void **new)
+{
+ u64 paddr, vaddr, npages, size;
+
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (is_normal_ram(md))
+ vaddr = (__force u64)ioremap_cache(paddr, size);
+ else
+ vaddr = (__force u64)ioremap(paddr, size);
+
+ if (!vaddr) {
+ pr_err("Unable to remap 0x%llx pages @ %p\n",
+ npages, (void *)paddr);
+ return 0;
+ }
+
+ /* adjust for any rounding when the EFI and kernel page sizes differ */
+ md->virt_addr = vaddr + (md->phys_addr - paddr);
+
+ if (uefi_debug)
+ pr_info(" EFI remap 0x%012llx => %p\n",
+ md->phys_addr, (void *)md->virt_addr);
+
+ memcpy(*new, md, memmap.desc_size);
+ *new += memmap.desc_size;
+
+ return 1;
+}
+
+/*
+ * Switch UEFI from an identity map to a kernel virtual map
+ */
+static int __init arm64_enter_virtual_mode(void)
+{
+ efi_memory_desc_t *md;
+ phys_addr_t virtmap_phys;
+ void *virtmap, *virt_md;
+ efi_status_t status;
+ u64 mapsize;
+ int count = 0;
+ unsigned long flags;
+
+ if (!efi_enabled(EFI_BOOT)) {
+ pr_info("EFI services will not be available.\n");
+ return -1;
+ }
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+ /* replace early memmap mapping with permanent mapping */
+ mapsize = memmap.map_end - memmap.map;
+ early_memunmap(memmap.map, mapsize);
+ memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
+ mapsize);
+ memmap.map_end = memmap.map + mapsize;
+
+ efi.memmap = &memmap;
+
+ /* Map the runtime regions */
+ virtmap = kmalloc(mapsize, GFP_KERNEL);
+ if (!virtmap) {
+ pr_err("Failed to allocate EFI virtual memmap\n");
+ return -1;
+ }
+ virtmap_phys = virt_to_phys(virtmap);
+ virt_md = virtmap;
+
+ for_each_efi_memory_desc(&memmap, md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (remap_region(md, &virt_md))
+ ++count;
+ }
+
+ efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
+ if (efi.systab)
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
+ local_irq_save(flags);
+ cpu_switch_mm(idmap_pg_dir, &init_mm);
+
+ /* Call SetVirtualAddressMap with the physical address of the map */
+ runtime = efi.systab->runtime;
+ efi.set_virtual_address_map = runtime->set_virtual_address_map;
+
+ status = efi.set_virtual_address_map(count * memmap.desc_size,
+ memmap.desc_size,
+ memmap.desc_version,
+ (efi_memory_desc_t *)virtmap_phys);
+ cpu_set_reserved_ttbr0();
+ flush_tlb_all();
+ local_irq_restore(flags);
+
+ kfree(virtmap);
+
+ free_boot_services();
+
+ if (status != EFI_SUCCESS) {
+ pr_err("Failed to set EFI virtual address map! [%lx]\n",
+ status);
+ return -1;
+ }
+
+ /* Set up runtime services function pointers */
+ runtime = efi.systab->runtime;
+ efi.get_time = runtime->get_time;
+ efi.set_time = runtime->set_time;
+ efi.get_wakeup_time = runtime->get_wakeup_time;
+ efi.set_wakeup_time = runtime->set_wakeup_time;
+ efi.get_variable = runtime->get_variable;
+ efi.get_next_variable = runtime->get_next_variable;
+ efi.set_variable = runtime->set_variable;
+ efi.query_variable_info = runtime->query_variable_info;
+ efi.update_capsule = runtime->update_capsule;
+ efi.query_capsule_caps = runtime->query_capsule_caps;
+ efi.get_next_high_mono_count = runtime->get_next_high_mono_count;
+ efi.reset_system = runtime->reset_system;
+
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ return 0;
+}
+early_initcall(arm64_enter_virtual_mode);
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 6a27cd6dbfa6..d358ccacfc00 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -41,3 +41,27 @@ ENTRY(fpsimd_load_state)
fpsimd_restore x0, 8
ret
ENDPROC(fpsimd_load_state)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Save the bottom n FP registers.
+ *
+ * x0 - pointer to struct fpsimd_partial_state
+ */
+ENTRY(fpsimd_save_partial_state)
+ fpsimd_save_partial x0, 1, 8, 9
+ ret
+ENDPROC(fpsimd_save_partial_state)
+
+/*
+ * Load the bottom n FP registers.
+ *
+ * x0 - pointer to struct fpsimd_partial_state
+ */
+ENTRY(fpsimd_load_partial_state)
+ fpsimd_restore_partial x0, 8, 9
+ ret
+ENDPROC(fpsimd_load_partial_state)
+
+#endif
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
new file mode 100644
index 000000000000..b051871f2965
--- /dev/null
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -0,0 +1,218 @@
+/*
+ * arch/arm64/kernel/entry-ftrace.S
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+#include <asm/insn.h>
+
+/*
+ * Gcc with the -pg option puts the following code at the beginning of each
+ * function:
+ * mov x0, x30
+ * bl _mcount
+ * [function's body ...]
+ * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP when
+ * dynamic ftrace is enabled.
+ *
+ * Note that the x0 argument is not actually used here: we can recover the
+ * instrumented function's lr (x30) at any time by unwinding the call
+ * stack, as long as the kernel is compiled without -fomit-frame-pointer
+ * (CONFIG_FRAME_POINTER, which is forced on for arm64).
+ *
+ * stack layout after mcount_enter in _mcount():
+ *
+ * current sp/fp => 0:+-----+
+ * in _mcount() | x29 | -> instrumented function's fp
+ * +-----+
+ * | x30 | -> _mcount()'s lr (= instrumented function's pc)
+ * old sp => +16:+-----+
+ * when instrumented | |
+ * function calls | ... |
+ * _mcount() | |
+ * | |
+ * instrumented => +xx:+-----+
+ * function's fp | x29 | -> parent's fp
+ * +-----+
+ * | x30 | -> instrumented function's lr (= parent's pc)
+ * +-----+
+ * | ... |
+ */
+
+ .macro mcount_enter
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ .endm
+
+ .macro mcount_exit
+ ldp x29, x30, [sp], #16
+ ret
+ .endm
+
+ .macro mcount_adjust_addr rd, rn
+ sub \rd, \rn, #AARCH64_INSN_SIZE
+ .endm
+
+ /* for instrumented function's parent */
+ .macro mcount_get_parent_fp reg
+ ldr \reg, [x29]
+ ldr \reg, [\reg]
+ .endm
+
+ /* for instrumented function */
+ .macro mcount_get_pc0 reg
+ mcount_adjust_addr \reg, x30
+ .endm
+
+ .macro mcount_get_pc reg
+ ldr \reg, [x29, #8]
+ mcount_adjust_addr \reg, \reg
+ .endm
+
+ .macro mcount_get_lr reg
+ ldr \reg, [x29]
+ ldr \reg, [\reg, #8]
+ mcount_adjust_addr \reg, \reg
+ .endm
+
+ .macro mcount_get_lr_addr reg
+ ldr \reg, [x29]
+ add \reg, \reg, #8
+ .endm
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+/*
+ * void _mcount(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function makes calls, if enabled, to:
+ * - tracer function to probe instrumented function's entry,
+ * - ftrace_graph_caller to set up an exit hook
+ */
+ENTRY(_mcount)
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ ldr x0, =ftrace_trace_stop
+ ldr x0, [x0] // if (ftrace_trace_stop)
+ cbnz x0, ftrace_stub // return;
+#endif
+ mcount_enter
+
+ ldr x0, =ftrace_trace_function
+ ldr x2, [x0]
+ adr x0, ftrace_stub
+ cmp x0, x2 // if (ftrace_trace_function
+ b.eq skip_ftrace_call // != ftrace_stub) {
+
+ mcount_get_pc x0 // function's pc
+ mcount_get_lr x1 // function's lr (= parent's pc)
+ blr x2 // (*ftrace_trace_function)(pc, lr);
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
+skip_ftrace_call: // return;
+ mcount_exit // }
+#else
+ mcount_exit // return;
+ // }
+skip_ftrace_call:
+ ldr x1, =ftrace_graph_return
+ ldr x2, [x1] // if ((ftrace_graph_return
+ cmp x0, x2 // != ftrace_stub)
+ b.ne ftrace_graph_caller
+
+ ldr x1, =ftrace_graph_entry // || (ftrace_graph_entry
+ ldr x2, [x1] // != ftrace_graph_entry_stub))
+ ldr x0, =ftrace_graph_entry_stub
+ cmp x0, x2
+ b.ne ftrace_graph_caller // ftrace_graph_caller();
+
+ mcount_exit
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+ENDPROC(_mcount)
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+/*
+ * _mcount() is used to build the kernel with -pg option, but all the branch
+ * instructions to _mcount() are replaced to NOP initially at kernel start up,
+ * and later on, NOP to branch to ftrace_caller() when enabled or branch to
+ * NOP when disabled per-function base.
+ */
+ENTRY(_mcount)
+ ret
+ENDPROC(_mcount)
+
+/*
+ * void ftrace_caller(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function is a counterpart of _mcount() in 'static' ftrace, and
+ * makes calls to:
+ * - tracer function to probe instrumented function's entry,
+ * - ftrace_graph_caller to set up an exit hook
+ */
+ENTRY(ftrace_caller)
+ mcount_enter
+
+ mcount_get_pc0 x0 // function's pc
+ mcount_get_lr x1 // function's lr
+
+ .global ftrace_call
+ftrace_call: // tracer(pc, lr);
+ nop // This will be replaced with "bl xxx"
+ // where xxx can be any kind of tracer.
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .global ftrace_graph_call
+ftrace_graph_call: // ftrace_graph_caller();
+ nop // If enabled, this will be replaced
+ // with "b ftrace_graph_caller"
+#endif
+
+ mcount_exit
+ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(ftrace_stub)
+ ret
+ENDPROC(ftrace_stub)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * void ftrace_graph_caller(void)
+ *
+ * Called from _mcount() or ftrace_caller() when function_graph tracer is
+ * selected.
+ * Together with prepare_ftrace_return(), this function fakes the link
+ * register's value on the call stack in order to intercept the
+ * instrumented function's return path and run return_to_handler() on its
+ * exit.
+ */
+ENTRY(ftrace_graph_caller)
+ mcount_get_lr_addr x0 // pointer to function's saved lr
+ mcount_get_pc x1 // function's pc
+ mcount_get_parent_fp x2 // parent's fp
+ bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp)
+
+ mcount_exit
+ENDPROC(ftrace_graph_caller)
+
+/*
+ * void return_to_handler(void)
+ *
+ * Run ftrace_return_to_handler() before going back to parent.
+ * @fp is checked against the value passed by ftrace_graph_caller()
+ * only when CONFIG_FUNCTION_GRAPH_FP_TEST is enabled.
+ */
+ENTRY(return_to_handler)
+ str x0, [sp, #-16]!
+ mov x0, x29 // parent's fp
+ bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
+ mov x30, x0 // restore the original return address
+ ldr x0, [sp], #16
+ ret
+END(return_to_handler)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1d1314280a03..0437186e4c7d 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -121,7 +121,7 @@
.macro get_thread_info, rd
mov \rd, sp
- and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack
+ and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
.endm
/*
@@ -275,7 +275,6 @@ el1_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x0, far_el1
- mov x1, x25
mov x2, sp
b do_sp_pc_abort
el1_undef:
@@ -288,6 +287,8 @@ el1_dbg:
/*
* Debug exception handling
*/
+ cmp x24, #ESR_EL1_EC_BRK64 // if BRK64
+ cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
mrs x0, far_el1
mov x2, sp // struct pt_regs
@@ -311,14 +312,14 @@ el1_irq:
#endif
#ifdef CONFIG_PREEMPT
get_thread_info tsk
- ldr x24, [tsk, #TI_PREEMPT] // get preempt count
- add x0, x24, #1 // increment it
- str x0, [tsk, #TI_PREEMPT]
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ add w0, w24, #1 // increment it
+ str w0, [tsk, #TI_PREEMPT]
#endif
irq_handler
#ifdef CONFIG_PREEMPT
- str x24, [tsk, #TI_PREEMPT] // restore preempt count
- cbnz x24, 1f // preempt count != 0
+ str w24, [tsk, #TI_PREEMPT] // restore preempt count
+ cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
@@ -423,6 +424,7 @@ el0_da:
* Data abort handling
*/
mrs x0, far_el1
+ bic x0, x0, #(0xff << 56)
disable_step x1
isb
enable_dbg
@@ -476,6 +478,8 @@ el0_undef:
* Undefined instruction
*/
mov x0, sp
+ // enable interrupts before calling the main handler
+ enable_irq
b do_undefinstr
el0_dbg:
/*
@@ -506,15 +510,15 @@ el0_irq_naked:
#endif
get_thread_info tsk
#ifdef CONFIG_PREEMPT
- ldr x24, [tsk, #TI_PREEMPT] // get preempt count
- add x23, x24, #1 // increment it
- str x23, [tsk, #TI_PREEMPT]
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ add w23, w24, #1 // increment it
+ str w23, [tsk, #TI_PREEMPT]
#endif
irq_handler
#ifdef CONFIG_PREEMPT
- ldr x0, [tsk, #TI_PREEMPT]
- str x24, [tsk, #TI_PREEMPT]
- cmp x0, x23
+ ldr w0, [tsk, #TI_PREEMPT]
+ str w24, [tsk, #TI_PREEMPT]
+ cmp w0, w23
b.eq 1f
mov x1, #0
str x1, [x1] // BUG
@@ -586,7 +590,7 @@ fast_work_pending:
str x0, [sp, #S_X0] // returned x0
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
- /* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
+ /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
ldr x2, [sp, #S_PSTATE]
mov x0, sp // 'regs'
tst x2, #PSR_MODE_MASK // user mode regs?
@@ -641,8 +645,9 @@ el0_svc_naked: // compat entry point
enable_irq
get_thread_info tsk
- ldr x16, [tsk, #TI_FLAGS] // check for syscall tracing
- tbnz x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
+ ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
+ tst x16, #_TIF_SYSCALL_WORK
+ b.ne __sys_trace
adr lr, ret_fast_syscall // return address
cmp scno, sc_nr // check upper syscall limit
b.hs ni_sys
@@ -658,9 +663,8 @@ ENDPROC(el0_svc)
* switches, and waiting for our parent to respond.
*/
__sys_trace:
- mov x1, sp
- mov w0, #0 // trace entry
- bl syscall_trace
+ mov x0, sp
+ bl syscall_trace_enter
adr lr, __sys_trace_return // return address
uxtw scno, w0 // syscall number (possibly new)
mov x1, sp // pointer to regs
@@ -675,9 +679,8 @@ __sys_trace:
__sys_trace_return:
str x0, [sp] // save returned x0
- mov x1, sp
- mov w0, #1 // trace exit
- bl syscall_trace
+ mov x0, sp
+ bl syscall_trace_exit
b ret_to_user
/*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index e8b8357aedb4..845fae3aeb26 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -33,6 +34,60 @@
#define FPEXC_IDF (1 << 7)
/*
+ * In order to reduce the number of times the FPSIMD state is needlessly saved
+ * and restored, we need to keep track of two things:
+ * (a) for each task, we need to remember which CPU was the last one to have
+ * the task's FPSIMD state loaded into its FPSIMD registers;
+ * (b) for each CPU, we need to remember which task's userland FPSIMD state has
+ * been loaded into its FPSIMD registers most recently, or whether it has
+ * been used to perform kernel mode NEON in the meantime.
+ *
+ * For (a), we add a 'cpu' field to struct fpsimd_state, which gets updated to
+ * the id of the current CPU every time the state is loaded onto a CPU. For
+ * (b), we add the per-cpu variable 'fpsimd_last_state' (below), which
+ * contains the address of the userland FPSIMD state most recently loaded
+ * onto the CPU, or NULL if kernel mode NEON has been performed since then.
+ *
+ * With this in place, we no longer have to restore the next FPSIMD state right
+ * when switching between tasks. Instead, we can defer this check to userland
+ * resume, at which time we verify whether the CPU's fpsimd_last_state and the
+ * task's fpsimd_state.cpu are still mutually in sync. If this is the case, we
+ * can omit the FPSIMD restore.
+ *
+ * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
+ * indicate whether or not the userland FPSIMD state of the current task is
+ * present in the registers. The flag is set unless the FPSIMD registers of this
+ * CPU currently contain the most recent userland FPSIMD state of the current
+ * task.
+ *
+ * For a certain task, the sequence may look something like this:
+ * - the task gets scheduled in; if both the task's fpsimd_state.cpu field
+ * contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
+ * variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
+ * cleared, otherwise it is set;
+ *
+ * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
+ * userland FPSIMD state is copied from memory to the registers, the task's
+ * fpsimd_state.cpu field is set to the id of the current CPU, the current
+ * CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
+ * TIF_FOREIGN_FPSTATE flag is cleared;
+ *
+ * - the task executes an ordinary syscall; upon return to userland, the
+ * TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
+ * restored;
+ *
+ * - the task executes a syscall which executes some NEON instructions; this is
+ * preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
+ * register contents to memory, clears the fpsimd_last_state per-cpu variable
+ * and sets the TIF_FOREIGN_FPSTATE flag;
+ *
+ * - the task gets preempted after kernel_neon_end() is called; as we have not
+ * returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
+ * whatever is in the FPSIMD registers is not saved to memory, but discarded.
+ */
+static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
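The two-way link described above reduces to a single ownership test, which fpsimd_thread_switch() below performs inline; spelled out as a predicate for clarity (a sketch):

	static bool fpsimd_state_is_live(struct task_struct *t)
	{
		struct fpsimd_state *st = &t->thread.fpsimd_state;

		/* the registers hold t's state iff both links are intact */
		return __this_cpu_read(fpsimd_last_state) == st &&
		       st->cpu == smp_processor_id();
	}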
+
+/*
* Trapped FP/ASIMD access.
*/
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
@@ -70,20 +125,178 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
void fpsimd_thread_switch(struct task_struct *next)
{
- /* check if not kernel threads */
- if (current->mm)
+ /*
+ * Save the current FPSIMD state to memory, but only if whatever is in
+ * the registers is in fact the most recent userland FPSIMD state of
+ * 'current'.
+ */
+ if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
fpsimd_save_state(&current->thread.fpsimd_state);
- if (next->mm)
- fpsimd_load_state(&next->thread.fpsimd_state);
+
+ if (next->mm) {
+ /*
+ * If we are switching to a task whose most recent userland
+ * FPSIMD state is already in the registers of *this* cpu,
+ * we can skip loading the state from memory. Otherwise, set
+ * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
+ * upon the next return to userland.
+ */
+ struct fpsimd_state *st = &next->thread.fpsimd_state;
+
+ if (__this_cpu_read(fpsimd_last_state) == st
+ && st->cpu == smp_processor_id())
+ clear_ti_thread_flag(task_thread_info(next),
+ TIF_FOREIGN_FPSTATE);
+ else
+ set_ti_thread_flag(task_thread_info(next),
+ TIF_FOREIGN_FPSTATE);
+ }
}
void fpsimd_flush_thread(void)
{
+ preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
- fpsimd_load_state(&current->thread.fpsimd_state);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+ preempt_enable();
+}
+
+/*
+ * Save the userland FPSIMD state of 'current' to memory, but only if the state
+ * currently held in the registers does in fact belong to 'current'
+ */
+void fpsimd_preserve_current_state(void)
+{
+ preempt_disable();
+ if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
+ fpsimd_save_state(&current->thread.fpsimd_state);
+ preempt_enable();
+}
+
+/*
+ * Load the userland FPSIMD state of 'current' from memory, but only if the
+ * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
+ * state of 'current'
+ */
+void fpsimd_restore_current_state(void)
+{
+ preempt_disable();
+ if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ struct fpsimd_state *st = &current->thread.fpsimd_state;
+
+ fpsimd_load_state(st);
+ this_cpu_write(fpsimd_last_state, st);
+ st->cpu = smp_processor_id();
+ }
+ preempt_enable();
}
/*
+ * Load an updated userland FPSIMD state for 'current' from memory and set the
+ * flag that indicates that the FPSIMD register contents are the most recent
+ * FPSIMD state of 'current'
+ */
+void fpsimd_update_current_state(struct fpsimd_state *state)
+{
+ preempt_disable();
+ fpsimd_load_state(state);
+ if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ struct fpsimd_state *st = &current->thread.fpsimd_state;
+
+ this_cpu_write(fpsimd_last_state, st);
+ st->cpu = smp_processor_id();
+ }
+ preempt_enable();
+}
+
+/*
+ * Invalidate live CPU copies of task t's FPSIMD state
+ */
+void fpsimd_flush_task_state(struct task_struct *t)
+{
+ t->thread.fpsimd_state.cpu = NR_CPUS;
+}
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+static DEFINE_PER_CPU(struct fpsimd_partial_state, hardirq_fpsimdstate);
+static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate);
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin_partial(u32 num_regs)
+{
+ if (in_interrupt()) {
+ struct fpsimd_partial_state *s = this_cpu_ptr(
+ in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
+
+ BUG_ON(num_regs > 32);
+ fpsimd_save_partial_state(s, roundup(num_regs, 2));
+ } else {
+ /*
+ * Save the userland FPSIMD state if we have one and if we
+ * haven't done so already. Clear fpsimd_last_state to indicate
+ * that there is no longer userland FPSIMD state in the
+ * registers.
+ */
+ preempt_disable();
+ if (current->mm &&
+ !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
+ fpsimd_save_state(&current->thread.fpsimd_state);
+ this_cpu_write(fpsimd_last_state, NULL);
+ }
+}
+EXPORT_SYMBOL(kernel_neon_begin_partial);
+
+void kernel_neon_end(void)
+{
+ if (in_interrupt()) {
+ struct fpsimd_partial_state *s = this_cpu_ptr(
+ in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
+ fpsimd_load_partial_state(s);
+ } else {
+ preempt_enable();
+ }
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
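As a usage sketch for the kernel-mode NEON helpers above (not part of this
diff; xor_blocks_neon() is a hypothetical worker and the register count is
only illustrative):

    #include <asm/neon.h>

    static void xor_blocks_neon(void *dst, const void *src, unsigned long len);

    static void xor_blocks(void *dst, const void *src, unsigned long len)
    {
            /*
             * Safe in task, softirq or hardirq context; the worker is
             * assumed to clobber only the first four FPSIMD registers,
             * so only those need to be preserved.
             */
            kernel_neon_begin_partial(4);
            xor_blocks_neon(dst, src, len);
            kernel_neon_end();
    }
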
+#ifdef CONFIG_CPU_PM
+static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
+ fpsimd_save_state(&current->thread.fpsimd_state);
+ this_cpu_write(fpsimd_last_state, NULL);
+ break;
+ case CPU_PM_EXIT:
+ if (current->mm)
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpsimd_cpu_pm_notifier_block = {
+ .notifier_call = fpsimd_cpu_pm_notifier,
+};
+
+static void fpsimd_pm_init(void)
+{
+ cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
+}
+
+#else
+static inline void fpsimd_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+/*
* FP/SIMD support code initialisation.
*/
static int __init fpsimd_init(void)
@@ -101,6 +314,8 @@ static int __init fpsimd_init(void)
else
elf_hwcap |= HWCAP_ASIMD;
+ fpsimd_pm_init();
+
return 0;
}
late_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
new file mode 100644
index 000000000000..649890a3ac4e
--- /dev/null
+++ b/arch/arm64/kernel/ftrace.c
@@ -0,0 +1,177 @@
+/*
+ * arch/arm64/kernel/ftrace.c
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/swab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ftrace.h>
+#include <asm/insn.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Replace a single instruction, which may be a branch or NOP.
+ * If @validate == true, a replaced instruction is checked against 'old'.
+ */
+static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
+ bool validate)
+{
+ u32 replaced;
+
+ /*
+ * Note:
+	 * Due to modules and __init, code can disappear and change;
+	 * we need to protect against faulting as well as code changing.
+	 * We do this by using aarch64_insn_*(), which call probe_kernel_*().
+ *
+ * No lock is held here because all the modifications are run
+ * through stop_machine().
+ */
+ if (validate) {
+ if (aarch64_insn_read((void *)pc, &replaced))
+ return -EFAULT;
+
+ if (replaced != old)
+ return -EINVAL;
+ }
+ if (aarch64_insn_patch_text_nosync((void *)pc, new))
+ return -EPERM;
+
+ return 0;
+}
+
+/*
+ * Replace tracer function in ftrace_caller()
+ */
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long pc;
+ u32 new;
+
+ pc = (unsigned long)&ftrace_call;
+ new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
+
+ return ftrace_modify_code(pc, 0, new, false);
+}
+
+/*
+ * Turn on the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old, new;
+
+ old = aarch64_insn_gen_nop();
+ new = aarch64_insn_gen_branch_imm(pc, addr, true);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+/*
+ * Turn off the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old, new;
+
+ old = aarch64_insn_gen_branch_imm(pc, addr, true);
+ new = aarch64_insn_gen_nop();
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+ *(unsigned long *)data = 0;
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * function_graph tracer expects ftrace_return_to_handler() to be called
+ * on the way back to the parent. For this purpose, this function is called
+ * from _mcount() or ftrace_caller() to replace the return address (*parent)
+ * on the call stack with return_to_handler.
+ *
+ * Note that @frame_pointer is used only for sanity check later.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long old;
+ struct ftrace_graph_ent trace;
+ int err;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Note:
+ * No protection against faulting at *parent, which may be seen
+ * on other archs. It's unlikely on AArch64.
+ */
+ old = *parent;
+ *parent = return_hooker;
+
+ trace.func = self_addr;
+ trace.depth = current->curr_ret_stack + 1;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ *parent = old;
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+ frame_pointer);
+ if (err == -EBUSY) {
+ *parent = old;
+ return;
+ }
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
+ * depending on @enable.
+ */
+static int ftrace_modify_graph_caller(bool enable)
+{
+ unsigned long pc = (unsigned long)&ftrace_graph_call;
+ u32 branch, nop;
+
+ branch = aarch64_insn_gen_branch_imm(pc,
+ (unsigned long)ftrace_graph_caller, false);
+ nop = aarch64_insn_gen_nop();
+
+ if (enable)
+ return ftrace_modify_code(pc, nop, branch, true);
+ else
+ return ftrace_modify_code(pc, branch, nop, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 53dcae49e729..f1d3f693cac6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -26,6 +26,7 @@
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
+#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
@@ -34,29 +35,17 @@
#include <asm/page.h>
#include <asm/virt.h>
-/*
- * swapper_pg_dir is the virtual address of the initial page table. We place
- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
- * 2 pages and is placed below swapper_pg_dir.
- */
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
#error KERNEL_RAM_VADDR must start at 0xXXX80000
#endif
-#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
-
- .globl swapper_pg_dir
- .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
-
- .globl idmap_pg_dir
- .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
-
- .macro pgtbl, ttb0, ttb1, phys
- add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
- sub \ttb0, \ttb1, #IDMAP_DIR_SIZE
+ .macro pgtbl, ttb0, ttb1, virt_to_phys
+ ldr \ttb1, =swapper_pg_dir
+ ldr \ttb0, =idmap_pg_dir
+ add \ttb1, \ttb1, \virt_to_phys
+ add \ttb0, \ttb0, \virt_to_phys
.endm
#ifdef CONFIG_ARM64_64K_PAGES
@@ -107,16 +96,137 @@
/*
* DO NOT MODIFY. Image header expected by Linux boot-loaders.
*/
+#ifdef CONFIG_EFI
+efi_head:
+ /*
+ * This add instruction has no meaningful effect except that
+ * its opcode forms the magic "MZ" signature required by UEFI.
+ */
+ add x13, x18, #0x16
+ b stext
+#else
b stext // branch to kernel start, magic
.long 0 // reserved
+#endif
.quad TEXT_OFFSET // Image load offset from start of RAM
.quad 0 // reserved
.quad 0 // reserved
+ .quad 0 // reserved
+ .quad 0 // reserved
+ .quad 0 // reserved
+ .byte 0x41 // Magic number, "ARM\x64"
+ .byte 0x52
+ .byte 0x4d
+ .byte 0x64
+#ifdef CONFIG_EFI
+ .long pe_header - efi_head // Offset to the PE header.
+#else
+ .word 0 // reserved
+#endif
+
+#ifdef CONFIG_EFI
+ .align 3
+pe_header:
+ .ascii "PE"
+ .short 0
+coff_header:
+ .short 0xaa64 // AArch64
+ .short 2 // nr_sections
+ .long 0 // TimeDateStamp
+ .long 0 // PointerToSymbolTable
+ .long 1 // NumberOfSymbols
+ .short section_table - optional_header // SizeOfOptionalHeader
+ .short 0x206 // Characteristics.
+ // IMAGE_FILE_DEBUG_STRIPPED |
+ // IMAGE_FILE_EXECUTABLE_IMAGE |
+ // IMAGE_FILE_LINE_NUMS_STRIPPED
+optional_header:
+ .short 0x20b // PE32+ format
+ .byte 0x02 // MajorLinkerVersion
+ .byte 0x14 // MinorLinkerVersion
+ .long _edata - stext // SizeOfCode
+ .long 0 // SizeOfInitializedData
+ .long 0 // SizeOfUninitializedData
+ .long efi_stub_entry - efi_head // AddressOfEntryPoint
+ .long stext - efi_head // BaseOfCode
+
+extra_header_fields:
+ .quad 0 // ImageBase
+ .long 0x20 // SectionAlignment
+ .long 0x8 // FileAlignment
+ .short 0 // MajorOperatingSystemVersion
+ .short 0 // MinorOperatingSystemVersion
+ .short 0 // MajorImageVersion
+ .short 0 // MinorImageVersion
+ .short 0 // MajorSubsystemVersion
+ .short 0 // MinorSubsystemVersion
+ .long 0 // Win32VersionValue
+
+ .long _edata - efi_head // SizeOfImage
+
+ // Everything before the kernel image is considered part of the header
+ .long stext - efi_head // SizeOfHeaders
+ .long 0 // CheckSum
+ .short 0xa // Subsystem (EFI application)
+ .short 0 // DllCharacteristics
+ .quad 0 // SizeOfStackReserve
+ .quad 0 // SizeOfStackCommit
+ .quad 0 // SizeOfHeapReserve
+ .quad 0 // SizeOfHeapCommit
+ .long 0 // LoaderFlags
+ .long 0x6 // NumberOfRvaAndSizes
+
+ .quad 0 // ExportTable
+ .quad 0 // ImportTable
+ .quad 0 // ResourceTable
+ .quad 0 // ExceptionTable
+	.quad	0					// CertificateTable
+ .quad 0 // BaseRelocationTable
+
+ // Section table
+section_table:
+
+ /*
+ * The EFI application loader requires a relocation section
+ * because EFI applications must be relocatable. This is a
+ * dummy section as far as we are concerned.
+ */
+ .ascii ".reloc"
+ .byte 0
+ .byte 0 // end of 0 padding of section name
+ .long 0
+ .long 0
+ .long 0 // SizeOfRawData
+ .long 0 // PointerToRawData
+ .long 0 // PointerToRelocations
+ .long 0 // PointerToLineNumbers
+ .short 0 // NumberOfRelocations
+ .short 0 // NumberOfLineNumbers
+ .long 0x42100040 // Characteristics (section flags)
+
+
+ .ascii ".text"
+ .byte 0
+ .byte 0
+ .byte 0 // end of 0 padding of section name
+ .long _edata - stext // VirtualSize
+ .long stext - efi_head // VirtualAddress
+ .long _edata - stext // SizeOfRawData
+ .long stext - efi_head // PointerToRawData
+
+ .long 0 // PointerToRelocations (0 for executables)
+ .long 0 // PointerToLineNumbers (0 for executables)
+ .short 0 // NumberOfRelocations (0 for executables)
+ .short 0 // NumberOfLineNumbers (0 for executables)
+ .long 0xe0500020 // Characteristics (section flags)
+ .align 5
+#endif
ENTRY(stext)
mov x21, x0 // x21=FDT
+ bl el2_setup // Drop to EL1, w20=cpu_boot_mode
bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
- bl el2_setup // Drop to EL1
+ bl set_cpu_boot_mode_flag
mrs x22, midr_el1 // x22=cpuid
mov x0, x22
bl lookup_processor_type
@@ -142,21 +252,30 @@ ENDPROC(stext)
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
+ *
+ * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
+ * booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
mrs x0, CurrentEL
cmp x0, #PSR_MODE_EL2t
ccmp x0, #PSR_MODE_EL2h, #0x4, ne
- ldr x0, =__boot_cpu_mode // Compute __boot_cpu_mode
- add x0, x0, x28
- b.eq 1f
- str wzr, [x0] // Remember we don't have EL2...
+ b.ne 1f
+ mrs x0, sctlr_el2
+CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
+CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
+ msr sctlr_el2, x0
+ b 2f
+1: mrs x0, sctlr_el1
+CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
+CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
+ msr sctlr_el1, x0
+ mov w20, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
+ isb
ret
/* Hyp configuration. */
-1: ldr w1, =BOOT_CPU_MODE_EL2
- str w1, [x0, #4] // This CPU has EL2
- mov x0, #(1 << 31) // 64-bit EL1
+2: mov x0, #(1 << 31) // 64-bit EL1
msr hcr_el2, x0
/* Generic timers. */
@@ -173,7 +292,8 @@ ENTRY(el2_setup)
/* sctlr_el1 */
mov x0, #0x0800 // Set/clear RES{1,0} bits
- movk x0, #0x30d0, lsl #16
+CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
+CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr sctlr_el1, x0
/* Coprocessor traps. */
@@ -196,18 +316,34 @@ ENTRY(el2_setup)
PSR_MODE_EL1h)
msr spsr_el2, x0
msr elr_el2, lr
+ mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
eret
ENDPROC(el2_setup)
/*
+ * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
+ * in x20. See arch/arm64/include/asm/virt.h for more info.
+ */
+ENTRY(set_cpu_boot_mode_flag)
+ ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode
+ add x1, x1, x28
+ cmp w20, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+ add x1, x1, #4
+1:	str	w20, [x1]			// Record the boot mode of this CPU
+ ret
+ENDPROC(set_cpu_boot_mode_flag)
+
+/*
* We need to find out the CPU boot mode long after boot, so we need to
* store it in a writable variable.
*
* This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it.
*/
- .pushsection .data
+ .pushsection .data..cacheline_aligned
ENTRY(__boot_cpu_mode)
+ .align L1_CACHE_SHIFT
.long BOOT_CPU_MODE_EL2
.long 0
.popsection
@@ -217,7 +353,6 @@ ENTRY(__boot_cpu_mode)
.quad PAGE_OFFSET
#ifdef CONFIG_SMP
- .pushsection .smp.pen.text, "ax"
.align 3
1: .quad .
.quad secondary_holding_pen_release
@@ -227,8 +362,9 @@ ENTRY(__boot_cpu_mode)
* cores are held until we're ready for them to initialise.
*/
ENTRY(secondary_holding_pen)
- bl __calc_phys_offset // x24=phys offset
- bl el2_setup // Drop to EL1
+ bl el2_setup // Drop to EL1, w20=cpu_boot_mode
+ bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+ bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
ldr x1, =MPIDR_HWID_BITMASK
and x0, x0, x1
@@ -242,7 +378,16 @@ pen: ldr x4, [x3]
wfe
b pen
ENDPROC(secondary_holding_pen)
- .popsection
+
+ /*
+ * Secondary entry point that jumps straight into the kernel. Only to
+ * be used where CPUs are brought online dynamically by the kernel.
+ */
+ENTRY(secondary_entry)
+ bl __calc_phys_offset // x2=phys offset
+ bl el2_setup // Drop to EL1
+ b secondary_startup
+ENDPROC(secondary_entry)
ENTRY(secondary_startup)
/*
@@ -254,7 +399,7 @@ ENTRY(secondary_startup)
mov x23, x0 // x23=current cpu_table
cbz x23, __error_p // invalid processor (x23=0)?
- pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1
+ pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1
ldr x12, [x23, #CPU_INFO_SETUP]
add x12, x12, x28 // __virt_to_phys
blr x12 // initialise processor
@@ -296,8 +441,13 @@ ENDPROC(__enable_mmu)
* x27 = *virtual* address to jump to upon completion
*
* other registers depend on the function called upon completion
+ *
+ * We align the entire function to the smallest power of two larger than it to
+ * ensure it fits within a single block map entry. Otherwise, were PHYS_OFFSET
+ * close to the end of a 512MB or 1GB block, we might require an additional
+ * table to map the entire function.
*/
- .align 6
+ .align 4
__turn_mmu_on:
msr sctlr_el1, x0
isb
@@ -340,26 +490,18 @@ ENDPROC(__calc_phys_offset)
* Preserves: tbl, flags
* Corrupts: phys, start, end, pstate
*/
- .macro create_block_map, tbl, flags, phys, start, end, idmap=0
+ .macro create_block_map, tbl, flags, phys, start, end
lsr \phys, \phys, #BLOCK_SHIFT
- .if \idmap
- and \start, \phys, #PTRS_PER_PTE - 1 // table index
- .else
lsr \start, \start, #BLOCK_SHIFT
and \start, \start, #PTRS_PER_PTE - 1 // table index
- .endif
orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
- .ifnc \start,\end
lsr \end, \end, #BLOCK_SHIFT
and \end, \end, #PTRS_PER_PTE - 1 // table end index
- .endif
9999: str \phys, [\tbl, \start, lsl #3] // store the entry
- .ifnc \start,\end
add \start, \start, #1 // next entry
add \phys, \phys, #BLOCK_SIZE // next block
cmp \start, \end
b.ls 9999b
- .endif
.endm
/*
@@ -368,10 +510,19 @@ ENDPROC(__calc_phys_offset)
* - identity mapping to enable the MMU (low address, TTBR0)
* - first few MB of the kernel linear mapping to jump to once the MMU has
* been enabled, including the FDT blob (TTBR1)
- * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
+ * - pgd entry for fixed mappings (TTBR1)
*/
__create_page_tables:
- pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
+ pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
+ mov x27, lr
+
+ /*
+ * Invalidate the idmap and swapper page tables to avoid potential
+ * dirty cache lines being evicted.
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
+ bl __inval_cache_range
/*
* Clear the idmap and swapper page tables.
@@ -391,9 +542,13 @@ __create_page_tables:
* Create the identity mapping.
*/
add x0, x25, #PAGE_SIZE // section table address
- adr x3, __turn_mmu_on // virtual/physical address
+ ldr x3, =KERNEL_START
+ add x3, x3, x28 // __pa(KERNEL_START)
create_pgd_entry x25, x0, x3, x5, x6
- create_block_map x0, x7, x3, x5, x5, idmap=1
+ ldr x6, =KERNEL_END
+ mov x5, x3 // __pa(KERNEL_START)
+ add x6, x6, x28 // __pa(KERNEL_END)
+ create_block_map x0, x7, x3, x5, x6
/*
* Map the kernel image (starting with PHYS_OFFSET).
@@ -401,7 +556,7 @@ __create_page_tables:
add x0, x26, #PAGE_SIZE // section table address
mov x5, #PAGE_OFFSET
create_pgd_entry x26, x0, x5, x3, x6
- ldr x6, =KERNEL_END - 1
+ ldr x6, =KERNEL_END
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
@@ -421,15 +576,23 @@ __create_page_tables:
sub x6, x6, #1 // inclusive range
create_block_map x0, x7, x3, x5, x6
1:
-#ifdef CONFIG_EARLY_PRINTK
/*
- * Create the pgd entry for the UART mapping. The full mapping is done
- * later based earlyprintk kernel parameter.
+ * Create the pgd entry for the fixed mappings.
*/
- ldr x5, =EARLYCON_IOBASE // UART virtual address
+ ldr x5, =FIXADDR_TOP // Fixed mapping virtual address
add x0, x26, #2 * PAGE_SIZE // section table address
create_pgd_entry x26, x0, x5, x6, x7
-#endif
+
+ /*
+ * Since the page tables have been populated with non-cacheable
+ * accesses (MMU disabled), invalidate the idmap and swapper page
+ * tables again to remove any speculatively loaded cache lines.
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
+ bl __inval_cache_range
+
+ mov lr, x27
ret
ENDPROC(__create_page_tables)
.ltorg
@@ -438,10 +601,8 @@ ENDPROC(__create_page_tables)
.type __switch_data, %object
__switch_data:
.quad __mmap_switched
- .quad __data_loc // x4
- .quad _data // x5
.quad __bss_start // x6
- .quad _end // x7
+ .quad __bss_stop // x7
.quad processor_id // x4
.quad __fdt_pointer // x5
.quad memstart_addr // x6
@@ -454,15 +615,7 @@ __switch_data:
__mmap_switched:
adr x3, __switch_data + 8
- ldp x4, x5, [x3], #16
ldp x6, x7, [x3], #16
- cmp x4, x5 // Copy data segment if needed
-1: ccmp x5, x6, #4, ne
- b.eq 2f
- ldr x16, [x4], #8
- str x16, [x5], #8
- b 1b
-2:
1: cmp x6, x7
b.hs 2f
str xzr, [x6], #8 // Clear BSS
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 5ab825c59db9..6de3460ede4c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -20,13 +20,14 @@
#define pr_fmt(fmt) "hw-breakpoint: " fmt
+#include <linux/compat.h>
+#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
-#include <asm/compat.h>
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
@@ -169,15 +170,68 @@ static enum debug_el debug_exception_level(int privilege)
}
}
-/*
- * Install a perf counter breakpoint.
+enum hw_breakpoint_ops {
+ HW_BREAKPOINT_INSTALL,
+ HW_BREAKPOINT_UNINSTALL,
+ HW_BREAKPOINT_RESTORE
+};
+
+/**
+ * hw_breakpoint_slot_setup - Find and set up a perf slot according to the
+ *                            requested operation
+ *
+ * @slots: pointer to array of slots
+ * @max_slots: max number of slots
+ * @bp: perf_event to set up
+ * @ops: operation to be carried out on the slot
+ *
+ * Return:
+ * slot index on success
+ * -ENOSPC if no slot is available/matches
+ * -EINVAL on wrong operations parameter
*/
-int arch_install_hw_breakpoint(struct perf_event *bp)
+static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
+ struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
+{
+ int i;
+ struct perf_event **slot;
+
+ for (i = 0; i < max_slots; ++i) {
+ slot = &slots[i];
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ if (!*slot) {
+ *slot = bp;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ if (*slot == bp) {
+ *slot = NULL;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_RESTORE:
+ if (*slot == bp)
+ return i;
+ break;
+ default:
+ pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
+ return -EINVAL;
+ }
+ }
+ return -ENOSPC;
+}
+
+static int hw_breakpoint_control(struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot, **slots;
+ struct perf_event **slots;
struct debug_info *debug_info = &current->thread.debug;
int i, max_slots, ctrl_reg, val_reg, reg_enable;
+ enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
u32 ctrl;
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
@@ -196,67 +250,54 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
reg_enable = !debug_info->wps_disabled;
}
- for (i = 0; i < max_slots; ++i) {
- slot = &slots[i];
-
- if (!*slot) {
- *slot = bp;
- break;
- }
- }
-
- if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
- return -ENOSPC;
+ i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);
- /* Ensure debug monitors are enabled at the correct exception level. */
- enable_debug_monitors(debug_exception_level(info->ctrl.privilege));
+ if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
+ return i;
- /* Setup the address register. */
- write_wb_reg(val_reg, i, info->address);
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ /*
+ * Ensure debug monitors are enabled at the correct exception
+ * level.
+ */
+ enable_debug_monitors(dbg_el);
+ /* Fall through */
+ case HW_BREAKPOINT_RESTORE:
+ /* Setup the address register. */
+ write_wb_reg(val_reg, i, info->address);
+
+ /* Setup the control register. */
+ ctrl = encode_ctrl_reg(info->ctrl);
+ write_wb_reg(ctrl_reg, i,
+ reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ /* Reset the control register. */
+ write_wb_reg(ctrl_reg, i, 0);
- /* Setup the control register. */
- ctrl = encode_ctrl_reg(info->ctrl);
- write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
+ /*
+ * Release the debug monitors for the correct exception
+ * level.
+ */
+ disable_debug_monitors(dbg_el);
+ break;
+ }
return 0;
}
-void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+/*
+ * Install a perf counter breakpoint.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot, **slots;
- int i, max_slots, base;
-
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
- /* Breakpoint */
- base = AARCH64_DBG_REG_BCR;
- slots = __get_cpu_var(bp_on_reg);
- max_slots = core_num_brps;
- } else {
- /* Watchpoint */
- base = AARCH64_DBG_REG_WCR;
- slots = __get_cpu_var(wp_on_reg);
- max_slots = core_num_wrps;
- }
-
- /* Remove the breakpoint. */
- for (i = 0; i < max_slots; ++i) {
- slot = &slots[i];
-
- if (*slot == bp) {
- *slot = NULL;
- break;
- }
- }
-
- if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
- return;
-
- /* Reset the control register. */
- write_wb_reg(base, i, 0);
+ return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
+}
- /* Release the debug monitors for the correct exception level. */
- disable_debug_monitors(debug_exception_level(info->ctrl.privilege));
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
static int get_hbp_len(u8 hbp_len)
@@ -806,18 +847,36 @@ void hw_breakpoint_thread_switch(struct task_struct *next)
/*
* CPU initialisation.
*/
-static void reset_ctrl_regs(void *unused)
+static void hw_breakpoint_reset(void *unused)
{
int i;
-
- for (i = 0; i < core_num_brps; ++i) {
- write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
- write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
+ struct perf_event **slots;
+ /*
+ * When a CPU goes through cold-boot, it does not have any installed
+ * slot, so it is safe to share the same function for restoring and
+ * resetting breakpoints; when a CPU is hotplugged in, it goes
+ * through the slots, which are all empty, hence it just resets control
+ * and value for debug registers.
+	 * When this function is triggered on warm boot through a CPU PM
+	 * notifier, some slots might be initialized; if so, they are
+	 * reprogrammed according to the debug slots' contents.
+ */
+ for (slots = __get_cpu_var(bp_on_reg), i = 0; i < core_num_brps; ++i) {
+ if (slots[i]) {
+ hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
+ } else {
+ write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
+ write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
+ }
}
- for (i = 0; i < core_num_wrps; ++i) {
- write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
- write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
+ for (slots = __get_cpu_var(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
+ if (slots[i]) {
+ hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
+ } else {
+ write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
+ write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
+ }
}
}
@@ -827,7 +886,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
{
int cpu = (long)hcpu;
if (action == CPU_ONLINE)
- smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1);
+ smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
return NOTIFY_OK;
}
@@ -835,6 +894,14 @@ static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
.notifier_call = hw_breakpoint_reset_notify,
};
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
+#else
+static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+{
+}
+#endif
+
/*
* One-time initialisation.
*/
@@ -850,8 +917,8 @@ static int __init arch_hw_breakpoint_init(void)
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
*/
- smp_call_function(reset_ctrl_regs, NULL, 1);
- reset_ctrl_regs(NULL);
+ smp_call_function(hw_breakpoint_reset, NULL, 1);
+ hw_breakpoint_reset(NULL);
/* Register debug fault handlers. */
hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
@@ -861,6 +928,8 @@ static int __init arch_hw_breakpoint_init(void)
/* Register hotplug notifier. */
register_cpu_notifier(&hw_breakpoint_reset_nb);
+ /* Register cpu_suspend hw breakpoint restore hook */
+ cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
return 0;
}
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
new file mode 100644
index 000000000000..92f36835486b
--- /dev/null
+++ b/arch/arm64/kernel/insn.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/insn.h>
+
+static int aarch64_insn_encoding_class[] = {
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+};
+
+enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
+{
+ return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
+}
+
+/* NOP is an alias of HINT */
+bool __kprobes aarch64_insn_is_nop(u32 insn)
+{
+ if (!aarch64_insn_is_hint(insn))
+ return false;
+
+ switch (insn & 0xFE0) {
+ case AARCH64_INSN_HINT_YIELD:
+ case AARCH64_INSN_HINT_WFE:
+ case AARCH64_INSN_HINT_WFI:
+ case AARCH64_INSN_HINT_SEV:
+ case AARCH64_INSN_HINT_SEVL:
+ return false;
+ default:
+ return true;
+ }
+}
+
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+ int ret;
+ u32 val;
+
+ ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
+ if (!ret)
+ *insnp = le32_to_cpu(val);
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+ insn = cpu_to_le32(insn);
+ return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
+}
+
+static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
+{
+ if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
+ return false;
+
+ return aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_svc(insn) ||
+ aarch64_insn_is_hvc(insn) ||
+ aarch64_insn_is_smc(insn) ||
+ aarch64_insn_is_brk(insn) ||
+ aarch64_insn_is_nop(insn);
+}
+
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section B2.6.5 "Concurrent modification and execution of instructions":
+ * Concurrent modification and execution of instructions can lead to the
+ * resulting instruction performing any behavior that can be achieved by
+ * executing any sequence of instructions that can be executed from the
+ * same Exception level, except where the instruction before modification
+ * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
+ * or SMC instruction.
+ */
+bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
+{
+ return __aarch64_insn_hotpatch_safe(old_insn) &&
+ __aarch64_insn_hotpatch_safe(new_insn);
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+ u32 *tp = addr;
+ int ret;
+
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)tp & 0x3)
+ return -EINVAL;
+
+ ret = aarch64_insn_write(tp, insn);
+ if (ret == 0)
+ flush_icache_range((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+ return ret;
+}
+
+struct aarch64_insn_patch {
+ void **text_addrs;
+ u32 *new_insns;
+ int insn_cnt;
+ atomic_t cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+ int i, ret = 0;
+ struct aarch64_insn_patch *pp = arg;
+
+ /* The first CPU becomes master */
+ if (atomic_inc_return(&pp->cpu_count) == 1) {
+ for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ pp->new_insns[i]);
+ /*
+ * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
+ * which ends with "dsb; isb" pair guaranteeing global
+ * visibility.
+ */
+ atomic_set(&pp->cpu_count, -1);
+ } else {
+ while (atomic_read(&pp->cpu_count) != -1)
+ cpu_relax();
+ isb();
+ }
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+{
+ struct aarch64_insn_patch patch = {
+ .text_addrs = addrs,
+ .new_insns = insns,
+ .insn_cnt = cnt,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (cnt <= 0)
+ return -EINVAL;
+
+ return stop_machine(aarch64_insn_patch_text_cb, &patch,
+ cpu_online_mask);
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+ int ret;
+ u32 insn;
+
+	/* Unsafe to patch multiple instructions without synchronization */
+ if (cnt == 1) {
+ ret = aarch64_insn_read(addrs[0], &insn);
+ if (ret)
+ return ret;
+
+ if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
+ /*
+ * ARMv8 architecture doesn't guarantee all CPUs see
+ * the new instruction after returning from function
+ * aarch64_insn_patch_text_nosync(). So send IPIs to
+ * all other CPUs to achieve instruction
+ * synchronization.
+ */
+ ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
+ kick_all_cpus_sync();
+ return ret;
+ }
+ }
+
+ return aarch64_insn_patch_text_sync(addrs, insns, cnt);
+}
+
+u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm)
+{
+ u32 immlo, immhi, lomask, himask, mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ lomask = 0x3;
+ himask = 0x7ffff;
+ immlo = imm & lomask;
+ imm >>= 2;
+ immhi = imm & himask;
+ imm = (immlo << 24) | (immhi);
+ mask = (lomask << 24) | (himask);
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_26:
+ mask = BIT(26) - 1;
+ shift = 0;
+ break;
+ case AARCH64_INSN_IMM_19:
+ mask = BIT(19) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_16:
+ mask = BIT(16) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_14:
+ mask = BIT(14) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_12:
+ mask = BIT(12) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_9:
+ mask = BIT(9) - 1;
+ shift = 12;
+ break;
+ default:
+ pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
+
+ /* Update the immediate field. */
+ insn &= ~(mask << shift);
+ insn |= (imm & mask) << shift;
+
+ return insn;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
+ /*
+ * PC: A 64-bit Program Counter holding the address of the current
+ * instruction. A64 instructions must be word-aligned.
+ */
+ BUG_ON((pc & 0x3) || (addr & 0x3));
+
+ /*
+	 * B/BL support a [-128M, 128M) offset.
+	 * The ARM64 virtual address arrangement guarantees that all kernel
+	 * and module text is within +/-128M.
+ */
+ offset = ((long)addr - (long)pc);
+ BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+
+ if (type == AARCH64_INSN_BRANCH_LINK)
+ insn = aarch64_insn_get_bl_value();
+ else
+ insn = aarch64_insn_get_b_value();
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
+}
+
+u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
+{
+ return aarch64_insn_get_hint_value() | op;
+}
+
+u32 __kprobes aarch64_insn_gen_nop(void)
+{
+ return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
+}
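
As an illustration of the API above (a sketch, not part of this diff;
patch_site is assumed to be a patchable code address currently holding a
NOP):

    #include <asm/insn.h>

    static int patch_nop_to_branch(void *patch_site, void *target)
    {
            u32 old = aarch64_insn_gen_nop();
            u32 new = aarch64_insn_gen_branch_imm((unsigned long)patch_site,
                                                  (unsigned long)target,
                                                  AARCH64_INSN_BRANCH_NOLINK);
            u32 cur;

            /* Refuse to patch if the site does not hold the expected NOP. */
            if (aarch64_insn_read(patch_site, &cur) || cur != old)
                    return -EINVAL;

            /*
             * A NOP -> B transition is hotpatch-safe per the rule quoted
             * above, so aarch64_insn_patch_text() takes the nosync path
             * plus an IPI instead of a stop_machine() round trip.
             */
            return aarch64_insn_patch_text(&patch_site, &new, 1);
    }
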
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index ecb3354292ed..473e5dbf8f39 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -81,3 +81,64 @@ void __init init_IRQ(void)
if (!handle_arch_irq)
panic("No interrupt controller found.");
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ const struct cpumask *affinity = d->affinity;
+ struct irq_chip *c;
+ bool ret = false;
+
+ /*
+ * If this is a per-CPU interrupt, or the affinity does not
+ * include this CPU, then we have nothing to do.
+ */
+ if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+ return false;
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
+ ret = true;
+ }
+
+ c = irq_data_get_irq_chip(d);
+ if (!c->irq_set_affinity)
+ pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+ else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+ cpumask_copy(d->affinity, affinity);
+
+ return ret;
+}
+
+/*
+ * The current CPU has been marked offline. Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void migrate_irqs(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for_each_irq_desc(i, desc) {
+ bool affinity_broken;
+
+ raw_spin_lock(&desc->lock);
+ affinity_broken = migrate_one_irq(desc);
+ raw_spin_unlock(&desc->lock);
+
+ if (affinity_broken)
+ pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ i, smp_processor_id());
+ }
+
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
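
For context (a sketch, assuming the arm64 hotplug path mirrors the ARM
one), migrate_irqs() is intended to be called from __cpu_disable() once the
dying CPU has been removed from the online mask:

    int __cpu_disable(void)
    {
            unsigned int cpu = smp_processor_id();

            /*
             * Take this CPU offline so no new interrupts are routed to
             * it, then push the remaining affine IRQs onto the CPUs that
             * stay online.
             */
            set_cpu_online(cpu, false);
            migrate_irqs();

            return 0;
    }
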
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
new file mode 100644
index 000000000000..263a166291fb
--- /dev/null
+++ b/arch/arm64/kernel/jump_label.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/kernel/jump_label.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <asm/insn.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ bool is_static)
+{
+ void *addr = (void *)entry->code;
+ u32 insn;
+
+ if (type == JUMP_LABEL_ENABLE) {
+ insn = aarch64_insn_gen_branch_imm(entry->code,
+ entry->target,
+ AARCH64_INSN_BRANCH_NOLINK);
+ } else {
+ insn = aarch64_insn_gen_nop();
+ }
+
+ if (is_static)
+ aarch64_insn_patch_text_nosync(addr, insn);
+ else
+ aarch64_insn_patch_text(&addr, &insn, 1);
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, false);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, true);
+}
+
+#endif /* HAVE_JUMP_LABEL */
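
To show what these hooks drive (a sketch; the generic static-key code in
kernel/jump_label.c calls arch_jump_label_transform() whenever a key
changes state), a typical branch site looks like:

    #include <linux/jump_label.h>

    static struct static_key my_feature = STATIC_KEY_INIT_FALSE;

    static void feature_slow_path(void)
    {
            /* hypothetical, rarely taken work */
    }

    void hot_path(void)
    {
            /*
             * Compiled as a NOP until the key is enabled, then patched
             * into a branch by __arch_jump_label_transform().
             */
            if (static_key_false(&my_feature))
                    feature_slow_path();
    }

    void enable_feature(void)
    {
            static_key_slow_inc(&my_feature);	/* patches every site */
    }
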
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
new file mode 100644
index 000000000000..75c9cf1aafee
--- /dev/null
+++ b/arch/arm64/kernel/kgdb.c
@@ -0,0 +1,336 @@
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/kernel/kgdb.c
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/kgdb.h>
+#include <asm/traps.h>
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ { "x0", 8, offsetof(struct pt_regs, regs[0])},
+ { "x1", 8, offsetof(struct pt_regs, regs[1])},
+ { "x2", 8, offsetof(struct pt_regs, regs[2])},
+ { "x3", 8, offsetof(struct pt_regs, regs[3])},
+ { "x4", 8, offsetof(struct pt_regs, regs[4])},
+ { "x5", 8, offsetof(struct pt_regs, regs[5])},
+ { "x6", 8, offsetof(struct pt_regs, regs[6])},
+ { "x7", 8, offsetof(struct pt_regs, regs[7])},
+ { "x8", 8, offsetof(struct pt_regs, regs[8])},
+ { "x9", 8, offsetof(struct pt_regs, regs[9])},
+ { "x10", 8, offsetof(struct pt_regs, regs[10])},
+ { "x11", 8, offsetof(struct pt_regs, regs[11])},
+ { "x12", 8, offsetof(struct pt_regs, regs[12])},
+ { "x13", 8, offsetof(struct pt_regs, regs[13])},
+ { "x14", 8, offsetof(struct pt_regs, regs[14])},
+ { "x15", 8, offsetof(struct pt_regs, regs[15])},
+ { "x16", 8, offsetof(struct pt_regs, regs[16])},
+ { "x17", 8, offsetof(struct pt_regs, regs[17])},
+ { "x18", 8, offsetof(struct pt_regs, regs[18])},
+ { "x19", 8, offsetof(struct pt_regs, regs[19])},
+ { "x20", 8, offsetof(struct pt_regs, regs[20])},
+ { "x21", 8, offsetof(struct pt_regs, regs[21])},
+ { "x22", 8, offsetof(struct pt_regs, regs[22])},
+ { "x23", 8, offsetof(struct pt_regs, regs[23])},
+ { "x24", 8, offsetof(struct pt_regs, regs[24])},
+ { "x25", 8, offsetof(struct pt_regs, regs[25])},
+ { "x26", 8, offsetof(struct pt_regs, regs[26])},
+ { "x27", 8, offsetof(struct pt_regs, regs[27])},
+ { "x28", 8, offsetof(struct pt_regs, regs[28])},
+ { "x29", 8, offsetof(struct pt_regs, regs[29])},
+ { "x30", 8, offsetof(struct pt_regs, regs[30])},
+ { "sp", 8, offsetof(struct pt_regs, sp)},
+ { "pc", 8, offsetof(struct pt_regs, pc)},
+ { "pstate", 8, offsetof(struct pt_regs, pstate)},
+ { "v0", 16, -1 },
+ { "v1", 16, -1 },
+ { "v2", 16, -1 },
+ { "v3", 16, -1 },
+ { "v4", 16, -1 },
+ { "v5", 16, -1 },
+ { "v6", 16, -1 },
+ { "v7", 16, -1 },
+ { "v8", 16, -1 },
+ { "v9", 16, -1 },
+ { "v10", 16, -1 },
+ { "v11", 16, -1 },
+ { "v12", 16, -1 },
+ { "v13", 16, -1 },
+ { "v14", 16, -1 },
+ { "v15", 16, -1 },
+ { "v16", 16, -1 },
+ { "v17", 16, -1 },
+ { "v18", 16, -1 },
+ { "v19", 16, -1 },
+ { "v20", 16, -1 },
+ { "v21", 16, -1 },
+ { "v22", 16, -1 },
+ { "v23", 16, -1 },
+ { "v24", 16, -1 },
+ { "v25", 16, -1 },
+ { "v26", 16, -1 },
+ { "v27", 16, -1 },
+ { "v28", 16, -1 },
+ { "v29", 16, -1 },
+ { "v30", 16, -1 },
+ { "v31", 16, -1 },
+ { "fpsr", 4, -1 },
+ { "fpcr", 4, -1 },
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ else
+ memset(mem, 0, dbg_reg_def[regno].size);
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+ struct pt_regs *thread_regs;
+
+ /* Initialize to zero */
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+ thread_regs = task_pt_regs(task);
+ memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->pc = pc;
+}
+
+static int compiled_break;
+
+static void kgdb_arch_update_addr(struct pt_regs *regs,
+ char *remcom_in_buffer)
+{
+ unsigned long addr;
+ char *ptr;
+
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ kgdb_arch_set_pc(regs, addr);
+ else if (compiled_break == 1)
+ kgdb_arch_set_pc(regs, regs->pc + 4);
+
+ compiled_break = 0;
+}
+
+int kgdb_arch_handle_exception(int exception_vector, int signo,
+ int err_code, char *remcom_in_buffer,
+ char *remcom_out_buffer,
+ struct pt_regs *linux_regs)
+{
+ int err;
+
+ switch (remcom_in_buffer[0]) {
+ case 'D':
+ case 'k':
+ /*
+ * Packet D (Detach), k (kill). No special handling
+ * is required here. Handle same as c packet.
+ */
+ case 'c':
+ /*
+		 * Packet c (Continue) resumes execution.
+		 * Try to read an optional address parameter and, if one is
+		 * present, set the PC to it. If this was a compiled
+		 * breakpoint, we need to move to the next instruction, else
+		 * we will just hit the same breakpoint over and over again.
+ */
+ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+ kgdb_single_step = 0;
+
+ /*
+ * Received continue command, disable single step
+ */
+ if (kernel_active_single_step())
+ kernel_disable_single_step();
+
+ err = 0;
+ break;
+ case 's':
+ /*
+		 * Update the step address with the address passed in the
+		 * step packet. On debug exception return, the PC is copied
+		 * to the ELR, so just update the PC. If no step address is
+		 * passed, resume from the address the PC points to and do
+		 * not update the PC.
+ */
+ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
+ atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
+ kgdb_single_step = 1;
+
+ /*
+ * Enable single step handling
+ */
+ if (!kernel_active_single_step())
+ kernel_enable_single_step(linux_regs);
+ err = 0;
+ break;
+ default:
+ err = -1;
+ }
+ return err;
+}
+
+static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+ return 0;
+}
+
+static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ compiled_break = 1;
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+
+ return 0;
+}
+
+static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+ return 0;
+}
+
+static struct break_hook kgdb_brkpt_hook = {
+ .esr_mask = 0xffffffff,
+ .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
+ .fn = kgdb_brk_fn
+};
+
+static struct break_hook kgdb_compiled_brkpt_hook = {
+ .esr_mask = 0xffffffff,
+ .esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
+ .fn = kgdb_compiled_brk_fn
+};
+
+static struct step_hook kgdb_step_hook = {
+ .fn = kgdb_step_brk_fn
+};
+
+static void kgdb_call_nmi_hook(void *ignored)
+{
+ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+ local_irq_enable();
+ smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+ local_irq_disable();
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+ struct pt_regs *regs = args->regs;
+
+ if (kgdb_handle_exception(1, args->signr, cmd, regs))
+ return NOTIFY_DONE;
+ return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __kgdb_notify(ptr, cmd);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_notify,
+ /*
+ * Want to be lowest priority
+ */
+ .priority = -INT_MAX,
+};
+
+/*
+ * kgdb_arch_init - Perform any architecture-specific initialization.
+ * This function will handle the initialization of any
+ * architecture-specific callbacks.
+ */
+int kgdb_arch_init(void)
+{
+ int ret = register_die_notifier(&kgdb_notifier);
+
+ if (ret != 0)
+ return ret;
+
+ register_break_hook(&kgdb_brkpt_hook);
+ register_break_hook(&kgdb_compiled_brkpt_hook);
+ register_step_hook(&kgdb_step_hook);
+ return 0;
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture-specific uninitialization.
+ * This function will handle the uninitialization of any
+ * architecture-specific callbacks, for dynamic registration and
+ * unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+ unregister_break_hook(&kgdb_brkpt_hook);
+ unregister_break_hook(&kgdb_compiled_brkpt_hook);
+ unregister_step_hook(&kgdb_step_hook);
+ unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * A64 instructions are always little-endian, so the break
+ * instruction below is encoded in LE byte order.
+ */
+struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {
+ KGDB_DYN_BRK_INS_BYTE0,
+ KGDB_DYN_BRK_INS_BYTE1,
+ KGDB_DYN_BRK_INS_BYTE2,
+ KGDB_DYN_BRK_INS_BYTE3,
+ }
+};
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 8b69ecb1d8bc..7787208e8cc6 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -27,6 +27,9 @@
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
+
+#include <asm/unistd32.h>
+
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -35,33 +38,32 @@ __kuser_cmpxchg64: // 0xffff0f60
.inst 0xe92d00f0 // push {r4, r5, r6, r7}
.inst 0xe1c040d0 // ldrd r4, r5, [r0]
.inst 0xe1c160d0 // ldrd r6, r7, [r1]
- .inst 0xf57ff05f // dmb sy
.inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
.inst 0xe0303004 // eors r3, r0, r4
.inst 0x00313005 // eoreqs r3, r1, r5
- .inst 0x01a23f96 // strexdeq r3, r6, [r2]
+ .inst 0x01a23e96 // stlexdeq r3, r6, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffff9 // beq 1b
- .inst 0xf57ff05f // dmb sy
+ .inst 0xf57ff05b // dmb ish
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
.inst 0xe12fff1e // bx lr
.align 5
__kuser_memory_barrier: // 0xffff0fa0
- .inst 0xf57ff05f // dmb sy
+ .inst 0xf57ff05b // dmb ish
.inst 0xe12fff1e // bx lr
.align 5
__kuser_cmpxchg: // 0xffff0fc0
- .inst 0xf57ff05f // dmb sy
.inst 0xe1923f9f // 1: ldrex r3, [r2]
.inst 0xe0533000 // subs r3, r3, r0
- .inst 0x01823f91 // strexeq r3, r1, [r2]
+ .inst 0x01823e91 // stlexeq r3, r1, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffffa // beq 1b
+ .inst 0xf57ff05b // dmb ish
.inst 0xe2730000 // rsbs r0, r3, #0
- .inst 0xeaffffef // b <__kuser_memory_barrier>
+ .inst 0xe12fff1e // bx lr
.align 5
__kuser_get_tls: // 0xffff0fe0
@@ -75,3 +77,42 @@ __kuser_helper_version: // 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
+
+/*
+ * AArch32 sigreturn code
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+ .globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+ /*
+	 * ARM code
+ */
+ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
+
+ /*
+ * Thumb code
+ */
+	.byte __NR_compat_sigreturn, 0x27	// mov	r7, #__NR_compat_sigreturn
+	.byte __NR_compat_sigreturn, 0xdf	// svc	#__NR_compat_sigreturn
+
+ /*
+ * ARM code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
+
+ /*
+ * Thumb code
+ */
+	.byte __NR_compat_rt_sigreturn, 0x27	// mov	r7, #__NR_compat_rt_sigreturn
+	.byte __NR_compat_rt_sigreturn, 0xdf	// svc	#__NR_compat_rt_sigreturn
+
+ .globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
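
Decoding one of the byte sequences above (a worked example, not part of
this diff): stored little-endian, the four bytes {nr, 0x70, 0xa0, 0xe3}
assemble to the A32 word 0xe3a070nn ("mov r7, #nr"), and {nr, 0x00, 0x00,
0xef} to 0xef0000nn ("svc #nr"); the same words as 32-bit constants:

    u32 mov_r7 = 0xe3a07000 | __NR_compat_sigreturn;	/* mov r7, #nr */
    u32 svc_nr = 0xef000000 | __NR_compat_sigreturn;	/* svc #nr     */
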
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index ca0e3d55da99..df08a6e0287d 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,6 +25,10 @@
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
+#include <asm/insn.h>
+
+#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
+#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
void *module_alloc(unsigned long size)
{
@@ -94,25 +98,18 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
return 0;
}
-enum aarch64_imm_type {
- INSN_IMM_MOVNZ,
- INSN_IMM_MOVK,
- INSN_IMM_ADR,
- INSN_IMM_26,
- INSN_IMM_19,
- INSN_IMM_16,
- INSN_IMM_14,
- INSN_IMM_12,
- INSN_IMM_9,
-};
-
-static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
+static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
+ int lsb, enum aarch64_insn_imm_type imm_type)
{
- u32 immlo, immhi, lomask, himask, mask;
- int shift;
+ u64 imm, limit = 0;
+ s64 sval;
+ u32 insn = le32_to_cpu(*(u32 *)place);
+
+ sval = do_reloc(op, place, val);
+ sval >>= lsb;
+ imm = sval & 0xffff;
- switch (type) {
- case INSN_IMM_MOVNZ:
+ if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
/*
* For signed MOVW relocations, we have to manipulate the
* instruction encoding depending on whether or not the
@@ -131,70 +128,12 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
*/
imm = ~imm;
}
- case INSN_IMM_MOVK:
- mask = BIT(16) - 1;
- shift = 5;
- break;
- case INSN_IMM_ADR:
- lomask = 0x3;
- himask = 0x7ffff;
- immlo = imm & lomask;
- imm >>= 2;
- immhi = imm & himask;
- imm = (immlo << 24) | (immhi);
- mask = (lomask << 24) | (himask);
- shift = 5;
- break;
- case INSN_IMM_26:
- mask = BIT(26) - 1;
- shift = 0;
- break;
- case INSN_IMM_19:
- mask = BIT(19) - 1;
- shift = 5;
- break;
- case INSN_IMM_16:
- mask = BIT(16) - 1;
- shift = 5;
- break;
- case INSN_IMM_14:
- mask = BIT(14) - 1;
- shift = 5;
- break;
- case INSN_IMM_12:
- mask = BIT(12) - 1;
- shift = 10;
- break;
- case INSN_IMM_9:
- mask = BIT(9) - 1;
- shift = 12;
- break;
- default:
- pr_err("encode_insn_immediate: unknown immediate encoding %d\n",
- type);
- return 0;
+ imm_type = AARCH64_INSN_IMM_MOVK;
}
- /* Update the immediate field. */
- insn &= ~(mask << shift);
- insn |= (imm & mask) << shift;
-
- return insn;
-}
-
-static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, enum aarch64_imm_type imm_type)
-{
- u64 imm, limit = 0;
- s64 sval;
- u32 insn = *(u32 *)place;
-
- sval = do_reloc(op, place, val);
- sval >>= lsb;
- imm = sval & 0xffff;
-
/* Update the instruction with the new encoding. */
- *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)place = cpu_to_le32(insn);
/* Shift out the immediate field. */
sval >>= 16;
@@ -203,9 +142,9 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
* For unsigned immediates, the overflow check is straightforward.
* For signed immediates, the sign bit is actually the bit past the
* most significant bit of the field.
- * The INSN_IMM_16 immediate type is unsigned.
+ * The AARCH64_INSN_IMM_16 immediate type is unsigned.
*/
- if (imm_type != INSN_IMM_16) {
+ if (imm_type != AARCH64_INSN_IMM_16) {
sval++;
limit++;
}
@@ -218,11 +157,11 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
}
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, int len, enum aarch64_imm_type imm_type)
+ int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
u64 imm, imm_mask;
s64 sval;
- u32 insn = *(u32 *)place;
+ u32 insn = le32_to_cpu(*(u32 *)place);
/* Calculate the relocation value. */
sval = do_reloc(op, place, val);
@@ -233,7 +172,8 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
imm = sval & imm_mask;
/* Update the instruction's immediate field. */
- *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)place = cpu_to_le32(insn);
/*
* Extract the upper value bits (including the sign bit) and
@@ -315,125 +255,125 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
overflow_check = false;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_SABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_SABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_SABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G0_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G0:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G1_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G1:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G2_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G2:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
/* Immediate instruction relocations. */
case R_AARCH64_LD_PREL_LO19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- INSN_IMM_19);
+ AARCH64_INSN_IMM_19);
break;
case R_AARCH64_ADR_PREL_LO21:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
- INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
- INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST16_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST32_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST64_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST128_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_TSTBR14:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
- INSN_IMM_14);
+ AARCH64_INSN_IMM_14);
break;
case R_AARCH64_CONDBR19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- INSN_IMM_19);
+ AARCH64_INSN_IMM_19);
break;
case R_AARCH64_JUMP26:
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
- INSN_IMM_26);
+ AARCH64_INSN_IMM_26);
break;
default:
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c40cdf8..dfcd8fadde3c 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -22,6 +22,7 @@
#include <linux/bitmap.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
@@ -107,7 +108,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
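
The new bounds check matters because config arrives from userspace via perf_event_open(). A minimal model of the guarded lookup (table size and contents hypothetical):

    #include <errno.h>
    #include <stdint.h>

    #define EVENT_MAX 8
    #define UNSUPPORTED (-1)

    static const int event_map[EVENT_MAX] = { 3, 4, UNSUPPORTED, 5,
                                              UNSUPPORTED, UNSUPPORTED, 6, 7 };

    static int map_event(uint64_t config)
    {
            if (config >= EVENT_MAX)        /* untrusted index from userspace */
                    return -EINVAL;
            return event_map[config] == UNSUPPORTED ? -ENOENT : event_map[config];
    }

    int main(void)
    {
            return map_event(1ULL << 40) == -EINVAL ? 0 : 1;
    }
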
@@ -317,7 +323,13 @@ validate_event(struct pmu_hw_events *hw_events,
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+ if (is_software_event(event))
+ return 1;
+
+ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ return 1;
+
+ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
@@ -352,26 +364,53 @@ validate_group(struct perf_event *event)
}
static void
+armpmu_disable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ disable_percpu_irq(irq);
+}
+
+static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
- int i, irq, irqs;
+ int irq;
+ unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (!irqs)
+ return;
- for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
- continue;
- irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
- free_irq(irq, armpmu);
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0)
+ return;
+
+ if (irq_is_percpu(irq)) {
+ on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
+ free_percpu_irq(irq, &cpu_hw_events);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq > 0)
+ free_irq(irq, armpmu);
+ }
}
}
+static void
+armpmu_enable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
- int i, err, irq, irqs;
+ int err, irq;
+ unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
if (!pmu_device) {
@@ -380,39 +419,59 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
}
irqs = min(pmu_device->num_resources, num_possible_cpus());
- if (irqs < 1) {
+ if (!irqs) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
}
- for (i = 0; i < irqs; ++i) {
- err = 0;
- irq = platform_get_irq(pmu_device, i);
- if (irq < 0)
- continue;
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0) {
+ pr_err("failed to get valid irq for PMU device\n");
+ return -ENODEV;
+ }
- /*
- * If we have a single PMU interrupt that we can't shift,
- * assume that we're running on a uniprocessor machine and
- * continue. Otherwise, continue without this interrupt.
- */
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
- pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, i);
- continue;
- }
+ if (irq_is_percpu(irq)) {
+ err = request_percpu_irq(irq, armpmu->handle_irq,
+ "arm-pmu", &cpu_hw_events);
- err = request_irq(irq, armpmu->handle_irq,
- IRQF_NOBALANCING,
- "arm-pmu", armpmu);
if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
+ pr_err("unable to request percpu IRQ%d for ARM PMU counters\n",
+ irq);
armpmu_release_hardware(armpmu);
return err;
}
- cpumask_set_cpu(i, &armpmu->active_irqs);
+ on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ err = 0;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq <= 0)
+ continue;
+
+ /*
+ * If we have a single PMU interrupt that we can't shift,
+ * assume that we're running on a uniprocessor machine and
+ * continue. Otherwise, continue without this interrupt.
+ */
+ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, i);
+ continue;
+ }
+
+ err = request_irq(irq, armpmu->handle_irq,
+ IRQF_NOBALANCING,
+ "arm-pmu", armpmu);
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ armpmu_release_hardware(armpmu);
+ return err;
+ }
+
+ cpumask_set_cpu(i, &armpmu->active_irqs);
+ }
}
return 0;
@@ -773,7 +832,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
+#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
/*
@@ -1288,8 +1347,8 @@ early_initcall(init_hw_perf_events);
* Callchain handling code.
*/
struct frame_tail {
- struct frame_tail __user *fp;
- unsigned long lr;
+ struct frame_tail __user *fp;
+ unsigned long lr;
} __attribute__((packed));
/*
@@ -1326,22 +1385,84 @@ user_backtrace(struct frame_tail __user *tail,
return buftail.fp;
}
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+ compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
+ u32 sp;
+ u32 lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+compat_user_backtrace(struct compat_frame_tail __user *tail,
+ struct perf_callchain_entry *entry)
+{
+ struct compat_frame_tail buftail;
+ unsigned long err;
+
+	/* Also check accessibility of one struct compat_frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ perf_callchain_store(entry, buftail.lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail + 1 >= (struct compat_frame_tail __user *)
+ compat_ptr(buftail.fp))
+ return NULL;
+
+ return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
+
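
A structural sketch (compile-only, not an executable test) of the walk above and its two termination rules: a frame record that cannot be read ends the walk, and each frame must progress strictly towards higher addresses. The struct shape mirrors compat_frame_tail; the user-access checks are simplified away.

    #include <stdint.h>

    struct compat_frame {
            uint32_t fp, sp, lr;
    } __attribute__((packed));

    static int walk(const struct compat_frame *tail, int max_depth)
    {
            int depth = 0;

            while (tail && depth < max_depth) {
                    depth++;        /* tail->lr is one callchain entry */
                    if (!tail->fp)
                            break;  /* models the access_ok()/copy failure */
                    const struct compat_frame *next =
                            (const struct compat_frame *)(uintptr_t)tail->fp - 1;
                    if (next <= tail)       /* must move to higher addresses */
                            break;
                    tail = next;
            }
            return depth;
    }
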
void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
- struct frame_tail __user *tail;
-
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->pc);
- tail = (struct frame_tail __user *)regs->regs[29];
- while (entry->nr < PERF_MAX_STACK_DEPTH &&
- tail && !((unsigned long)tail & 0xf))
- tail = user_backtrace(tail, entry);
+ if (!compat_user_mode(regs)) {
+ /* AARCH64 mode */
+ struct frame_tail __user *tail;
+
+ tail = (struct frame_tail __user *)regs->regs[29];
+
+ while (entry->nr < PERF_MAX_STACK_DEPTH &&
+ tail && !((unsigned long)tail & 0xf))
+ tail = user_backtrace(tail, entry);
+ } else {
+#ifdef CONFIG_COMPAT
+ /* AARCH32 compat mode */
+ struct compat_frame_tail __user *tail;
+
+ tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+ tail && !((unsigned long)tail & 0x3))
+ tail = compat_user_backtrace(tail, entry);
+#endif
+ }
}
/*
@@ -1369,6 +1490,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
+
walk_stackframe(&frame, callchain_trace, entry);
}
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
new file mode 100644
index 000000000000..422ebd63b619
--- /dev/null
+++ b/arch/arm64/kernel/perf_regs.c
@@ -0,0 +1,46 @@
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+
+#include <asm/compat.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX))
+ return 0;
+
+ /*
+ * Compat (i.e. 32 bit) mode:
+ * - PC has been set in the pt_regs struct in kernel_entry,
+ * - Handle SP and LR here.
+ */
+ if (compat_user_mode(regs)) {
+ if ((u32)idx == PERF_REG_ARM64_SP)
+ return regs->compat_sp;
+ if ((u32)idx == PERF_REG_ARM64_LR)
+ return regs->compat_lr;
+ }
+
+ return regs->regs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ if (is_compat_thread(task_thread_info(task)))
+ return PERF_SAMPLE_REGS_ABI_32;
+ else
+ return PERF_SAMPLE_REGS_ABI_64;
+}
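
A userspace model of the reserved-mask check in perf_reg_validate(); the PERF_REG_ARM64_MAX value of 33 (x0-x29 plus lr, sp and pc) is an assumption here, taken from the usual AArch64 perf register layout.

    #include <stdint.h>
    #include <stdio.h>

    #define PERF_REG_ARM64_MAX 33   /* assumption: x0-x29, lr, sp, pc */
    #define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1))

    static int validate(uint64_t mask)
    {
            if (!mask || (mask & REG_RESERVED))
                    return -1;      /* stands in for -EINVAL */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", validate(1ULL << 31));   /* in range: 0 */
            printf("%d\n", validate(1ULL << 40));   /* reserved bit: -1 */
            printf("%d\n", validate(0));            /* empty mask: -1 */
            return 0;
    }
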
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 46f02c3b5015..15c91e25d045 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -20,6 +20,7 @@
#include <stdarg.h>
+#include <linux/compat.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -33,6 +34,7 @@
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
+#include <linux/cpuidle.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
@@ -71,8 +73,17 @@ static void setup_restart(void)
void soft_restart(unsigned long addr)
{
+ typedef void (*phys_reset_t)(unsigned long);
+ phys_reset_t phys_reset;
+
setup_restart();
- cpu_reset(addr);
+
+ /* Switch to the identity mapping */
+ phys_reset = (phys_reset_t)virt_to_phys(cpu_reset);
+ phys_reset(addr);
+
+ /* Should never get here */
+ BUG();
}
/*
@@ -98,10 +109,19 @@ void arch_cpu_idle(void)
* This should do all the clock switching and wait for interrupt
* tricks
*/
- cpu_do_idle();
- local_irq_enable();
+ if (cpuidle_idle_call()) {
+ cpu_do_idle();
+ local_irq_enable();
+ }
}
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
+#endif
+
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
@@ -143,15 +163,26 @@ void machine_restart(char *cmd)
void __show_regs(struct pt_regs *regs)
{
- int i;
+ int i, top_reg;
+ u64 lr, sp;
+
+ if (compat_user_mode(regs)) {
+ lr = regs->compat_lr;
+ sp = regs->compat_sp;
+ top_reg = 12;
+ } else {
+ lr = regs->regs[30];
+ sp = regs->sp;
+ top_reg = 29;
+ }
show_regs_print_info(KERN_DEFAULT);
print_symbol("PC is at %s\n", instruction_pointer(regs));
- print_symbol("LR is at %s\n", regs->regs[30]);
+ print_symbol("LR is at %s\n", lr);
printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
- regs->pc, regs->regs[30], regs->pstate);
- printk("sp : %016llx\n", regs->sp);
- for (i = 29; i >= 0; i--) {
+ regs->pc, lr, regs->pstate);
+ printk("sp : %016llx\n", sp);
+ for (i = top_reg; i >= 0; i--) {
printk("x%-2d: %016llx ", i, regs->regs[i]);
if (i % 2 == 0)
printk("\n");
@@ -184,7 +215,7 @@ void release_thread(struct task_struct *dead_task)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- fpsimd_save_state(&current->thread.fpsimd_state);
+ fpsimd_preserve_current_state();
*dst = *src;
return 0;
}
@@ -279,7 +310,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* Complete any pending TLB or cache maintenance on this CPU in case
* the thread migrates to a different CPU.
*/
- dsb();
+ dsb(ish);
/* the actual thread switch */
last = cpu_switch_to(prev, next);
@@ -290,6 +321,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -297,9 +329,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame))
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 14f73c445ff5..0e32ab453e5b 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -15,14 +15,37 @@
#define pr_fmt(fmt) "psci: " fmt
+#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
#include <asm/compiler.h>
+#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
-struct psci_operations psci_ops;
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+};
+
+static struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u64, u64, u64, u64);
@@ -34,6 +57,8 @@ enum psci_function {
PSCI_FN_MAX,
};
+static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
+
static u32 psci_function_id[PSCI_FN_MAX];
#define PSCI_RET_SUCCESS 0
@@ -74,6 +99,17 @@ static u32 psci_power_state_pack(struct psci_power_state state)
<< PSCI_POWER_STATE_AFFL_SHIFT);
}
+static void psci_power_state_unpack(u32 power_state,
+ struct psci_power_state *state)
+{
+ state->id = (power_state >> PSCI_POWER_STATE_ID_SHIFT)
+ & PSCI_POWER_STATE_ID_MASK;
+ state->type = (power_state >> PSCI_POWER_STATE_TYPE_SHIFT)
+ & PSCI_POWER_STATE_TYPE_MASK;
+ state->affinity_level = (power_state >> PSCI_POWER_STATE_AFFL_SHIFT)
+ & PSCI_POWER_STATE_AFFL_MASK;
+}
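
The unpack helper mirrors psci_power_state_pack() above. A self-contained round-trip test, with shift/mask values assumed from the PSCI power_state layout (ID in bits [15:0], TYPE in bit 16, AFFL in bits [25:24]):

    #include <assert.h>
    #include <stdint.h>

    #define ID_SHIFT   0
    #define ID_MASK    0xffff
    #define TYPE_SHIFT 16
    #define TYPE_MASK  0x1
    #define AFFL_SHIFT 24
    #define AFFL_MASK  0x3

    struct state { uint16_t id; uint8_t type; uint8_t affinity_level; };

    static uint32_t pack(struct state s)
    {
            return ((uint32_t)(s.id & ID_MASK) << ID_SHIFT) |
                   ((uint32_t)(s.type & TYPE_MASK) << TYPE_SHIFT) |
                   ((uint32_t)(s.affinity_level & AFFL_MASK) << AFFL_SHIFT);
    }

    static struct state unpack(uint32_t v)
    {
            struct state s = {
                    .id = (v >> ID_SHIFT) & ID_MASK,
                    .type = (v >> TYPE_SHIFT) & TYPE_MASK,
                    .affinity_level = (v >> AFFL_SHIFT) & AFFL_MASK,
            };
            return s;
    }

    int main(void)
    {
            struct state s = { .id = 2, .type = 1, .affinity_level = 1 };
            struct state r = unpack(pack(s));

            assert(r.id == s.id && r.type == s.type &&
                   r.affinity_level == s.affinity_level);
            return 0;
    }
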
+
/*
* The following two functions are invoked via the invoke_psci_fn pointer
* and will not be inlined, allowing us to piggyback on the AAPCS.
@@ -156,22 +192,91 @@ static const struct of_device_id psci_of_match[] __initconst = {
{},
};
-int __init psci_init(void)
+int __init psci_dt_register_idle_states(struct cpuidle_driver *drv,
+ struct device_node *state_nodes[])
+{
+ int cpu, i;
+ struct psci_power_state *psci_states;
+ const struct cpu_operations *cpu_ops_ptr;
+
+ if (!state_nodes)
+ return -EINVAL;
+ /*
+	 * This is belt-and-braces: make sure that if psci is the
+	 * protocol specified for idle, the cpu_ops have been
+	 * initialized to psci operations. Anything else is
+ * a recipe for mayhem.
+ */
+ for_each_cpu(cpu, drv->cpumask) {
+ cpu_ops_ptr = cpu_ops[cpu];
+ if (WARN_ON(!cpu_ops_ptr || strcmp(cpu_ops_ptr->name, "psci")))
+ return -EOPNOTSUPP;
+ }
+
+ psci_states = kcalloc(drv->state_count, sizeof(*psci_states),
+ GFP_KERNEL);
+
+ if (!psci_states) {
+ pr_warn("psci idle state allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for_each_cpu(cpu, drv->cpumask) {
+ if (per_cpu(psci_power_state, cpu)) {
+ pr_warn("idle states already initialized on cpu %u\n",
+ cpu);
+ continue;
+ }
+ per_cpu(psci_power_state, cpu) = psci_states;
+ }
+
+ for (i = 0; i < drv->state_count; i++) {
+ u32 psci_power_state;
+
+ if (!state_nodes[i]) {
+ /*
+ * An index with a missing node pointer falls back to
+ * simple STANDBYWFI
+ */
+ psci_states[i].type = PSCI_POWER_STATE_TYPE_STANDBY;
+ continue;
+ }
+
+ if (of_property_read_u32(state_nodes[i], "entry-method-param",
+ &psci_power_state)) {
+ pr_warn(" * %s missing entry-method-param property\n",
+ state_nodes[i]->full_name);
+ /*
+ * If entry-method-param property is missing, fall
+ * back to STANDBYWFI state
+ */
+ psci_states[i].type = PSCI_POWER_STATE_TYPE_STANDBY;
+ continue;
+ }
+
+ pr_debug("psci-power-state %#x index %u\n",
+ psci_power_state, i);
+ psci_power_state_unpack(psci_power_state, &psci_states[i]);
+ }
+
+ return 0;
+}
+
+void __init psci_init(void)
{
struct device_node *np;
const char *method;
u32 id;
- int err = 0;
np = of_find_matching_node(NULL, psci_of_match);
if (!np)
- return -ENODEV;
+ return;
pr_info("probing function IDs from device-tree\n");
if (of_property_read_string(np, "method", &method)) {
pr_warning("missing \"method\" property\n");
- err = -ENXIO;
goto out_put_node;
}
@@ -181,7 +286,6 @@ int __init psci_init(void)
invoke_psci_fn = __invoke_psci_fn_smc;
} else {
pr_warning("invalid \"method\" property: %s\n", method);
- err = -EINVAL;
goto out_put_node;
}
@@ -207,5 +311,85 @@ int __init psci_init(void)
out_put_node:
of_node_put(np);
+ return;
+}
+
+#ifdef CONFIG_SMP
+
+static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
+{
+ return 0;
+}
+
+static int __init cpu_psci_cpu_prepare(unsigned int cpu)
+{
+ if (!psci_ops.cpu_on) {
+ pr_err("no cpu_on method, not booting CPU%d\n", cpu);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int cpu_psci_cpu_boot(unsigned int cpu)
+{
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+ if (err)
+ pr_err("failed to boot CPU%d (%d)\n", cpu, err);
+
return err;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpu_psci_cpu_disable(unsigned int cpu)
+{
+ /* Fail early if we don't have CPU_OFF support */
+ if (!psci_ops.cpu_off)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void cpu_psci_cpu_die(unsigned int cpu)
+{
+ int ret;
+ /*
+ * There are no known implementations of PSCI actually using the
+	 * power state field; pass a sensible default for now.
+ */
+ struct psci_power_state state = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+
+ ret = psci_ops.cpu_off(state);
+
+ pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
+}
+#endif
+
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+static int cpu_psci_cpu_suspend(unsigned long index)
+{
+ struct psci_power_state *state = __get_cpu_var(psci_power_state);
+
+ if (!state)
+ return -EOPNOTSUPP;
+
+ return psci_ops.cpu_suspend(state[index], virt_to_phys(cpu_resume));
+}
+#endif
+
+const struct cpu_operations cpu_psci_ops = {
+ .name = "psci",
+ .cpu_init = cpu_psci_cpu_init,
+ .cpu_prepare = cpu_psci_cpu_prepare,
+ .cpu_boot = cpu_psci_cpu_boot,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = cpu_psci_cpu_disable,
+ .cpu_die = cpu_psci_cpu_die,
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+ .cpu_suspend = cpu_psci_cpu_suspend,
+#endif
+};
+
+#endif
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6e1e77f1831c..993cdb79b70e 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -19,6 +19,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -41,6 +42,9 @@
#include <asm/traps.h>
#include <asm/system_misc.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
/*
* TODO: does not yet catch signals sent when the child dies,
* in exit.c or in signal.c.
@@ -53,28 +57,6 @@ void ptrace_disable(struct task_struct *child)
{
}
-/*
- * Handle hitting a breakpoint.
- */
-static int ptrace_break(struct pt_regs *regs)
-{
- siginfo_t info = {
- .si_signo = SIGTRAP,
- .si_errno = 0,
- .si_code = TRAP_BRKPT,
- .si_addr = (void __user *)instruction_pointer(regs),
- };
-
- force_sig_info(SIGTRAP, &info, current);
- return 0;
-}
-
-static int arm64_break_trap(unsigned long addr, unsigned int esr,
- struct pt_regs *regs)
-{
- return ptrace_break(regs);
-}
-
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* Handle hitting a HW-breakpoint.
@@ -236,31 +218,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
{
int err, len, type, disabled = !ctrl.enabled;
- if (disabled) {
- len = 0;
- type = HW_BREAKPOINT_EMPTY;
- } else {
- err = arch_bp_generic_fields(ctrl, &len, &type);
- if (err)
- return err;
-
- switch (note_type) {
- case NT_ARM_HW_BREAK:
- if ((type & HW_BREAKPOINT_X) != type)
- return -EINVAL;
- break;
- case NT_ARM_HW_WATCH:
- if ((type & HW_BREAKPOINT_RW) != type)
- return -EINVAL;
- break;
- default:
+ attr->disabled = disabled;
+ if (disabled)
+ return 0;
+
+ err = arch_bp_generic_fields(ctrl, &len, &type);
+ if (err)
+ return err;
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ if ((type & HW_BREAKPOINT_X) != type)
return -EINVAL;
- }
+ break;
+ case NT_ARM_HW_WATCH:
+ if ((type & HW_BREAKPOINT_RW) != type)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
}
attr->bp_len = len;
attr->bp_type = type;
- attr->disabled = disabled;
return 0;
}
@@ -541,6 +521,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
return ret;
target->thread.fpsimd_state.user_fpsimd = newstate;
+ fpsimd_flush_task_state(target);
return ret;
}
@@ -658,28 +639,27 @@ static int compat_gpr_get(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- void *reg;
+ compat_ulong_t reg;
switch (idx) {
case 15:
- reg = (void *)&task_pt_regs(target)->pc;
+ reg = task_pt_regs(target)->pc;
break;
case 16:
- reg = (void *)&task_pt_regs(target)->pstate;
+ reg = task_pt_regs(target)->pstate;
break;
case 17:
- reg = (void *)&task_pt_regs(target)->orig_x0;
+ reg = task_pt_regs(target)->orig_x0;
break;
default:
- reg = (void *)&task_pt_regs(target)->regs[idx];
+ reg = task_pt_regs(target)->regs[idx];
}
- ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
-
+ ret = copy_to_user(ubuf, &reg, sizeof(reg));
if (ret)
break;
- else
- ubuf += sizeof(compat_ulong_t);
+
+ ubuf += sizeof(reg);
}
return ret;
@@ -707,28 +687,28 @@ static int compat_gpr_set(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- void *reg;
+ compat_ulong_t reg;
+
+ ret = copy_from_user(&reg, ubuf, sizeof(reg));
+ if (ret)
+ return ret;
+
+ ubuf += sizeof(reg);
switch (idx) {
case 15:
- reg = (void *)&newregs.pc;
+ newregs.pc = reg;
break;
case 16:
- reg = (void *)&newregs.pstate;
+ newregs.pstate = reg;
break;
case 17:
- reg = (void *)&newregs.orig_x0;
+ newregs.orig_x0 = reg;
break;
default:
- reg = (void *)&newregs.regs[idx];
+ newregs.regs[idx] = reg;
}
- ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
-
- if (ret)
- goto out;
- else
- ubuf += sizeof(compat_ulong_t);
}
if (valid_user_regs(&newregs.user_regs))
@@ -736,7 +716,6 @@ static int compat_gpr_set(struct task_struct *target,
else
ret = -EINVAL;
-out:
return ret;
}
@@ -790,6 +769,7 @@ static int compat_vfp_set(struct task_struct *target,
uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
}
+ fpsimd_flush_task_state(target);
return ret;
}
@@ -817,33 +797,6 @@ static const struct user_regset_view user_aarch32_view = {
.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
-int aarch32_break_trap(struct pt_regs *regs)
-{
- unsigned int instr;
- bool bp = false;
- void __user *pc = (void __user *)instruction_pointer(regs);
-
- if (compat_thumb_mode(regs)) {
- /* get 16-bit Thumb instruction */
- get_user(instr, (u16 __user *)pc);
- if (instr == AARCH32_BREAK_THUMB2_LO) {
- /* get second half of 32-bit Thumb-2 instruction */
- get_user(instr, (u16 __user *)(pc + 2));
- bp = instr == AARCH32_BREAK_THUMB2_HI;
- } else {
- bp = instr == AARCH32_BREAK_THUMB;
- }
- } else {
- /* 32-bit ARM instruction */
- get_user(instr, (u32 __user *)pc);
- bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
- }
-
- if (bp)
- return ptrace_break(regs);
- return 1;
-}
-
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
compat_ulong_t __user *ret)
{
@@ -874,6 +827,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
compat_ulong_t val)
{
int ret;
+ mm_segment_t old_fs = get_fs();
if (off & 3 || off >= COMPAT_USER_SZ)
return -EIO;
@@ -881,10 +835,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
if (off >= sizeof(compat_elf_gregset_t))
return 0;
+ set_fs(KERNEL_DS);
ret = copy_regset_from_user(tsk, &user_aarch32_view,
REGSET_COMPAT_GPR, off,
sizeof(compat_ulong_t),
&val);
+ set_fs(old_fs);
+
return ret;
}
@@ -1111,45 +1068,49 @@ long arch_ptrace(struct task_struct *child, long request,
return ptrace_request(child, request, addr, data);
}
+enum ptrace_syscall_dir {
+ PTRACE_SYSCALL_ENTER = 0,
+ PTRACE_SYSCALL_EXIT,
+};
-static int __init ptrace_break_init(void)
-{
- hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP,
- TRAP_BRKPT, "ptrace BRK handler");
- return 0;
-}
-core_initcall(ptrace_break_init);
-
-
-asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
+static void tracehook_report_syscall(struct pt_regs *regs,
+ enum ptrace_syscall_dir dir)
{
+ int regno;
unsigned long saved_reg;
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return regs->syscallno;
-
- if (is_compat_task()) {
- /* AArch32 uses ip (r12) for scratch */
- saved_reg = regs->regs[12];
- regs->regs[12] = dir;
- } else {
- /*
- * Save X7. X7 is used to denote syscall entry/exit:
- * X7 = 0 -> entry, = 1 -> exit
- */
- saved_reg = regs->regs[7];
- regs->regs[7] = dir;
- }
+ /*
+ * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
+ * used to denote syscall entry/exit:
+ */
+ regno = (is_compat_task() ? 12 : 7);
+ saved_reg = regs->regs[regno];
+ regs->regs[regno] = dir;
- if (dir)
+ if (dir == PTRACE_SYSCALL_EXIT)
tracehook_report_syscall_exit(regs, 0);
else if (tracehook_report_syscall_entry(regs))
regs->syscallno = ~0UL;
- if (is_compat_task())
- regs->regs[12] = saved_reg;
- else
- regs->regs[7] = saved_reg;
+ regs->regs[regno] = saved_reg;
+}
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, regs->syscallno);
return regs->syscallno;
}
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, regs_return_value(regs));
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+}
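
A sketch of the save/write/restore protocol used by tracehook_report_syscall() above; this is a model of the register dance, not the kernel function itself:

    enum dir { SYSCALL_ENTER = 0, SYSCALL_EXIT = 1 };

    static void report_syscall(unsigned long *regs, int compat, enum dir d)
    {
            int regno = compat ? 12 : 7;    /* ip(r12) on AArch32, x7 on AArch64 */
            unsigned long saved = regs[regno];

            regs[regno] = d;                /* tracer reads 0 = entry, 1 = exit */
            /* tracehook_report_syscall_{entry,exit}() would run here */
            regs[regno] = saved;            /* restore the task's register */
    }
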
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
new file mode 100644
index 000000000000..89102a6ffad5
--- /dev/null
+++ b/arch/arm64/kernel/return_address.c
@@ -0,0 +1,55 @@
+/*
+ * arch/arm64/kernel/return_address.c
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/ftrace.h>
+
+#include <asm/stacktrace.h>
+
+struct return_address_data {
+ unsigned int level;
+ void *addr;
+};
+
+static int save_return_addr(struct stackframe *frame, void *d)
+{
+ struct return_address_data *data = d;
+
+ if (!data->level) {
+ data->addr = (void *)frame->pc;
+ return 1;
+ } else {
+ --data->level;
+ return 0;
+ }
+}
+
+void *return_address(unsigned int level)
+{
+ struct return_address_data data;
+ struct stackframe frame;
+ register unsigned long current_sp asm ("sp");
+
+ data.level = level + 2;
+ data.addr = NULL;
+
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.pc = (unsigned long)return_address; /* dummy */
+
+ walk_stackframe(&frame, save_return_addr, &data);
+
+ if (!data.level)
+ return data.addr;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(return_address);
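
Hypothetical usage: level selects how far up the call chain to look, and the level + 2 in return_address() appears to account for the frames introduced by the walk itself.

    /* hypothetical caller */
    void *caller = return_address(0);       /* PC of our direct caller */
    void *above  = return_address(1);       /* one frame further up, or NULL */
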
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index add6ea616843..e87b5fd07b8c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -41,10 +41,13 @@
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
+#include <linux/efi.h>
+#include <asm/fixmap.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
+#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -53,13 +56,25 @@
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>
+#include <asm/efi.h>
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
-unsigned int elf_hwcap __read_mostly;
+unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP_DEFAULT \
+ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+unsigned int compat_elf_hwcap2 __read_mostly;
+#endif
+
static const char *cpu_name;
static const char *machine_name;
phys_addr_t __fdt_pointer __initdata;
@@ -97,15 +112,95 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
-static void __init setup_processor(void)
+void __init smp_setup_processor_id(void)
{
- struct cpu_info *cpu_info;
+ /*
+	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
+	 * by early use of percpu variables; for example, lockdep will
+	 * access percpu variables inside lock_release.
+ */
+ set_my_cpu_offset(0);
+}
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpu_logical_map(cpu);
+}
+struct mpidr_hash mpidr_hash;
+#ifdef CONFIG_SMP
+/**
+ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
+ * level in order to build a linear index from an
+ * MPIDR value. Resulting algorithm is a collision
+ * free hash carried out through shifting and ORing
+ */
+static void __init smp_build_mpidr_hash(void)
+{
+ u32 i, affinity, fs[4], bits[4], ls;
+ u64 mask = 0;
+ /*
+	 * Pre-scan the list of MPIDRs and filter out bits that do
+	 * not contribute to affinity levels, i.e. they never toggle.
+ */
+ for_each_possible_cpu(i)
+ mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
+ pr_debug("mask of set bits %#llx\n", mask);
/*
- * locate processor in the list of supported processor
- * types. The linker builds this table for us from the
- * entries in arch/arm/mm/proc.S
+ * Find and stash the last and first bit set at all affinity levels to
+ * check how many bits are required to represent them.
*/
+ for (i = 0; i < 4; i++) {
+ affinity = MPIDR_AFFINITY_LEVEL(mask, i);
+ /*
+ * Find the MSB bit and LSB bits position
+ * to determine how many bits are required
+ * to express the affinity level.
+ */
+ ls = fls(affinity);
+ fs[i] = affinity ? ffs(affinity) - 1 : 0;
+ bits[i] = ls - fs[i];
+ }
+ /*
+ * An index can be created from the MPIDR_EL1 by isolating the
+ * significant bits at each affinity level and by shifting
+	 * them in order to compress the 32-bit value space to a
+ * compressed set of values. This is equivalent to hashing
+ * the MPIDR_EL1 through shifting and ORing. It is a collision free
+ * hash though not minimal since some levels might contain a number
+ * of CPUs that is not an exact power of 2 and their bit
+ * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
+ */
+ mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
+ mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
+ mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
+ (bits[1] + bits[0]);
+ mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
+ fs[3] - (bits[2] + bits[1] + bits[0]);
+ mpidr_hash.mask = mask;
+ mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
+ pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
+ mpidr_hash.shift_aff[0],
+ mpidr_hash.shift_aff[1],
+ mpidr_hash.shift_aff[2],
+ mpidr_hash.shift_aff[3],
+ mpidr_hash.mask,
+ mpidr_hash.bits);
+ /*
+ * 4x is an arbitrary value used to warn on a hash table much bigger
+ * than expected on most systems.
+ */
+ if (mpidr_hash_size() > 4 * num_possible_cpus())
+ pr_warn("Large number of MPIDR hash buckets detected\n");
+ __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
+}
+#endif
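
A userspace model of the resulting hash, matching the compute_mpidr_hash assembler macro added in sleep.S later in this series (affinity byte positions assumed: aff0-aff2 in bits [23:0], aff3 in bits [39:32]):

    #include <stdint.h>

    static uint32_t mpidr_hash_model(uint64_t mpidr, uint64_t mask,
                                     const unsigned shift_aff[4])
    {
            uint64_t m = mpidr & mask;

            return ((m & 0xffULL)         >> shift_aff[0]) |
                   ((m & 0xff00ULL)       >> shift_aff[1]) |
                   ((m & 0xff0000ULL)     >> shift_aff[2]) |
                   ((m & 0xff00000000ULL) >> shift_aff[3]);
    }

    int main(void)
    {
            /* two clusters of two CPUs: MPIDRs 0x0, 0x1, 0x100, 0x101,
             * so only bit 0 of aff0 and bit 8 of aff1 ever toggle */
            const unsigned shift_aff[4] = { 0, 7, 16, 32 };

            return mpidr_hash_model(0x101, 0x101, shift_aff) == 3 ? 0 : 1;
    }
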
+
+static void __init setup_processor(void)
+{
+ struct cpu_info *cpu_info;
+ u64 features, block;
+
cpu_info = lookup_processor_type(read_cpuid_id());
if (!cpu_info) {
printk("CPU configuration botched (ID %08x), unable to continue.\n",
@@ -118,8 +213,71 @@ static void __init setup_processor(void)
printk("CPU: %s [%08x] revision %d\n",
cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
- sprintf(init_utsname()->machine, "aarch64");
+ sprintf(init_utsname()->machine, ELF_PLATFORM);
elf_hwcap = 0;
+
+ /*
+ * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
+ * The blocks we test below represent incremental functionality
+ * for non-negative values. Negative values are reserved.
+ */
+ features = read_cpuid(ID_AA64ISAR0_EL1);
+ block = (features >> 4) & 0xf;
+ if (!(block & 0x8)) {
+ switch (block) {
+ default:
+ case 2:
+ elf_hwcap |= HWCAP_PMULL;
+ case 1:
+ elf_hwcap |= HWCAP_AES;
+ case 0:
+ break;
+ }
+ }
+
+ block = (features >> 8) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_SHA1;
+
+ block = (features >> 12) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_SHA2;
+
+ block = (features >> 16) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_CRC32;
+
+#ifdef CONFIG_COMPAT
+ /*
+	 * ID_ISAR5_EL1 carries similar information to the above, but
+	 * pertaining to the AArch32 32-bit execution state.
+ */
+ features = read_cpuid(ID_ISAR5_EL1);
+ block = (features >> 4) & 0xf;
+ if (!(block & 0x8)) {
+ switch (block) {
+ default:
+ case 2:
+ compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
+ case 1:
+ compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
+ case 0:
+ break;
+ }
+ }
+
+ block = (features >> 8) & 0xf;
+ if (block && !(block & 0x8))
+ compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
+
+ block = (features >> 12) & 0xf;
+ if (block && !(block & 0x8))
+ compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
+
+ block = (features >> 16) & 0xf;
+ if (block && !(block & 0x8))
+ compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
+#endif
}
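
The fall-through switches above rely on the fields being signed 4-bit blocks, where any value with bit 3 set is negative and hence reserved. A small model of the decode (register value hypothetical):

    #include <stdint.h>

    static int feature_level(uint64_t reg, unsigned int shift)
    {
            uint64_t block = (reg >> shift) & 0xf;

            return (block & 0x8) ? -1 : (int)block; /* negative => reserved */
    }

    int main(void)
    {
            uint64_t isar0 = 0x21;  /* hypothetical: AES field [7:4] = 2 */

            /* level 1 => AES only; level 2 => AES + PMULL, as in the switch */
            return feature_level(isar0, 4) == 2 ? 0 : 1;
    }
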
static void __init setup_machine_fdt(phys_addr_t dt_phys)
@@ -257,20 +415,27 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = boot_command_line;
+ early_ioremap_init();
+
parse_early_param();
+ efi_init();
arm64_memblock_init();
paging_init();
request_standard_resources();
+ efi_idmap_init();
+
unflatten_device_tree();
psci_init();
cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
smp_init_cpus();
+ smp_build_mpidr_hash();
#endif
#ifdef CONFIG_VT
@@ -288,7 +453,7 @@ static int __init arm64_device_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
-arch_initcall(arm64_device_init);
+arch_initcall_sync(arm64_device_init);
static DEFINE_PER_CPU(struct cpu, cpu_data);
@@ -309,6 +474,12 @@ subsys_initcall(topology_init);
static const char *hwcap_str[] = {
"fp",
"asimd",
+ "evtstrm",
+ "aes",
+ "pmull",
+ "sha1",
+ "sha2",
+ "crc32",
NULL
};
@@ -328,9 +499,6 @@ static int c_show(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
#endif
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
- loops_per_jiffy / (500000UL/HZ),
- loops_per_jiffy / (5000UL/HZ) % 100);
}
/* dump out the processor features */
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 890a591f75dd..bbc1aad21ce6 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
@@ -25,7 +26,6 @@
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
-#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
@@ -51,7 +51,7 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
int err;
/* dump the hardware registers to the fpsimd_state structure */
- fpsimd_save_state(fpsimd);
+ fpsimd_preserve_current_state();
/* copy the FP and status/control registers */
err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
@@ -86,11 +86,8 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
/* load the hardware registers from the fpsimd_state structure */
- if (!err) {
- preempt_disable();
- fpsimd_load_state(&fpsimd);
- preempt_enable();
- }
+ if (!err)
+ fpsimd_update_current_state(&fpsimd);
return err ? -EFAULT : 0;
}
@@ -100,8 +97,7 @@ static int restore_sigframe(struct pt_regs *regs,
{
sigset_t set;
int i, err;
- struct aux_context __user *aux =
- (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
+ void *aux = sf->uc.uc_mcontext.__reserved;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (err == 0)
@@ -121,8 +117,11 @@ static int restore_sigframe(struct pt_regs *regs,
err |= !valid_user_regs(&regs->user_regs);
- if (err == 0)
- err |= restore_fpsimd_context(&aux->fpsimd);
+ if (err == 0) {
+ struct fpsimd_context *fpsimd_ctx =
+ container_of(aux, struct fpsimd_context, head);
+ err |= restore_fpsimd_context(fpsimd_ctx);
+ }
return err;
}
@@ -167,8 +166,8 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
struct pt_regs *regs, sigset_t *set)
{
int i, err = 0;
- struct aux_context __user *aux =
- (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
+ void *aux = sf->uc.uc_mcontext.__reserved;
+ struct _aarch64_ctx *end;
/* set up the stack frame for unwinding */
__put_user_error(regs->regs[29], &sf->fp, err);
@@ -185,12 +184,17 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
- if (err == 0)
- err |= preserve_fpsimd_context(&aux->fpsimd);
+ if (err == 0) {
+ struct fpsimd_context *fpsimd_ctx =
+ container_of(aux, struct fpsimd_context, head);
+ err |= preserve_fpsimd_context(fpsimd_ctx);
+ aux += sizeof(*fpsimd_ctx);
+ }
/* set the "end" magic */
- __put_user_error(0, &aux->end.magic, err);
- __put_user_error(0, &aux->end.size, err);
+ end = aux;
+ __put_user_error(0, &end->magic, err);
+ __put_user_error(0, &end->size, err);
return err;
}
@@ -416,4 +420,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
+
+ if (thread_flags & _TIF_FOREIGN_FPSTATE)
+ fpsimd_restore_current_state();
+
}
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index e393174fe859..02de43260332 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -100,34 +100,6 @@ struct compat_rt_sigframe {
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-/*
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- */
-#define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_compat_sigreturn)
-#define SVC_SYS_SIGRETURN (0xef000000 | __NR_compat_sigreturn)
-#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_compat_rt_sigreturn)
-#define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_compat_rt_sigreturn)
-
-/*
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_compat_sigreturn) << 16) | \
- 0x2700 | __NR_compat_sigreturn)
-#define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
- 0x2700 | __NR_compat_rt_sigreturn)
-
-const compat_ulong_t aarch32_sigret_code[6] = {
- /*
- * AArch32 sigreturn code.
- * We don't construct an OABI SWI - instead we just set the imm24 field
- * to the EABI syscall number so that we create a sane disassembly.
- */
- MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN,
- MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
-};
-
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
compat_sigset_t cset;
@@ -247,7 +219,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
* Note that this also saves V16-31, which aren't visible
* in AArch32.
*/
- fpsimd_save_state(fpsimd);
+ fpsimd_preserve_current_state();
/* Place structure header on the stack */
__put_user_error(magic, &frame->magic, err);
@@ -310,11 +282,8 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
* We don't need to touch the exception register, so
* reload the hardware state.
*/
- if (!err) {
- preempt_disable();
- fpsimd_load_state(&fpsimd);
- preempt_enable();
- }
+ if (!err)
+ fpsimd_update_current_state(&fpsimd);
return err ? -EFAULT : 0;
}
@@ -474,12 +443,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* Check if the handler is written for ARM or Thumb */
thumb = handler & 1;
- if (thumb) {
+ if (thumb)
spsr |= COMPAT_PSR_T_BIT;
- spsr &= ~COMPAT_PSR_IT_MASK;
- } else {
+ else
spsr &= ~COMPAT_PSR_T_BIT;
- }
+
+ /* The IT state must be cleared for both ARM and Thumb-2 */
+ spsr &= ~COMPAT_PSR_IT_MASK;
if (ka->sa.sa_flags & SA_RESTORER) {
retcode = ptr_to_compat(ka->sa.sa_restorer);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
new file mode 100644
index 000000000000..b1925729c692
--- /dev/null
+++ b/arch/arm64/kernel/sleep.S
@@ -0,0 +1,184 @@
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+ .text
+/*
+ * Implementation of MPIDR_EL1 hash algorithm through shifting
+ * and OR'ing.
+ *
+ * @dst: register containing hash result
+ * @rs0: register containing affinity level 0 bit shift
+ * @rs1: register containing affinity level 1 bit shift
+ * @rs2: register containing affinity level 2 bit shift
+ * @rs3: register containing affinity level 3 bit shift
+ * @mpidr: register containing MPIDR_EL1 value
+ * @mask: register containing MPIDR mask
+ *
+ * Pseudo C-code:
+ *
+ *u32 dst;
+ *
+ *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
+ * u32 aff0, aff1, aff2, aff3;
+ * u64 mpidr_masked = mpidr & mask;
+ * aff0 = mpidr_masked & 0xff;
+ * aff1 = mpidr_masked & 0xff00;
+ * aff2 = mpidr_masked & 0xff0000;
+ * aff3 = mpidr_masked & 0xff00000000;
+ * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
+ *}
+ * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
+ * Output register: dst
+ * Note: input and output registers must be disjoint register sets
+ (e.g. a macro instance with mpidr = x1 and dst = x1 is invalid)
+ */
+ .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
+ and \mpidr, \mpidr, \mask // mask out MPIDR bits
+ and \dst, \mpidr, #0xff // mask=aff0
+ lsr \dst ,\dst, \rs0 // dst=aff0>>rs0
+ and \mask, \mpidr, #0xff00 // mask = aff1
+ lsr \mask ,\mask, \rs1
+ orr \dst, \dst, \mask // dst|=(aff1>>rs1)
+ and \mask, \mpidr, #0xff0000 // mask = aff2
+ lsr \mask ,\mask, \rs2
+ orr \dst, \dst, \mask // dst|=(aff2>>rs2)
+ and \mask, \mpidr, #0xff00000000 // mask = aff3
+ lsr \mask ,\mask, \rs3
+ orr \dst, \dst, \mask // dst|=(aff3>>rs3)
+ .endm
+/*
+ * Save CPU state for a suspend. This saves callee registers, and allocates
+ * space on the kernel stack to save the CPU specific registers + some
+ * other data for resume.
+ *
+ * x0 = suspend finisher argument
+ */
+ENTRY(__cpu_suspend)
+ stp x29, lr, [sp, #-96]!
+ stp x19, x20, [sp,#16]
+ stp x21, x22, [sp,#32]
+ stp x23, x24, [sp,#48]
+ stp x25, x26, [sp,#64]
+ stp x27, x28, [sp,#80]
+ mov x2, sp
+ sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
+ mov x1, sp
+ /*
+ * x1 now points to struct cpu_suspend_ctx allocated on the stack
+ */
+ str x2, [x1, #CPU_CTX_SP]
+ ldr x2, =sleep_save_sp
+ ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
+#ifdef CONFIG_SMP
+ mrs x7, mpidr_el1
+ ldr x9, =mpidr_hash
+ ldr x10, [x9, #MPIDR_HASH_MASK]
+ /*
+ * Following code relies on the struct mpidr_hash
+ * members size.
+ */
+ ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
+ ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
+ compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
+ add x2, x2, x8, lsl #3
+#endif
+ bl __cpu_suspend_finisher
+ /*
+	 * Never gets here unless suspend fails.
+	 * A successful cpu_suspend returns from cpu_resume; reaching
+	 * this code path is considered an error.
+	 * If the return value is 0, force x0 = -EOPNOTSUPP to make
+	 * sure a proper error condition is propagated.
+ */
+ cmp x0, #0
+ mov x3, #-EOPNOTSUPP
+ csel x0, x3, x0, eq
+ add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, lr, [sp], #96
+ ret
+ENDPROC(__cpu_suspend)
+ .ltorg
+
+/*
+ * x0 must contain the sctlr value retrieved from restored context
+ */
+ENTRY(cpu_resume_mmu)
+ ldr x3, =cpu_resume_after_mmu
+ msr sctlr_el1, x0 // restore sctlr_el1
+ isb
+ br x3 // global jump to virtual address
+ENDPROC(cpu_resume_mmu)
+cpu_resume_after_mmu:
+ mov x0, #0 // return zero on success
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, lr, [sp], #96
+ ret
+ENDPROC(cpu_resume_after_mmu)
+
+ .data
+ENTRY(cpu_resume)
+ bl el2_setup // if in EL2 drop to EL1 cleanly
+#ifdef CONFIG_SMP
+ mrs x1, mpidr_el1
+ adr x4, mpidr_hash_ptr
+ ldr x5, [x4]
+ add x8, x4, x5 // x8 = struct mpidr_hash phys address
+ /* retrieve mpidr_hash members to compute the hash */
+ ldr x2, [x8, #MPIDR_HASH_MASK]
+ ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS]
+ ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
+ compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
+ /* x7 contains hash index, let's use it to grab context pointer */
+#else
+ mov x7, xzr
+#endif
+ adr x0, sleep_save_sp
+ ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
+ ldr x0, [x0, x7, lsl #3]
+ /* load sp from context */
+ ldr x2, [x0, #CPU_CTX_SP]
+ adr x1, sleep_idmap_phys
+ /* load physical address of identity map page table in x1 */
+ ldr x1, [x1]
+ mov sp, x2
+ /*
+ * cpu_do_resume expects x0 to contain context physical address
+ * pointer and x1 to contain physical address of 1:1 page tables
+ */
+ bl cpu_do_resume // PC relative jump, MMU off
+ b cpu_resume_mmu // Resume MMU, never returns
+ENDPROC(cpu_resume)
+
+ .align 3
+mpidr_hash_ptr:
+ /*
+ * offset of mpidr_hash symbol from current location
+ * used to obtain run-time mpidr_hash address with MMU off
+ */
+ .quad mpidr_hash - .
+/*
+ * physical address of identity mapped page tables
+ */
+ .type sleep_idmap_phys, #object
+ENTRY(sleep_idmap_phys)
+ .quad 0
+/*
+ * struct sleep_save_sp {
+ * phys_addr_t *save_ptr_stash;
+ * phys_addr_t save_ptr_stash_phys;
+ * };
+ */
+ .type sleep_save_sp, #object
+ENTRY(sleep_save_sp)
+ .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 5d54e3717bf8..7c868a2ac38b 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,6 +39,7 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -48,76 +49,34 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/arm-ipi.h>
+
/*
* As from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
* where to place its SVC stack.
*/
struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_TIMER,
};
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void __cpuinit write_pen_release(u64 val)
-{
- void *start = (void *)&secondary_holding_pen_release;
- unsigned long size = sizeof(secondary_holding_pen_release);
-
- secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
-}
-
/*
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
*/
static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned long timeout;
-
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- raw_spin_lock(&boot_lock);
-
- /*
- * Update the pen release flag.
- */
- write_pen_release(cpu_logical_map(cpu));
+ if (cpu_ops[cpu]->cpu_boot)
+ return cpu_ops[cpu]->cpu_boot(cpu);
- /*
- * Send an event, causing the secondaries to read pen_release.
- */
- sev();
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == INVALID_HWID)
- break;
- udelay(10);
- }
-
- /*
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- raw_spin_unlock(&boot_lock);
-
- return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+ return -EOPNOTSUPP;
}
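
boot_secondary() is now a thin dispatch through the per-cpu ops table. A sketch of the pattern with the struct shape abbreviated (hook list elided, sizes hypothetical):

    #include <errno.h>
    #include <stddef.h>

    struct cpu_operations {
            const char *name;
            int (*cpu_boot)(unsigned int cpu);
            /* cpu_init/cpu_prepare/cpu_postboot/cpu_die hooks elided */
    };

    #define NR_CPUS 4
    static const struct cpu_operations *cpu_ops[NR_CPUS];

    static int boot_secondary_model(unsigned int cpu)
    {
            if (cpu < NR_CPUS && cpu_ops[cpu] && cpu_ops[cpu]->cpu_boot)
                    return cpu_ops[cpu]->cpu_boot(cpu);
            return -EOPNOTSUPP;     /* no boot mechanism registered */
    }

    int main(void)
    {
            return boot_secondary_model(1) == -EOPNOTSUPP ? 0 : 1;
    }
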
static DECLARE_COMPLETION(cpu_running);
@@ -158,6 +117,11 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
return ret;
}
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+ store_cpu_topology(cpuid);
+}
+
/*
* This is the secondary CPU boot entry. We're using this CPU's
* idle thread stack, but a set of temporary page tables.
@@ -167,8 +131,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
- printk("CPU%u: Booted secondary processor\n", cpu);
-
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
@@ -177,6 +139,9 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+ printk("CPU%u: Booted secondary processor\n", cpu);
+
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
@@ -187,24 +152,15 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
preempt_disable();
trace_hardirqs_off();
- /*
- * Let the primary processor know we're out of the
- * pen, then head off into the C entry point
- */
- write_pen_release(INVALID_HWID);
-
- /*
- * Synchronise with the boot thread.
- */
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ if (cpu_ops[cpu]->cpu_postboot)
+ cpu_ops[cpu]->cpu_postboot();
/*
- * Enable local interrupts.
+ * Enable GIC and timers.
*/
notify_cpu_starting(cpu);
- local_irq_enable();
- local_fiq_enable();
+
+ smp_store_cpu_info(cpu);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -214,48 +170,127 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
+ local_dbg_enable();
+ local_irq_enable();
+ local_fiq_enable();
+
/*
* OK, it's off to the idle thread for us
*/
cpu_startup_entry(CPUHP_ONLINE);
}
-void __init smp_cpus_done(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
{
- unsigned long bogosum = loops_per_jiffy * num_online_cpus();
+ /*
+ * If we don't have a cpu_die method, abort before we reach the point
+	 * of no return. CPU0 may not have a cpu_ops, so test for it.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+ return -EOPNOTSUPP;
- pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
- num_online_cpus(), bogosum / (500000/HZ),
- (bogosum / (5000/HZ)) % 100);
+ /*
+ * We may need to abort a hot unplug for some other mechanism-specific
+ * reason.
+ */
+ if (cpu_ops[cpu]->cpu_disable)
+ return cpu_ops[cpu]->cpu_disable(cpu);
+
+ return 0;
}
-void __init smp_prepare_boot_cpu(void)
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
{
-}
+ unsigned int cpu = smp_processor_id();
+ int ret;
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+ ret = op_cpu_disable(cpu);
+ if (ret)
+ return ret;
-static const struct smp_enable_ops *enable_ops[] __initconst = {
- &smp_spin_table_ops,
- &smp_psci_ops,
- NULL,
-};
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
-{
- const struct smp_enable_ops **ops = enable_ops;
+ /*
+ * Remove this CPU from the vm mask set of all processes.
+ */
+ clear_tasks_mm_cpumask(cpu);
- while (*ops) {
- if (!strcmp(name, (*ops)->name))
- return *ops;
+ return 0;
+}
+
+static DECLARE_COMPLETION(cpu_died);
- ops++;
+/*
+ * Called on the thread which is asking for a CPU to be shut down;
+ * waits until shutdown has completed, or times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ pr_crit("CPU%u: cpu didn't die\n", cpu);
+ return;
}
+ pr_notice("CPU%u: shutdown\n", cpu);
+}
- return NULL;
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ idle_task_exit();
+
+ local_irq_disable();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ complete(&cpu_died);
+
+ /*
+	 * Actually shut down the CPU. This must never fail. The specific hotplug
+ * mechanism must perform all required cache maintenance to ensure that
+ * no dirty lines are lost in the process of shutting down the CPU.
+ */
+ cpu_ops[cpu]->cpu_die(cpu);
+
+ BUG();
}
+#endif
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+ unsigned long bogosum = loops_per_jiffy * num_online_cpus();
+
+ pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+ num_online_cpus(), bogosum / (500000/HZ),
+ (bogosum / (5000/HZ)) % 100);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+}
+
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
/*
* Enumerate the possible CPU set from the device tree and build the
@@ -264,9 +299,8 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
*/
void __init smp_init_cpus(void)
{
- const char *enable_method;
struct device_node *dn = NULL;
- int i, cpu = 1;
+ unsigned int i, cpu = 1;
bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -335,25 +369,10 @@ void __init smp_init_cpus(void)
if (cpu >= NR_CPUS)
goto next;
- /*
- * We currently support only the "spin-table" enable-method.
- */
- enable_method = of_get_property(dn, "enable-method", NULL);
- if (!enable_method) {
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
+ if (cpu_read_ops(dn, cpu) != 0)
goto next;
- }
-
- smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
- if (!smp_enable_ops[cpu]) {
- pr_err("%s: invalid enable-method property: %s\n",
- dn->full_name, enable_method);
- goto next;
- }
-
- if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+ if (cpu_ops[cpu]->cpu_init(dn, cpu))
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -383,8 +402,12 @@ next:
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu, err;
- unsigned int ncores = num_possible_cpus();
+ int err;
+ unsigned int cpu, ncores = num_possible_cpus();
+
+ init_cpu_topology();
+
+ smp_store_cpu_info(smp_processor_id());
/*
* are we trying to boot more cores than exist?
@@ -411,10 +434,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (cpu == smp_processor_id())
continue;
- if (!smp_enable_ops[cpu])
+ if (!cpu_ops[cpu])
continue;
- err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+ err = cpu_ops[cpu]->cpu_prepare(cpu);
if (err)
continue;
@@ -445,6 +468,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_TIMER, "Timer broadcast interrupts"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -454,7 +478,7 @@ void show_ipi_list(struct seq_file *p, int prec)
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
prec >= 4 ? " " : "");
- for_each_present_cpu(cpu)
+ for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));
seq_printf(p, " %s\n", ipi_types[i]);
@@ -530,6 +554,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ case IPI_TIMER:
+ irq_enter();
+ tick_receive_broadcast();
+ irq_exit();
+ break;
+#endif
+
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
@@ -542,6 +574,13 @@ void smp_send_reschedule(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_TIMER);
+}
+#endif
+
void smp_send_stop(void)
{
unsigned long timeout;
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
deleted file mode 100644
index 0c533301be77..000000000000
--- a/arch/arm64/kernel/smp_psci.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * PSCI SMP initialisation
- *
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/smp.h>
-
-#include <asm/psci.h>
-#include <asm/smp_plat.h>
-
-static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
-{
- return 0;
-}
-
-static int __init smp_psci_prepare_cpu(int cpu)
-{
- int err;
-
- if (!psci_ops.cpu_on) {
- pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
- return -ENODEV;
- }
-
- err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
- if (err) {
- pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
- return err;
- }
-
- return 0;
-}
-
-const struct smp_enable_ops smp_psci_ops __initconst = {
- .name = "psci",
- .init_cpu = smp_psci_init_cpu,
- .prepare_cpu = smp_psci_prepare_cpu,
-};
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 7c35fa682f76..0347d38eea29 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -16,15 +16,38 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+extern void secondary_holding_pen(void);
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
-static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not. This is necessary for the hotplug code to work
+ * reliably.
+ */
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
+
+ secondary_holding_pen_release = val;
+ __flush_dcache_area(start, size);
+}
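For context, a hedged C rendering of the consumer side (the real holding pen is implemented in assembly with the MMU off): each parked secondary compares the released value against its own MPIDR, sleeping in WFE between polls, which is why the boot path pairs write_pen_release() with sev():

/* Illustrative only -- not how the pen is actually written. */
static void secondary_pen_wait(u64 my_mpidr)
{
	while (secondary_holding_pen_release != my_mpidr)
		asm volatile("wfe");
	/* release observed: branch to the secondary entry point */
}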
+
+
+static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
{
/*
* Determine the address from which the CPU is polling.
@@ -40,7 +63,7 @@ static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
return 0;
}
-static int __init smp_spin_table_prepare_cpu(int cpu)
+static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
void **release_addr;
@@ -48,7 +71,16 @@ static int __init smp_spin_table_prepare_cpu(int cpu)
return -ENODEV;
release_addr = __va(cpu_release_addr[cpu]);
- release_addr[0] = (void *)__pa(secondary_holding_pen);
+
+ /*
+ * We write the release address as LE regardless of the native
+ * endianness of the kernel. Therefore, any boot-loaders that
+ * read this address need to convert it to the
+ * boot-loader's endianness before jumping. This is mandated by
+ * the boot protocol.
+ */
+ release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
+
__flush_dcache_area(release_addr, sizeof(release_addr[0]));
/*
@@ -59,8 +91,24 @@ static int __init smp_spin_table_prepare_cpu(int cpu)
return 0;
}
-const struct smp_enable_ops smp_spin_table_ops __initconst = {
+static int smp_spin_table_cpu_boot(unsigned int cpu)
+{
+ /*
+ * Update the pen release flag.
+ */
+ write_pen_release(cpu_logical_map(cpu));
+
+ /*
+ * Send an event, causing the secondaries to read pen_release.
+ */
+ sev();
+
+ return 0;
+}
+
+const struct cpu_operations smp_spin_table_ops = {
.name = "spin-table",
- .init_cpu = smp_spin_table_init_cpu,
- .prepare_cpu = smp_spin_table_prepare_cpu,
+ .cpu_init = smp_spin_table_cpu_init,
+ .cpu_prepare = smp_spin_table_cpu_prepare,
+ .cpu_boot = smp_spin_table_cpu_boot,
};
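A sketch of the matching device-tree description for this enable method (the release address is illustrative; the boot loader must park the CPU polling that location):

/*
 *	cpu@1 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0 0x1>;
 *		enable-method = "spin-table";
 *		cpu-release-addr = <0x0 0x8000fff8>;
 *	};
 */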
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index d25459ff57fc..55437ba1f5a4 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -35,7 +35,7 @@
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
-int unwind_frame(struct stackframe *frame)
+int notrace unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
unsigned long fp = frame->fp;
@@ -43,12 +43,16 @@ int unwind_frame(struct stackframe *frame)
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
- if (fp < low || fp > high || fp & 0xf)
+ if (fp < low || fp > high - 0x18 || fp & 0xf)
return -EINVAL;
frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp);
- frame->pc = *(unsigned long *)(fp + 8);
+ /*
+ * -4 here because we care about the PC at time of bl,
+ * not where the return will go.
+ */
+ frame->pc = *(unsigned long *)(fp + 8) - 4;
return 0;
}
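A hedged sketch of a typical caller: walk the current stack and print each saved PC until unwind_frame() rejects the frame record. The frame seeding shown is an assumption; in-tree users normally go through a walk_stackframe()-style helper:

static void dump_return_addresses(void)
{
	struct stackframe frame;

	frame.fp = (unsigned long)__builtin_frame_address(0);
	frame.sp = (unsigned long)&frame;	/* rough in-stack lower bound */
	frame.pc = (unsigned long)dump_return_addresses;

	while (unwind_frame(&frame) == 0)
		pr_info("  pc: %pS\n", (void *)frame.pc);
}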
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
new file mode 100644
index 000000000000..1fa9ce4afd8f
--- /dev/null
+++ b/arch/arm64/kernel/suspend.c
@@ -0,0 +1,140 @@
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/debug-monitors.h>
+#include <asm/pgtable.h>
+#include <asm/memory.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+extern int __cpu_suspend(unsigned long);
+/*
+ * This is called by __cpu_suspend() to save the state, and do whatever
+ * flushing is required to ensure that when the CPU goes to sleep we have
+ * the necessary data available when the caches are not searched.
+ *
+ * @arg: Argument to pass to suspend operations
+ * @ptr: CPU context virtual address
+ * @save_ptr: address of the location where the context physical address
+ * must be saved
+ */
+int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
+ phys_addr_t *save_ptr)
+{
+ int cpu = smp_processor_id();
+
+ *save_ptr = virt_to_phys(ptr);
+
+ cpu_do_suspend(ptr);
+ /*
+ * Only flush the context that must be retrieved with the MMU
+ * off. VA primitives ensure the flush is applied to all
+ * cache levels so context is pushed to DRAM.
+ */
+ __flush_dcache_area(ptr, sizeof(*ptr));
+ __flush_dcache_area(save_ptr, sizeof(*save_ptr));
+
+ return cpu_ops[cpu]->cpu_suspend(arg);
+}
+
+/*
+ * This hook is provided so that cpu_suspend code can restore HW
+ * breakpoints as early as possible in the resume path, before re-enabling
+ * debug exceptions. Code cannot be run from a CPU PM notifier since by the
+ * time the notifier runs debug exceptions might have been enabled already,
+ * with the HW breakpoint registers' contents still in an unknown state.
+ */
+void (*hw_breakpoint_restore)(void *);
+void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+{
+ /* Prevent multiple restore hook initializations */
+ if (WARN_ON(hw_breakpoint_restore))
+ return;
+ hw_breakpoint_restore = hw_bp_restore;
+}
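A sketch of how a debug driver might install the restore callback; the foo_* names are hypothetical, but in practice the hardware-breakpoint code registers its reset routine through this hook:

static void foo_restore_bps(void *unused)
{
	/* rewrite the DBGBVR/DBGBCR registers from the saved copies */
}

static int __init foo_debug_init(void)
{
	cpu_suspend_set_dbg_restorer(foo_restore_bps);
	return 0;
}
early_initcall(foo_debug_init);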
+
+/**
+ * cpu_suspend
+ *
+ * @arg: argument to pass to the finisher function
+ */
+int cpu_suspend(unsigned long arg)
+{
+ struct mm_struct *mm = current->active_mm;
+ int ret, cpu = smp_processor_id();
+ unsigned long flags;
+
+ /*
+ * If cpu_ops have not been registered or suspend
+ * has not been initialized, the cpu_suspend call fails early.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
+ return -EOPNOTSUPP;
+
+ /*
+ * From this point debug exceptions are disabled to prevent
+ * updates to mdscr register (saved and restored along with
+ * general purpose registers) from kernel debuggers.
+ */
+ local_dbg_save(flags);
+
+ /*
+ * The mm context is saved on the stack; it will be restored when
+ * the CPU comes out of reset through the identity-mapped
+ * page tables, so that the thread address space is properly
+ * set up on function return.
+ */
+ ret = __cpu_suspend(arg);
+ if (ret == 0) {
+ cpu_switch_mm(mm->pgd, mm);
+ flush_tlb_all();
+
+ /*
+ * Restore per-cpu offset before any kernel
+ * subsystem relying on it has a chance to run.
+ */
+ set_my_cpu_offset(per_cpu_offset(cpu));
+
+ /*
+ * Restore HW breakpoint registers to sane values
+ * before debug exceptions are possibly reenabled
+ * through local_dbg_restore.
+ */
+ if (hw_breakpoint_restore)
+ hw_breakpoint_restore(NULL);
+ }
+
+ /*
+ * Restore pstate flags. OS lock and mdscr have already been
+ * restored, so from this point onwards, debugging is fully
+ * re-enabled if it was enabled when the core started shutting down.
+ */
+ local_dbg_restore(flags);
+
+ return ret;
+}
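A usage sketch: a typical caller is a cpuidle driver's ->enter hook, where the argument is forwarded to cpu_ops[cpu]->cpu_suspend and a zero return means a full suspend/resume round trip. The names below are illustrative:

static int foo_idle_enter(unsigned int state_idx)
{
	int ret = cpu_suspend(state_idx);

	if (ret)
		pr_debug("suspend to state %u aborted (%d)\n", state_idx, ret);
	return ret;
}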
+
+extern struct sleep_save_sp sleep_save_sp;
+extern phys_addr_t sleep_idmap_phys;
+
+static int cpu_suspend_init(void)
+{
+ void *ctx_ptr;
+
+ /* ctx_ptr is an array of physical addresses */
+ ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
+
+ if (WARN_ON(!ctx_ptr))
+ return -ENOMEM;
+
+ sleep_save_sp.save_ptr_stash = ctx_ptr;
+ sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
+ sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
+ __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
+ __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
+
+ return 0;
+}
+early_initcall(cpu_suspend_init);
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index a1b19ed7467c..423a5b3fc2be 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -59,48 +59,48 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
* extension.
*/
compat_sys_pread64_wrapper:
- orr x3, x4, x5, lsl #32
+ regs_to_64 x3, x4, x5
b sys_pread64
ENDPROC(compat_sys_pread64_wrapper)
compat_sys_pwrite64_wrapper:
- orr x3, x4, x5, lsl #32
+ regs_to_64 x3, x4, x5
b sys_pwrite64
ENDPROC(compat_sys_pwrite64_wrapper)
compat_sys_truncate64_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
b sys_truncate
ENDPROC(compat_sys_truncate64_wrapper)
compat_sys_ftruncate64_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
b sys_ftruncate
ENDPROC(compat_sys_ftruncate64_wrapper)
compat_sys_readahead_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
mov w2, w4
b sys_readahead
ENDPROC(compat_sys_readahead_wrapper)
compat_sys_fadvise64_64_wrapper:
mov w6, w1
- orr x1, x2, x3, lsl #32
- orr x2, x4, x5, lsl #32
+ regs_to_64 x1, x2, x3
+ regs_to_64 x2, x4, x5
mov w3, w6
b sys_fadvise64_64
ENDPROC(compat_sys_fadvise64_64_wrapper)
compat_sys_sync_file_range2_wrapper:
- orr x2, x2, x3, lsl #32
- orr x3, x4, x5, lsl #32
+ regs_to_64 x2, x2, x3
+ regs_to_64 x3, x4, x5
b sys_sync_file_range2
ENDPROC(compat_sys_sync_file_range2_wrapper)
compat_sys_fallocate_wrapper:
- orr x2, x2, x3, lsl #32
- orr x3, x4, x5, lsl #32
+ regs_to_64 x2, x2, x3
+ regs_to_64 x3, x4, x5
b sys_fallocate
ENDPROC(compat_sys_fallocate_wrapper)
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index a551f88ae2c1..03dc3718eb13 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -68,12 +68,6 @@ unsigned long long notrace sched_clock(void)
return arch_timer_read_counter() * sched_clock_mult;
}
-int read_current_timer(unsigned long *timer_value)
-{
- *timer_value = arch_timer_read_counter();
- return 0;
-}
-
void __init time_init(void)
{
u32 arch_timer_rate;
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
new file mode 100644
index 000000000000..db8bb29c3852
--- /dev/null
+++ b/arch/arm64/kernel/topology.c
@@ -0,0 +1,590 @@
+/*
+ * arch/arm64/kernel/topology.c
+ *
+ * Copyright (C) 2011,2013,2014 Linaro Limited.
+ *
+ * Based on the arm32 version written by Vincent Guittot in turn based on
+ * arch/sh/kernel/topology.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/cputype.h>
+#include <asm/topology.h>
+#include <asm/smp_plat.h>
+
+
+/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account during load balance. A per cpu structure
+ * is preferred because each CPU updates its own cpu_power field during the
+ * load balance except for idle cores. One idle core is selected to run the
+ * rebalance_domains for all idle cores and the cpu_power can be updated
+ * during this sequence.
+ */
+static DEFINE_PER_CPU(unsigned long, cpu_scale);
+
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+ per_cpu(cpu_scale, cpu) = power;
+}
+
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ of_node_put(cpu_node);
+ return cpu;
+ }
+ }
+
+ pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+
+ of_node_put(cpu_node);
+ return -1;
+}
+
+static int __init parse_core(struct device_node *core, int cluster_id,
+ int core_id)
+{
+ char name[10];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else {
+ pr_err("%s: Can't get CPU for thread\n",
+ t->full_name);
+ of_node_put(t);
+ return -EINVAL;
+ }
+ of_node_put(t);
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%s: Core has both threads and CPU\n",
+ core->full_name);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf) {
+ pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+ char name[10];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ static int cluster_id __initdata;
+ int core_id = 0;
+ int i, ret;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ leaf = false;
+ ret = parse_cluster(c, depth + 1);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%s: cpu-map children should be clusters\n",
+ c->full_name);
+ of_node_put(c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, cluster_id, core_id++);
+ } else {
+ pr_err("%s: Non-leaf cluster with core %s\n",
+ cluster->full_name, name);
+ ret = -EINVAL;
+ }
+
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%s: empty cluster\n", cluster->full_name);
+
+ if (leaf)
+ cluster_id++;
+
+ return 0;
+}
+
+struct cpu_efficiency {
+ const char *compatible;
+ unsigned long efficiency;
+};
+
+/*
+ * Table of the relative efficiency of each processor.
+ * The efficiency value must fit in 20 bits and the final
+ * cpu_scale value must be in the range
+ *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
+ * in order to return at most 1 when DIV_ROUND_CLOSEST
+ * is used to compute the capacity of a CPU.
+ * Processors that are not defined in the table
+ * use the default SCHED_POWER_SCALE value for cpu_scale.
+ */
+static const struct cpu_efficiency table_efficiency[] = {
+ { "arm,cortex-a57", 3891 },
+ { "arm,cortex-a53", 2048 },
+ { NULL, },
+};
+
+static unsigned long *__cpu_capacity;
+#define cpu_capacity(cpu) __cpu_capacity[cpu]
+
+static unsigned long middle_capacity = 1;
+
+/*
+ * Iterate over all CPUs' descriptors in the DT and compute the efficiency
+ * (as per table_efficiency). Also calculate a middle efficiency
+ * as close as possible to (max{eff_i} + min{eff_i}) / 2.
+ * This is later used to scale the cpu_power field such that an
+ * 'average' CPU is of middle power. Also see the comments near
+ * table_efficiency[] and update_cpu_power().
+ */
+static int __init parse_dt_topology(void)
+{
+ struct device_node *cn, *map;
+ int ret = 0;
+ int cpu;
+
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided, cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ map = of_get_child_by_name(cn, "cpu-map");
+ if (!map)
+ goto out;
+
+ ret = parse_cluster(map, 0);
+ if (ret != 0)
+ goto out_map;
+
+ /*
+ * Check that all cores are in the topology; the SMP code will
+ * only mark cores described in the DT as possible.
+ */
+ for_each_possible_cpu(cpu) {
+ if (cpu_topology[cpu].cluster_id == -1) {
+ pr_err("CPU%d: No topology information specified\n",
+ cpu);
+ ret = -EINVAL;
+ }
+ }
+
+out_map:
+ of_node_put(map);
+out:
+ of_node_put(cn);
+ return ret;
+}
+
+static void __init parse_dt_cpu_power(void)
+{
+ const struct cpu_efficiency *cpu_eff;
+ struct device_node *cn;
+ unsigned long min_capacity = ULONG_MAX;
+ unsigned long max_capacity = 0;
+ unsigned long capacity = 0;
+ int cpu;
+
+ __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
+ GFP_NOWAIT);
+
+ for_each_possible_cpu(cpu) {
+ const u32 *rate;
+ int len;
+
+ /* Too early to use cpu->of_node */
+ cn = of_get_cpu_node(cpu, NULL);
+ if (!cn) {
+ pr_err("Missing device node for CPU %d\n", cpu);
+ continue;
+ }
+
+ for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
+ if (of_device_is_compatible(cn, cpu_eff->compatible))
+ break;
+
+ if (cpu_eff->compatible == NULL) {
+ pr_warn("%s: Unknown CPU type\n", cn->full_name);
+ continue;
+ }
+
+ rate = of_get_property(cn, "clock-frequency", &len);
+ if (!rate || len != 4) {
+ pr_err("%s: Missing clock-frequency property\n",
+ cn->full_name);
+ continue;
+ }
+
+ capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+
+ /* Save min capacity of the system */
+ if (capacity < min_capacity)
+ min_capacity = capacity;
+
+ /* Save max capacity of the system */
+ if (capacity > max_capacity)
+ max_capacity = capacity;
+
+ cpu_capacity(cpu) = capacity;
+ }
+
+ /* If min and max capacities are equal we bypass the update of the
+ * cpu_scale because all CPUs have the same capacity. Otherwise, we
+ * compute a middle_capacity factor that will ensure that the capacity
+ * of an 'average' CPU of the system will be as close as possible to
+ * SCHED_POWER_SCALE, which is the default value, but with the
+ * constraint explained near table_efficiency[].
+ */
+ if (min_capacity == max_capacity)
+ return;
+ else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
+ middle_capacity = (min_capacity + max_capacity)
+ >> (SCHED_POWER_SHIFT+1);
+ else
+ middle_capacity = ((max_capacity / 3)
+ >> (SCHED_POWER_SHIFT-1)) + 1;
+}
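A worked example under assumed clock rates, following the capacity computation above:

/*
 * Assume an A57 at 1.8 GHz and an A53 at 1.4 GHz:
 *   capacity(A57) = (1800000000 >> 20) * 3891 = 1716 * 3891 ~= 6676956
 *   capacity(A53) = (1400000000 >> 20) * 2048 = 1335 * 2048 ~= 2734080
 * Here 4 * max < 3 * (max + min), so with SCHED_POWER_SHIFT == 10:
 *   middle_capacity = (min + max) >> 11 ~= 4595
 * giving cpu_scale ~= 1453 for the A57 and ~= 595 for the A53, both
 * within the required 0 < cpu_scale < 3*SCHED_POWER_SCALE/2 band.
 */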
+
+/*
+ * Look for a custom capacity for a CPU in the cpu_topo_data table during
+ * boot. The update of all CPUs is O(n^2) for a heterogeneous system, but the
+ * function returns directly for an SMP system.
+ */
+static void update_cpu_power(unsigned int cpu)
+{
+ if (!cpu_capacity(cpu))
+ return;
+
+ set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+
+ pr_info("CPU%u: update cpu_power %lu\n",
+ cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+/*
+ * cpu topology table
+ */
+struct cpu_topology cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+ return &cpu_topology[cpu].core_sibling;
+}
+
+static void update_siblings_masks(unsigned int cpuid)
+{
+ struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+ int cpu;
+
+ if (cpuid_topo->cluster_id == -1) {
+ /*
+ * DT does not contain topology information for this cpu.
+ */
+ pr_debug("CPU%u: No topology information configured\n", cpuid);
+ return;
+ }
+
+ /* update core and thread sibling masks */
+ for_each_possible_cpu(cpu) {
+ cpu_topo = &cpu_topology[cpu];
+
+ if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->core_id != cpu_topo->core_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+ }
+}
+
+#ifdef CONFIG_SCHED_HMP
+
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ * - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+ int cpu;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (cpu_logical_map(cpu) == mpidr)
+ return cpu;
+ return -EINVAL;
+}
+
+static const char * const little_cores[] = {
+ "arm,cortex-a53",
+ NULL,
+};
+
+static bool is_little_cpu(struct device_node *cn)
+{
+ const char * const *lc;
+ for (lc = little_cores; *lc; lc++)
+ if (of_device_is_compatible(cn, *lc))
+ return true;
+ return false;
+}
+
+void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+ struct cpumask *slow)
+{
+ struct device_node *cn = NULL;
+ int cpu;
+
+ cpumask_clear(fast);
+ cpumask_clear(slow);
+
+ /*
+ * Use the config options if they are given. This helps testing
+ * HMP scheduling on systems without a big.LITTLE architecture.
+ */
+ if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+ if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
+ WARN(1, "Failed to parse HMP fast cpu mask!\n");
+ if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
+ WARN(1, "Failed to parse HMP slow cpu mask!\n");
+ return;
+ }
+
+ /*
+ * Else, parse device tree for little cores.
+ */
+ while ((cn = of_find_node_by_type(cn, "cpu"))) {
+
+ const u32 *mpidr;
+ int len;
+
+ mpidr = of_get_property(cn, "reg", &len);
+ if (!mpidr || len != 8) {
+ pr_err("%s missing reg property\n", cn->full_name);
+ continue;
+ }
+
+ cpu = get_logical_index(be32_to_cpup(mpidr+1));
+ if (cpu == -EINVAL) {
+ pr_err("couldn't get logical index for mpidr %x\n",
+ be32_to_cpup(mpidr+1));
+ break;
+ }
+
+ if (is_little_cpu(cn))
+ cpumask_set_cpu(cpu, slow);
+ else
+ cpumask_set_cpu(cpu, fast);
+ }
+
+ if (!cpumask_empty(fast) && !cpumask_empty(slow))
+ return;
+
+ /*
+ * We didn't find both big and little cores, so let's call all cores
+ * fast, as this will keep the system running with all cores being
+ * treated equally.
+ */
+ cpumask_setall(fast);
+ cpumask_clear(slow);
+}
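An illustrative configuration for the Kconfig shortcut taken at the top of this function: assuming an 8-CPU system split 4+4, setting CONFIG_HMP_FAST_CPU_MASK="0-3" and CONFIG_HMP_SLOW_CPU_MASK="4-7" fills both masks via cpulist_parse() and skips the DT walk entirely. Equivalent by hand:

static void foo_setup_masks(struct cpumask *fast, struct cpumask *slow)
{
	cpulist_parse("0-3", fast);	/* big cores */
	cpulist_parse("4-7", slow);	/* LITTLE cores */
}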
+
+struct cpumask hmp_slow_cpu_mask;
+
+void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
+{
+ struct cpumask hmp_fast_cpu_mask;
+ struct hmp_domain *domain;
+
+ arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
+
+ /*
+ * Initialize hmp_domains
+ * Must be ordered with respect to compute capacity.
+ * Fastest domain at head of list.
+ */
+ if (!cpumask_empty(&hmp_slow_cpu_mask)) {
+ domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
+ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+ }
+ domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
+ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+}
+#endif /* CONFIG_SCHED_HMP */
+
+/*
+ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
+ * @socket_id: cluster HW identifier
+ * @cluster_mask: the cpumask location to be initialized, modified by the
+ * function only if return value == 0
+ *
+ * Return:
+ *
+ * 0 on success
+ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
+ */
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
+{
+ int cpu;
+
+ if (!cluster_mask)
+ return -EINVAL;
+
+ for_each_online_cpu(cpu) {
+ if (socket_id == topology_physical_package_id(cpu)) {
+ cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
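A hedged usage sketch for the helper above; note it only consults online CPUs:

static void foo_show_cluster(void)
{
	cpumask_t mask;

	if (!cluster_to_logical_mask(1, &mask))
		pr_info("cluster 1 spans %u online CPUs\n",
			cpumask_weight(&mask));
}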
+
+void store_cpu_topology(unsigned int cpuid)
+{
+ update_siblings_masks(cpuid);
+ update_cpu_power(cpuid);
+}
+
+static void __init reset_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = 0;
+ cpu_topo->cluster_id = -1;
+
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+ }
+}
+
+static void __init reset_cpu_power(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ set_power_scale(cpu, SCHED_POWER_SCALE);
+}
+
+void __init init_cpu_topology(void)
+{
+ reset_cpu_topology();
+
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
+ */
+ if (parse_dt_topology())
+ reset_cpu_topology();
+
+ reset_cpu_power();
+ parse_dt_cpu_power();
+}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index f30852d28590..7ffadddb645d 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -32,6 +32,7 @@
#include <linux/syscalls.h>
#include <asm/atomic.h>
+#include <asm/debug-monitors.h>
#include <asm/traps.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
@@ -261,11 +262,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
-#ifdef CONFIG_COMPAT
/* check for AArch32 breakpoint instructions */
- if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0)
+ if (!aarch32_break_handler(regs))
return;
-#endif
if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
printk_ratelimit()) {
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 6a389dc1bd49..84cafbc3eb54 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -58,7 +58,10 @@ static struct page *vectors_page[1];
static int alloc_vectors_page(void)
{
extern char __kuser_helper_start[], __kuser_helper_end[];
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long vpage;
vpage = get_zeroed_page(GFP_ATOMIC);
@@ -72,7 +75,7 @@ static int alloc_vectors_page(void)
/* sigreturn code */
memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- aarch32_sigret_code, sizeof(aarch32_sigret_code));
+ __aarch32_sigret_code_start, sigret_sz);
flush_icache_range(vpage, vpage + PAGE_SIZE);
vectors_page[0] = virt_to_page(vpage);
@@ -103,49 +106,31 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
static int __init vdso_init(void)
{
- struct page *pg;
- char *vbase;
- int i, ret = 0;
+ int i;
+
+ if (memcmp(&vdso_start, "\177ELF", 4)) {
+ pr_err("vDSO is not a valid ELF object!\n");
+ return -EINVAL;
+ }
vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
vdso_pages + 1, vdso_pages, 1L, &vdso_start);
/* Allocate the vDSO pagelist, plus a page for the data. */
- vdso_pagelist = kzalloc(sizeof(struct page *) * (vdso_pages + 1),
+ vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
GFP_KERNEL);
- if (vdso_pagelist == NULL) {
- pr_err("Failed to allocate vDSO pagelist!\n");
+ if (vdso_pagelist == NULL)
return -ENOMEM;
- }
/* Grab the vDSO code pages. */
- for (i = 0; i < vdso_pages; i++) {
- pg = virt_to_page(&vdso_start + i*PAGE_SIZE);
- ClearPageReserved(pg);
- get_page(pg);
- vdso_pagelist[i] = pg;
- }
-
- /* Sanity check the shared object header. */
- vbase = vmap(vdso_pagelist, 1, 0, PAGE_KERNEL);
- if (vbase == NULL) {
- pr_err("Failed to map vDSO pagelist!\n");
- return -ENOMEM;
- } else if (memcmp(vbase, "\177ELF", 4)) {
- pr_err("vDSO is not a valid ELF object!\n");
- ret = -EINVAL;
- goto unmap;
- }
+ for (i = 0; i < vdso_pages; i++)
+ vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
/* Grab the vDSO data page. */
- pg = virt_to_page(vdso_data);
- get_page(pg);
- vdso_pagelist[i] = pg;
+ vdso_pagelist[i] = virt_to_page(vdso_data);
-unmap:
- vunmap(vbase);
- return ret;
+ return 0;
}
arch_initcall(vdso_init);
@@ -153,11 +138,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp)
{
struct mm_struct *mm = current->mm;
- unsigned long vdso_base, vdso_mapping_len;
+ unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
int ret;
+ vdso_text_len = vdso_pages << PAGE_SHIFT;
/* Be sure to map the data page */
- vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+ vdso_mapping_len = vdso_text_len + PAGE_SIZE;
down_write(&mm->mmap_sem);
vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
@@ -167,35 +153,52 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
}
mm->context.vdso = (void *)vdso_base;
- ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
+ ret = install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdso_pagelist);
- if (ret) {
- mm->context.vdso = NULL;
+ if (ret)
+ goto up_fail;
+
+ vdso_base += vdso_text_len;
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ VM_READ|VM_MAYREAD,
+ vdso_pagelist + vdso_pages);
+ if (ret)
goto up_fail;
- }
-up_fail:
up_write(&mm->mmap_sem);
+ return 0;
+up_fail:
+ mm->context.vdso = NULL;
+ up_write(&mm->mmap_sem);
return ret;
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
+ unsigned long vdso_text;
+
+ if (!vma->vm_mm)
+ return NULL;
+
+ vdso_text = (unsigned long)vma->vm_mm->context.vdso;
+
/*
* We can re-use the vdso pointer in mm_context_t for identifying
* the vectors page for compat applications. The vDSO will always
* sit above TASK_UNMAPPED_BASE and so we don't need to worry about
* it conflicting with the vectors base.
*/
- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
+ if (vma->vm_start == vdso_text) {
#ifdef CONFIG_COMPAT
if (vma->vm_start == AARCH32_VECTORS_BASE)
return "[vectors]";
#endif
return "[vdso]";
+ } else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
+ return "[vvar]";
}
return NULL;
@@ -235,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->use_syscall = use_syscall;
vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
vdso_data->cs_cycle_last = tk->clock->cycle_last;
@@ -242,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->xtime_clock_nsec = tk->xtime_nsec;
vdso_data->cs_mult = tk->mult;
vdso_data->cs_shift = tk->shift;
- vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
- vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
}
smp_wmb();
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d8064af42e62..84b942612051 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -47,9 +47,9 @@ $(obj-vdso): %.o: %.S
$(call if_changed_dep,vdsoas)
# Actual build commands
-quiet_cmd_vdsold = VDSOL $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
-quiet_cmd_vdsoas = VDSOA $@
+quiet_cmd_vdsold = VDSOL $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+quiet_cmd_vdsoas = VDSOA $@
cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
# Install commands for the unstripped file
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index f0a6d10b5211..fe652ffd34c2 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
bl __do_get_tspec
seqcnt_check w9, 1b
+ mov x30, x2
+
cmp w0, #CLOCK_MONOTONIC
b.ne 6f
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
b.ne 8f
+ /* xtime_coarse_nsec is already right-shifted */
+ mov x12, #0
+
/* Get coarse timespec. */
adr vdso_data, _vdso_data
3: seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
lsr x11, x11, x12
stp x10, x11, [x1, #TSPEC_TV_SEC]
mov x0, xzr
- ret x2
+ ret
7:
mov x30, x2
8: /* Syscall fallback. */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 3fae2be8b016..ce2d97255ba9 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -41,7 +41,6 @@ SECTIONS
}
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- *(.smp.pen.text)
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
@@ -56,7 +55,8 @@ SECTIONS
}
RO_DATA(PAGE_SIZE)
-
+ EXCEPTION_TABLE(8)
+ NOTES
_etext = .; /* End of text and rodata section */
. = ALIGN(PAGE_SIZE);
@@ -82,43 +82,21 @@ SECTIONS
PERCPU_SECTION(64)
__init_end = .;
- . = ALIGN(THREAD_SIZE);
- __data_loc = .;
-
- .data : AT(__data_loc) {
- _data = .; /* address in memory */
- _sdata = .;
-
- /*
- * first, the init task union, aligned
- * to an 8192 byte boundary.
- */
- INIT_TASK_DATA(THREAD_SIZE)
- NOSAVE_DATA
- CACHELINE_ALIGNED_DATA(64)
- READ_MOSTLY_DATA(64)
-
- /*
- * The exception fixup table (might need resorting at runtime)
- */
- . = ALIGN(32);
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
-
- /*
- * and the usual data section
- */
- DATA_DATA
- CONSTRUCTORS
-
- _edata = .;
- }
- _edata_loc = __data_loc + SIZEOF(.data);
- NOTES
+ . = ALIGN(PAGE_SIZE);
+ _data = .;
+ _sdata = .;
+ RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
BSS_SECTION(0, 0, 0)
+
+ . = ALIGN(PAGE_SIZE);
+ idmap_pg_dir = .;
+ . += IDMAP_DIR_SIZE;
+ swapper_pg_dir = .;
+ . += SWAPPER_DIR_SIZE;
+
_end = .;
STABS_DEBUG
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 59acc0ef0462..328ce1a99daa 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,6 +1,4 @@
-lib-y := bitops.o delay.o \
- strncpy_from_user.o strnlen_user.o clear_user.o \
- copy_from_user.o copy_to_user.o copy_in_user.o \
- copy_page.o clear_page.o \
- memchr.o memcpy.o memmove.o memset.o \
+lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
+ copy_to_user.o copy_in_user.o copy_page.o \
+ clear_page.o memchr.o memcpy.o memmove.o memset.o \
strchr.o strrchr.o
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index e5db797790d3..7dac371cc9a2 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -46,11 +46,12 @@ ENTRY( \name )
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x4, x2, x3 // Create mask
-1: ldaxr x2, [x1]
+1: ldxr x2, [x1]
lsr x0, x2, x3 // Save old value of bit
\instr x2, x2, x4 // toggle bit
stlxr w5, x2, [x1]
cbnz w5, 1b
+ dmb ish
and x0, x0, #1
3: ret
ENDPROC(\name )
diff --git a/arch/arm64/lib/strncpy_from_user.S b/arch/arm64/lib/strncpy_from_user.S
deleted file mode 100644
index 56e448a831a0..000000000000
--- a/arch/arm64/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Based on arch/arm/lib/strncpy_from_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/*
- * Copy a string from user space to kernel space.
- * x0 = dst, x1 = src, x2 = byte length
- * returns the number of characters copied (strlen of copied string),
- * -EFAULT on exception, or "len" if we fill the whole buffer
- */
-ENTRY(__strncpy_from_user)
- mov x4, x1
-1: subs x2, x2, #1
- bmi 2f
-USER(9f, ldrb w3, [x1], #1 )
- strb w3, [x0], #1
- cbnz w3, 1b
- sub x1, x1, #1 // take NUL character out of count
-2: sub x0, x1, x4
- ret
-ENDPROC(__strncpy_from_user)
-
- .section .fixup,"ax"
- .align 0
-9: strb wzr, [x0] // null terminate
- mov x0, #-EFAULT
- ret
- .previous
diff --git a/arch/arm64/lib/strnlen_user.S b/arch/arm64/lib/strnlen_user.S
deleted file mode 100644
index 7f7b176a5646..000000000000
--- a/arch/arm64/lib/strnlen_user.S
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Based on arch/arm/lib/strnlen_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/* Prototype: unsigned long __strnlen_user(const char *str, long n)
- * Purpose : get length of a string in user memory
- * Params : str - address of string in user memory
- * Returns : length of string *including terminator*
- * or zero on exception, or n if too long
- */
-ENTRY(__strnlen_user)
- mov x2, x0
-1: subs x1, x1, #1
- b.mi 2f
-USER(9f, ldrb w3, [x0], #1 )
- cbnz w3, 1b
-2: sub x0, x0, x2
- ret
-ENDPROC(__strnlen_user)
-
- .section .fixup,"ax"
- .align 0
-9: mov x0, #0
- ret
- .previous
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 3140a2abcdc2..3ecb56c624d3 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,4 +1,5 @@
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
- context.o tlb.o proc.o
+ context.o proc.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 48a386094fa3..fda756875fa6 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -30,7 +30,7 @@
*
* Corrupted registers: x0-x7, x9-x11
*/
-ENTRY(__flush_dcache_all)
+__flush_dcache_all:
dsb sy // ensure ordering with previous memory accesses
mrs x0, clidr_el1 // read clidr
and x3, x0, #0x7000000 // extract loc from clidr
@@ -146,7 +146,7 @@ ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
/*
- * __flush_kern_dcache_page(kaddr)
+ * __flush_dcache_area(kaddr, size)
*
* Ensure that the data held in the page kaddr is written back to the
* page in question.
@@ -166,3 +166,97 @@ ENTRY(__flush_dcache_area)
dsb sy
ret
ENDPROC(__flush_dcache_area)
+
+/*
+ * __inval_cache_range(start, end)
+ * - start - start address of region
+ * - end - end address of region
+ */
+ENTRY(__inval_cache_range)
+ /* FALLTHROUGH */
+
+/*
+ * __dma_inv_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+__dma_inv_range:
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ tst x1, x3 // end cache line aligned?
+ bic x1, x1, x3
+ b.eq 1f
+ dc civac, x1 // clean & invalidate D / U line
+1: tst x0, x3 // start cache line aligned?
+ bic x0, x0, x3
+ b.eq 2f
+ dc civac, x0 // clean & invalidate D / U line
+ b 3f
+2: dc ivac, x0 // invalidate D / U line
+3: add x0, x0, x2
+ cmp x0, x1
+ b.lo 2b
+ dsb sy
+ ret
+ENDPROC(__inval_cache_range)
+ENDPROC(__dma_inv_range)
+
+/*
+ * __dma_clean_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+__dma_clean_range:
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc cvac, x0 // clean D / U line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__dma_clean_range)
+
+/*
+ * __dma_flush_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(__dma_flush_range)
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc civac, x0 // clean & invalidate D / U line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__dma_flush_range)
+
+/*
+ * __dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(__dma_map_area)
+ add x1, x1, x0
+ cmp w2, #DMA_FROM_DEVICE
+ b.eq __dma_inv_range
+ b __dma_clean_range
+ENDPROC(__dma_map_area)
+
+/*
+ * __dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(__dma_unmap_area)
+ add x1, x1, x0
+ cmp w2, #DMA_TO_DEVICE
+ b.ne __dma_inv_range
+ ret
+ENDPROC(__dma_unmap_area)
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 9aecbace4128..13bbc3be6f5a 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -27,8 +27,10 @@ void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
copy_page(kto, kfrom);
__flush_dcache_area(kto, PAGE_SIZE);
}
+EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
{
clear_page(kaddr);
}
+EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4bd7579ec9e6..f39a55d58918 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -21,34 +21,277 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
+#include <linux/amba/bus.h>
#include <asm/cacheflush.h>
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
-static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+ bool coherent)
{
- if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ return pgprot_writecombine(prot);
+ else if (!coherent)
+ return pgprot_dmacoherent(prot);
+ return prot;
+}
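A sketch of the attribute path __get_dma_pgprot() inspects, using the struct dma_attrs API of this era; the device and size are illustrative, and on a non-coherent device the call routes through __dma_alloc_noncoherent() to produce a write-combined kernel mapping:

static void *foo_alloc_wc(struct device *dev, dma_addr_t *bus)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	/* returned mapping is write-combined rather than cacheable */
	return dma_alloc_attrs(dev, SZ_4K, bus, GFP_KERNEL, &attrs);
}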
+
+static void *__dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ if (IS_ENABLED(CONFIG_ZONE_DMA) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
- flags |= GFP_DMA32;
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ flags |= GFP_DMA;
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ struct page *page;
+
+ size = PAGE_ALIGN(size);
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size));
+ if (!page)
+ return NULL;
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ return page_address(page);
+ } else {
+ return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ }
+}
+
+static void __dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
+ dma_release_from_contiguous(dev,
+ phys_to_page(paddr),
+ size >> PAGE_SHIFT);
+ } else {
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ }
+}
+
+static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ struct page *page, **map;
+ void *ptr, *coherent_ptr;
+ int order, i;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+ if (!ptr)
+ goto no_mem;
+ map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
+ if (!map)
+ goto no_map;
+
+ /* remove any dirty cache lines on the kernel alias */
+ __dma_flush_range(ptr, ptr + size);
+
+ /* create a coherent mapping */
+ page = virt_to_page(ptr);
+ for (i = 0; i < (size >> PAGE_SHIFT); i++)
+ map[i] = page + i;
+ coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
+ __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
+ kfree(map);
+ if (!coherent_ptr)
+ goto no_map;
+
+ return coherent_ptr;
+
+no_map:
+ __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+no_mem:
+ *dma_handle = ~0;
+ return NULL;
+}
+
+static void __dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+
+ vunmap(vaddr);
+ __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+}
+
+static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t dev_addr;
+
+ dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+
+ return dev_addr;
+}
+
+
+static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i, ret;
+
+ ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
+ for_each_sg(sgl, sg, ret, i)
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+
+ return ret;
+}
+
+static void __swiotlb_unmap_sg_attrs(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+ swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
-static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+static void __swiotlb_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir)
{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
-static struct dma_map_ops arm64_swiotlb_dma_ops = {
- .alloc = arm64_swiotlb_alloc_coherent,
- .free = arm64_swiotlb_free_coherent,
+static void __swiotlb_sync_single_for_device(struct device *dev,
+ dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+}
+
+static void __swiotlb_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+ swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
+}
+
+static void __swiotlb_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+}
+
+/* vma->vm_page_prot must be set appropriately before calling this function */
+static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ int ret = -ENXIO;
+ unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
+ PAGE_SHIFT;
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
+ unsigned long off = vma->vm_pgoff;
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
+static int __swiotlb_mmap_noncoherent(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
+ return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+static int __swiotlb_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ /* Just use whatever page_prot attributes were specified */
+ return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+struct dma_map_ops noncoherent_swiotlb_dma_ops = {
+ .alloc = __dma_alloc_noncoherent,
+ .free = __dma_free_noncoherent,
+ .mmap = __swiotlb_mmap_noncoherent,
+ .map_page = __swiotlb_map_page,
+ .unmap_page = __swiotlb_unmap_page,
+ .map_sg = __swiotlb_map_sg_attrs,
+ .unmap_sg = __swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = __swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = __swiotlb_sync_sg_for_device,
+ .dma_supported = swiotlb_dma_supported,
+ .mapping_error = swiotlb_dma_mapping_error,
+};
+EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
+
+struct dma_map_ops coherent_swiotlb_dma_ops = {
+ .alloc = __dma_alloc_coherent,
+ .free = __dma_free_coherent,
+ .mmap = __swiotlb_mmap_coherent,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = swiotlb_map_sg_attrs,
@@ -60,12 +303,47 @@ static struct dma_map_ops arm64_swiotlb_dma_ops = {
.dma_supported = swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
};
+EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
+
+static int dma_bus_notifier(struct notifier_block *nb,
+ unsigned long event, void *_dev)
+{
+ struct device *dev = _dev;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ if (of_property_read_bool(dev->of_node, "dma-coherent"))
+ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block platform_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
+static struct notifier_block amba_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
+extern int swiotlb_late_init_with_default_size(size_t default_size);
-void __init arm64_swiotlb_init(void)
+static int __init swiotlb_late_init(void)
{
- dma_ops = &arm64_swiotlb_dma_ops;
- swiotlb_init(1);
+ size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+
+ /*
+ * These must be registered before of_platform_populate().
+ */
+ bus_register_notifier(&platform_bus_type, &platform_bus_nb);
+ bus_register_notifier(&amba_bustype, &amba_bus_nb);
+
+ dma_ops = &noncoherent_swiotlb_dma_ops;
+
+ return swiotlb_late_init_with_default_size(swiotlb_size);
}
+arch_initcall(swiotlb_late_init);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1426468b77f3..df4f2fd187c3 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -130,7 +130,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
force_sig_info(sig, &si, tsk);
}
-void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
#define ESR_CM (1 << 8)
#define ESR_LNX_EXEC (1 << 24)
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
- unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
- if (esr & ESR_WRITE)
- mask = VM_WRITE;
- if (esr & ESR_LNX_EXEC)
- mask = VM_EXEC;
-
- return vma->vm_flags & mask ? false : true;
-}
-
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
- unsigned int esr, unsigned int flags,
+ unsigned int mm_flags, unsigned long vm_flags,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
* it.
*/
good_area:
- if (access_error(esr, vma)) {
+ /*
+ * Check that the permissions on the VMA allow for the fault which
+ * occurred. If we encountered a write or exec fault, we must have
+ * appropriate permissions, otherwise we allow any permission.
+ */
+ if (!(vma->vm_flags & vm_flags)) {
fault = VM_FAULT_BADACCESS;
goto out;
}
- return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+ return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
- bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (write ? FAULT_FLAG_WRITE : 0);
+ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+ unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ if (esr & ESR_LNX_EXEC) {
+ vm_flags = VM_EXEC;
+ } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+ vm_flags = VM_WRITE;
+ mm_flags |= FAULT_FLAG_WRITE;
+ }
tsk = current;
mm = tsk->mm;
@@ -248,7 +242,7 @@ retry:
#endif
}
- fault = __do_page_fault(mm, addr, esr, flags, tsk);
+ fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
/*
* If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ retry:
*/
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ retry:
* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
* starvation.
*/
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
@@ -365,17 +359,6 @@ static int __kprobes do_translation_fault(unsigned long addr,
}
/*
- * Some section permission faults need to be handled gracefully. They can
- * happen due to a __{get,put}_user during an oops.
- */
-static int do_sect_fault(unsigned long addr, unsigned int esr,
- struct pt_regs *regs)
-{
- do_bad_area(addr, esr, regs);
- return 0;
-}
-
-/*
* This abort handler always returns "fault".
*/
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
@@ -398,12 +381,12 @@ static struct fault_info {
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "reserved access flag fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
{ do_bad, SIGBUS, 0, "reserved permission fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
- { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_bad, SIGBUS, 0, "synchronous external abort" },
{ do_bad, SIGBUS, 0, "asynchronous external abort" },
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 88611c3a421a..0d64089d28b5 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -70,23 +70,17 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
#endif
}
-void __flush_dcache_page(struct page *page)
-{
- __flush_dcache_area(page_address(page), PAGE_SIZE);
-}
-
void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
- unsigned long pfn;
- struct page *page;
+ struct page *page = pte_page(pte);
- pfn = pte_pfn(pte);
- if (!pfn_valid(pfn))
+ /* no flushing needed for anonymous pages */
+ if (!page_mapping(page))
return;
- page = pfn_to_page(pfn);
if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
- __flush_dcache_page(page);
+ __flush_dcache_area(page_address(page),
+ PAGE_SIZE << compound_order(page));
__flush_icache_all();
} else if (icache_is_aivivt()) {
__flush_icache_all();
@@ -94,28 +88,14 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
}
/*
- * Ensure cache coherency between kernel mapping and userspace mapping of this
- * page.
+ * This function is called when a page has been modified by the kernel. Mark
+ * it as dirty for later flushing when mapped in user space (if executable,
+ * see __sync_icache_dcache).
*/
void flush_dcache_page(struct page *page)
{
- struct address_space *mapping;
-
- /*
- * The zero page is never written to, so never has any dirty cache
- * lines, and therefore never needs to be flushed.
- */
- if (page == ZERO_PAGE(0))
- return;
-
- mapping = page_mapping(page);
- if (mapping && mapping_mapped(mapping)) {
- __flush_dcache_page(page);
- __flush_icache_all();
- set_bit(PG_dcache_clean, &page->flags);
- } else {
+ if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
- }
}
EXPORT_SYMBOL(flush_dcache_page);
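/*
 * The lazy-flush protocol above in short: flush_dcache_page() only marks
 * the page dirty by clearing PG_dcache_clean; the D-cache flush and
 * I-cache invalidation are deferred to __sync_icache_dcache() when the
 * page is mapped into user space. State-machine sketch:
 *
 *	kernel writes page   -> flush_dcache_page()    -> PG_dcache_clean = 0
 *	first user mapping   -> __sync_icache_dcache() -> flush, bit set to 1
 *	later user mappings  -> __sync_icache_dcache() -> no flush (bit set)
 */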
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
new file mode 100644
index 000000000000..2fc8258bab2d
--- /dev/null
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -0,0 +1,70 @@
+/*
+ * arch/arm64/mm/hugetlbpage.c
+ *
+ * Copyright (C) 2013 Linaro Ltd.
+ *
+ * Based on arch/x86/mm/hugetlbpage.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+#endif
+
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ int write)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return !(pmd_val(pmd) & PMD_TABLE_BIT);
+}
+
+int pud_huge(pud_t pud)
+{
+ return !(pud_val(pud) & PUD_TABLE_BIT);
+}
+
+static __init int setup_hugepagesz(char *opt)
+{
+ unsigned long ps = memparse(opt, &opt);
+ if (ps == PMD_SIZE) {
+ hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
+ } else if (ps == PUD_SIZE) {
+ hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ } else {
+ pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
+ return 0;
+ }
+ return 1;
+}
+__setup("hugepagesz=", setup_hugepagesz);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f497ca77925a..a725447ec8e0 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -30,6 +30,8 @@
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include <asm/prom.h>
#include <asm/sections.h>
@@ -44,8 +46,7 @@ static unsigned long phys_initrd_size __initdata = 0;
phys_addr_t memstart_addr __read_mostly = 0;
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
phys_initrd_start = start;
phys_initrd_size = end - start;
@@ -67,22 +68,22 @@ static int __init early_initrd(char *p)
}
early_param("initrd", early_initrd);
-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
-
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- unsigned long max_dma32 = min;
+ unsigned long max_dma = min;
memset(zone_size, 0, sizeof(zone_size));
-#ifdef CONFIG_ZONE_DMA32
/* 4GB maximum for 32-bit only capable devices */
- max_dma32 = max(min, min(max, MAX_DMA32_PFN));
- zone_size[ZONE_DMA32] = max_dma32 - min;
-#endif
- zone_size[ZONE_NORMAL] = max - max_dma32;
+ if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+ unsigned long max_dma_phys =
+ (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
+ max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+ zone_size[ZONE_DMA] = max_dma - min;
+ }
+ zone_size[ZONE_NORMAL] = max - max_dma;
memcpy(zhole_size, zone_size, sizeof(zhole_size));
@@ -92,15 +93,15 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (start >= max)
continue;
-#ifdef CONFIG_ZONE_DMA32
- if (start < max_dma32) {
- unsigned long dma_end = min(end, max_dma32);
- zhole_size[ZONE_DMA32] -= dma_end - start;
+
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
+ unsigned long dma_end = min(end, max_dma);
+ zhole_size[ZONE_DMA] -= dma_end - start;
}
-#endif
- if (end > max_dma32) {
+
+ if (end > max_dma) {
unsigned long normal_end = min(end, max);
- unsigned long normal_start = max(start, max_dma32);
+ unsigned long normal_start = max(start, max_dma);
zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
}
}
@@ -109,9 +110,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
}
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
+#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
+
int pfn_valid(unsigned long pfn)
{
- return memblock_is_memory(pfn << PAGE_SHIFT);
+ return (pfn & PFN_MASK) == pfn && memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif
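/*
 * Why the PFN_MASK check above matters: a corrupt pfn with bits at or
 * above (64 - PAGE_SHIFT) set would overflow the pfn << PAGE_SHIFT
 * conversion and could alias a valid physical address. Sketch with 4K
 * pages (PAGE_SHIFT == 12, values made up):
 *
 *	pfn = 0x0010000000000001          garbage bit 52 set
 *	pfn << 12 = 0x1000                wraps, looks "valid" to memblock
 *	(pfn & PFN_MASK) != pfn           -> rejected up front
 */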
@@ -135,7 +138,10 @@ void __init arm64_memblock_init(void)
{
u64 *reserve_map, base, size;
- /* Register the kernel text, kernel data and initrd with memblock */
+ /*
+ * Register the kernel text, kernel data, initrd, and initial
+ * pagetables with memblock.
+ */
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size) {
@@ -147,13 +153,6 @@ void __init arm64_memblock_init(void)
}
#endif
- /*
- * Reserve the page tables. These are already in use,
- * and can only be in node 0.
- */
- memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
- memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
-
/* Reserve the dtb region */
memblock_reserve(virt_to_phys(initial_boot_params),
be32_to_cpu(initial_boot_params->totalsize));
@@ -173,6 +172,9 @@ void __init arm64_memblock_init(void)
memblock_reserve(base, size);
}
+ early_init_fdt_scan_reserved_mem();
+ dma_contiguous_reserve(0);
+
memblock_allow_resize();
memblock_dump_all();
}
@@ -283,8 +285,6 @@ void __init mem_init(void)
unsigned long reserved_pages, free_pages;
struct memblock_region *reg;
- arm64_swiotlb_init();
-
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#ifndef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 1725cd6db37a..00d315ae1de9 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -25,6 +25,10 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
pgprot_t prot, void *caller)
{
@@ -82,3 +86,95 @@ void __iounmap(volatile void __iomem *io_addr)
vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);
+
+void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
+{
+ /* For normal memory we already have a cacheable mapping. */
+ if (pfn_valid(__phys_to_pfn(phys_addr)))
+ return (void __iomem *)__phys_to_virt(phys_addr);
+
+ return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+#ifndef CONFIG_ARM64_64K_PAGES
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#endif
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+
+ pgd = pgd_offset_k(addr);
+ BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+
+ pud = pud_offset(pgd, addr);
+ BUG_ON(pud_none(*pud) || pud_bad(*pud));
+
+ return pmd_offset(pud, addr);
+}
+
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
+{
+ pmd_t *pmd = early_ioremap_pmd(addr);
+
+ BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
+
+ return pte_offset_kernel(pmd, addr);
+}
+
+void __init early_ioremap_init(void)
+{
+ pmd_t *pmd;
+
+ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+#ifndef CONFIG_ARM64_64K_PAGES
+ /* the pmd only needs to be populated for the 4K page size */
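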
+ pmd_populate_kernel(&init_mm, pmd, bm_pte);
+#endif
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+ * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+ != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+ if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+ WARN_ON(1);
+ pr_warn("pmd %p != %p\n",
+ pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
+ pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+ fix_to_virt(FIX_BTMAP_BEGIN));
+ pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+ fix_to_virt(FIX_BTMAP_END));
+
+ pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+ pr_warn("FIX_BTMAP_BEGIN: %d\n",
+ FIX_BTMAP_BEGIN);
+ }
+
+ early_ioremap_setup();
+}
+
+void __init __early_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *pte;
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ pte = early_ioremap_pte(addr);
+
+ if (pgprot_val(flags))
+ set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+ else {
+ pte_clear(&init_mm, addr, pte);
+ flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+ }
+}
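/*
 * Illustration of the fixmap plumbing above: early_ioremap_init() wires
 * bm_pte into the FIX_BTMAP slots, after which __early_set_fixmap() can
 * map a single page of MMIO before the normal ioremap machinery is up.
 * Hypothetical caller (the physical address is made up):
 */
static void __init example_early_map(void)
{
	void __iomem *base;

	__early_set_fixmap(FIX_BTMAP_BEGIN, 0x10000000,
			   __pgprot(PROT_DEVICE_nGnRE));
	base = (void __iomem *)fix_to_virt(FIX_BTMAP_BEGIN);
	/* ... poke the device through 'base' ... */
	__early_set_fixmap(FIX_BTMAP_BEGIN, 0, __pgprot(0));	/* unmap */
}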
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index 916701e6d040..d519f4f50c8c 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1,3 +1,2 @@
-extern void __flush_dcache_page(struct page *page);
extern void __init bootmem_init(void);
extern void __init arm64_swiotlb_init(void);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index eeecc9c8ed68..a2155ca6921c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
struct cachepolicy {
const char policy[16];
u64 mair;
@@ -122,33 +117,6 @@ static int __init early_cachepolicy(char *p)
}
early_param("cachepolicy", early_cachepolicy);
-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-static void __init init_mem_pgprot(void)
-{
- pteval_t default_pgprot;
- int i;
-
- default_pgprot = PTE_ATTRINDX(MT_NORMAL);
- prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
- /*
- * Mark memory with the "shared" attribute for SMP systems
- */
- default_pgprot |= PTE_SHARED;
- prot_sect_kernel |= PMD_SECT_S;
-#endif
-
- for (i = 0; i < 16; i++) {
- unsigned long v = pgprot_val(protection_map[i]);
- protection_map[i] = __pgprot(v | default_pgprot);
- }
-
- pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -168,7 +136,8 @@ static void __init *early_alloc(unsigned long sz)
}
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
- unsigned long end, unsigned long pfn)
+ unsigned long end, unsigned long pfn,
+ pgprot_t prot)
{
pte_t *pte;
@@ -180,16 +149,28 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte = pte_offset_kernel(pmd, addr);
do {
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ set_pte(pte, pfn_pte(pfn, prot));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys)
+ unsigned long end, phys_addr_t phys,
+ int map_io)
{
pmd_t *pmd;
unsigned long next;
+ pmdval_t prot_sect;
+ pgprot_t prot_pte;
+
+ if (map_io) {
+ prot_sect = PMD_TYPE_SECT | PMD_SECT_AF |
+ PMD_ATTRINDX(MT_DEVICE_nGnRE);
+ prot_pte = __pgprot(PROT_DEVICE_nGnRE);
+ } else {
+ prot_sect = PROT_SECT_NORMAL_EXEC;
+ prot_pte = PAGE_KERNEL_EXEC;
+ }
/*
* Check for initial section mappings in the pgd/pud and remove them.
@@ -203,23 +184,33 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0)
- set_pmd(pmd, __pmd(phys | prot_sect_kernel));
- else
- alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+ if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+ pmd_t old_pmd = *pmd;
+ set_pmd(pmd, __pmd(phys | prot_sect));
+ /*
+ * Check for previous table entries created during
+ * boot (__create_page_tables) and flush them.
+ */
+ if (!pmd_none(old_pmd))
+ flush_tlb_all();
+ } else {
+ alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+ prot_pte);
+ }
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys)
+ unsigned long end, unsigned long phys,
+ int map_io)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
- alloc_init_pmd(pud, addr, next, phys);
+ alloc_init_pmd(pud, addr, next, phys, map_io);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -228,74 +219,60 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
* Create the page directory entries and any necessary page tables for the
* mapping specified by 'md'.
*/
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
- phys_addr_t size)
+static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ int map_io)
{
unsigned long addr, length, end, next;
- pgd_t *pgd;
-
- if (virt < VMALLOC_START) {
- pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
- phys, virt);
- return;
- }
addr = virt & PAGE_MASK;
length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
- pgd = pgd_offset_k(addr);
end = addr + length;
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys);
+ alloc_init_pud(pgd, addr, next, phys, map_io);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
-#ifdef CONFIG_EARLY_PRINTK
-/*
- * Create an early I/O mapping using the pgd/pmd entries already populated
- * in head.S as this function is called too early to allocate any memory. The
- * mapping size is 2MB with 4KB pages or 64KB with 64KB pages.
- */
-void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size)
{
- unsigned long size, mask;
- bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- /*
- * No early pte entries with !ARM64_64K_PAGES configuration, so using
- * sections (pmd).
- */
- size = page64k ? PAGE_SIZE : SECTION_SIZE;
- mask = ~(size - 1);
-
- pgd = pgd_offset_k(virt);
- pud = pud_offset(pgd, virt);
- if (pud_none(*pud))
- return NULL;
- pmd = pmd_offset(pud, virt);
-
- if (page64k) {
- if (pmd_none(*pmd))
- return NULL;
- pte = pte_offset_kernel(pmd, virt);
- set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
- } else {
- set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
+ if (virt < VMALLOC_START) {
+ pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+ &phys, virt);
+ return;
}
+ __create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
+}
- return (void __iomem *)((virt & mask) + (phys & ~mask));
+void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
+{
+ if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
+ pr_warn("BUG: not creating id mapping for %pa\n", &addr);
+ return;
+ }
+ __create_mapping(&idmap_pg_dir[pgd_index(addr)],
+ addr, addr, size, map_io);
}
-#endif
static void __init map_mem(void)
{
struct memblock_region *reg;
+ phys_addr_t limit;
+
+ /*
+ * Temporarily limit the memblock range. We need to do this as
+ * create_mapping requires puds, pmds and ptes to be allocated from
+ * memory addressable from the initial direct kernel mapping.
+ *
+ * The initial direct kernel mapping, located at swapper_pg_dir,
+ * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
+ * aligned to 2MB as per Documentation/arm64/booting.txt).
+ */
+ limit = PHYS_OFFSET + PGDIR_SIZE;
+ memblock_set_current_limit(limit);
/* map all the memory banks */
for_each_memblock(memory, reg) {
@@ -305,8 +282,27 @@ static void __init map_mem(void)
if (start >= end)
break;
+#ifndef CONFIG_ARM64_64K_PAGES
+ /*
+ * For the first memory bank align the start address and
+ * current memblock limit to prevent create_mapping() from
+ * allocating pte page tables from unmapped memory.
+ * When 64K pages are enabled, the pte page table for the
+ * first PGDIR_SIZE is already present in swapper_pg_dir.
+ */
+ if (start < limit)
+ start = ALIGN(start, PMD_SIZE);
+ if (end < limit) {
+ limit = end & PMD_MASK;
+ memblock_set_current_limit(limit);
+ }
+#endif
+
create_mapping(start, __phys_to_virt(start), end - start);
}
+
+ /* Limit no longer required. */
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
/*
@@ -317,13 +313,6 @@ void __init paging_init(void)
{
void *zero_page;
- /*
- * Maximum PGDIR_SIZE addressable via the initial direct kernel
- * mapping in swapper_pg_dir.
- */
- memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);
-
- init_mem_pgprot();
map_mem();
/*
@@ -339,7 +328,6 @@ void __init paging_init(void)
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
- __flush_dcache_page(empty_zero_page);
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
@@ -383,6 +371,9 @@ int kern_addr_valid(unsigned long addr)
if (pmd_none(*pmd))
return 0;
+ if (pmd_sect(*pmd))
+ return pfn_valid(pmd_pfn(*pmd));
+
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
return 0;
@@ -423,7 +414,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
if (!p)
return -ENOMEM;
- set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+ set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmd, node, addr, next);
} while (addr = next, addr != end);
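/*
 * Illustration of create_id_mapping() added above: callers that must run
 * code or touch registers through their physical alias (e.g. around an
 * MMU-off transition) can install 1:1 entries in idmap_pg_dir, with
 * map_io selecting device attributes. Hypothetical example; the
 * addresses and sizes are made up:
 *
 *	create_id_mapping(0x80200000, SZ_2M, 0);   // normal memory, 1:1
 *	create_id_mapping(0x1c010000, SZ_64K, 1);  // device registers, 1:1
 */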
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cdada657..62c6101df260 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *new_pgd;
-
if (PGD_SIZE == PAGE_SIZE)
- new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ return (pgd_t *)get_zeroed_page(GFP_KERNEL);
else
- new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
- if (!new_pgd)
- return NULL;
-
- return new_pgd;
+ return kzalloc(PGD_SIZE, GFP_KERNEL);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 8957b822010b..005d29e2977d 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -38,8 +38,7 @@
*/
.macro dcache_line_size, reg, tmp
mrs \tmp, ctr_el0 // read CTR
- lsr \tmp, \tmp, #16
- and \tmp, \tmp, #0xf // cache line size encoding
+ ubfm \tmp, \tmp, #16, #19 // cache line size encoding
mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size
.endm
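/*
 * What the rewritten macro computes: CTR_EL0[19:16] (DminLine) holds the
 * log2 of the smallest D-cache line in words, so the line size in bytes
 * is 4 << DminLine; ubfm extracts the field in one instruction instead
 * of the old lsr+and pair. Equivalent C sketch, taking a raw CTR_EL0
 * value as input:
 */
static inline unsigned int dcache_line_size_bytes(unsigned long ctr)
{
	unsigned int dminline = (ctr >> 16) & 0xf;	/* CTR_EL0[19:16] */

	return 4U << dminline;				/* words -> bytes */
}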
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a82ae8868077..e0ef63cd05dc 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -80,8 +80,77 @@ ENTRY(cpu_do_idle)
ret
ENDPROC(cpu_do_idle)
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+/**
+ * cpu_do_suspend - save CPU registers context
+ *
+ * x0: virtual address of context pointer
+ */
+ENTRY(cpu_do_suspend)
+ mrs x2, tpidr_el0
+ mrs x3, tpidrro_el0
+ mrs x4, contextidr_el1
+ mrs x5, mair_el1
+ mrs x6, cpacr_el1
+ mrs x7, ttbr1_el1
+ mrs x8, tcr_el1
+ mrs x9, vbar_el1
+ mrs x10, mdscr_el1
+ mrs x11, oslsr_el1
+ mrs x12, sctlr_el1
+ stp x2, x3, [x0]
+ stp x4, x5, [x0, #16]
+ stp x6, x7, [x0, #32]
+ stp x8, x9, [x0, #48]
+ stp x10, x11, [x0, #64]
+ str x12, [x0, #80]
+ ret
+ENDPROC(cpu_do_suspend)
+
+/**
+ * cpu_do_resume - restore CPU register context
+ *
+ * x0: Physical address of context pointer
+ * x1: ttbr0_el1 to be restored
+ *
+ * Returns:
+ * sctlr_el1 value in x0
+ */
+ENTRY(cpu_do_resume)
+ /*
+ * Invalidate local tlb entries before turning on MMU
+ */
+ tlbi vmalle1
+ ldp x2, x3, [x0]
+ ldp x4, x5, [x0, #16]
+ ldp x6, x7, [x0, #32]
+ ldp x8, x9, [x0, #48]
+ ldp x10, x11, [x0, #64]
+ ldr x12, [x0, #80]
+ msr tpidr_el0, x2
+ msr tpidrro_el0, x3
+ msr contextidr_el1, x4
+ msr mair_el1, x5
+ msr cpacr_el1, x6
+ msr ttbr0_el1, x1
+ msr ttbr1_el1, x7
+ msr tcr_el1, x8
+ msr vbar_el1, x9
+ msr mdscr_el1, x10
+ /*
+ * Restore oslsr_el1 by writing oslar_el1
+ */
+ ubfx x11, x11, #1, #1
+ msr oslar_el1, x11
+ mov x0, x12
+ dsb nsh // Make sure local tlb invalidation completed
+ isb
+ ret
+ENDPROC(cpu_do_resume)
+#endif
+
/*
- * cpu_switch_mm(pgd_phys, tsk)
+ * cpu_do_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys.
*
@@ -95,10 +164,6 @@ ENTRY(cpu_do_switch_mm)
ret
ENDPROC(cpu_do_switch_mm)
-cpu_name:
- .ascii "AArch64 Processor"
- .align
-
.section ".text.init", #alloc, #execinstr
/*
@@ -108,19 +173,13 @@ cpu_name:
* value of the SCTLR_EL1 register.
*/
ENTRY(__cpu_setup)
- /*
- * Preserve the link register across the function call.
- */
- mov x28, lr
- bl __flush_dcache_all
- mov lr, x28
ic iallu // I+BTB cache invalidate
+ tlbi vmalle1is // invalidate I + D TLBs
dsb sy
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
msr mdscr_el1, xzr // Reset mdscr_el1
- tlbi vmalle1is // invalidate I + D TLBs
/*
* Memory region attributes for LPAE:
*
@@ -151,7 +210,7 @@ ENTRY(__cpu_setup)
* both user and kernel.
*/
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
- TCR_ASID16 | (1 << 31)
+ TCR_ASID16 | TCR_TBI0 | (1 << 31)
#ifdef CONFIG_ARM64_64K_PAGES
orr x10, x10, TCR_TG0_64K
orr x10, x10, TCR_TG1_64K
@@ -166,9 +225,9 @@ ENDPROC(__cpu_setup)
* CE0 XWHW CZ ME TEEA S
* .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
* 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
- * .... .100 .... 01.1 11.1 ..01 0001 1101 < software settings
+ * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
*/
.type crval, #object
crval:
- .word 0x030802e2 // clear
+ .word 0x000802e2 // clear
.word 0x0405d11d // set
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
deleted file mode 100644
index 8ae80a18e8ec..000000000000
--- a/arch/arm64/mm/tlb.S
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Based on arch/arm/mm/tlb.S
- *
- * Copyright (C) 1997-2002 Russell King
- * Copyright (C) 2012 ARM Ltd.
- * Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-#include "proc-macros.S"
-
-/*
- * __cpu_flush_user_tlb_range(start, end, vma)
- *
- * Invalidate a range of TLB entries in the specified address space.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- * - vma - vma_struct describing address range
- */
-ENTRY(__cpu_flush_user_tlb_range)
- vma_vm_mm x3, x2 // get vma->vm_mm
- mmid x3, x3 // get vm_mm->context.id
- dsb sy
- lsr x0, x0, #12 // align address
- lsr x1, x1, #12
- bfi x0, x3, #48, #16 // start VA and ASID
- bfi x1, x3, #48, #16 // end VA and ASID
-1: tlbi vae1is, x0 // TLB invalidate by address and ASID
- add x0, x0, #1
- cmp x0, x1
- b.lo 1b
- dsb sy
- ret
-ENDPROC(__cpu_flush_user_tlb_range)
-
-/*
- * __cpu_flush_kern_tlb_range(start,end)
- *
- * Invalidate a range of kernel TLB entries.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- */
-ENTRY(__cpu_flush_kern_tlb_range)
- dsb sy
- lsr x0, x0, #12 // align address
- lsr x1, x1, #12
-1: tlbi vaae1is, x0 // TLB invalidate by address
- add x0, x0, #1
- cmp x0, x1
- b.lo 1b
- dsb sy
- isb
- ret
-ENDPROC(__cpu_flush_kern_tlb_range)
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index 22fb66590dcd..dba48a5d5bb9 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -11,7 +11,7 @@ all: uImage vmlinux.elf
KBUILD_DEFCONFIG := atstk1002_defconfig
-KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic
+KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
KBUILD_AFLAGS += -mrelax -mno-pic
KBUILD_CFLAGS_MODULE += -mno-relax
LDFLAGS_vmlinux += --relax
diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c
index 9764a1a1073e..c1466a872b9c 100644
--- a/arch/avr32/boards/mimc200/fram.c
+++ b/arch/avr32/boards/mimc200/fram.c
@@ -11,6 +11,7 @@
#define FRAM_VERSION "1.0"
#include <linux/miscdevice.h>
+#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/io.h>
diff --git a/arch/avr32/boot/u-boot/head.S b/arch/avr32/boot/u-boot/head.S
index 4488fa27fe94..2ffc298f061b 100644
--- a/arch/avr32/boot/u-boot/head.S
+++ b/arch/avr32/boot/u-boot/head.S
@@ -8,6 +8,8 @@
* published by the Free Software Foundation.
*/
#include <asm/setup.h>
+#include <asm/thread_info.h>
+#include <asm/sysreg.h>
/*
* The kernel is loaded where we want it to be and all caches
@@ -20,11 +22,6 @@
.section .init.text,"ax"
.global _start
_start:
- /* Check if the boot loader actually provided a tag table */
- lddpc r0, magic_number
- cp.w r12, r0
- brne no_tag_table
-
/* Initialize .bss */
lddpc r2, bss_start_addr
lddpc r3, end_addr
@@ -34,6 +31,25 @@ _start:
cp r2, r3
brlo 1b
+ /* Initialize status register */
+ lddpc r0, init_sr
+ mtsr SYSREG_SR, r0
+
+ /* Set initial stack pointer */
+ lddpc sp, stack_addr
+ sub sp, -THREAD_SIZE
+
+#ifdef CONFIG_FRAME_POINTER
+ /* Mark last stack frame */
+ mov lr, 0
+ mov r7, 0
+#endif
+
+ /* Check if the boot loader actually provided a tag table */
+ lddpc r0, magic_number
+ cp.w r12, r0
+ brne no_tag_table
+
/*
* Save the tag table address for later use. This must be done
* _after_ .bss has been initialized...
@@ -53,8 +69,15 @@ bss_start_addr:
.long __bss_start
end_addr:
.long _end
+init_sr:
+ .long 0x007f0000 /* Supervisor mode, everything masked */
+stack_addr:
+ .long init_thread_union
+panic_addr:
+ .long panic
no_tag_table:
sub r12, pc, (. - 2f)
- bral panic
+ /* branch to panic(), which may be too far away for a relative branch */
+ lddpc pc, panic_addr
2: .asciz "Boot loader didn't provide correct magic number\n"
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 9899d3cc6f03..7301f4806bbe 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -401,9 +401,10 @@ handle_critical:
/* We should never get here... */
bad_return:
sub r12, pc, (. - 1f)
- bral panic
+ lddpc pc, 2f
.align 2
1: .asciz "Return from critical exception!"
+2: .long panic
.align 1
do_bus_error_write:
diff --git a/arch/avr32/kernel/head.S b/arch/avr32/kernel/head.S
index 6163bd0acb95..59eae6dfbed2 100644
--- a/arch/avr32/kernel/head.S
+++ b/arch/avr32/kernel/head.S
@@ -10,33 +10,13 @@
#include <linux/linkage.h>
#include <asm/page.h>
-#include <asm/thread_info.h>
-#include <asm/sysreg.h>
.section .init.text,"ax"
.global kernel_entry
kernel_entry:
- /* Initialize status register */
- lddpc r0, init_sr
- mtsr SYSREG_SR, r0
-
- /* Set initial stack pointer */
- lddpc sp, stack_addr
- sub sp, -THREAD_SIZE
-
-#ifdef CONFIG_FRAME_POINTER
- /* Mark last stack frame */
- mov lr, 0
- mov r7, 0
-#endif
-
/* Start the show */
lddpc pc, kernel_start_addr
.align 2
-init_sr:
- .long 0x007f0000 /* Supervisor mode, everything masked */
-stack_addr:
- .long init_thread_union
kernel_start_addr:
.long start_kernel
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 869a1c6ffeee..12f828ad5058 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -98,7 +98,14 @@ static void comparator_mode(enum clock_event_mode mode,
case CLOCK_EVT_MODE_SHUTDOWN:
sysreg_write(COMPARE, 0);
pr_debug("%s: stop\n", evdev->name);
- cpu_idle_poll_ctrl(false);
+ if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
+ evdev->mode == CLOCK_EVT_MODE_RESUME) {
+ /*
+ * Only disable idle polling if a previous
+ * call forced it on.
+ */
+ cpu_idle_poll_ctrl(false);
+ }
break;
default:
BUG();
diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h
index 8a029505d7b7..2f1c3c2657ad 100644
--- a/arch/blackfin/include/asm/ftrace.h
+++ b/arch/blackfin/include/asm/ftrace.h
@@ -66,16 +66,7 @@ extern inline void *return_address(unsigned int level)
#endif /* CONFIG_FRAME_POINTER */
-#define HAVE_ARCH_CALLER_ADDR
-
-/* inline function or macro may lead to unexpected result */
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 ((unsigned long)return_address(1))
-#define CALLER_ADDR2 ((unsigned long)return_address(2))
-#define CALLER_ADDR3 ((unsigned long)return_address(3))
-#define CALLER_ADDR4 ((unsigned long)return_address(4))
-#define CALLER_ADDR5 ((unsigned long)return_address(5))
-#define CALLER_ADDR6 ((unsigned long)return_address(6))
+#define ftrace_return_address(n) return_address(n)
#endif /* __ASSEMBLY__ */
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
index bdb56f09d0ac..287d0e64dfba 100644
--- a/arch/c6x/kernel/devicetree.c
+++ b/arch/c6x/kernel/devicetree.c
@@ -33,8 +33,7 @@ void __init early_init_devtree(void *params)
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index a9fcd89b251b..b74ccb5a7690 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -18,6 +18,7 @@
#include <linux/initrd.h>
#include <asm/sections.h>
+#include <asm/uaccess.h>
/*
* ZERO_PAGE is a special page that is used for zero-initialized
diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
index ac12ae2b9286..db9a16c704f3 100644
--- a/arch/cris/include/asm/io.h
+++ b/arch/cris/include/asm/io.h
@@ -3,6 +3,7 @@
#include <asm/page.h> /* for __va, __pa */
#include <arch/io.h>
+#include <asm-generic/iomap.h>
#include <linux/kernel.h>
struct cris_io_operations
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index e0a899a1a8a6..5a84b3a50741 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -319,7 +319,7 @@ struct thread_struct {
regs->loadrs = 0; \
regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
- if (unlikely(!get_dumpable(current->mm))) { \
+ if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
/* \
* Zap scratch regs to avoid leaking bits between processes with different \
* uid/privileges. \
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de01954..bc5efc7c3f3f 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
* unmapping a portion of the virtual address space, these hooks are called according to
* the following template:
*
- * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
+ * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
* {
* for each vma that needs a shootdown do {
* tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
unsigned int max;
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
+ unsigned long start, end;
unsigned long start_addr;
unsigned long end_addr;
struct page **pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
tlb->nr = 0;
- tlb->fullmm = full_mm_flush;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->start_addr = ~0UL;
}
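/*
 * Note on the fullmm test above: a full address-space teardown is
 * requested with start == 0 and end == ~0UL (all ones), so
 * !(start | (end + 1)) evaluates to 1 only in that case:
 *
 *	start = 0,    end = ~0UL        -> 0 | 0        -> fullmm = 1
 *	start = 0,    end = TASK_SIZE   -> 0 | nonzero  -> fullmm = 0
 *	start = addr, end = addr + len  -> nonzero      -> fullmm = 0
 */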
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f034563aeae5..ff0968042829 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -44,10 +44,15 @@
#define EFI_DEBUG 0
+static __initdata unsigned long palo_phys;
+
+static __initdata efi_config_table_type_t arch_tables[] = {
+ {PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID, "PALO", &palo_phys},
+ {NULL_GUID, NULL, 0},
+};
+
extern efi_status_t efi_call_phys (void *, ...);
-struct efi efi;
-EXPORT_SYMBOL(efi);
static efi_runtime_services_t *runtime;
static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
@@ -423,9 +428,9 @@ static u8 __init palo_checksum(u8 *buffer, u32 length)
* Parse and handle PALO table which is published at:
* http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
*/
-static void __init handle_palo(unsigned long palo_phys)
+static void __init handle_palo(unsigned long phys_addr)
{
- struct palo_table *palo = __va(palo_phys);
+ struct palo_table *palo = __va(phys_addr);
u8 checksum;
if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
@@ -467,12 +472,13 @@ void __init
efi_init (void)
{
void *efi_map_start, *efi_map_end;
- efi_config_table_t *config_tables;
efi_char16_t *c16;
u64 efi_desc_size;
char *cp, vendor[100] = "unknown";
int i;
- unsigned long palo_phys;
+
+ set_bit(EFI_BOOT, &efi.flags);
+ set_bit(EFI_64BIT, &efi.flags);
/*
* It's too early to be able to use the standard kernel command line
@@ -514,8 +520,6 @@ efi_init (void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff);
- config_tables = __va(efi.systab->tables);
-
/* Show what we know for posterity */
c16 = __va(efi.systab->fw_vendor);
if (c16) {
@@ -528,43 +532,12 @@ efi_init (void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
- efi.mps = EFI_INVALID_TABLE_ADDR;
- efi.acpi = EFI_INVALID_TABLE_ADDR;
- efi.acpi20 = EFI_INVALID_TABLE_ADDR;
- efi.smbios = EFI_INVALID_TABLE_ADDR;
- efi.sal_systab = EFI_INVALID_TABLE_ADDR;
- efi.boot_info = EFI_INVALID_TABLE_ADDR;
- efi.hcdp = EFI_INVALID_TABLE_ADDR;
- efi.uga = EFI_INVALID_TABLE_ADDR;
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
palo_phys = EFI_INVALID_TABLE_ADDR;
- for (i = 0; i < (int) efi.systab->nr_tables; i++) {
- if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
- efi.mps = config_tables[i].table;
- printk(" MPS=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
- efi.acpi20 = config_tables[i].table;
- printk(" ACPI 2.0=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
- efi.acpi = config_tables[i].table;
- printk(" ACPI=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
- efi.smbios = config_tables[i].table;
- printk(" SMBIOS=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
- efi.sal_systab = config_tables[i].table;
- printk(" SALsystab=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
- efi.hcdp = config_tables[i].table;
- printk(" HCDP=0x%lx", config_tables[i].table);
- } else if (efi_guidcmp(config_tables[i].guid,
- PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
- palo_phys = config_tables[i].table;
- printk(" PALO=0x%lx", config_tables[i].table);
- }
- }
- printk("\n");
+ if (efi_config_init(arch_tables) != 0)
+ return;
if (palo_phys != EFI_INVALID_TABLE_ADDR)
handle_palo(palo_phys);
@@ -689,6 +662,8 @@ efi_enter_virtual_mode (void)
return;
}
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
/*
* Now that EFI is in virtual mode, we call the EFI functions more
* efficiently:
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 821170e5f6ed..b969eea4e237 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -16,6 +16,7 @@ config M68K
select FPU if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
+ select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
index 2291a7d69d49..fa277aecfb78 100644
--- a/arch/m68k/emu/natfeat.c
+++ b/arch/m68k/emu/natfeat.c
@@ -18,9 +18,11 @@
#include <asm/machdep.h>
#include <asm/natfeat.h>
+extern long nf_get_id2(const char *feature_name);
+
asm("\n"
-" .global nf_get_id,nf_call\n"
-"nf_get_id:\n"
+" .global nf_get_id2,nf_call\n"
+"nf_get_id2:\n"
" .short 0x7300\n"
" rts\n"
"nf_call:\n"
@@ -29,12 +31,25 @@ asm("\n"
"1: moveq.l #0,%d0\n"
" rts\n"
" .section __ex_table,\"a\"\n"
-" .long nf_get_id,1b\n"
+" .long nf_get_id2,1b\n"
" .long nf_call,1b\n"
" .previous");
-EXPORT_SYMBOL_GPL(nf_get_id);
EXPORT_SYMBOL_GPL(nf_call);
+long nf_get_id(const char *feature_name)
+{
+ /* feature_name may be in vmalloc()ed memory, so make a copy */
+ char name_copy[32];
+ size_t n;
+
+ n = strlcpy(name_copy, feature_name, sizeof(name_copy));
+ if (n >= sizeof(name_copy))
+ return 0;
+
+ return nf_get_id2(name_copy);
+}
+EXPORT_SYMBOL_GPL(nf_get_id);
+
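/*
 * Why the bounce buffer above helps: the 0x7300 NatFeat trap is handled
 * by the host emulator, which (presumably) resolves the string through a
 * physical mapping, so a vmalloc()ed feature_name may not be reachable
 * there; copying it into an on-stack array keeps the lookup safe.
 * Hypothetical caller, using the well-known NF_STDERR feature name:
 *
 *	long id = nf_get_id("NF_STDERR");
 */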
void nfprint(const char *fmt, ...)
{
static char buf[256];
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index 444ea8a09e9f..ef881cfbbca9 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -15,16 +15,17 @@
unsigned long long n64; \
} __n; \
unsigned long __rem, __upper; \
+ unsigned long __base = (base); \
\
__n.n64 = (n); \
if ((__upper = __n.n32[0])) { \
asm ("divul.l %2,%1:%0" \
- : "=d" (__n.n32[0]), "=d" (__upper) \
- : "d" (base), "0" (__n.n32[0])); \
+ : "=d" (__n.n32[0]), "=d" (__upper) \
+ : "d" (__base), "0" (__n.n32[0])); \
} \
asm ("divu.l %2,%1:%0" \
- : "=d" (__n.n32[1]), "=d" (__rem) \
- : "d" (base), "1" (__upper), "0" (__n.n32[1])); \
+ : "=d" (__n.n32[1]), "=d" (__rem) \
+ : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
(n) = __n.n64; \
__rem; \
})
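/*
 * The point of the __base temporary above: the macro's second operand
 * used to be expanded twice inside the asm constraints, so an argument
 * with side effects was evaluated twice. Minimal reproduction of the
 * hazard (illustrative):
 *
 *	unsigned long long n = 1000;
 *	int i = 0;
 *	do_div(n, base_table[i++]);	// old macro: i++ could run twice
 *
 * With the local copy, the divisor expression is evaluated exactly once.
 */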
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index c90bfc6bf648..e355a4c10968 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -15,6 +15,7 @@ static inline void wr_fence(void)
volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
barrier();
*flushptr = 0;
+ barrier();
}
#else /* CONFIG_METAG_META21 */
@@ -35,6 +36,7 @@ static inline void wr_fence(void)
*flushptr = 0;
*flushptr = 0;
*flushptr = 0;
+ barrier();
}
#endif /* !CONFIG_METAG_META21 */
@@ -68,6 +70,7 @@ static inline void fence(void)
volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
barrier();
*flushptr = 0;
+ barrier();
}
#define smp_mb() fence()
#define smp_rmb() fence()
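/*
 * Why the trailing barrier() added above matters: without it the
 * compiler may reorder later memory accesses ahead of the volatile write
 * that triggers the hardware fence, so wr_fence()/fence() would not act
 * as compiler barriers. Illustrative pattern being protected:
 *
 *	buf->data = x;		// must be visible first
 *	wr_fence();
 *	buf->ready = 1;		// must not be hoisted above the fence
 */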
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index 9b029a7911c3..579e3d93a5ca 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -22,6 +22,8 @@
/* Add an extra page of padding at the top of the stack for the guard page. */
#define STACK_TOP (TASK_SIZE - PAGE_SIZE)
#define STACK_TOP_MAX STACK_TOP
+/* Maximum virtual space for stack */
+#define STACK_SIZE_MAX (1 << 28) /* 256 MB */
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index d05b8455c44c..bdc48111f0df 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -419,10 +419,9 @@ void free_initrd_mem(unsigned long start, unsigned long end)
#endif
#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
- pr_err("%s(%lx, %lx)\n",
+ pr_err("%s(%llx, %llx)\n",
__func__, start, end);
}
#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d22a4ecffff4..4fab52294d98 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,7 +28,7 @@ config MICROBLAZE
select GENERIC_CLOCKEVENTS
select GENERIC_IDLE_POLL_SETUP
select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS
+ select CLONE_BACKWARDS3
config SWAP
def_bool n
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 0a2c68f9f9b0..c9c766fe6321 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -52,13 +52,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
}
#ifdef CONFIG_EARLY_PRINTK
-static char *stdout;
+static const char *stdout;
static int __init early_init_dt_scan_chosen_serial(unsigned long node,
const char *uname, int depth, void *data)
{
- unsigned long l;
- char *p;
+ int l;
+ const char *p;
pr_debug("%s: depth: %d, uname: %s\n", __func__, depth, uname);
@@ -89,7 +89,7 @@ static int __init early_init_dt_scan_chosen_serial(unsigned long node,
(strncmp(p, "xlnx,opb-uartlite", 17) == 0) ||
(strncmp(p, "xlnx,axi-uartlite", 17) == 0) ||
(strncmp(p, "xlnx,mdm", 8) == 0)) {
- unsigned int *addrp;
+ const unsigned int *addrp;
*(u32 *)data = UARTLITE;
@@ -136,8 +136,7 @@ void __init early_init_devtree(void *params)
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7a58ab933b20..e53e2b40d695 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -27,6 +27,7 @@ config MIPS
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select GENERIC_PCI_IOMAP
select HAVE_ARCH_JUMP_LABEL
select ARCH_WANT_IPC_PARSE_VERSION
select IRQ_FORCED_THREADING
@@ -2412,7 +2413,6 @@ config PCI
bool "Support for PCI controller"
depends on HW_HAS_PCI
select PCI_DOMAINS
- select GENERIC_PCI_IOMAP
select NO_GENERIC_PCI_IOPORT_MAP
help
Find out whether you have a PCI motherboard. PCI is the name of a
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 765ef30e3e1c..733017b3dfe7 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -164,7 +164,7 @@ static void __init ar933x_clocks_init(void)
ath79_ahb_clk.rate = freq / t;
}
- ath79_wdt_clk.rate = ath79_ref_clk.rate;
+ ath79_wdt_clk.rate = ath79_ahb_clk.rate;
ath79_uart_clk.rate = ath79_ref_clk.rate;
}
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index a22f06a6f7ca..45c1a6caa206 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -635,7 +635,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
cpumask_clear(&new_affinity);
cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
}
- __irq_set_affinity_locked(data, &new_affinity);
+ irq_set_affinity_locked(data, &new_affinity, false);
}
static int octeon_irq_ciu_set_affinity(struct irq_data *data,
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 01b1b3f94feb..6430e7acb1eb 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -7,6 +7,7 @@
* Copyright (C) 2008, 2009 Wind River Systems
* written by Ralf Baechle <ralf@linux-mips.org>
*/
+#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/console.h>
@@ -462,6 +463,18 @@ static void octeon_halt(void)
octeon_kill_core(NULL);
}
+static char __read_mostly octeon_system_type[80];
+
+static int __init init_octeon_system_type(void)
+{
+ snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
+ cvmx_board_type_to_string(octeon_bootinfo->board_type),
+ octeon_model_get_string(read_c0_prid()));
+
+ return 0;
+}
+early_initcall(init_octeon_system_type);
+
/**
* Handle all the error condition interrupts that might occur.
*
@@ -481,11 +494,7 @@ static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
*/
const char *octeon_board_type_string(void)
{
- static char name[80];
- sprintf(name, "%s (%s)",
- cvmx_board_type_to_string(octeon_bootinfo->board_type),
- octeon_model_get_string(read_c0_prid()));
- return name;
+ return octeon_system_type;
}
const char *get_system_type(void)
@@ -712,7 +721,7 @@ void __init prom_init(void)
if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
pr_info("Skipping L2 locking due to reduced L2 cache size\n");
} else {
- uint32_t ebase = read_c0_ebase() & 0x3ffff000;
+ uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
/* TLB refill */
cvmx_l2c_lock_mem_region(ebase, 0x100);
@@ -996,7 +1005,7 @@ void __init plat_mem_setup(void)
cvmx_bootmem_unlock();
/* Add the memory region for the kernel. */
kernel_start = (unsigned long) _text;
- kernel_size = ALIGN(_end - _text, 0x100000);
+ kernel_size = _end - _text;
/* Adjust for physical offset. */
kernel_start &= ~0xffffffff80000000ULL;
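/*
 * The rework above removes a latent race: octeon_board_type_string()
 * used to sprintf() into a function-local static buffer on every call,
 * so concurrent callers could interleave writes. Building the string
 * once from an early_initcall turns the accessor into a read-only
 * lookup:
 *
 *	const char *t = octeon_board_type_string();	// now just a pointer
 */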
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index b7e59853fd33..b84e1fb3fabf 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -170,6 +170,11 @@ static inline void * isa_bus_to_virt(unsigned long address)
extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);
+#ifndef CONFIG_PCI
+struct pci_dev;
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
+#endif
+
static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
unsigned long flags)
{
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 4d6d77ed9b9d..e194f957ca8c 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -22,7 +22,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\tnop\n\t"
+ asm_volatile_goto("1:\tnop\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 87e6207b05e4..3d0074e10595 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -14,6 +14,7 @@
#define _ASM_MIPSREGS_H
#include <linux/linkage.h>
+#include <linux/types.h>
#include <asm/hazards.h>
#include <asm/war.h>
diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h
index 910e71a12466..b8343ccbc989 100644
--- a/arch/mips/include/asm/reg.h
+++ b/arch/mips/include/asm/reg.h
@@ -12,116 +12,194 @@
#ifndef __ASM_MIPS_REG_H
#define __ASM_MIPS_REG_H
-
-#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H)
-
-#define EF_R0 6
-#define EF_R1 7
-#define EF_R2 8
-#define EF_R3 9
-#define EF_R4 10
-#define EF_R5 11
-#define EF_R6 12
-#define EF_R7 13
-#define EF_R8 14
-#define EF_R9 15
-#define EF_R10 16
-#define EF_R11 17
-#define EF_R12 18
-#define EF_R13 19
-#define EF_R14 20
-#define EF_R15 21
-#define EF_R16 22
-#define EF_R17 23
-#define EF_R18 24
-#define EF_R19 25
-#define EF_R20 26
-#define EF_R21 27
-#define EF_R22 28
-#define EF_R23 29
-#define EF_R24 30
-#define EF_R25 31
+#define MIPS32_EF_R0 6
+#define MIPS32_EF_R1 7
+#define MIPS32_EF_R2 8
+#define MIPS32_EF_R3 9
+#define MIPS32_EF_R4 10
+#define MIPS32_EF_R5 11
+#define MIPS32_EF_R6 12
+#define MIPS32_EF_R7 13
+#define MIPS32_EF_R8 14
+#define MIPS32_EF_R9 15
+#define MIPS32_EF_R10 16
+#define MIPS32_EF_R11 17
+#define MIPS32_EF_R12 18
+#define MIPS32_EF_R13 19
+#define MIPS32_EF_R14 20
+#define MIPS32_EF_R15 21
+#define MIPS32_EF_R16 22
+#define MIPS32_EF_R17 23
+#define MIPS32_EF_R18 24
+#define MIPS32_EF_R19 25
+#define MIPS32_EF_R20 26
+#define MIPS32_EF_R21 27
+#define MIPS32_EF_R22 28
+#define MIPS32_EF_R23 29
+#define MIPS32_EF_R24 30
+#define MIPS32_EF_R25 31
/*
* k0/k1 unsaved
*/
-#define EF_R26 32
-#define EF_R27 33
+#define MIPS32_EF_R26 32
+#define MIPS32_EF_R27 33
-#define EF_R28 34
-#define EF_R29 35
-#define EF_R30 36
-#define EF_R31 37
+#define MIPS32_EF_R28 34
+#define MIPS32_EF_R29 35
+#define MIPS32_EF_R30 36
+#define MIPS32_EF_R31 37
/*
* Saved special registers
*/
-#define EF_LO 38
-#define EF_HI 39
-
-#define EF_CP0_EPC 40
-#define EF_CP0_BADVADDR 41
-#define EF_CP0_STATUS 42
-#define EF_CP0_CAUSE 43
-#define EF_UNUSED0 44
-
-#define EF_SIZE 180
-
-#endif
-
-#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H)
-
-#define EF_R0 0
-#define EF_R1 1
-#define EF_R2 2
-#define EF_R3 3
-#define EF_R4 4
-#define EF_R5 5
-#define EF_R6 6
-#define EF_R7 7
-#define EF_R8 8
-#define EF_R9 9
-#define EF_R10 10
-#define EF_R11 11
-#define EF_R12 12
-#define EF_R13 13
-#define EF_R14 14
-#define EF_R15 15
-#define EF_R16 16
-#define EF_R17 17
-#define EF_R18 18
-#define EF_R19 19
-#define EF_R20 20
-#define EF_R21 21
-#define EF_R22 22
-#define EF_R23 23
-#define EF_R24 24
-#define EF_R25 25
+#define MIPS32_EF_LO 38
+#define MIPS32_EF_HI 39
+
+#define MIPS32_EF_CP0_EPC 40
+#define MIPS32_EF_CP0_BADVADDR 41
+#define MIPS32_EF_CP0_STATUS 42
+#define MIPS32_EF_CP0_CAUSE 43
+#define MIPS32_EF_UNUSED0 44
+
+#define MIPS32_EF_SIZE 180
+
+#define MIPS64_EF_R0 0
+#define MIPS64_EF_R1 1
+#define MIPS64_EF_R2 2
+#define MIPS64_EF_R3 3
+#define MIPS64_EF_R4 4
+#define MIPS64_EF_R5 5
+#define MIPS64_EF_R6 6
+#define MIPS64_EF_R7 7
+#define MIPS64_EF_R8 8
+#define MIPS64_EF_R9 9
+#define MIPS64_EF_R10 10
+#define MIPS64_EF_R11 11
+#define MIPS64_EF_R12 12
+#define MIPS64_EF_R13 13
+#define MIPS64_EF_R14 14
+#define MIPS64_EF_R15 15
+#define MIPS64_EF_R16 16
+#define MIPS64_EF_R17 17
+#define MIPS64_EF_R18 18
+#define MIPS64_EF_R19 19
+#define MIPS64_EF_R20 20
+#define MIPS64_EF_R21 21
+#define MIPS64_EF_R22 22
+#define MIPS64_EF_R23 23
+#define MIPS64_EF_R24 24
+#define MIPS64_EF_R25 25
/*
* k0/k1 unsaved
*/
-#define EF_R26 26
-#define EF_R27 27
+#define MIPS64_EF_R26 26
+#define MIPS64_EF_R27 27
-#define EF_R28 28
-#define EF_R29 29
-#define EF_R30 30
-#define EF_R31 31
+#define MIPS64_EF_R28 28
+#define MIPS64_EF_R29 29
+#define MIPS64_EF_R30 30
+#define MIPS64_EF_R31 31
/*
* Saved special registers
*/
-#define EF_LO 32
-#define EF_HI 33
-
-#define EF_CP0_EPC 34
-#define EF_CP0_BADVADDR 35
-#define EF_CP0_STATUS 36
-#define EF_CP0_CAUSE 37
-
-#define EF_SIZE 304 /* size in bytes */
+#define MIPS64_EF_LO 32
+#define MIPS64_EF_HI 33
+
+#define MIPS64_EF_CP0_EPC 34
+#define MIPS64_EF_CP0_BADVADDR 35
+#define MIPS64_EF_CP0_STATUS 36
+#define MIPS64_EF_CP0_CAUSE 37
+
+#define MIPS64_EF_SIZE 304 /* size in bytes */
+
+#if defined(CONFIG_32BIT)
+
+#define EF_R0 MIPS32_EF_R0
+#define EF_R1 MIPS32_EF_R1
+#define EF_R2 MIPS32_EF_R2
+#define EF_R3 MIPS32_EF_R3
+#define EF_R4 MIPS32_EF_R4
+#define EF_R5 MIPS32_EF_R5
+#define EF_R6 MIPS32_EF_R6
+#define EF_R7 MIPS32_EF_R7
+#define EF_R8 MIPS32_EF_R8
+#define EF_R9 MIPS32_EF_R9
+#define EF_R10 MIPS32_EF_R10
+#define EF_R11 MIPS32_EF_R11
+#define EF_R12 MIPS32_EF_R12
+#define EF_R13 MIPS32_EF_R13
+#define EF_R14 MIPS32_EF_R14
+#define EF_R15 MIPS32_EF_R15
+#define EF_R16 MIPS32_EF_R16
+#define EF_R17 MIPS32_EF_R17
+#define EF_R18 MIPS32_EF_R18
+#define EF_R19 MIPS32_EF_R19
+#define EF_R20 MIPS32_EF_R20
+#define EF_R21 MIPS32_EF_R21
+#define EF_R22 MIPS32_EF_R22
+#define EF_R23 MIPS32_EF_R23
+#define EF_R24 MIPS32_EF_R24
+#define EF_R25 MIPS32_EF_R25
+#define EF_R26 MIPS32_EF_R26
+#define EF_R27 MIPS32_EF_R27
+#define EF_R28 MIPS32_EF_R28
+#define EF_R29 MIPS32_EF_R29
+#define EF_R30 MIPS32_EF_R30
+#define EF_R31 MIPS32_EF_R31
+#define EF_LO MIPS32_EF_LO
+#define EF_HI MIPS32_EF_HI
+#define EF_CP0_EPC MIPS32_EF_CP0_EPC
+#define EF_CP0_BADVADDR MIPS32_EF_CP0_BADVADDR
+#define EF_CP0_STATUS MIPS32_EF_CP0_STATUS
+#define EF_CP0_CAUSE MIPS32_EF_CP0_CAUSE
+#define EF_UNUSED0 MIPS32_EF_UNUSED0
+#define EF_SIZE MIPS32_EF_SIZE
+
+#elif defined(CONFIG_64BIT)
+
+#define EF_R0 MIPS64_EF_R0
+#define EF_R1 MIPS64_EF_R1
+#define EF_R2 MIPS64_EF_R2
+#define EF_R3 MIPS64_EF_R3
+#define EF_R4 MIPS64_EF_R4
+#define EF_R5 MIPS64_EF_R5
+#define EF_R6 MIPS64_EF_R6
+#define EF_R7 MIPS64_EF_R7
+#define EF_R8 MIPS64_EF_R8
+#define EF_R9 MIPS64_EF_R9
+#define EF_R10 MIPS64_EF_R10
+#define EF_R11 MIPS64_EF_R11
+#define EF_R12 MIPS64_EF_R12
+#define EF_R13 MIPS64_EF_R13
+#define EF_R14 MIPS64_EF_R14
+#define EF_R15 MIPS64_EF_R15
+#define EF_R16 MIPS64_EF_R16
+#define EF_R17 MIPS64_EF_R17
+#define EF_R18 MIPS64_EF_R18
+#define EF_R19 MIPS64_EF_R19
+#define EF_R20 MIPS64_EF_R20
+#define EF_R21 MIPS64_EF_R21
+#define EF_R22 MIPS64_EF_R22
+#define EF_R23 MIPS64_EF_R23
+#define EF_R24 MIPS64_EF_R24
+#define EF_R25 MIPS64_EF_R25
+#define EF_R26 MIPS64_EF_R26
+#define EF_R27 MIPS64_EF_R27
+#define EF_R28 MIPS64_EF_R28
+#define EF_R29 MIPS64_EF_R29
+#define EF_R30 MIPS64_EF_R30
+#define EF_R31 MIPS64_EF_R31
+#define EF_LO MIPS64_EF_LO
+#define EF_HI MIPS64_EF_HI
+#define EF_CP0_EPC MIPS64_EF_CP0_EPC
+#define EF_CP0_BADVADDR MIPS64_EF_CP0_BADVADDR
+#define EF_CP0_STATUS MIPS64_EF_CP0_STATUS
+#define EF_CP0_CAUSE MIPS64_EF_CP0_CAUSE
+#define EF_SIZE MIPS64_EF_SIZE
#endif /* CONFIG_64BIT */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 895320e25662..e6e5d9162213 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -131,6 +131,8 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
+#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
+
/* work to do in syscall_trace_leave() */
#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 1dee279f9665..af4d5c0a2f02 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -369,16 +369,22 @@
#define __NR_process_vm_writev (__NR_Linux + 346)
#define __NR_kcmp (__NR_Linux + 347)
#define __NR_finit_module (__NR_Linux + 348)
+/* Backporting seccomp, skip a few ...
+ * #define __NR_sched_setattr (__NR_Linux + 349)
+ * #define __NR_sched_getattr (__NR_Linux + 350)
+ * #define __NR_renameat2 (__NR_Linux + 351)
+ */
+#define __NR_seccomp (__NR_Linux + 352)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 348
+#define __NR_Linux_syscalls 352
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 348
+#define __NR_O32_Linux_syscalls 352
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -695,16 +701,22 @@
#define __NR_kcmp (__NR_Linux + 306)
#define __NR_finit_module (__NR_Linux + 307)
#define __NR_getdents64 (__NR_Linux + 308)
+/* Backporting seccomp, skip a few ...
+ * #define __NR_sched_setattr (__NR_Linux + 309)
+ * #define __NR_sched_getattr (__NR_Linux + 310)
+ * #define __NR_renameat2 (__NR_Linux + 311)
+ */
+#define __NR_seccomp (__NR_Linux + 312)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 308
+#define __NR_Linux_syscalls 312
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 308
+#define __NR_64_Linux_syscalls 312
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1025,15 +1037,21 @@
#define __NR_process_vm_writev (__NR_Linux + 310)
#define __NR_kcmp (__NR_Linux + 311)
#define __NR_finit_module (__NR_Linux + 312)
+/* Backporting seccomp, skip a few ...
+ * #define __NR_sched_setattr (__NR_Linux + 313)
+ * #define __NR_sched_getattr (__NR_Linux + 314)
+ * #define __NR_renameat2 (__NR_Linux + 315)
+ */
+#define __NR_seccomp (__NR_Linux + 316)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 312
+#define __NR_Linux_syscalls 316
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 312
+#define __NR_N32_Linux_syscalls 316
#endif /* _UAPI_ASM_UNISTD_H */
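
For reference, the new number is reachable from userspace with a raw syscall(2); a minimal sketch, assuming libc headers that already carry the backported __NR_seccomp (the fallback define covers older linux/seccomp.h without the SECCOMP_SET_MODE_* constants):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>        /* syscall(), __NR_seccomp via asm/unistd.h */

    #ifndef SECCOMP_SET_MODE_STRICT
    #define SECCOMP_SET_MODE_STRICT 0   /* value from linux/seccomp.h */
    #endif

    int main(void)
    {
            /* Strict mode: only read/write/_exit/sigreturn are allowed after this. */
            if (syscall(__NR_seccomp, SECCOMP_SET_MODE_STRICT, 0, NULL) != 0)
                    perror("seccomp");
            return 0;
    }
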
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 202e581e6096..7fdf1de0447f 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -58,12 +58,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#include <asm/processor.h>
-/*
- * When this file is selected, we are definitely running a 64bit kernel.
- * So using the right regs define in asm/reg.h
- */
-#define WANT_COMPAT_REG_H
-
/* These MUST be defined before elf.h gets included */
extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
@@ -135,21 +129,21 @@ void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
{
int i;
- for (i = 0; i < EF_R0; i++)
+ for (i = 0; i < MIPS32_EF_R0; i++)
grp[i] = 0;
- grp[EF_R0] = 0;
+ grp[MIPS32_EF_R0] = 0;
for (i = 1; i <= 31; i++)
- grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
- grp[EF_R26] = 0;
- grp[EF_R27] = 0;
- grp[EF_LO] = (elf_greg_t) regs->lo;
- grp[EF_HI] = (elf_greg_t) regs->hi;
- grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
- grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
- grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
- grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
-#ifdef EF_UNUSED0
- grp[EF_UNUSED0] = 0;
+ grp[MIPS32_EF_R0 + i] = (elf_greg_t) regs->regs[i];
+ grp[MIPS32_EF_R26] = 0;
+ grp[MIPS32_EF_R27] = 0;
+ grp[MIPS32_EF_LO] = (elf_greg_t) regs->lo;
+ grp[MIPS32_EF_HI] = (elf_greg_t) regs->hi;
+ grp[MIPS32_EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
+ grp[MIPS32_EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
+ grp[MIPS32_EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
+ grp[MIPS32_EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
+#ifdef MIPS32_EF_UNUSED0
+ grp[MIPS32_EF_UNUSED0] = 0;
#endif
}
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index c01b307317a9..bffbbc557879 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -256,11 +256,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
/* Setup Intr to Pin mapping */
if (pin & GIC_MAP_TO_NMI_MSK) {
+ int i;
+
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
/* FIXME: hack to route NMI to all cpu's */
- for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
+ for (i = 0; i < NR_CPUS; i += 32) {
GICWRITE(GIC_REG_ADDR(SHARED,
- GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
+ GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
0xffffffff);
}
} else {
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index fab40f7d2e03..ac9facc08694 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -131,7 +131,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
board_bind_eic_interrupt = &msc_bind_eic_interrupt;
- for (; nirq >= 0; nirq--, imp++) {
+ for (; nirq > 0; nirq--, imp++) {
int n = imp->im_irq;
switch (imp->im_type) {
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 5712bb532245..32b87882ac87 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -58,8 +58,7 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 9c6299c733a3..1b95b2443221 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -161,6 +161,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
__get_user(fregs[i], i + (__u64 __user *) data);
__get_user(child->thread.fpu.fcr31, data + 64);
+ child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* FIR may not be written. */
@@ -451,7 +452,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
#endif
case FPC_CSR:
- child->thread.fpu.fcr31 = data;
+ child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
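
Both hunks above apply the same rule: any FCSR value written via ptrace has its Cause bits masked off, so restoring the register later cannot raise a spurious FP exception. The masking, isolated as a sketch and assuming the usual asm/mipsregs.h value for FPU_CSR_ALL_X:

    /* FCSR Cause bits (E, V, Z, O, U, I), as defined in asm/mipsregs.h. */
    #define FPU_CSR_ALL_X 0x0003f000

    static unsigned int sanitize_fcr31(unsigned int val)
    {
            return val & ~FPU_CSR_ALL_X;    /* drop pending exception causes */
    }
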
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9b36424b03c5..a5a93acdc2ce 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
stack_done:
lw t0, TI_FLAGS($28) # syscall tracing enabled?
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1
bnez t0, syscall_trace_entry # -> yes
@@ -593,6 +593,12 @@ einval: li v0, -ENOSYS
sys sys_process_vm_writev 6
sys sys_kcmp 5
sys sys_finit_module 3
+ /* Backporting seccomp, skip a few ... */
+ sys sys_ni_syscall 0 /* sys_sched_setattr */
+ sys sys_ni_syscall 0 /* sys_sched_getattr */ /* 4350 */
+ sys sys_ni_syscall 0 /* sys_renameat2 */
+ sys sys_seccomp 3
+
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 97a5909a61cf..4220ab5d554a 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
sd a3, PT_R26(sp) # save a3 for syscall restarting
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, syscall_trace_entry
@@ -424,4 +424,8 @@ sys_call_table:
PTR sys_kcmp
PTR sys_finit_module
PTR sys_getdents64
+ PTR sys_ni_syscall /* sys_sched_setattr */
+ PTR sys_ni_syscall /* sys_sched_getattr */ /* 5310 */
+ PTR sys_ni_syscall /* sys_renameat2 */
+ PTR sys_seccomp
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index edcb6594e7b5..08fc29001e7b 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
sd a3, PT_R26(sp) # save a3 for syscall restarting
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, n32_syscall_trace_entry
@@ -417,4 +417,8 @@ EXPORT(sysn32_call_table)
PTR compat_sys_process_vm_writev /* 6310 */
PTR sys_kcmp
PTR sys_finit_module
+ PTR sys_ni_syscall /* sys_sched_setattr */
+ PTR sys_ni_syscall /* sys_sched_getattr */
+ PTR sys_ni_syscall /* sys_renameat2 */ /* 6315 */
+ PTR sys_seccomp
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 74f485d3c0ef..e10e3432faea 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
PTR 4b, bad_stack
.previous
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, trace_a_syscall
@@ -541,4 +541,8 @@ sys_call_table:
PTR compat_sys_process_vm_writev
PTR sys_kcmp
PTR sys_finit_module
+ PTR sys_ni_syscall /* sys_sched_setattr */
+ PTR sys_ni_syscall /* sys_sched_getattr */ /* 4350 */
+ PTR sys_ni_syscall /* sys_renameat2 */
+ PTR sys_seccomp
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 203d8857070d..2c81265bcf46 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -604,7 +604,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
case sdc1_op:
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
- BUG_ON(!is_fpu_owner());
lose_fpu(1); /* Save FPU state for the emulator. */
res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index dd203e59e6fd..2c7b3ade8ec0 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -149,9 +149,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
}
-
- if (kvm->arch.guest_pmap)
- kfree(kvm->arch.guest_pmap);
+ kfree(kvm->arch.guest_pmap);
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_arch_vcpu_free(vcpu);
@@ -299,7 +297,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
if (cpu_has_veic || cpu_has_vint) {
size = 0x200 + VECTORSPACING * 64;
} else {
- size = 0x200;
+ size = 0x4000;
}
/* Save Linux EBASE */
@@ -384,12 +382,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
kvm_mips_dump_stats(vcpu);
- if (vcpu->arch.guest_ebase)
- kfree(vcpu->arch.guest_ebase);
-
- if (vcpu->arch.kseg0_commpage)
- kfree(vcpu->arch.kseg0_commpage);
-
+ kfree(vcpu->arch.guest_ebase);
+ kfree(vcpu->arch.kseg0_commpage);
+ kfree(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
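
The kfree() cleanups above rely on the long-standing guarantee that kfree(NULL) is a no-op, so the NULL guards added nothing:

    if (ptr)          /* redundant: kfree() already tolerates NULL */
            kfree(ptr);

    kfree(ptr);       /* equivalent, and what the patch switches to */
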
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index 4b6274b47f33..e75ef8219caf 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -1571,17 +1571,17 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
#else
/* UserLocal not implemented */
- er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+ er = EMULATE_FAIL;
#endif
break;
default:
- printk("RDHWR not supported\n");
+ kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
er = EMULATE_FAIL;
break;
}
} else {
- printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
+ kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
er = EMULATE_FAIL;
}
@@ -1590,6 +1590,7 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
*/
if (er == EMULATE_FAIL) {
vcpu->arch.pc = curr_pc;
+ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
return er;
}
diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
index fac1f5b178eb..143b8a37b5e4 100644
--- a/arch/mips/lantiq/dts/easy50712.dts
+++ b/arch/mips/lantiq/dts/easy50712.dts
@@ -8,6 +8,7 @@
};
memory@0 {
+ device_type = "memory";
reg = <0x0 0x2000000>;
};
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 21813beec7a5..5495101d32c8 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -12,6 +12,7 @@
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
+#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
@@ -601,6 +602,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
/* Catch bad driver code */
BUG_ON(size == 0);
+ preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
@@ -621,6 +623,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
R4600_HIT_CACHEOP_WAR_IMPL;
blast_dcache_range(addr, addr + size);
}
+ preempt_enable();
bc_wback_inv(addr, size);
__sync();
@@ -631,6 +634,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
/* Catch bad driver code */
BUG_ON(size == 0);
+ preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
@@ -655,6 +659,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
R4600_HIT_CACHEOP_WAR_IMPL;
blast_inv_dcache_range(addr, addr + size);
}
+ preempt_enable();
bc_inv(addr, size);
__sync();
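
The preempt_disable()/preempt_enable() pairs pin the task to one CPU while the hit/index cache ops run: blast_dcache_range() and friends only act on the local CPU's caches, so a migration mid-sequence would leave stale lines on the original CPU. The generic shape, as a sketch:

    #include <linux/preempt.h>

    static void local_dcache_maintain(unsigned long addr, unsigned long size)
    {
            preempt_disable();      /* per-CPU cache ops must not migrate */
            /* ... writeback/invalidate the local dcache range here ... */
            preempt_enable();
    }
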
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index caf92ecb37d6..23129d1005db 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -50,16 +50,20 @@ static inline struct page *dma_addr_to_page(struct device *dev,
}
/*
+ * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
+ * speculatively fill random cachelines with stale data at any time,
+ * requiring an extra flush post-DMA.
+ *
* Warning on the terminology - Linux calls an uncached area coherent;
* MIPS terminology calls memory areas with hardware maintained coherency
* coherent.
*/
-
-static inline int cpu_is_noncoherent_r10000(struct device *dev)
+static inline int cpu_needs_post_dma_flush(struct device *dev)
{
return !plat_device_is_coherent(dev) &&
(current_cpu_type() == CPU_R10000 ||
- current_cpu_type() == CPU_R12000);
+ current_cpu_type() == CPU_R12000 ||
+ current_cpu_type() == CPU_BMIPS5000);
}
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
@@ -230,7 +234,7 @@ static inline void __dma_sync(struct page *page,
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
- if (cpu_is_noncoherent_r10000(dev))
+ if (cpu_needs_post_dma_flush(dev))
__dma_sync(dma_addr_to_page(dev, dma_addr),
dma_addr & ~PAGE_MASK, size, direction);
@@ -281,7 +285,7 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
static void mips_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
- if (cpu_is_noncoherent_r10000(dev))
+ if (cpu_needs_post_dma_flush(dev))
__dma_sync(dma_addr_to_page(dev, dma_handle),
dma_handle & ~PAGE_MASK, size, direction);
}
@@ -302,7 +306,7 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
- if (cpu_is_noncoherent_r10000(dev))
+ if (cpu_needs_post_dma_flush(dev))
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
}
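
Drivers see this through the streaming-DMA API; the unmap and sync-for-cpu paths patched above are where the extra invalidation now happens on the listed CPUs. A typical receive path, for reference:

    dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
    /* ... device DMAs into buf ... */
    dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
    /* only now may the CPU read buf; speculatively filled lines are purged */
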
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index afeef93f81a7..0e17e1352718 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1329,6 +1329,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
}
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
+ UASM_i_LW(&p, K0, 0, K1);
build_huge_update_entries(&p, htlb_info.huge_pte, K1);
build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
htlb_info.restore_scratch);
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
index 7e0277a1048f..32a7c828f073 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate.S
@@ -43,6 +43,7 @@ LEAF(swsusp_arch_resume)
bne t1, t3, 1b
PTR_L t0, PBE_NEXT(t0)
bnez t0, 0b
+ jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
PTR_LA t0, saved_regs
PTR_L ra, PT_R31(t0)
PTR_L sp, PT_R29(t0)
diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
index 35eb874ab7f1..709f58132f5c 100644
--- a/arch/mips/ralink/dts/mt7620a_eval.dts
+++ b/arch/mips/ralink/dts/mt7620a_eval.dts
@@ -7,6 +7,7 @@
model = "Ralink MT7620A evaluation board";
memory@0 {
+ device_type = "memory";
reg = <0x0 0x2000000>;
};
diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
index 322d7002595b..0a685db093d4 100644
--- a/arch/mips/ralink/dts/rt2880_eval.dts
+++ b/arch/mips/ralink/dts/rt2880_eval.dts
@@ -7,6 +7,7 @@
model = "Ralink RT2880 evaluation board";
memory@0 {
+ device_type = "memory";
reg = <0x8000000 0x2000000>;
};
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
index 0ac73ea28198..ec9e9a035541 100644
--- a/arch/mips/ralink/dts/rt3052_eval.dts
+++ b/arch/mips/ralink/dts/rt3052_eval.dts
@@ -7,6 +7,7 @@
model = "Ralink RT3052 evaluation board";
memory@0 {
+ device_type = "memory";
reg = <0x0 0x2000000>;
};
diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
index 2fa6b330bf4f..e8df21a5d10d 100644
--- a/arch/mips/ralink/dts/rt3883_eval.dts
+++ b/arch/mips/ralink/dts/rt3883_eval.dts
@@ -7,6 +7,7 @@
model = "Ralink RT3883 evaluation board";
memory@0 {
+ device_type = "memory";
reg = <0x0 0x2000000>;
};
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index d8a455ede5a7..fec8bf97d806 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -853,37 +853,44 @@ UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
/* ========================================================[ return ] === */
+_resume_userspace:
+ DISABLE_INTERRUPTS(r3,r4)
+ l.lwz r4,TI_FLAGS(r10)
+ l.andi r13,r4,_TIF_WORK_MASK
+ l.sfeqi r13,0
+ l.bf _restore_all
+ l.nop
+
_work_pending:
- /*
- * if (current_thread_info->flags & _TIF_NEED_RESCHED)
- * schedule();
- */
- l.lwz r5,TI_FLAGS(r10)
- l.andi r3,r5,_TIF_NEED_RESCHED
- l.sfnei r3,0
- l.bnf _work_notifysig
+ l.lwz r5,PT_ORIG_GPR11(r1)
+ l.sfltsi r5,0
+ l.bnf 1f
l.nop
- l.jal schedule
+ l.andi r5,r5,0
+1:
+ l.jal do_work_pending
+ l.ori r3,r1,0 /* pt_regs */
+
+ l.sfeqi r11,0
+ l.bf _restore_all
l.nop
- l.j _resume_userspace
+ l.sfltsi r11,0
+ l.bnf 1f
l.nop
-
-/* Handle pending signals and notify-resume requests.
- * do_notify_resume must be passed the latest pushed pt_regs, not
- * necessarily the "userspace" ones. Also, pt_regs->syscallno
- * must be set so that the syscall restart functionality works.
- */
-_work_notifysig:
- l.jal do_notify_resume
- l.ori r3,r1,0 /* pt_regs */
-
-_resume_userspace:
- DISABLE_INTERRUPTS(r3,r4)
- l.lwz r3,TI_FLAGS(r10)
- l.andi r3,r3,_TIF_WORK_MASK
- l.sfnei r3,0
- l.bf _work_pending
+ l.and r11,r11,r0
+ l.ori r11,r11,__NR_restart_syscall
+ l.j _syscall_check_trace_enter
l.nop
+1:
+ l.lwz r11,PT_ORIG_GPR11(r1)
+ /* Restore arg registers */
+ l.lwz r3,PT_GPR3(r1)
+ l.lwz r4,PT_GPR4(r1)
+ l.lwz r5,PT_GPR5(r1)
+ l.lwz r6,PT_GPR6(r1)
+ l.lwz r7,PT_GPR7(r1)
+ l.j _syscall_check_trace_enter
+ l.lwz r8,PT_GPR8(r1)
_restore_all:
RESTORE_ALL
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
index 5869e3fa5dd3..150215a91711 100644
--- a/arch/openrisc/kernel/prom.c
+++ b/arch/openrisc/kernel/prom.c
@@ -96,8 +96,7 @@ void __init early_init_devtree(void *params)
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index ae167f7e081a..c277ec82783d 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -28,24 +28,24 @@
#include <linux/tracehook.h>
#include <asm/processor.h>
+#include <asm/syscall.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#define DEBUG_SIG 0
struct rt_sigframe {
- struct siginfo *pinfo;
- void *puc;
struct siginfo info;
struct ucontext uc;
unsigned char retcode[16]; /* trampoline code */
};
-static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+static int restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext __user *sc)
{
- unsigned int err = 0;
+ int err = 0;
- /* Alwys make any pending restarted system call return -EINTR */
+ /* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/*
@@ -53,25 +53,21 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
* (sc is already checked for VERIFY_READ since the sigframe was
* checked in sys_sigreturn previously)
*/
- if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
- goto badframe;
- if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
- goto badframe;
- if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
- goto badframe;
+ err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
+ err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
+ err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
/* make sure the SM-bit is cleared so user-mode cannot fool us */
regs->sr &= ~SPR_SR_SM;
+ regs->orig_gpr11 = -1; /* Avoid syscall restart checks */
+
/* TODO: the other ports use regs->orig_XX to disable syscall checks
* after this completes, but we don't use that mechanism. maybe we can
* use it now ?
*/
return err;
-
-badframe:
- return 1;
}
asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
@@ -111,21 +107,18 @@ badframe:
* Set up a signal frame.
*/
-static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
- unsigned long mask)
+static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
/* copy the regs */
-
+ /* There should be no need to save callee-saved registers here...
+ * ...but we save them anyway. Revisit this
+ */
err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
- /* then some other stuff */
-
- err |= __put_user(mask, &sc->oldmask);
-
return err;
}
@@ -181,24 +174,18 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame));
-
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
-
+ /* Create siginfo. */
if (ka->sa.sa_flags & SA_SIGINFO)
err |= copy_siginfo_to_user(&frame->info, info);
- if (err)
- goto give_sigsegv;
- /* Clear all the bits of the ucontext we don't use. */
- err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+ /* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(NULL, &frame->uc.uc_link);
err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+ err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
@@ -207,9 +194,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* trampoline - the desired return ip is the retcode itself */
return_ip = (unsigned long)&frame->retcode;
- /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
- err |= __put_user(0xa960, (short *)(frame->retcode + 0));
- err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
+ /* This is:
+ l.ori r11,r0,__NR_rt_sigreturn
+ l.sys 1
+ */
+ err |= __put_user(0xa960, (short *)(frame->retcode + 0));
+ err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
@@ -262,82 +252,106 @@ handle_signal(unsigned long sig,
* mode below.
*/
-void do_signal(struct pt_regs *regs)
+int do_signal(struct pt_regs *regs, int syscall)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
-
- /*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return;
-
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-
- /* If we are coming out of a syscall then we need
- * to check if the syscall was interrupted and wants to be
- * restarted after handling the signal. If so, the original
- * syscall number is put back into r11 and the PC rewound to
- * point at the l.sys instruction that resulted in the
- * original syscall. Syscall results other than the four
- * below mean that the syscall executed to completion and no
- * restart is necessary.
- */
- if (regs->orig_gpr11) {
- int restart = 0;
-
- switch (regs->gpr[11]) {
+ unsigned long continue_addr = 0;
+ unsigned long restart_addr = 0;
+ unsigned long retval = 0;
+ int restart = 0;
+
+ if (syscall) {
+ continue_addr = regs->pc;
+ restart_addr = continue_addr - 4;
+ retval = regs->gpr[11];
+
+ /*
+ * Setup syscall restart here so that a debugger will
+ * see the already changed PC.
+ */
+ switch (retval) {
case -ERESTART_RESTARTBLOCK:
+ restart = -2;
+ /* Fall through */
case -ERESTARTNOHAND:
- /* Restart if there is no signal handler */
- restart = (signr <= 0);
- break;
case -ERESTARTSYS:
- /* Restart if there no signal handler or
- * SA_RESTART flag is set */
- restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART));
- break;
case -ERESTARTNOINTR:
- /* Always restart */
- restart = 1;
+ restart++;
+ regs->gpr[11] = regs->orig_gpr11;
+ regs->pc = restart_addr;
break;
}
+ }
- if (restart) {
- if (regs->gpr[11] == -ERESTART_RESTARTBLOCK)
- regs->gpr[11] = __NR_restart_syscall;
- else
- regs->gpr[11] = regs->orig_gpr11;
- regs->pc -= 4;
- } else {
- regs->gpr[11] = -EINTR;
+ /*
+ * Get the signal to deliver. When running under ptrace, at this
+ * point the debugger may change all our registers ...
+ */
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ /*
+ * Depending on the signal settings we may need to revert the
+ * decision to restart the system call. But skip this if a
+ * debugger has chosen to restart at a different PC.
+ */
+ if (signr > 0) {
+ if (unlikely(restart) && regs->pc == restart_addr) {
+ if (retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK
+ || (retval == -ERESTARTSYS
+ && !(ka.sa.sa_flags & SA_RESTART))) {
+ /* No automatic restart */
+ regs->gpr[11] = -EINTR;
+ regs->pc = continue_addr;
+ }
}
- }
- if (signr <= 0) {
- /* no signal to deliver so we just put the saved sigmask
- * back */
- restore_saved_sigmask();
- } else { /* signr > 0 */
- /* Whee! Actually deliver the signal. */
handle_signal(signr, &info, &ka, regs);
+ } else {
+ /* no handler */
+ restore_saved_sigmask();
+ /*
+ * Restore pt_regs PC as syscall restart will be handled by
+ * kernel without return to userspace
+ */
+ if (unlikely(restart) && regs->pc == restart_addr) {
+ regs->pc = continue_addr;
+ return restart;
+ }
}
- return;
+ return 0;
}
-asmlinkage void do_notify_resume(struct pt_regs *regs)
+asmlinkage int
+do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
- if (current_thread_info()->flags & _TIF_SIGPENDING)
- do_signal(regs);
-
- if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
+ do {
+ if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+ return 0;
+ local_irq_enable();
+ if (thread_flags & _TIF_SIGPENDING) {
+ int restart = do_signal(regs, syscall);
+ if (unlikely(restart)) {
+ /*
+ * Restart without handlers.
+ * Deal with it without leaving
+ * the kernel space.
+ */
+ return restart;
+ }
+ syscall = 0;
+ } else {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
+ }
+ local_irq_disable();
+ thread_flags = current_thread_info()->flags;
+ } while (thread_flags & _TIF_WORK_MASK);
+ return 0;
}
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f0e2784e7cca..de65f66ea64e 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -125,15 +125,10 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
void mark_rodata_ro(void);
#endif
-#ifdef CONFIG_PA8X00
-/* Only pa8800, pa8900 needs this */
-
#include <asm/kmap_types.h>
#define ARCH_HAS_KMAP
-void kunmap_parisc(void *addr);
-
static inline void *kmap(struct page *page)
{
might_sleep();
@@ -142,7 +137,7 @@ static inline void *kmap(struct page *page)
static inline void kunmap(struct page *page)
{
- kunmap_parisc(page_address(page));
+ flush_kernel_dcache_page_addr(page_address(page));
}
static inline void *kmap_atomic(struct page *page)
@@ -153,14 +148,13 @@ static inline void *kmap_atomic(struct page *page)
static inline void __kunmap_atomic(void *addr)
{
- kunmap_parisc(addr);
+ flush_kernel_dcache_page_addr(addr);
pagefault_enable();
}
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
-#endif
#endif /* _PARISC_CACHEFLUSH_H */
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 72c0fafaa039..544ed8ef87eb 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -24,15 +24,7 @@ extern void return_to_handler(void);
extern unsigned long return_address(unsigned int);
-#define HAVE_ARCH_CALLER_ADDR
-
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 return_address(1)
-#define CALLER_ADDR2 return_address(2)
-#define CALLER_ADDR3 return_address(3)
-#define CALLER_ADDR4 return_address(4)
-#define CALLER_ADDR5 return_address(5)
-#define CALLER_ADDR6 return_address(6)
+#define ftrace_return_address(n) return_address(n)
#endif /* __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index b7adb2ac049c..637fe031aa84 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -28,9 +28,9 @@ struct page;
void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
+#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg);
+ struct page *pg);
/* #define CONFIG_PARISC_TMPALIAS */
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
index 9afdad6c2ffb..eaf4dc1c7294 100644
--- a/arch/parisc/include/asm/parisc-device.h
+++ b/arch/parisc/include/asm/parisc-device.h
@@ -23,6 +23,7 @@ struct parisc_device {
/* generic info returned from pdc_pat_cell_module() */
unsigned long mod_info; /* PAT specific - Misc Module info */
unsigned long pmod_loc; /* physical Module location */
+ unsigned long mod0;
#endif
u64 dma_mask; /* DMA mask for I/O */
struct device dev;
@@ -61,4 +62,6 @@ parisc_get_drvdata(struct parisc_device *d)
extern struct bus_type parisc_bus_type;
+int iosapic_serial_irq(struct parisc_device *dev);
+
#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index cc2290a3cace..c6ee86542fec 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -53,6 +53,8 @@
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX DEFAULT_TASK_SIZE
+#define STACK_SIZE_MAX (1 << 30) /* 1 GB */
+
#endif
#ifndef __ASSEMBLY__
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
new file mode 100644
index 000000000000..748016cb122d
--- /dev/null
+++ b/arch/parisc/include/asm/socket.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <uapi/asm/socket.h>
+
+/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+#define SOCK_NONBLOCK 0x40000000
+
+#endif /* _ASM_SOCKET_H */
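
Userspace passes the flag OR'ed into the socket type, so the chosen value must not collide with the SOCK_* type numbers or with the SOCK_CLOEXEC flag bits; 0x40000000 satisfies that on parisc. For example:

    #include <sys/socket.h>

    int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
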
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index d306b75bc77f..e1509308899f 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -32,9 +32,12 @@ static inline void set_eiem(unsigned long val)
cr; \
})
-#define mtsp(gr, cr) \
- __asm__ __volatile__("mtsp %0,%1" \
+#define mtsp(val, cr) \
+ { if (__builtin_constant_p(val) && ((val) == 0)) \
+ __asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \
+ else \
+ __asm__ __volatile__("mtsp %0,%1" \
: /* no outputs */ \
- : "r" (gr), "i" (cr) : "memory")
+ : "r" (val), "i" (cr) : "memory"); }
#endif /* __PARISC_SPECIAL_INSNS_H */
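
The rewritten mtsp() uses __builtin_constant_p() to pick a cheaper encoding (%r0 as the source) when the value is a compile-time zero, and falls back to the general register form otherwise. The same dispatch pattern in plain C, as a sketch:

    #include <stdio.h>

    #define describe(v) \
            (__builtin_constant_p(v) && (v) == 0 ? "constant zero" : "runtime value")

    int main(int argc, char **argv)
    {
            (void)argv;
            puts(describe(0));          /* folds at compile time: "constant zero" */
            puts(describe(argc - 1));   /* not a constant: "runtime value" */
            return 0;
    }
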
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 5273da991e06..9d086a599fa0 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -63,13 +63,14 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
- unsigned long flags;
+ unsigned long flags, sid;
/* For one page, it's not worth testing the split_tlb variable */
mb();
- mtsp(vma->vm_mm->context,1);
+ sid = vma->vm_mm->context;
purge_tlb_start(flags);
+ mtsp(sid, 1);
pdtlb(addr);
pitlb(addr);
purge_tlb_end(flags);
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index a2fa297196bc..f5645d6a89f2 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -69,8 +69,6 @@
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
-#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 70c512a386f7..4fecb26230e7 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
+#ifndef _UAPI_ASM_SOCKET_H
+#define _UAPI_ASM_SOCKET_H
#include <asm/sockios.h>
@@ -73,9 +73,4 @@
#define SO_SELECT_ERR_QUEUE 0x4026
-/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
- * have to define SOCK_NONBLOCK to a different value here.
- */
-#define SOCK_NONBLOCK 0x40000000
-
-#endif /* _ASM_SOCKET_H */
+#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 65fb4cbc3a0f..ac87a40502e6 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -71,18 +71,27 @@ flush_cache_all_local(void)
}
EXPORT_SYMBOL(flush_cache_all_local);
+/* Virtual address of pfn. */
+#define pfn_va(pfn) __va(PFN_PHYS(pfn))
+
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
- struct page *page = pte_page(*ptep);
+ unsigned long pfn = pte_pfn(*ptep);
+ struct page *page;
- if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
- test_bit(PG_dcache_dirty, &page->flags)) {
+ /* We don't have pte special. As a result, we can be called with
+ an invalid pfn and we don't need to flush the kernel dcache page.
+ This occurs with the FireGL card in the C8000. */
+ if (!pfn_valid(pfn))
+ return;
- flush_kernel_dcache_page(page);
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
clear_bit(PG_dcache_dirty, &page->flags);
} else if (parisc_requires_coherency())
- flush_kernel_dcache_page(page);
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
}
void
@@ -379,41 +388,20 @@ void flush_kernel_dcache_page_addr(void *addr)
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
-{
- clear_page_asm(vto);
- if (!parisc_requires_coherency())
- flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(clear_user_page);
-
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg)
{
- /* Copy using kernel mapping. No coherency is needed
- (all in kmap/kunmap) on machines that don't support
- non-equivalent aliasing. However, the `from' page
- needs to be flushed before it can be accessed through
- the kernel mapping. */
+ /* Copy using kernel mapping. No coherency is needed (all in
+ kunmap) for the `to' page. However, the `from' page needs to
+ be flushed through a mapping equivalent to the user mapping
+ before it can be accessed through the kernel mapping. */
preempt_disable();
flush_dcache_page_asm(__pa(vfrom), vaddr);
preempt_enable();
copy_page_asm(vto, vfrom);
- if (!parisc_requires_coherency())
- flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);
-#ifdef CONFIG_PA8X00
-
-void kunmap_parisc(void *addr)
-{
- if (parisc_requires_coherency())
- flush_kernel_dcache_page_addr(addr);
-}
-EXPORT_SYMBOL(kunmap_parisc);
-#endif
-
void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
unsigned long flags;
@@ -440,8 +428,8 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
else {
unsigned long flags;
- mtsp(sid, 1);
purge_tlb_start(flags);
+ mtsp(sid, 1);
if (split_tlb) {
while (npages--) {
pdtlb(start);
@@ -495,44 +483,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
void flush_cache_mm(struct mm_struct *mm)
{
+ struct vm_area_struct *vma;
+ pgd_t *pgd;
+
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
- if (mm_total_size(mm) < parisc_cache_flush_threshold) {
- struct vm_area_struct *vma;
-
- if (mm->context == mfsp(3)) {
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- flush_user_dcache_range_asm(vma->vm_start,
- vma->vm_end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(
- vma->vm_start, vma->vm_end);
- }
- } else {
- pgd_t *pgd = mm->pgd;
-
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- unsigned long addr;
-
- for (addr = vma->vm_start; addr < vma->vm_end;
- addr += PAGE_SIZE) {
- pte_t *ptep = get_ptep(pgd, addr);
- if (ptep != NULL) {
- pte_t pte = *ptep;
- __flush_cache_page(vma, addr,
- page_to_phys(pte_page(pte)));
- }
- }
- }
+ if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_cache_all();
+ return;
+ }
+
+ if (mm->context == mfsp(3)) {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+ if ((vma->vm_flags & VM_EXEC) == 0)
+ continue;
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
}
return;
}
-#ifdef CONFIG_SMP
- flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ pgd = mm->pgd;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end;
+ addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (!pfn_valid(pfn))
+ continue;
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
+ }
}
void
@@ -556,33 +542,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
+ unsigned long addr;
+ pgd_t *pgd;
+
BUG_ON(!vma->vm_mm->context);
- if ((end - start) < parisc_cache_flush_threshold) {
- if (vma->vm_mm->context == mfsp(3)) {
- flush_user_dcache_range_asm(start, end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(start, end);
- } else {
- unsigned long addr;
- pgd_t *pgd = vma->vm_mm->pgd;
-
- for (addr = start & PAGE_MASK; addr < end;
- addr += PAGE_SIZE) {
- pte_t *ptep = get_ptep(pgd, addr);
- if (ptep != NULL) {
- pte_t pte = *ptep;
- flush_cache_page(vma,
- addr, pte_pfn(pte));
- }
- }
- }
- } else {
-#ifdef CONFIG_SMP
+ if ((end - start) >= parisc_cache_flush_threshold) {
flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ return;
+ }
+
+ if (vma->vm_mm->context == mfsp(3)) {
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+ return;
+ }
+
+ pgd = vma->vm_mm->pgd;
+ for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
@@ -591,9 +576,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
- flush_tlb_page(vma, vmaddr);
- __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
-
+ if (pfn_valid(pfn)) {
+ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
}
#ifdef CONFIG_PARISC_TMPALIAS
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 872275659d98..c22c3d84e28b 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -1205,7 +1205,8 @@ static struct hp_hardware hp_hardware_list[] = {
{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
- {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
+ {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
+ {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"},
{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 37aabd772fbb..d2d58258aea6 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -195,6 +195,8 @@ common_stext:
ldw MEM_PDC_HI(%r0),%r6
depd %r6, 31, 32, %r3 /* move to upper word */
+ mfctl %cr30,%r6 /* PCX-W2 firmware bug */
+
ldo PDC_PSW(%r0),%arg0 /* 21 */
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
@@ -203,6 +205,8 @@ common_stext:
copy %r0,%arg3
stext_pdc_ret:
+ mtctl %r6,%cr30 /* restore task thread info */
+
/* restore rfi target address*/
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 3295ef4a185d..f0b6722fc706 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -211,6 +211,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
/* REVISIT: who is the consumer of this? not sure yet... */
dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
dev->pmod_loc = pa_pdc_cell->mod_location;
+ dev->mod0 = pa_pdc_cell->mod[0];
register_parisc_device(dev); /* advertise device */
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 5dfd248e3f1a..0d3a9d4927b5 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -61,8 +61,15 @@ static int get_offset(struct address_space *mapping)
return (unsigned long) mapping >> 8;
}
-static unsigned long get_shared_area(struct address_space *mapping,
- unsigned long addr, unsigned long len, unsigned long pgoff)
+static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
+{
+ struct address_space *mapping = filp ? filp->f_mapping : NULL;
+
+ return (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+}
+
+static unsigned long get_shared_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff)
{
struct vm_unmapped_area_info info;
@@ -71,7 +78,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
info.low_limit = PAGE_ALIGN(addr);
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & (SHMLBA - 1);
- info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+ info.align_offset = shared_align_offset(filp, pgoff);
return vm_unmapped_area(&info);
}
@@ -82,20 +89,18 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -ENOMEM;
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) &&
- (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+ (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
if (!addr)
addr = TASK_UNMAPPED_BASE;
- if (filp) {
- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
- } else if(flags & MAP_SHARED) {
- addr = get_shared_area(NULL, addr, len, pgoff);
- } else {
+ if (filp || (flags & MAP_SHARED))
+ addr = get_shared_area(filp, addr, len, pgoff);
+ else
addr = get_unshared_area(addr, len);
- }
+
return addr;
}
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 0c9107285e66..10a0c2aad8cf 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -392,7 +392,7 @@
ENTRY_COMP(vmsplice)
ENTRY_COMP(move_pages) /* 295 */
ENTRY_SAME(getcpu)
- ENTRY_SAME(epoll_pwait)
+ ENTRY_COMP(epoll_pwait)
ENTRY_COMP(statfs64)
ENTRY_COMP(fstatfs64)
ENTRY_COMP(kexec_load) /* 300 */
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 04e47c6a4562..b3f87a3b4bce 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -805,14 +805,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
else {
/*
- * The kernel should never fault on its own address space.
+ * The kernel should never fault on its own address space,
+ * unless pagefault_disable() was called before.
*/
- if (fault_space == 0)
+ if (fault_space == 0 && !in_atomic())
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
-
}
}
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index a49cc812df8a..ac4370b1ca40 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,6 +2,7 @@
* Optimized memory copy routines.
*
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -153,17 +154,21 @@ static inline void prefetch_dst(const void *addr)
#define prefetch_dst(addr) do { } while(0)
#endif
+#define PA_MEMCPY_OK 0
+#define PA_MEMCPY_LOAD_ERROR 1
+#define PA_MEMCPY_STORE_ERROR 2
+
/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
* per loop. This code is derived from glibc.
*/
-static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len)
+static inline unsigned long copy_dstaligned(unsigned long dst,
+ unsigned long src, unsigned long len)
{
/* gcc complains that a2 and a3 may be uninitialized, but actually
* they cannot be. Initialize a2/a3 to shut gcc up.
*/
register unsigned int a0, a1, a2 = 0, a3 = 0;
int sh_1, sh_2;
- struct exception_data *d;
/* prefetch_src((const void *)src); */
@@ -197,7 +202,7 @@ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src
goto do2;
case 0:
if (len == 0)
- return 0;
+ return PA_MEMCPY_OK;
/* a3 = ((unsigned int *) src)[0];
a0 = ((unsigned int *) src)[1]; */
ldw(s_space, 0, src, a3, cda_ldw_exc);
@@ -256,42 +261,35 @@ do0:
preserve_branch(handle_load_error);
preserve_branch(handle_store_error);
- return 0;
+ return PA_MEMCPY_OK;
handle_load_error:
__asm__ __volatile__ ("cda_ldw_exc:\n");
- d = &__get_cpu_var(exception_data);
- DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
- o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
- return o_len * 4 - d->fault_addr + o_src;
+ return PA_MEMCPY_LOAD_ERROR;
handle_store_error:
__asm__ __volatile__ ("cda_stw_exc:\n");
- d = &__get_cpu_var(exception_data);
- DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
- o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
- return o_len * 4 - d->fault_addr + o_dst;
+ return PA_MEMCPY_STORE_ERROR;
}
-/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
+ * In case of an access fault the faulty address can be read from the per_cpu
+ * exception data struct. */
+static unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
+ unsigned long len)
{
register unsigned long src, dst, t1, t2, t3;
register unsigned char *pcs, *pcd;
register unsigned int *pws, *pwd;
register double *pds, *pdd;
- unsigned long ret = 0;
- unsigned long o_dst, o_src, o_len;
- struct exception_data *d;
+ unsigned long ret;
src = (unsigned long)srcp;
dst = (unsigned long)dstp;
pcs = (unsigned char *)srcp;
pcd = (unsigned char *)dstp;
- o_dst = dst; o_src = src; o_len = len;
-
/* prefetch_src((const void *)srcp); */
if (len < THRESHOLD)
@@ -401,7 +399,7 @@ byte_copy:
len--;
}
- return 0;
+ return PA_MEMCPY_OK;
unaligned_copy:
/* possibly we are aligned on a word, but not on a double... */
@@ -438,8 +436,7 @@ unaligned_copy:
src = (unsigned long)pcs;
}
- ret = copy_dstaligned(dst, src, len / sizeof(unsigned int),
- o_dst, o_src, o_len);
+ ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
if (ret)
return ret;
@@ -454,17 +451,41 @@ unaligned_copy:
handle_load_error:
__asm__ __volatile__ ("pmc_load_exc:\n");
- d = &__get_cpu_var(exception_data);
- DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
- o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
- return o_len - d->fault_addr + o_src;
+ return PA_MEMCPY_LOAD_ERROR;
handle_store_error:
__asm__ __volatile__ ("pmc_store_exc:\n");
+ return PA_MEMCPY_STORE_ERROR;
+}
+
+
+/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+{
+ unsigned long ret, fault_addr, reference;
+ struct exception_data *d;
+
+ ret = pa_memcpy_internal(dstp, srcp, len);
+ if (likely(ret == PA_MEMCPY_OK))
+ return 0;
+
+ /* if a load or store fault occurred we can get the faulty addr */
d = &__get_cpu_var(exception_data);
- DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
- o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
- return o_len - d->fault_addr + o_dst;
+ fault_addr = d->fault_addr;
+
+ /* error in load or store? */
+ if (ret == PA_MEMCPY_LOAD_ERROR)
+ reference = (unsigned long) srcp;
+ else
+ reference = (unsigned long) dstp;
+
+ DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
+ ret, len, fault_addr, reference);
+
+ if (fault_addr >= reference)
+ return len - (fault_addr - reference);
+ else
+ return len;
}
#ifdef __KERNEL__
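
pa_memcpy() now recovers the bytes-not-copied count in one place from the faulting address and the relevant base pointer (source for load faults, destination for store faults). The arithmetic, isolated for clarity:

    /* Bytes NOT transferred, given where the fault hit relative to the
     * source (load fault) or destination (store fault) buffer. */
    static unsigned long not_copied(unsigned long len, unsigned long fault_addr,
                                    unsigned long reference)
    {
            if (fault_addr >= reference)
                    return len - (fault_addr - reference);
            return len;     /* fault before the buffer: nothing was copied */
    }
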
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c33e3ad2c8fd..7f656f119ea6 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -138,6 +138,7 @@ config PPC
select ARCH_USE_BUILTIN_BSWAP
select OLD_SIGSUSPEND
select OLD_SIGACTION if PPC32
+ select ARCH_SUPPORTS_ATOMIC_RMW
config EARLY_PRINTK
bool
@@ -572,7 +573,7 @@ config SCHED_SMT
config PPC_DENORMALISATION
bool "PowerPC denormalisation exception handling"
depends on PPC_BOOK3S_64
- default "n"
+ default "y" if PPC_POWERNV
---help---
Add support for handling denormalisation of single precision
values. Useful for bare metal only. If unsure say Y here.
@@ -986,6 +987,7 @@ config RELOCATABLE
must live at a different physical address than the primary
kernel.
+# This value must have zeroes in the bottom 60 bits otherwise lots will break
config PAGE_OFFSET
hex
default "0xc000000000000000"
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 967fd23ace78..56a4a5d205af 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -97,7 +97,9 @@ CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
-KBUILD_CPPFLAGS += -Iarch/$(ARCH)
+asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+
+KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr)
KBUILD_AFLAGS += -Iarch/$(ARCH)
KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
CPP = $(CC) -E $(KBUILD_CFLAGS)
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 84fdf6857c31..ef22898daa93 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -8,7 +8,11 @@
#include <linux/sched.h>
#define COMPAT_USER_HZ 100
+#ifdef __BIG_ENDIAN__
#define COMPAT_UTS_MACHINE "ppc\0\0"
+#else
+#define COMPAT_UTS_MACHINE "ppcle\0\0"
+#endif
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 46793b58a761..e17d94d429a8 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -264,7 +264,7 @@ do_kvm_##n: \
subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
beq- 1f; \
ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
+1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
blt+ cr1,3f; /* abort if it is */ \
li r1,(n); /* will be reloaded later */ \
sth r1,PACA_TRAP_SAVE(r13); \
@@ -358,12 +358,12 @@ label##_relon_pSeries: \
/* No guest interrupts come through here */ \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
- EXC_STD, KVMTEST_PR, vec)
+ EXC_STD, NOTEST, vec)
#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
.globl label##_relon_pSeries; \
label##_relon_pSeries: \
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)
#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
@@ -374,12 +374,12 @@ label##_relon_hv: \
/* No guest interrupts come through here */ \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
- EXC_HV, KVMTEST, vec)
+ EXC_HV, NOTEST, vec)
#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
.globl label##_relon_hv; \
label##_relon_hv: \
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)
/* This associate vector numbers with bits in paca->irq_happened */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c438f00..f016bb699b5f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\n\t"
+ asm_volatile_goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index c1df590ec444..49fa55bfbac4 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -82,10 +82,9 @@ struct exception_table_entry;
void sort_ex_table(struct exception_table_entry *start,
struct exception_table_entry *finish);
-#ifdef CONFIG_MODVERSIONS
+#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
#define ARCH_RELOCATES_KCRCTAB
-
-extern const unsigned long reloc_start[];
+#define reloc_start PHYSICAL_START
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812aab5b..b9f426212d3a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -211,9 +211,19 @@ extern long long virt_phys_offset;
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
+#endif
/*
* Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
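
Because PAGE_OFFSET (0xc000000000000000) has zeroes in its bottom 60 bits
and physical addresses fit entirely within those bits, no carries can occur,
so the OR/AND forms above are arithmetically identical to the +/- forms the
compiler was miscompiling. A minimal user-space demonstration (plain C, not
kernel code; the example physical address is arbitrary):

#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET 0xc000000000000000UL
#define PA_MASK     0x0fffffffffffffffUL

int main(void)
{
	unsigned long pa = 0x0000000012345000UL;	/* example phys addr */

	/* no carries can occur, so | agrees with + and & agrees with - */
	assert((pa | PAGE_OFFSET) == (pa + PAGE_OFFSET));
	assert(((pa + PAGE_OFFSET) & PA_MASK) == pa);
	printf("va = %#lx\n", pa | PAGE_OFFSET);
	return 0;
}
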
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index f265049dd7d6..960bf64788a3 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -59,7 +59,7 @@ struct power_pmu {
#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */
#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
-#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */
+#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
/*
* Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 27b2386f738a..842846c1b711 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- struct page *page = page_address(table);
-
tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(page);
- pgtable_free_tlb(tlb, page, 0);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index b66ae722a8e9..64aaf016b478 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -144,11 +144,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- struct page *page = page_address(table);
-
tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(page);
- pgtable_free_tlb(tlb, page, 0);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
}
#else /* if CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 2f1b6c5f8174..22cee04a47fc 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -390,11 +390,16 @@ n:
* ld rY,ADDROFF(name)(rX)
*/
#ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
#define LOAD_REG_IMMEDIATE(reg,expr) \
lis reg,(expr)@highest; \
ori reg,reg,(expr)@higher; \
rldicr reg,reg,32,31; \
- oris reg,reg,(expr)@h; \
+ oris reg,reg,(expr)@__AS_ATHIGH; \
ori reg,reg,(expr)@l;
#define LOAD_REG_ADDR(reg,name) \
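
LOAD_REG_IMMEDIATE assembles a 64-bit constant from four 16-bit pieces:
@highest (bits 48-63) and @higher (bits 32-47) fill the upper word, which
is then shifted into place before the @high/@h (bits 16-31) and @l (bits
0-15) pieces are OR-ed in. The Makefile probe for HAVE_AS_ATHIGH seen
earlier in this diff exists because, as I understand it, some binutils
versions sign-check @h, so the unsigned @high operator is used when the
assembler supports it. A plain-C sketch of the decomposition (illustrative
only, not the assembler):

#include <assert.h>
#include <stdint.h>

static uint64_t load_reg_immediate(uint64_t expr)
{
	uint64_t reg;

	reg  = ((expr >> 48) & 0xffff) << 16;	/* lis  reg,expr@highest */
	reg |=  (expr >> 32) & 0xffff;		/* ori  reg,reg,expr@higher */
	reg <<= 32;				/* rldicr reg,reg,32,31 */
	reg |= ((expr >> 16) & 0xffff) << 16;	/* oris reg,reg,expr@high */
	reg |=   expr        & 0xffff;		/* ori  reg,reg,expr@l */
	return reg;
}

int main(void)
{
	assert(load_reg_immediate(0xc0debead12345678ULL) == 0xc0debead12345678ULL);
	return 0;
}
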
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 14a658363698..419e7125cce2 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -247,6 +247,10 @@ struct thread_struct {
unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
struct pt_regs ckpt_regs; /* Checkpointed registers */
+ unsigned long tm_tar;
+ unsigned long tm_ppr;
+ unsigned long tm_dscr;
+
/*
* Transactional FP and VSX 0-31 register set.
* NOTE: the sense of these is the opposite of the integer ckpt_regs!
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index bc2da154f68b..ac204e022922 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -43,9 +43,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
extern void kdump_move_device_tree(void);
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
/* cache lookup */
struct device_node *of_find_next_cache_node(struct device_node *np);
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index d836d945068d..063fcadd1a00 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -40,17 +40,39 @@
#ifndef __ASSEMBLY__
+#include <asm/barrier.h> /* for smp_rmb() */
+
/*
* With 64K pages on hash table, we have a special PTE format that
* uses a second "half" of the page table to encode sub-page information
* in order to deal with 64K made of 4K HW pages. Thus we override the
* generic accessors and iterators here
*/
-#define __real_pte(e,p) ((real_pte_t) { \
- (e), (pte_val(e) & _PAGE_COMBO) ? \
- (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
-#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
- (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
+#define __real_pte __real_pte
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+{
+ real_pte_t rpte;
+
+ rpte.pte = pte;
+ rpte.hidx = 0;
+ if (pte_val(pte) & _PAGE_COMBO) {
+ /*
+ * Make sure we order the hidx load against the _PAGE_COMBO
+ * check. The store side ordering is done in __hash_page_4K
+ */
+ smp_rmb();
+ rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
+ }
+ return rpte;
+}
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+ if ((pte_val(rpte.pte) & _PAGE_COMBO))
+ return (rpte.hidx >> (index<<2)) & 0xf;
+ return (pte_val(rpte.pte) >> 12) & 0xf;
+}
+
#define __rpte_to_pte(r) ((r).pte)
#define __rpte_sub_valid(rpte, index) \
(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
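
The new __real_pte() orders the hidx load after the _PAGE_COMBO check with
smp_rmb(), pairing with the store-side ordering in __hash_page_4K. A generic
illustration of that flag-then-payload pattern using C11 atomics (this is an
analogy, not the kernel's barrier API):

#include <stdatomic.h>

struct rec {
	unsigned long payload;		/* plays the role of hidx */
	atomic_ulong flags;		/* plays the role of the PTE flags */
};

#define FLAG_COMBO 0x1UL

static void writer(struct rec *r, unsigned long value)
{
	r->payload = value;
	/* release: payload is visible before the flag (store-side ordering) */
	atomic_fetch_or_explicit(&r->flags, FLAG_COMBO, memory_order_release);
}

static unsigned long reader(struct rec *r)
{
	/* acquire pairs with the release above, like smp_rmb() after the
	 * flag check pairs with the store-side barrier */
	if (atomic_load_explicit(&r->flags, memory_order_acquire) & FLAG_COMBO)
		return r->payload;
	return 0;
}

int main(void)
{
	struct rec r = { 0, ATOMIC_VAR_INIT(0) };

	writer(&r, 0xf);
	return reader(&r) != 0xf;
}
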
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 4a9e408644fe..795f67792ea9 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -208,6 +208,7 @@
#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
#define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
+#define TEXASR_FS __MASK(63-36) /* Transaction Failure Summary */
#define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
#define SPRN_CTRLF 0x088
@@ -254,19 +255,28 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG 8 /* Enable Target Address Register */
+#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
+#define FSCR_TM_LG 5 /* Enable Transactional Memory */
+#define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */
+#define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/
+#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
+#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
-#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
-#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
+#define FSCR_TAR __MASK(FSCR_TAR_LG)
+#define FSCR_EBB __MASK(FSCR_EBB_LG)
+#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
-#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
-#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */
-#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
-#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/
-#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
-#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */
-#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */
+#define HFSCR_TAR __MASK(FSCR_TAR_LG)
+#define HFSCR_EBB __MASK(FSCR_EBB_LG)
+#define HFSCR_TM __MASK(FSCR_TM_LG)
+#define HFSCR_PM __MASK(FSCR_PM_LG)
+#define HFSCR_BHRB __MASK(FSCR_BHRB_LG)
+#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
+#define HFSCR_FP __MASK(FSCR_FP_LG)
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
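
The rewrite above replaces open-coded "(1 << (63-n))" masks with named
_LG bit numbers plus __MASK(), which lets the same numbers index the
facility-name table added later in this series. A quick standalone check
(plain C, assuming __MASK(n) expands to 1UL << n as in the kernel) that
the _LG values reproduce the old encodings:

#include <assert.h>

#define __MASK(n)	(1UL << (n))
#define FSCR_TAR_LG	8
#define FSCR_EBB_LG	7
#define FSCR_DSCR_LG	2

int main(void)
{
	/* the ISA numbers these bits from the left: bit "63-55" is LG bit 8 */
	assert(__MASK(FSCR_TAR_LG)  == (1UL << (63 - 55)));
	assert(__MASK(FSCR_EBB_LG)  == (1UL << (63 - 56)));
	assert(__MASK(FSCR_DSCR_LG) == (1UL << (63 - 61)));
	return 0;
}
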
@@ -626,6 +636,7 @@
#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
#define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
+#define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */
#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabebcdca..48cfc858abd6 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
#define smp_setup_cpu_maps()
static inline void inhibit_secondary_onlining(void) {}
static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 200d763a0a67..685ecc86aa8b 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_tar(struct thread_struct *prev)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ prev->tar = mfspr(SPRN_TAR);
+}
+#else
+static inline void save_tar(struct thread_struct *prev) {}
+#endif
extern void giveup_fpu(struct task_struct *);
extern void load_up_fpu(void);
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 43523fe0d8b4..05fcdd826829 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -190,7 +190,7 @@ SYSCALL_SPU(getcwd)
SYSCALL_SPU(capget)
SYSCALL_SPU(capset)
COMPAT_SYS(sigaltstack)
-COMPAT_SYS_SPU(sendfile)
+SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
PPC_SYS(vfork)
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 161ab662843b..884b001bafcb 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,7 +22,15 @@ struct device_node;
static inline int cpu_to_node(int cpu)
{
- return numa_cpu_lookup_table[cpu];
+ int nid;
+
+ nid = numa_cpu_lookup_table[cpu];
+
+ /*
+ * During early boot, the numa-cpu lookup table might not have been
+ * setup for all CPUs yet. In such cases, default to node 0.
+ */
+ return (nid < 0) ? 0 : nid;
}
#define parent_node(node) (node)
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 5b7657959faa..de2c0e4ee1aa 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -41,5 +41,6 @@
#define PPC_FEATURE2_EBB 0x10000000
#define PPC_FEATURE2_ISEL 0x08000000
#define PPC_FEATURE2_TAR 0x04000000
+#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
#endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ee5b690a0bed..52e5758ea368 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
+ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+ nb = 8;
+ flags = LD+SW;
+ } else if (IS_XFORM(instruction) &&
+ ((instruction >> 1) & 0x3ff) == 660) {
+ nb = 8;
+ flags = ST+SW;
+ }
+
/* Byteswap little endian loads and stores */
swiz = 0;
if (regs->msr & MSR_LE) {
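
X-form instructions carry a 10-bit extended opcode in bits 21-30 (IBM
numbering), which is what "(instruction >> 1) & 0x3ff" extracts: 532 is
ldbrx and 660 is stdbrx. A decode sketch (plain C; the primary-opcode
check that IS_XFORM performs is stubbed here as "opcode 31"):

#include <assert.h>
#include <stdint.h>

static int is_xform(uint32_t insn)
{
	return (insn >> 26) == 31;	/* X-form loads/stores use opcode 31 */
}

static unsigned int x_ext_opcode(uint32_t insn)
{
	return (insn >> 1) & 0x3ff;
}

int main(void)
{
	/* ldbrx r3,r4,r5: opcode 31, RT=3, RA=4, RB=5, XO=532 */
	uint32_t ldbrx = (31u << 26) | (3u << 21) | (4u << 16) | (5u << 11)
			 | (532u << 1);

	assert(is_xform(ldbrx) && x_ext_opcode(ldbrx) == 532);
	return 0;
}
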
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6f16ffafa6f0..302886b77de2 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -139,6 +139,9 @@ int main(void)
DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+ DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+ DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+ DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
transact_vr[0]));
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 92c6b008dd2b..b4437e8a7a8f 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -788,6 +788,9 @@ static void remove_cache_dir(struct cache_dir *cache_dir)
{
remove_index_dirs(cache_dir);
+ /* Remove cache dir from sysfs */
+ kobject_del(cache_dir->kobj);
+
kobject_put(cache_dir->kobj);
kfree(cache_dir);
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 2a45d0f04385..b2dc45522850 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -105,7 +105,8 @@ extern void __restore_cpu_e6500(void);
PPC_FEATURE_PSERIES_PERFMON_COMPAT)
#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \
PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
- PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR)
+ PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
+ PPC_FEATURE2_VEC_CRYPTO)
#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_HAS_ALTIVEC_COMP)
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 9ec3fe174cba..555ae67e4086 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -108,17 +108,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
+ phys_addr_t paddr;
if (!csize)
return 0;
csize = min_t(size_t, csize, PAGE_SIZE);
+ paddr = pfn << PAGE_SHIFT;
- if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
- vaddr = __va(pfn << PAGE_SHIFT);
+ if (memblock_is_region_memory(paddr, csize)) {
+ vaddr = __va(paddr);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
} else {
- vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+ vaddr = __ioremap(paddr, PAGE_SIZE, 0);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
iounmap(vaddr);
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 8741c854e03d..38847767012d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
- /*
- * Back up the TAR across context switches. Note that the TAR is not
- * available for use in the kernel. (To provide this, the TAR should
- * be backed up/restored on exception entry/exit instead, and be in
- * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
- */
- mfspr r0,SPRN_TAR
- std r0,THREAD_TAR(r3)
-
/* Event based branch registers */
mfspr r0, SPRN_BESCR
std r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
cmpwi r6,0
+ li r8, FSCR_DSCR
bne 1f
ld r0,0(r7)
-1: cmpd r0,r25
+ b 3f
+1:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ or r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ or r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ b 4f
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+3:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ andc r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ andc r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+4: cmpd r0,r25
beq 2f
mtspr SPRN_DSCR,r0
2:
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 40e4a17c8ba0..902ca3c6b4b6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -341,10 +341,17 @@ vsx_unavailable_pSeries_1:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b vsx_unavailable_pSeries
+facility_unavailable_trampoline:
. = 0xf60
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXGEN)
- b tm_unavailable_pSeries
+ b facility_unavailable_pSeries
+
+hv_facility_unavailable_trampoline:
+ . = 0xf80
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b facility_unavailable_hv
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
@@ -522,8 +529,10 @@ denorm_done:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
- STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+ STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+ STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@@ -793,14 +802,10 @@ system_call_relon_pSeries:
STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
. = 0x4e00
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_data_storage_relon_hv
+ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
. = 0x4e20
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_instr_storage_relon_hv
+ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
. = 0x4e40
SET_SCRATCH0(r13)
@@ -808,9 +813,7 @@ system_call_relon_pSeries:
b emulation_assist_relon_hv
. = 0x4e60
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b hmi_exception_relon_hv
+ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
. = 0x4e80
SET_SCRATCH0(r13)
@@ -835,11 +838,17 @@ vsx_unavailable_relon_pSeries_1:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b vsx_unavailable_relon_pSeries
-tm_unavailable_relon_pSeries_1:
+facility_unavailable_relon_trampoline:
. = 0x4f60
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXGEN)
- b tm_unavailable_relon_pSeries
+ b facility_unavailable_relon_pSeries
+
+hv_facility_unavailable_relon_trampoline:
+ . = 0x4f80
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b hv_facility_unavailable_relon_hv
STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -1165,36 +1174,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
bl .vsx_unavailable_exception
b .ret_from_except
- .align 7
- .globl tm_unavailable_common
-tm_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
- bl .save_nvgprs
- DISABLE_INTS
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .tm_unavailable_exception
- b .ret_from_except
+ STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
+ STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
.align 7
.globl __end_handlers
__end_handlers:
/* Equivalents to the above handlers for relocation-on interrupt vectors */
- STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
- STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
- STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
- STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+ STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 2230fd0ca3e4..7213d930918d 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -55,9 +55,9 @@ int crash_mem_ranges;
int __init early_init_dt_scan_fw_dump(unsigned long node,
const char *uname, int depth, void *data)
{
- __be32 *sections;
+ const __be32 *sections;
int i, num_sections;
- unsigned long size;
+ int size;
const int *token;
if (depth != 1 || strcmp(uname, "rtas") != 0)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b61363d557b5..192a3f562bdb 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -467,6 +467,7 @@ _STATIC(__after_prom_start)
mtctr r8
bctr
+.balign 8
p_end: .llong _end - _stext
4: /* Now copy the rest of the kernel up to _end */
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index a949bdfc9623..f0b47d1a6b0e 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -176,7 +176,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
length_max = 512 ; /* 64 doublewords */
/* DAWR region can't cross 512 boundary */
if ((bp->attr.bp_addr >> 10) !=
- ((bp->attr.bp_addr + bp->attr.bp_len) >> 10))
+ ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
return -EINVAL;
}
if (info->len >
@@ -250,6 +250,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
* we still need to single-step the instruction, but we don't
* generate an event.
*/
+ info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
if (!((bp->attr.bp_addr <= dar) &&
(dar - bp->attr.bp_addr < bp->attr.bp_len)))
info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
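
The boundary fix earlier in this diff is a classic off-by-one: a region
crosses a block boundary iff the block index of its first and LAST bytes
differ, and "addr + len" names the byte one past the end, falsely rejecting
regions that end exactly on a boundary. A sketch (plain C, with the block
size implied by the ">> 10" in the patch):

#include <assert.h>

#define SHIFT 10

static int crosses_block(unsigned long addr, unsigned long len)
{
	return (addr >> SHIFT) != ((addr + len - 1) >> SHIFT);
}

int main(void)
{
	/* 512 bytes ending exactly at a 1024-byte boundary: no crossing */
	assert(!crosses_block(0x200, 0x200));
	/* the old "addr + len" form would have flagged this same region */
	assert((0x200 >> SHIFT) != ((0x200 + 0x200) >> SHIFT));
	return 0;
}
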
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c0d0dbddfba1..93d8d96840b5 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -658,7 +658,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
/* number of bytes needed for the bitmap */
sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
- page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+ page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
if (!page)
panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
tbl->it_map = page_address(page);
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index d92f3871e9cf..e2a0a162299b 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,7 +35,13 @@
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
#include <asm/mmu.h>
+#include <asm/machdep.h>
+
+/*
+ * This isn't a module but we expose it to userspace
+ * via /proc so leave the definitions here
+ */
#define MODULE_VERS "1.9"
#define MODULE_NAME "lparcfg"
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
}
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
}
static const struct file_operations lparcfg_fops = {
- .owner = THIS_MODULE,
.read = seq_read,
.write = lparcfg_write,
.open = lparcfg_open,
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void)
}
return 0;
}
-
-static void __exit lparcfg_cleanup(void)
-{
- remove_proc_subtree("powerpc/lparcfg", NULL);
-}
-
-module_init(lparcfg_init);
-module_exit(lparcfg_cleanup);
-MODULE_DESCRIPTION("Interface for LPAR configuration data");
-MODULE_AUTHOR("Dave Engebretsen");
-MODULE_LICENSE("GPL");
+machine_device_initcall(pseries, lparcfg_init);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 076d1242507a..d55357ee9028 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -523,6 +523,31 @@ out_and_saveregs:
tm_save_sprs(thr);
}
+extern void __tm_recheckpoint(struct thread_struct *thread,
+ unsigned long orig_msr);
+
+void tm_recheckpoint(struct thread_struct *thread,
+ unsigned long orig_msr)
+{
+ unsigned long flags;
+
+	/* We really can't be interrupted here: TEXASR must not change
+	 * and, later in the trecheckpoint code, we have a userspace R1.
+	 * So let's hard disable over this region.
+ */
+ local_irq_save(flags);
+ hard_irq_disable();
+
+ /* The TM SPRs are restored here, so that TEXASR.FS can be set
+ * before the trecheckpoint and no explosion occurs.
+ */
+ tm_restore_sprs(thread);
+
+ __tm_recheckpoint(thread, orig_msr);
+
+ local_irq_restore(flags);
+}
+
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
unsigned long msr;
@@ -541,13 +566,10 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
if (!new->thread.regs)
return;
- /* The TM SPRs are restored here, so that TEXASR.FS can be set
- * before the trecheckpoint and no explosion occurs.
- */
- tm_restore_sprs(&new->thread);
-
- if (!MSR_TM_ACTIVE(new->thread.regs->msr))
+ if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
+ tm_restore_sprs(&new->thread);
return;
+ }
msr = new->thread.tm_orig_msr;
/* Recheckpoint to restore original checkpointed register state. */
TM_DEBUG("*** tm_recheckpoint of pid %d "
@@ -600,6 +622,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct ppc64_tlb_batch *batch;
#endif
+ /* Back up the TAR across context switches.
+ * Note that the TAR is not available for use in the kernel. (To
+ * provide this, the TAR should be backed up/restored on exception
+ * entry/exit instead, and be in pt_regs. FIXME, this should be in
+ * pt_regs anyway (for debug).)
+ * Save the TAR here before we do treclaim/trecheckpoint as these
+ * will change the TAR.
+ */
+ save_tar(&prev->thread);
+
__switch_to_tm(prev);
#ifdef CONFIG_SMP
@@ -916,6 +948,16 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
flush_altivec_to_thread(src);
flush_vsx_to_thread(src);
flush_spe_to_thread(src);
+ /*
+ * Flush TM state out so we can copy it. __switch_to_tm() does this
+ * flush but it removes the checkpointed state from the current CPU and
+ * transitions the CPU out of TM mode. Hence we need to call
+ * tm_recheckpoint_new_task() (on the same task) to restore the
+ * checkpointed state back and the TM mode.
+ */
+ __switch_to_tm(src);
+ tm_recheckpoint_new_task(src);
+
*dst = *src;
return 0;
}
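
The arch_dup_task_struct() change above follows a flush-reload-copy shape:
checkpointed state living in registers must be spilled into the thread
struct before the struct is copied, then reloaded so the source task keeps
running unchanged. A standalone sketch of that shape (flush_to_mem() and
reload_from_mem() are hypothetical stand-ins for __switch_to_tm() and
tm_recheckpoint_new_task(); this is an analogy, not kernel code):

struct ctx { unsigned long regs[32]; };

static void flush_to_mem(struct ctx *c)    { (void)c; /* spill live state into *c */ }
static void reload_from_mem(struct ctx *c) { (void)c; /* bring *c live again */ }

static void dup_ctx(struct ctx *dst, struct ctx *src)
{
	flush_to_mem(src);	/* make the in-memory image complete */
	reload_from_mem(src);	/* source keeps its live state */
	*dst = *src;		/* safe: the image was complete when copied */
}

int main(void)
{
	struct ctx a = { { 0 } }, b;

	dup_ctx(&b, &a);
	return 0;
}
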
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b6f7a99cce2..fe0c17dcfbd7 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -162,7 +162,7 @@ static struct ibm_pa_feature {
{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};
-static void __init scan_features(unsigned long node, unsigned char *ftrs,
+static void __init scan_features(unsigned long node, const unsigned char *ftrs,
unsigned long tablelen,
struct ibm_pa_feature *fp,
unsigned long ft_size)
@@ -201,8 +201,8 @@ static void __init scan_features(unsigned long node, unsigned char *ftrs,
static void __init check_cpu_pa_features(unsigned long node)
{
- unsigned char *pa_ftrs;
- unsigned long tablelen;
+ const unsigned char *pa_ftrs;
+ int tablelen;
pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
if (pa_ftrs == NULL)
@@ -215,7 +215,7 @@ static void __init check_cpu_pa_features(unsigned long node)
#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
- u32 *slb_size_ptr;
+ const __be32 *slb_size_ptr;
slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
if (slb_size_ptr != NULL) {
@@ -256,7 +256,7 @@ static struct feature_property {
static inline void identical_pvr_fixup(unsigned long node)
{
unsigned int pvr;
- char *model = of_get_flat_dt_prop(node, "model", NULL);
+ const char *model = of_get_flat_dt_prop(node, "model", NULL);
/*
* Since 440GR(x)/440EP(x) processors have the same pvr,
@@ -294,11 +294,11 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth,
void *data)
{
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- const u32 *prop;
- const u32 *intserv;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *prop;
+ const __be32 *intserv;
int i, nthreads;
- unsigned long len;
+ int len;
int found = -1;
int found_thread = 0;
@@ -389,7 +389,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
int depth, void *data)
{
- unsigned long *lprop;
+ const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
/* Use common scan routine to determine if this is the chosen node */
if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
@@ -440,8 +440,9 @@ int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
*/
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
{
- __be32 *dm, *ls, *usm;
- unsigned long l, n, flags;
+ const __be32 *dm, *ls, *usm;
+ int l;
+ unsigned long n, flags;
u64 base, size, memblock_size;
unsigned int is_kexec_kdump = 0, rngs;
@@ -550,8 +551,7 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
@@ -827,49 +827,10 @@ static int __init prom_reconfig_setup(void)
__initcall(prom_reconfig_setup);
#endif
-/* Find the device node for a given logical cpu number, also returns the cpu
- * local thread number (index in ibm,interrupt-server#s) if relevant and
- * asked for (non NULL)
- */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
- int hardid;
- struct device_node *np;
-
- hardid = get_hard_smp_processor_id(cpu);
-
- for_each_node_by_type(np, "cpu") {
- const u32 *intserv;
- unsigned int plen, t;
-
- /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
- * fallback to "reg" property and assume no threads
- */
- intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
- &plen);
- if (intserv == NULL) {
- const u32 *reg = of_get_property(np, "reg", NULL);
- if (reg == NULL)
- continue;
- if (*reg == hardid) {
- if (thread)
- *thread = 0;
- return np;
- }
- } else {
- plen /= sizeof(u32);
- for (t = 0; t < plen; t++) {
- if (hardid == intserv[t]) {
- if (thread)
- *thread = t;
- return np;
- }
- }
- }
- }
- return NULL;
+ return (int)phys_id == get_hard_smp_processor_id(cpu);
}
-EXPORT_SYMBOL(of_get_cpu_node);
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 98c2fc198712..64f7bd5b1b0f 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1449,7 +1449,9 @@ static long ppc_set_hwdebug(struct task_struct *child,
*/
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) {
len = bp_info->addr2 - bp_info->addr;
- } else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
+ } else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
+ len = 1;
+ else {
ptrace_put_breakpoints(child);
return -EINVAL;
}
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1ab001..c712ecec13ba 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -81,6 +81,7 @@ _GLOBAL(relocate)
6: blr
+.balign 8
p_dyn: .llong __dynamic_start - 0b
p_rela: .llong __rela_dyn_start - 0b
p_st: .llong _stext - 0b
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 52add6f3e201..2d6f5a8e19e2 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1135,7 +1135,7 @@ void __init rtas_initialize(void)
int __init early_init_dt_scan_rtas(unsigned long node,
const char *uname, int depth, void *data)
{
- u32 *basep, *entryp, *sizep;
+ const u32 *basep, *entryp, *sizep;
if (depth != 1 || strcmp(uname, "rtas") != 0)
return 0;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e379d3fd1694..389fb8077cc9 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -76,7 +76,7 @@
#endif
int boot_cpuid = 0;
-int __initdata spinning_secondaries;
+int spinning_secondaries;
u64 ppc64_pft_size;
/* Pick defaults since we might want to patch instructions
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 201385c3a1ae..81f929f026f2 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -407,7 +407,8 @@ inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
* altivec/spe instructions at some point.
*/
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
- int sigret, int ctx_has_vsx_region)
+ struct mcontext __user *tm_frame, int sigret,
+ int ctx_has_vsx_region)
{
unsigned long msr = regs->msr;
@@ -441,6 +442,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
#endif /* CONFIG_ALTIVEC */
if (copy_fpr_to_user(&frame->mc_fregs, current))
return 1;
+
+ /*
+ * Clear the MSR VSX bit to indicate there is no valid state attached
+ * to this context, except in the specific case below where we set it.
+ */
+ msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
@@ -475,6 +482,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
return 1;
+	/* We need to write 0 to the MSR top 32 bits in the tm frame so that we
+ * can check it on the restore to see if TM is active
+ */
+ if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
+ return 1;
+
if (sigret) {
/* Set up the sigreturn trampoline: li r0,sigret; sc */
if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -747,7 +760,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *tm_sr)
{
long err;
- unsigned long msr;
+ unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
int i;
#endif
@@ -850,10 +863,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
* transactional versions should be loaded.
*/
tm_enable();
+ /* Make sure the transaction is marked as failed */
+ current->thread.tm_texasr |= TEXASR_FS;
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&current->thread, msr);
- /* The task has moved into TM state S, so ensure MSR reflects this */
- regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
+ /* Get the top half of the MSR */
+ if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+ return 1;
+ /* Pull in MSR TM from user context */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
/* This loads the speculative FP/VEC state, if used */
if (msr & MSR_FP) {
@@ -952,6 +970,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
{
struct rt_sigframe __user *rt_sf;
struct mcontext __user *frame;
+ struct mcontext __user *tm_frame = NULL;
void __user *addr;
unsigned long newsp = 0;
int sigret;
@@ -985,23 +1004,24 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_frame = &rt_sf->uc_transact.uc_mcontext;
if (MSR_TM_ACTIVE(regs->msr)) {
- if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
- &rt_sf->uc_transact.uc_mcontext, sigret))
+ if (save_tm_user_regs(regs, frame, tm_frame, sigret))
goto badframe;
}
else
#endif
- if (save_user_regs(regs, frame, sigret, 1))
+ {
+ if (save_user_regs(regs, frame, tm_frame, sigret, 1))
goto badframe;
+ }
regs->link = tramp;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(regs->msr)) {
if (__put_user((unsigned long)&rt_sf->uc_transact,
&rt_sf->uc.uc_link)
- || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
- &rt_sf->uc_transact.uc_regs))
+ || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
goto badframe;
}
else
@@ -1170,7 +1190,7 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
mctx = (struct mcontext __user *)
((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
- || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
+ || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
|| put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
|| __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
return -EFAULT;
@@ -1233,7 +1253,7 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
goto bad;
- if (MSR_TM_SUSPENDED(msr_hi<<32)) {
+ if (MSR_TM_ACTIVE(msr_hi<<32)) {
/* We only recheckpoint on return if we're
	 * in a transaction.
*/
@@ -1392,6 +1412,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
{
struct sigcontext __user *sc;
struct sigframe __user *frame;
+ struct mcontext __user *tm_mctx = NULL;
unsigned long newsp = 0;
int sigret;
unsigned long tramp;
@@ -1425,6 +1446,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_mctx = &frame->mctx_transact;
if (MSR_TM_ACTIVE(regs->msr)) {
if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
sigret))
@@ -1432,8 +1454,10 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
}
else
#endif
- if (save_user_regs(regs, &frame->mctx, sigret, 1))
+ {
+ if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
goto badframe;
+ }
regs->link = tramp;
@@ -1481,16 +1505,22 @@ badframe:
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
struct pt_regs *regs)
{
+ struct sigframe __user *sf;
struct sigcontext __user *sc;
struct sigcontext sigctx;
struct mcontext __user *sr;
void __user *addr;
sigset_t set;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ struct mcontext __user *mcp, *tm_mcp;
+ unsigned long msr_hi;
+#endif
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
- sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
+ sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
+ sc = &sf->sctx;
addr = sc;
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
goto badframe;
@@ -1507,11 +1537,25 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
#endif
set_current_blocked(&set);
- sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
- addr = sr;
- if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
- || restore_user_regs(regs, sr, 1))
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ mcp = (struct mcontext __user *)&sf->mctx;
+ tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
+ if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
goto badframe;
+ if (MSR_TM_ACTIVE(msr_hi<<32)) {
+ if (!cpu_has_feature(CPU_FTR_TM))
+ goto badframe;
+ if (restore_tm_user_regs(regs, mcp, tm_mcp))
+ goto badframe;
+ } else
+#endif
+ {
+ sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
+ addr = sr;
+ if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
+ || restore_user_regs(regs, sr, 1))
+ goto badframe;
+ }
set_thread_flag(TIF_RESTOREALL);
return 0;
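
The 32-bit signal paths above keep only the top 32 bits of the 64-bit MSR
in the frame, which is why the TS (transactional state) field is recovered
as "msr_hi << 32" and why pre-writing 0 into the tm frame's PT_MSR slot
guarantees a non-transactional frame can never test as active. A sketch of
the bit placement (plain C; the TS field sits at bits 33-34):

#include <assert.h>
#include <stdint.h>

#define MSR_TS_MASK	 (3ULL << 33)		/* TS field, bits 33..34 */
#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0)

int main(void)
{
	uint32_t msr_hi_clear = 0;			/* tm frame preset */
	uint32_t msr_hi_live  = (uint32_t)((2ULL << 33) >> 32);

	assert(!MSR_TM_ACTIVE((uint64_t)msr_hi_clear << 32));
	assert(MSR_TM_ACTIVE((uint64_t)msr_hi_live << 32));
	return 0;
}
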
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 345947367ec0..74d9615a6bb6 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -121,6 +121,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
flush_fp_to_thread(current);
/* copy fpr regs and fpscr */
err |= copy_fpr_to_user(&sc->fp_regs, current);
+
+ /*
+ * Clear the MSR VSX bit to indicate there is no valid state attached
+ * to this context, except in the specific case below where we set it.
+ */
+ msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
/*
* Copy VSX low doubleword to local buffer for formatting,
@@ -410,6 +416,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
/* get MSR separately, transfer the LE bit if doing signal return */
err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ /* pull in MSR TM from user context */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+
+ /* pull in MSR LE from user context */
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
/* The following non-GPR non-FPR non-VR state is also checkpointed: */
@@ -503,10 +513,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
}
#endif
tm_enable();
+ /* Make sure the transaction is marked as failed */
+ current->thread.tm_texasr |= TEXASR_FS;
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&current->thread, msr);
- /* The task has moved into TM state S, so ensure MSR reflects this: */
- regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33);
/* This loads the speculative FP/VEC state, if used */
if (msr & MSR_FP) {
@@ -654,7 +664,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
goto badframe;
- if (MSR_TM_SUSPENDED(msr)) {
+ if (MSR_TM_ACTIVE(msr)) {
/* We recheckpoint on return. */
struct ucontext __user *uc_transact;
if (__get_user(uc_transact, &uc->uc_link))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index e68a84568b8b..a15fd1a0690c 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
+#include <asm/firmware.h>
#include "cacheinfo.h"
@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
SYSFS_PMCSETUP(dscr, SPRN_DSCR);
SYSFS_PMCSETUP(pir, SPRN_PIR);
+/*
+ * Let's only enable read for phyp resources and
+ * enable write when needed with a separate function.
+ * Let's be conservative and default to pseries.
+ */
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
-static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
unsigned long dscr_default = 0;
EXPORT_SYMBOL(dscr_default);
+static void add_write_permission_dev_attr(struct device_attribute *attr)
+{
+ attr->attr.mode |= 0200;
+}
+
static ssize_t show_dscr_default(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -394,8 +405,11 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_MMCRA))
device_create_file(s, &dev_attr_mmcra);
- if (cpu_has_feature(CPU_FTR_PURR))
+ if (cpu_has_feature(CPU_FTR_PURR)) {
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ add_write_permission_dev_attr(&dev_attr_purr);
device_create_file(s, &dev_attr_purr);
+ }
if (cpu_has_feature(CPU_FTR_SPURR))
device_create_file(s, &dev_attr_spurr);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 5fc29ad7e26f..57fd5c1e8e89 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -512,7 +512,7 @@ void timer_interrupt(struct pt_regs * regs)
__get_cpu_var(irq_stat).timer_irqs++;
-#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
+#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 2da67e7a16d5..1e43ed404b95 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -79,6 +79,11 @@ _GLOBAL(tm_abort)
TABORT(R3)
blr
+ .section ".toc","aw"
+DSCR_DEFAULT:
+ .tc dscr_default[TC],dscr_default
+
+ .section ".text"
/* void tm_reclaim(struct thread_struct *thread,
* unsigned long orig_msr,
@@ -178,11 +183,18 @@ dont_backup_fp:
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
+ /* Store the PPR in r11 and reset to decent value */
+ std r11, GPR11(r1) /* Temporary stash */
+ mfspr r11, SPRN_PPR
+ HMT_MEDIUM
+
/* Now get some more GPRS free */
std r7, GPR7(r1) /* Temporary stash */
std r12, GPR12(r1) /* '' '' '' */
ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */
+ std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */
+
addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */
/* Make r7 look like an exception frame so that we
@@ -194,15 +206,19 @@ dont_backup_fp:
SAVE_GPR(0, r7) /* user r0 */
SAVE_GPR(2, r7) /* user r2 */
SAVE_4GPRS(3, r7) /* user r3-r6 */
- SAVE_4GPRS(8, r7) /* user r8-r11 */
+ SAVE_GPR(8, r7) /* user r8 */
+ SAVE_GPR(9, r7) /* user r9 */
+ SAVE_GPR(10, r7) /* user r10 */
ld r3, PACATMSCRATCH(r13) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
- ld r5, GPR12(r1) /* user r12 */
- GET_SCRATCH0(6) /* user r13 */
+ ld r5, GPR11(r1) /* user r11 */
+ ld r6, GPR12(r1) /* user r12 */
+ GET_SCRATCH0(8) /* user r13 */
std r3, GPR1(r7)
std r4, GPR7(r7)
- std r5, GPR12(r7)
- std r6, GPR13(r7)
+ std r5, GPR11(r7)
+ std r6, GPR12(r7)
+ std r8, GPR13(r7)
SAVE_NVGPRS(r7) /* user r14-r31 */
@@ -224,6 +240,14 @@ dont_backup_fp:
std r5, _CCR(r7)
std r6, _XER(r7)
+
+ /* ******************** TAR, DSCR ********** */
+ mfspr r3, SPRN_TAR
+ mfspr r4, SPRN_DSCR
+
+ std r3, THREAD_TM_TAR(r12)
+ std r4, THREAD_TM_DSCR(r12)
+
/* MSR and flags: We don't change CRs, and we don't need to alter
* MSR.
*/
@@ -239,7 +263,7 @@ dont_backup_fp:
std r3, THREAD_TM_TFHAR(r12)
std r4, THREAD_TM_TFIAR(r12)
- /* AMR and PPR are checkpointed too, but are unsupported by Linux. */
+ /* AMR is checkpointed too, but is unsupported by Linux. */
/* Restore original MSR/IRQ state & clear TM mode */
ld r14, TM_FRAME_L0(r1) /* Orig MSR */
@@ -255,6 +279,12 @@ dont_backup_fp:
mtcr r4
mtlr r0
ld r2, 40(r1)
+
+ /* Load system default DSCR */
+ ld r4, DSCR_DEFAULT@toc(r2)
+ ld r0, 0(r4)
+ mtspr SPRN_DSCR, r0
+
blr
@@ -266,7 +296,7 @@ dont_backup_fp:
* Call with IRQs off, stacks get all out of sync for
* some periods in here!
*/
-_GLOBAL(tm_recheckpoint)
+_GLOBAL(__tm_recheckpoint)
mfcr r5
mflr r0
std r5, 8(r1)
@@ -338,35 +368,51 @@ dont_restore_fp:
mtmsr r6 /* FP/Vec off again! */
restore_gprs:
+
/* ******************** CR,LR,CCR,MSR ********** */
- ld r3, _CTR(r7)
- ld r4, _LINK(r7)
- ld r5, _CCR(r7)
- ld r6, _XER(r7)
+ ld r4, _CTR(r7)
+ ld r5, _LINK(r7)
+ ld r6, _CCR(r7)
+ ld r8, _XER(r7)
- mtctr r3
- mtlr r4
- mtcr r5
- mtxer r6
+ mtctr r4
+ mtlr r5
+ mtcr r6
+ mtxer r8
+
+ /* ******************** TAR ******************** */
+ ld r4, THREAD_TM_TAR(r3)
+ mtspr SPRN_TAR, r4
+
+ /* Load up the PPR and DSCR in GPRs only at this stage */
+ ld r5, THREAD_TM_DSCR(r3)
+ ld r6, THREAD_TM_PPR(r3)
/* MSR and flags: We don't change CRs, and we don't need to alter
* MSR.
*/
REST_4GPRS(0, r7) /* GPR0-3 */
- REST_GPR(4, r7) /* GPR4-6 */
- REST_GPR(5, r7)
- REST_GPR(6, r7)
+ REST_GPR(4, r7) /* GPR4 */
REST_4GPRS(8, r7) /* GPR8-11 */
REST_2GPRS(12, r7) /* GPR12-13 */
REST_NVGPRS(r7) /* GPR14-31 */
- ld r7, GPR7(r7) /* GPR7 */
+ /* Load up PPR and DSCR here so we don't run with user values for long
+ */
+ mtspr SPRN_DSCR, r5
+ mtspr SPRN_PPR, r6
+
+ REST_GPR(5, r7) /* GPR5-7 */
+ REST_GPR(6, r7)
+ ld r7, GPR7(r7)
/* Commit register state as checkpointed state: */
TRECHKPT
+ HMT_MEDIUM
+
/* Our transactional state has now changed.
*
* Now just get out of here. Transactional (current) state will be
@@ -385,6 +431,12 @@ restore_gprs:
mtcr r4
mtlr r0
ld r2, 40(r1)
+
+ /* Load system default DSCR */
+ ld r4, DSCR_DEFAULT@toc(r2)
+ ld r0, 0(r4)
+ mtspr SPRN_DSCR, r0
+
blr
/* ****************************************************************** */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c0e5caf8ccc7..88929b1f4f77 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -44,9 +44,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
-#ifdef CONFIG_PPC32
#include <asm/reg.h>
-#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
@@ -1282,26 +1280,63 @@ void vsx_unavailable_exception(struct pt_regs *regs)
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
-void tm_unavailable_exception(struct pt_regs *regs)
+#ifdef CONFIG_PPC64
+void facility_unavailable_exception(struct pt_regs *regs)
{
+ static char *facility_strings[] = {
+ [FSCR_FP_LG] = "FPU",
+ [FSCR_VECVSX_LG] = "VMX/VSX",
+ [FSCR_DSCR_LG] = "DSCR",
+ [FSCR_PM_LG] = "PMU SPRs",
+ [FSCR_BHRB_LG] = "BHRB",
+ [FSCR_TM_LG] = "TM",
+ [FSCR_EBB_LG] = "EBB",
+ [FSCR_TAR_LG] = "TAR",
+ };
+ char *facility = "unknown";
+ u64 value;
+ u8 status;
+ bool hv;
+
+ hv = (regs->trap == 0xf80);
+ if (hv)
+ value = mfspr(SPRN_HFSCR);
+ else
+ value = mfspr(SPRN_FSCR);
+
+ status = value >> 56;
+ if (status == FSCR_DSCR_LG) {
+	/* User is accessing the DSCR. Set the inherit bit and allow
+	 * the user to set it directly in future by setting the
+	 * DSCR bit in the H/FSCR.
+ */
+ current->thread.dscr_inherit = 1;
+ if (hv)
+ mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
+ else
+ mtspr(SPRN_FSCR, value | FSCR_DSCR);
+ return;
+ }
+
+ if ((status < ARRAY_SIZE(facility_strings)) &&
+ facility_strings[status])
+ facility = facility_strings[status];
+
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- /* Currently we never expect a TMU exception. Catch
- * this and kill the process!
- */
- printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
- "(msr %lx)\n",
- regs->nip, regs->msr);
+ pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
+ hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return;
}
- die("Unexpected TM unavailable exception", regs, SIGABRT);
+ die("Unexpected facility unavailable exception", regs, SIGABRT);
}
+#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
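
The handler above works because the (H)FSCR keeps the number of the facility
that trapped in its top byte, so "value >> 56" indexes straight into the
name table built from the FSCR_*_LG definitions. A decode sketch (plain C,
with an example register value rather than a real mfspr):

#include <stdio.h>

static const char *facility_strings[] = {
	[0] = "FPU", [1] = "VMX/VSX", [2] = "DSCR", [3] = "BHRB",
	[4] = "PMU SPRs", [5] = "TM", [7] = "EBB", [8] = "TAR",
};

static const char *decode_fscr(unsigned long long fscr)
{
	unsigned char status = fscr >> 56;	/* facility number, top byte */

	if (status < sizeof(facility_strings) / sizeof(facility_strings[0])
	    && facility_strings[status])
		return facility_strings[status];
	return "unknown";
}

int main(void)
{
	printf("%s\n", decode_fscr(8ULL << 56));	/* -> TAR */
	return 0;
}
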
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 536016d792ba..56d2e72c85de 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1529,11 +1529,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
const char *cp;
dn = dev->of_node;
- if (!dn)
- return -ENODEV;
+ if (!dn) {
+ strcpy(buf, "\n");
+ return strlen(buf);
+ }
cp = of_get_property(dn, "compatible", NULL);
- if (!cp)
- return -ENODEV;
+ if (!cp) {
+ strcpy(buf, "\n");
+ return strlen(buf);
+ }
return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 654e479802f2..f096e72262f4 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
- . = 0;
- reloc_start = .;
-
. = KERNELBASE;
/*
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 5880dfb31074..b616e364dbe9 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -473,11 +473,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
slb_v = vcpu->kvm->arch.vrma_slb_v;
}
+ preempt_disable();
/* Find the HPTE in the hash table */
index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
HPTE_V_VALID | HPTE_V_ABSENT);
- if (index < 0)
+ if (index < 0) {
+ preempt_enable();
return -ENOENT;
+ }
hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
v = hptep[0] & ~HPTE_V_HVLOCK;
gr = kvm->arch.revmap[index].guest_rpte;
@@ -485,6 +488,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Unlock the HPTE */
asm volatile("lwsync" : : : "memory");
hptep[0] = v;
+ preempt_enable();
gpte->eaddr = eaddr;
gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 550f5928b394..102ad8a255f3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -82,10 +82,13 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
/* CPU points to the first thread of the core */
if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
+#ifdef CONFIG_KVM_XICS
int real_cpu = cpu + vcpu->arch.ptid;
if (paca[real_cpu].kvm_hstate.xics_phys)
xics_wake_cpu(real_cpu);
- else if (cpu_online(cpu))
+ else
+#endif
+ if (cpu_online(cpu))
smp_send_reschedule(cpu);
}
put_cpu();
@@ -1090,7 +1093,9 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
if (vcpu->arch.ptid) {
+#ifdef CONFIG_KVM_XICS
xics_wake_cpu(cpu);
+#endif
++vc->n_woken;
}
#endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6dcbb49105a4..049b899e40e4 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -724,6 +724,10 @@ static int slb_base_page_shift[4] = {
20, /* 1M, unsupported */
};
+/* When called from virtmode, this function must be protected by
+ * preempt_disable(); otherwise, holding HPTE_V_HVLOCK across a
+ * preemption can trigger a deadlock.
+ */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
unsigned long valid)
{
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b02f91e4c70d..7bcd4d6e177b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1054,7 +1054,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
ld r7, HSTATE_DSCR(r13)
- std r8, VCPU_DSCR(r7)
+ std r8, VCPU_DSCR(r9)
mtspr SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 94c1dd46b83d..a3a5cb8ee7ea 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -19,6 +19,7 @@
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
+#include <asm/time.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 6d6f153b6c1d..c17600de7d59 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -127,7 +127,7 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
}
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
- unsigned int eaddr, int as)
+ gva_t eaddr, int as)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
unsigned int victim, tsized;
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 167f72555d60..57a072065057 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
blr
- .macro source
+ .macro srcnr
100:
.section __ex_table,"a"
.align 3
- .llong 100b,.Lsrc_error
+ .llong 100b,.Lsrc_error_nr
.previous
.endm
- .macro dest
+ .macro source
+150:
+ .section __ex_table,"a"
+ .align 3
+ .llong 150b,.Lsrc_error
+ .previous
+ .endm
+
+ .macro dstnr
200:
.section __ex_table,"a"
.align 3
- .llong 200b,.Ldest_error
+ .llong 200b,.Ldest_error_nr
+ .previous
+ .endm
+
+ .macro dest
+250:
+ .section __ex_table,"a"
+ .align 3
+ .llong 250b,.Ldest_error
.previous
.endm
@@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic)
rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
beq .Lcopy_aligned
- li r7,4
- sub r6,r7,r6
+ li r9,4
+ sub r6,r9,r6
mtctr r6
1:
-source; lhz r6,0(r3) /* align to doubleword */
+srcnr; lhz r6,0(r3) /* align to doubleword */
subi r5,r5,2
addi r3,r3,2
adde r0,r0,r6
-dest; sth r6,0(r4)
+dstnr; sth r6,0(r4)
addi r4,r4,2
bdnz 1b
@@ -392,10 +408,10 @@ dest; std r16,56(r4)
mtctr r6
3:
-source; ld r6,0(r3)
+srcnr; ld r6,0(r3)
addi r3,r3,8
adde r0,r0,r6
-dest; std r6,0(r4)
+dstnr; std r6,0(r4)
addi r4,r4,8
bdnz 3b
@@ -405,10 +421,10 @@ dest; std r6,0(r4)
srdi. r6,r5,2
beq .Lcopy_tail_halfword
-source; lwz r6,0(r3)
+srcnr; lwz r6,0(r3)
addi r3,r3,4
adde r0,r0,r6
-dest; stw r6,0(r4)
+dstnr; stw r6,0(r4)
addi r4,r4,4
subi r5,r5,4
@@ -416,10 +432,10 @@ dest; stw r6,0(r4)
srdi. r6,r5,1
beq .Lcopy_tail_byte
-source; lhz r6,0(r3)
+srcnr; lhz r6,0(r3)
addi r3,r3,2
adde r0,r0,r6
-dest; sth r6,0(r4)
+dstnr; sth r6,0(r4)
addi r4,r4,2
subi r5,r5,2
@@ -427,10 +443,10 @@ dest; sth r6,0(r4)
andi. r6,r5,1
beq .Lcopy_finish
-source; lbz r6,0(r3)
+srcnr; lbz r6,0(r3)
sldi r9,r6,8 /* Pad the byte out to 16 bits */
adde r0,r0,r9
-dest; stb r6,0(r4)
+dstnr; stb r6,0(r4)
.Lcopy_finish:
addze r0,r0 /* add in final carry */
@@ -440,6 +456,11 @@ dest; stb r6,0(r4)
blr
.Lsrc_error:
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ addi r1,r1,STACKFRAMESIZE
+.Lsrc_error_nr:
cmpdi 0,r7,0
beqlr
li r6,-EFAULT
@@ -447,6 +468,11 @@ dest; stb r6,0(r4)
blr
.Ldest_error:
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ addi r1,r1,STACKFRAMESIZE
+.Ldest_error_nr:
cmpdi 0,r8,0
beqlr
li r6,-EFAULT
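
The checksum patch above splits the exception-table fixups in two: the new srcnr/dstnr macros label faults taken before r14-r16 are saved, so their handlers must not restore registers and pop a stack frame that was never pushed. The arithmetic itself is the ordinary ones' complement Internet checksum; below is a portable C model of what csum_partial computes, as a sketch, two bytes at a time rather than the assembly's eight:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Fold the 32-bit accumulator down to 16 bits with end-around carry,
 * then complement, as the final addze/fold in the assembly does. */
static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* Accumulate 16-bit big-endian words; a trailing byte is padded. */
static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
        while (len > 1) {
                sum += (uint32_t)buf[0] << 8 | buf[1];
                buf += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)buf[0] << 8;
        return sum;
}

int main(void)
{
        const uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };
        printf("0x%04x\n", csum_fold(csum_partial(data, sizeof(data), 0)));
        return 0;
}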
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index b2c68ce139ae..a5b30c71a8d3 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -231,6 +231,87 @@ _GLOBAL(_rest32gpr_31_x)
mr 1,11
blr
+#ifdef CONFIG_ALTIVEC
+/* Called with r0 pointing just beyond the end of the vector save area. */
+
+_GLOBAL(_savevr_20)
+ li r11,-192
+ stvx vr20,r11,r0
+_GLOBAL(_savevr_21)
+ li r11,-176
+ stvx vr21,r11,r0
+_GLOBAL(_savevr_22)
+ li r11,-160
+ stvx vr22,r11,r0
+_GLOBAL(_savevr_23)
+ li r11,-144
+ stvx vr23,r11,r0
+_GLOBAL(_savevr_24)
+ li r11,-128
+ stvx vr24,r11,r0
+_GLOBAL(_savevr_25)
+ li r11,-112
+ stvx vr25,r11,r0
+_GLOBAL(_savevr_26)
+ li r11,-96
+ stvx vr26,r11,r0
+_GLOBAL(_savevr_27)
+ li r11,-80
+ stvx vr27,r11,r0
+_GLOBAL(_savevr_28)
+ li r11,-64
+ stvx vr28,r11,r0
+_GLOBAL(_savevr_29)
+ li r11,-48
+ stvx vr29,r11,r0
+_GLOBAL(_savevr_30)
+ li r11,-32
+ stvx vr30,r11,r0
+_GLOBAL(_savevr_31)
+ li r11,-16
+ stvx vr31,r11,r0
+ blr
+
+_GLOBAL(_restvr_20)
+ li r11,-192
+ lvx vr20,r11,r0
+_GLOBAL(_restvr_21)
+ li r11,-176
+ lvx vr21,r11,r0
+_GLOBAL(_restvr_22)
+ li r11,-160
+ lvx vr22,r11,r0
+_GLOBAL(_restvr_23)
+ li r11,-144
+ lvx vr23,r11,r0
+_GLOBAL(_restvr_24)
+ li r11,-128
+ lvx vr24,r11,r0
+_GLOBAL(_restvr_25)
+ li r11,-112
+ lvx vr25,r11,r0
+_GLOBAL(_restvr_26)
+ li r11,-96
+ lvx vr26,r11,r0
+_GLOBAL(_restvr_27)
+ li r11,-80
+ lvx vr27,r11,r0
+_GLOBAL(_restvr_28)
+ li r11,-64
+ lvx vr28,r11,r0
+_GLOBAL(_restvr_29)
+ li r11,-48
+ lvx vr29,r11,r0
+_GLOBAL(_restvr_30)
+ li r11,-32
+ lvx vr30,r11,r0
+_GLOBAL(_restvr_31)
+ li r11,-16
+ lvx vr31,r11,r0
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#else /* CONFIG_PPC64 */
.section ".text.save.restore","ax",@progbits
@@ -356,6 +437,111 @@ _restgpr0_31:
mtlr r0
blr
+#ifdef CONFIG_ALTIVEC
+/* Called with r0 pointing just beyond the end of the vector save area. */
+
+.globl _savevr_20
+_savevr_20:
+ li r12,-192
+ stvx vr20,r12,r0
+.globl _savevr_21
+_savevr_21:
+ li r12,-176
+ stvx vr21,r12,r0
+.globl _savevr_22
+_savevr_22:
+ li r12,-160
+ stvx vr22,r12,r0
+.globl _savevr_23
+_savevr_23:
+ li r12,-144
+ stvx vr23,r12,r0
+.globl _savevr_24
+_savevr_24:
+ li r12,-128
+ stvx vr24,r12,r0
+.globl _savevr_25
+_savevr_25:
+ li r12,-112
+ stvx vr25,r12,r0
+.globl _savevr_26
+_savevr_26:
+ li r12,-96
+ stvx vr26,r12,r0
+.globl _savevr_27
+_savevr_27:
+ li r12,-80
+ stvx vr27,r12,r0
+.globl _savevr_28
+_savevr_28:
+ li r12,-64
+ stvx vr28,r12,r0
+.globl _savevr_29
+_savevr_29:
+ li r12,-48
+ stvx vr29,r12,r0
+.globl _savevr_30
+_savevr_30:
+ li r12,-32
+ stvx vr30,r12,r0
+.globl _savevr_31
+_savevr_31:
+ li r12,-16
+ stvx vr31,r12,r0
+ blr
+
+.globl _restvr_20
+_restvr_20:
+ li r12,-192
+ lvx vr20,r12,r0
+.globl _restvr_21
+_restvr_21:
+ li r12,-176
+ lvx vr21,r12,r0
+.globl _restvr_22
+_restvr_22:
+ li r12,-160
+ lvx vr22,r12,r0
+.globl _restvr_23
+_restvr_23:
+ li r12,-144
+ lvx vr23,r12,r0
+.globl _restvr_24
+_restvr_24:
+ li r12,-128
+ lvx vr24,r12,r0
+.globl _restvr_25
+_restvr_25:
+ li r12,-112
+ lvx vr25,r12,r0
+.globl _restvr_26
+_restvr_26:
+ li r12,-96
+ lvx vr26,r12,r0
+.globl _restvr_27
+_restvr_27:
+ li r12,-80
+ lvx vr27,r12,r0
+.globl _restvr_28
+_restvr_28:
+ li r12,-64
+ lvx vr28,r12,r0
+.globl _restvr_29
+_restvr_29:
+ li r12,-48
+ lvx vr29,r12,r0
+.globl _restvr_30
+_restvr_30:
+ li r12,-32
+ lvx vr30,r12,r0
+.globl _restvr_31
+_restvr_31:
+ li r12,-16
+ lvx vr31,r12,r0
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#endif /* CONFIG_PPC64 */
#endif
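
Both the 64-bit and 32-bit _savevr_N/_restvr_N blocks rely on fall-through: entering at _savevr_20 stores vr20 through vr31. With r0 pointing just past the save area, vrN lives at offset -16 * (32 - N), which is where the li constants come from. A quick check of that arithmetic:

#include <stdio.h>

/* Each vector register occupies 16 bytes, counted back from the end
 * of the save area, so vr20 sits at -192 and vr31 at -16. */
int main(void)
{
        for (int n = 20; n <= 31; n++)
                printf("_savevr_%d: li r11,%d\n", n, -16 * (32 - n));
        return 0;
}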
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index e15c521846ca..08490ecc465e 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1395,7 +1395,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
regs->gpr[rd] = byterev_4(val);
goto ldst_done;
-#ifdef CONFIG_PPC_CPU
+#ifdef CONFIG_PPC_FPU
case 535: /* lfsx */
case 567: /* lfsux */
if (!(regs->msr & MSR_FP))
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index e303a6d74e3a..929e3e675868 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -250,9 +250,9 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
const char *uname, int depth,
void *data)
{
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- u32 *prop;
- unsigned long size = 0;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *prop;
+ int size = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
@@ -306,9 +306,9 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
const char *uname, int depth,
void *data)
{
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- u32 *prop;
- unsigned long size = 0;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *prop;
+ int size = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
@@ -389,9 +389,9 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
const char *uname, int depth,
void *data) {
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- unsigned long *addr_prop;
- u32 *page_count_prop;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be64 *addr_prop;
+ const __be32 *page_count_prop;
unsigned int expected_pages;
long unsigned int phys_addr;
long unsigned int block_size;
@@ -533,8 +533,8 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
const char *uname, int depth,
void *data)
{
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- u32 *prop;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *prop;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
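
The hash_utils_64.c changes retype the flattened-device-tree properties as const __be32/__be64, since FDT data is always big-endian regardless of the host. A portable model of the of_read_number() accessor that the callers use on these pointers; read_be_number is a hypothetical stand-in for illustration, not the kernel function:

#include <stdint.h>
#include <stdio.h>

/* Assemble a number from big-endian 32-bit cells, exactly one byte
 * at a time so the host's endianness never matters. */
static uint64_t read_be_number(const uint8_t *p, int cells)
{
        uint64_t v = 0;

        while (cells--) {
                v = (v << 32) |
                    ((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
                     (uint32_t)p[2] << 8  | (uint32_t)p[3]);
                p += 4;
        }
        return v;
}

int main(void)
{
        const uint8_t prop[] = { 0x00, 0x00, 0x00, 0x01,   /* cell 0 */
                                 0x00, 0x00, 0x00, 0x00 }; /* cell 1 */
        printf("0x%llx\n", (unsigned long long)read_be_number(prop, 2));
        return 0;
}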
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 88c0425dc0a8..08c6f3185d45 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,9 +27,12 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
+#include <asm/cputhreads.h>
+#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
@@ -151,9 +154,22 @@ static void __init get_node_active_region(unsigned long pfn,
}
}
-static void map_cpu_to_node(int cpu, int node)
+static void reset_numa_cpu_lookup_table(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ numa_cpu_lookup_table[cpu] = -1;
+}
+
+static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
numa_cpu_lookup_table[cpu] = node;
+}
+
+static void map_cpu_to_node(int cpu, int node)
+{
+ update_numa_cpu_lookup_table(cpu, node);
dbg("adding cpu %d to node %d\n", cpu, node);
@@ -518,11 +534,24 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
*/
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
- int nid = 0;
- struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
+ int nid;
+ struct device_node *cpu;
+
+ /*
+ * If a valid cpu-to-node mapping is already available, use it
+ * directly instead of querying the firmware, since it represents
+ * the most recent mapping notified to us by the platform (e.g. VPHN).
+ */
+ if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
+ map_cpu_to_node(lcpu, nid);
+ return nid;
+ }
+
+ cpu = of_get_cpu_node(lcpu, NULL);
if (!cpu) {
WARN_ON(1);
+ nid = 0;
goto out;
}
@@ -557,8 +586,8 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
unmap_cpu_from_node(lcpu);
- break;
ret = NOTIFY_OK;
+ break;
#endif
}
return ret;
@@ -1065,6 +1094,7 @@ void __init do_init_bootmem(void)
*/
setup_node_to_cpumask_map();
+ reset_numa_cpu_lookup_table();
register_cpu_notifier(&ppc64_numa_nb);
cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
(void *)(unsigned long)boot_cpuid);
@@ -1319,7 +1349,8 @@ static int update_cpu_associativity_changes_mask(void)
}
}
if (changed) {
- cpumask_set_cpu(cpu, changes);
+ cpumask_or(changes, changes, cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
}
}
@@ -1427,17 +1458,42 @@ static int update_cpu_topology(void *data)
if (!data)
return -EINVAL;
- cpu = get_cpu();
+ cpu = smp_processor_id();
for (update = data; update; update = update->next) {
if (cpu != update->cpu)
continue;
- unregister_cpu_under_node(update->cpu, update->old_nid);
unmap_cpu_from_node(update->cpu);
map_cpu_to_node(update->cpu, update->new_nid);
vdso_getcpu_init();
- register_cpu_under_node(update->cpu, update->new_nid);
+ }
+
+ return 0;
+}
+
+static int update_lookup_table(void *data)
+{
+ struct topology_update_data *update;
+
+ if (!data)
+ return -EINVAL;
+
+ /*
+ * Upon topology update, the numa-cpu lookup table needs to be updated
+ * for all threads in the core, including offline CPUs, to ensure that
+ * future hotplug operations respect the cpu-to-node associativity
+ * properly.
+ */
+ for (update = data; update; update = update->next) {
+ int nid, base, j;
+
+ nid = update->new_nid;
+ base = cpu_first_thread_sibling(update->cpu);
+
+ for (j = 0; j < threads_per_core; j++) {
+ update_numa_cpu_lookup_table(base + j, nid);
+ }
}
return 0;
@@ -1449,12 +1505,12 @@ static int update_cpu_topology(void *data)
*/
int arch_update_cpu_topology(void)
{
- unsigned int cpu, changed = 0;
+ unsigned int cpu, sibling, changed = 0;
struct topology_update_data *updates, *ud;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
cpumask_t updated_cpus;
struct device *dev;
- int weight, i = 0;
+ int weight, new_nid, i = 0;
weight = cpumask_weight(&cpu_associativity_changes_mask);
if (!weight)
@@ -1467,24 +1523,62 @@ int arch_update_cpu_topology(void)
cpumask_clear(&updated_cpus);
for_each_cpu(cpu, &cpu_associativity_changes_mask) {
- ud = &updates[i++];
- ud->cpu = cpu;
- vphn_get_associativity(cpu, associativity);
- ud->new_nid = associativity_to_nid(associativity);
-
- if (ud->new_nid < 0 || !node_online(ud->new_nid))
- ud->new_nid = first_online_node;
+ /*
+ * If the siblings aren't flagged for changes, the updates list
+ * will be too short. Skip this update and flag the siblings
+ * for the next one.
+ */
+ if (!cpumask_subset(cpu_sibling_mask(cpu),
+ &cpu_associativity_changes_mask)) {
+ pr_info("Sibling bits not set for associativity "
+ "change, cpu%d\n", cpu);
+ cpumask_or(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
- ud->old_nid = numa_cpu_lookup_table[cpu];
- cpumask_set_cpu(cpu, &updated_cpus);
+ /* Use associativity from first thread for all siblings */
+ vphn_get_associativity(cpu, associativity);
+ new_nid = associativity_to_nid(associativity);
+ if (new_nid < 0 || !node_online(new_nid))
+ new_nid = first_online_node;
+
+ if (new_nid == numa_cpu_lookup_table[cpu]) {
+ cpumask_andnot(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
- if (i < weight)
- ud->next = &updates[i];
+ for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+ ud = &updates[i++];
+ ud->cpu = sibling;
+ ud->new_nid = new_nid;
+ ud->old_nid = numa_cpu_lookup_table[sibling];
+ cpumask_set_cpu(sibling, &updated_cpus);
+ if (i < weight)
+ ud->next = &updates[i];
+ }
+ cpu = cpu_last_thread_sibling(cpu);
}
stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+ /*
+ * Update the numa-cpu lookup table with the new mappings, even for
+ * offline CPUs. It is best to perform this update from the stop-
+ * machine context.
+ */
+ stop_machine(update_lookup_table, &updates[0],
+ cpumask_of(raw_smp_processor_id()));
+
for (ud = &updates[0]; ud; ud = ud->next) {
+ unregister_cpu_under_node(ud->cpu, ud->old_nid);
+ register_cpu_under_node(ud->cpu, ud->new_nid);
+
dev = get_cpu_device(ud->cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
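
The reworked numa.c update loop operates core-at-a-time: it widens each change to the full sibling mask, fills one topology_update_data entry per thread, then jumps to cpu_last_thread_sibling() to skip the rest of the core. When threads_per_core is a power of two, those sibling helpers reduce to plain mask arithmetic, as this sketch mirroring the asm/cputhreads.h definitions shows:

#include <stdio.h>

static int threads_per_core = 8;

/* First thread of the core containing cpu: clear the thread bits. */
static int cpu_first_thread_sibling(int cpu)
{
        return cpu & ~(threads_per_core - 1);
}

/* Last thread of the core containing cpu: set the thread bits. */
static int cpu_last_thread_sibling(int cpu)
{
        return cpu | (threads_per_core - 1);
}

int main(void)
{
        int cpu = 13;

        printf("cpu %d: threads %d..%d share a core\n", cpu,
               cpu_first_thread_sibling(cpu),
               cpu_last_thread_sibling(cpu));   /* 8..15 */
        return 0;
}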
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 3e99c149271a..7ce9cf3b6988 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -258,7 +258,7 @@ static bool slice_scan_available(unsigned long addr,
slice = GET_HIGH_SLICE_INDEX(addr);
*boundary_addr = (slice + end) ?
((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
- return !!(available.high_slices & (1u << slice));
+ return !!(available.high_slices & (1ul << slice));
}
}
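
The one-character slice.c fix matters because high_slices is a 64-bit field: shifting a 32-bit 1u by slice >= 32 is undefined, and in practice the shift amount wraps, testing the wrong bit. A small demonstration; the & 31 models that wrap without invoking undefined behaviour in the example itself:

#include <stdio.h>

int main(void)
{
        unsigned long long high_slices = 1ULL << 40;
        int slice = 40;

        /* Old form: 32-bit shift, amount wraps, tests bit 8 instead. */
        printf("1u  << slice: %d\n",
               !!(high_slices & (1u << (slice & 31))));   /* 0 */
        /* Fixed form: 64-bit shift tests the intended bit 40. */
        printf("1ul << slice: %d\n",
               !!(high_slices & (1ULL << slice)));        /* 1 */
        return 0;
}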
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index c427ae36374a..a012a9747cdd 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -209,10 +209,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
}
PPC_DIVWU(r_A, r_A, r_X);
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
PPC_LI32(r_scratch1, K);
- /* Top 32 bits of 64bit result -> A */
- PPC_MULHWU(r_A, r_A, r_scratch1);
+ PPC_DIVWU(r_A, r_A, r_scratch1);
break;
case BPF_S_ALU_AND_X:
ctx->seen |= SEEN_XREG;
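
The BPF_S_ALU_DIV_K change drops the reciprocal trick: in older kernels the filter's constant was converted to reciprocal_value(K) at load time and the JIT multiplied A by it, keeping the high 32 bits (PPC_MULHWU), which can be off by one for some operands. The fix emits a true unsigned divide (PPC_DIVWU) against the untransformed K, and short-circuits K == 1 since A /= 1 is a no-op. A sketch reproducing one failing pair, with reciprocal_value following the old include/linux/reciprocal_div.h rounding:

#include <stdint.h>
#include <stdio.h>

/* Old rounding: ceil(2^32 / k). */
static uint32_t reciprocal_value(uint32_t k)
{
        return (uint32_t)((((uint64_t)1 << 32) + k - 1) / k);
}

/* Old divide: high 32 bits of a 32x32 -> 64 multiply. */
static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
        return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
        uint32_t a = 4294967294u, k = 5;   /* 0xfffffffe / 5 */

        printf("approx: %u exact: %u\n",
               reciprocal_divide(a, reciprocal_value(k)),  /* 858993459 */
               a / k);                                     /* 858993458 */
        return 0;
}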
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29c6482890c8..846861a20b07 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -75,6 +75,8 @@ static unsigned int freeze_events_kernel = MMCR0_FCS;
#define MMCR0_FCHV 0
#define MMCR0_PMCjCE MMCR0_PMCnCE
+#define MMCR0_FC56 0
+#define MMCR0_PMAO 0
#define SPRN_MMCRA SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE 0
@@ -747,7 +749,22 @@ static void power_pmu_read(struct perf_event *event)
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
local64_add(delta, &event->count);
- local64_sub(delta, &event->hw.period_left);
+
+ /*
+ * A number of places program the PMC with (0x80000000 - period_left).
+ * We never want period_left to be less than 1 because we will program
+ * the PMC with a value >= 0x80000000 and an edge-detected PMC will
+ * roll around to 0 before taking an exception. We have seen this
+ * on POWER8.
+ *
+ * To fix this, clamp the minimum value of period_left to 1.
+ */
+ do {
+ prev = local64_read(&event->hw.period_left);
+ val = prev - delta;
+ if (val < 1)
+ val = 1;
+ } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
}
/*
@@ -852,7 +869,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
static void power_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
- unsigned long flags;
+ unsigned long flags, val;
if (!ppmu)
return;
@@ -860,9 +877,6 @@ static void power_pmu_disable(struct pmu *pmu)
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
- cpuhw->disabled = 1;
- cpuhw->n_added = 0;
-
/*
* Check if we ever enabled the PMU on this cpu.
*/
@@ -872,6 +886,21 @@ static void power_pmu_disable(struct pmu *pmu)
}
/*
+ * Set the 'freeze counters' bit, clear PMAO/FC56.
+ */
+ val = mfspr(SPRN_MMCR0);
+ val |= MMCR0_FC;
+ val &= ~(MMCR0_PMAO | MMCR0_FC56);
+
+ /*
+ * The barrier is to make sure the mtspr has been
+ * executed and the PMU has frozen the events etc.
+ * before we return.
+ */
+ write_mmcr0(cpuhw, val);
+ mb();
+
+ /*
* Disable instruction sampling if it was enabled
*/
if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
@@ -880,14 +909,8 @@ static void power_pmu_disable(struct pmu *pmu)
mb();
}
- /*
- * Set the 'freeze counters' bit.
- * The barrier is to make sure the mtspr has been
- * executed and the PMU has frozen the events
- * before we return.
- */
- write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
- mb();
+ cpuhw->disabled = 1;
+ cpuhw->n_added = 0;
}
local_irq_restore(flags);
}
@@ -911,12 +934,18 @@ static void power_pmu_enable(struct pmu *pmu)
if (!ppmu)
return;
+
local_irq_save(flags);
+
cpuhw = &__get_cpu_var(cpu_hw_events);
- if (!cpuhw->disabled) {
- local_irq_restore(flags);
- return;
+ if (!cpuhw->disabled)
+ goto out;
+
+ if (cpuhw->n_events == 0) {
+ ppc_set_pmu_inuse(0);
+ goto out;
}
+
cpuhw->disabled = 0;
/*
@@ -928,8 +957,6 @@ static void power_pmu_enable(struct pmu *pmu)
if (!cpuhw->n_added) {
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
- if (cpuhw->n_events == 0)
- ppc_set_pmu_inuse(0);
goto out_enable;
}
@@ -1315,6 +1342,9 @@ static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
if (ppmu->limited_pmc_event(ev))
return 1;
+ if (ppmu->flags & PPMU_ARCH_207S)
+ mtspr(SPRN_MMCR2, 0);
+
/*
* The requested event_id isn't on a limited PMC already;
* see if any alternative code goes on a limited PMC.
@@ -1409,7 +1439,7 @@ static int power_pmu_event_init(struct perf_event *event)
if (has_branch_stack(event)) {
/* PMU has BHRB enabled */
- if (!(ppmu->flags & PPMU_BHRB))
+ if (!(ppmu->flags & PPMU_ARCH_207S))
return -EOPNOTSUPP;
}
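
The power_pmu_read() hunk replaces a plain local64_sub() with a clamp loop so period_left never drops below 1; otherwise the PMC would be programmed with a value of 0x80000000 or more and an edge-detected counter could wrap to 0 before raising its exception. The shape of that lock-free clamp, sketched with C11 atomics standing in for local64_read()/local64_cmpxchg():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t period_left = 5;

/* Subtract delta, but never let the stored value fall below 1. Retry
 * if another updater raced in between the load and the exchange. */
static void account_delta(int64_t delta)
{
        int64_t prev, val;

        do {
                prev = atomic_load(&period_left);
                val = prev - delta;
                if (val < 1)
                        val = 1;        /* the clamp the patch adds */
        } while (!atomic_compare_exchange_weak(&period_left, &prev, val));
}

int main(void)
{
        account_delta(100);
        printf("period_left = %lld\n",
               (long long)atomic_load(&period_left));   /* 1, not -95 */
        return 0;
}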
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index f7d1c4fff303..ee3b4048ab4d 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -109,6 +109,16 @@
#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
#define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
+#define EVENT_VALID_MASK \
+ ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
+ (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
+ (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
+ (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
+ (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
+ (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
+ (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
+ EVENT_PSEL_MASK)
+
/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
@@ -184,6 +194,7 @@
#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
+#define MMCR1_FAB_SHIFT 36
#define MMCR1_DC_QUAL_SHIFT 47
#define MMCR1_IC_QUAL_SHIFT 46
@@ -212,6 +223,9 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
mask = value = 0;
+ if (event & ~EVENT_VALID_MASK)
+ return -1;
+
pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
@@ -354,8 +368,8 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
* the threshold bits are used for the match value.
*/
if (event_is_fab_match(event[i])) {
- mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
- EVENT_THR_CTL_MASK;
+ mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
+ EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
} else {
val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
mmcra |= val << MMCRA_THR_CTL_SHIFT;
@@ -378,6 +392,10 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
if (pmc_inuse & 0x7c)
mmcr[0] |= MMCR0_PMCjCE;
+ /* If we're not using PMC 5 or 6, freeze them */
+ if (!(pmc_inuse & 0x60))
+ mmcr[0] |= MMCR0_FC56;
+
mmcr[1] = mmcr1;
mmcr[2] = mmcra;
@@ -574,7 +592,7 @@ static struct power_pmu power8_pmu = {
.get_constraint = power8_get_constraint,
.get_alternatives = power8_get_alternatives,
.disable_pmc = power8_disable_pmc,
- .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB,
+ .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power8_generic_events),
.generic_events = power8_generic_events,
.attr_groups = power8_pmu_attr_groups,
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 90f4496017e4..af54174801f7 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -57,5 +57,5 @@ config PPC_MPC5200_BUGFIX
config PPC_MPC5200_LPBFIFO
tristate "MPC5200 LocalPlus bus FIFO driver"
- depends on PPC_MPC52xx
+ depends on PPC_MPC52xx && PPC_BESTCOMM
select PPC_BESTCOMM_GEN_BD
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 18c104820198..6e19b0ad5d26 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -199,8 +199,8 @@ static void __init efika_setup_arch(void)
static int __init efika_probe(void)
{
- char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
- "model", NULL);
+ const char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
+ "model", NULL);
if (model == NULL)
return 0;
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index c665d7de6c99..7044fd36197b 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -574,8 +574,8 @@ chrp_init2(void)
static int __init chrp_probe(void)
{
- char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
- "device_type", NULL);
+ const char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
+ "device_type", NULL);
if (dtype == NULL)
return 0;
if (strcmp(dtype, "chrp"))
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 628c564ceadb..06620ea33a8e 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -35,8 +35,8 @@ static unsigned int opal_irq_count;
int __init early_init_dt_scan_opal(unsigned long node,
const char *uname, int depth, void *data)
{
- const void *basep, *entryp;
- unsigned long basesz, entrysz;
+ const void *basep, *entryp, *sizep;
+ int basesz, entrysz, runtimesz;
if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
return 0;
@@ -50,10 +50,10 @@ int __init early_init_dt_scan_opal(unsigned long node,
opal.base = of_read_number(basep, basesz/4);
opal.entry = of_read_number(entryp, entrysz/4);
- pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%ld)\n",
+ pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%d)\n",
opal.base, basep, basesz);
- pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%ld)\n",
- opal.entry, entryp, entrysz);
+ pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
+ opal.size, sizep, runtimesz);
powerpc_firmware_features |= FW_FEATURE_OPAL;
if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
@@ -105,7 +105,7 @@ int opal_get_chars(uint32_t vtermno, char *buf, int count)
opal_poll_events(&evt);
if ((evt & OPAL_EVENT_CONSOLE_INPUT) == 0)
return 0;
- len = count;
+ len = cpu_to_be64(count);
rc = opal_console_read(vtermno, &len, buf);
if (rc == OPAL_SUCCESS)
return len;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 9c9d15e4cdf2..f75607c93e8a 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -151,13 +151,23 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
rid_end = pe->rid + 1;
}
- /* Associate PE in PELT */
+ /*
+ * Associate PE in PELT. We also need to add the PE to the
+ * corresponding PELT-V; otherwise, errors originating from
+ * the PE might propagate to other PEs.
+ */
rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
bcomp, dcomp, fcomp, OPAL_MAP_PE);
if (rc) {
pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
return -ENXIO;
}
+
+ rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
+ pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
+ if (rc)
+ pe_warn(pe, "OPAL error %d adding self to PELTV\n", rc);
opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
@@ -441,6 +451,17 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
+static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ set_iommu_table_base(&dev->dev, &pe->tce32_table);
+ if (dev->subordinate)
+ pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+ }
+}
+
static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
u64 *startp, u64 *endp)
{
@@ -596,6 +617,11 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
}
iommu_init_table(tbl, phb->hose->node);
+ if (pe->pdev)
+ set_iommu_table_base(&pe->pdev->dev, tbl);
+ else
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
return;
fail:
/* XXX Failure: Try to fallback to 64-bit only ? */
@@ -667,6 +693,11 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
}
iommu_init_table(tbl, phb->hose->node);
+ if (pe->pdev)
+ set_iommu_table_base(&pe->pdev->dev, tbl);
+ else
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
return;
fail:
if (pe->tce32_seg >= 0)
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index b456b157d33d..68f97d5a4679 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -400,6 +400,7 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
} else {
result = EEH_STATE_NOT_SUPPORT;
}
+ break;
default:
result = EEH_STATE_NOT_SUPPORT;
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 217ca5c75b20..2882d614221f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -34,12 +34,7 @@
#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
-static struct rtas_args rtas_stop_self_args = {
- .token = RTAS_UNKNOWN_SERVICE,
- .nargs = 0,
- .nret = 1,
- .rets = &rtas_stop_self_args.args[0],
-};
+static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
CPU_STATE_OFFLINE;
@@ -92,15 +87,20 @@ void set_default_offline_state(int cpu)
static void rtas_stop_self(void)
{
- struct rtas_args *args = &rtas_stop_self_args;
+ struct rtas_args args = {
+ .token = cpu_to_be32(rtas_stop_self_token),
+ .nargs = 0,
+ .nret = 1,
+ .rets = &args.args[0],
+ };
local_irq_disable();
- BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
+ BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
- enter_rtas(__pa(args));
+ enter_rtas(__pa(&args));
panic("Alas, I survived.\n");
}
@@ -391,10 +391,10 @@ static int __init pseries_cpu_hotplug_init(void)
}
}
- rtas_stop_self_args.token = rtas_token("stop-self");
+ rtas_stop_self_token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
- if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
+ if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n");
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 9a432de363b8..bebe64ed5dc3 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -158,7 +158,7 @@ static int pseries_remove_memory(struct device_node *np)
static inline int pseries_remove_memblock(unsigned long base,
unsigned int memblock_size)
{
- return -EOPNOTSUPP;
+ return 0;
}
static inline int pseries_remove_memory(struct device_node *np)
{
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c11c8238797c..995cc0457c76 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -354,7 +354,7 @@ static int alloc_dispatch_log_kmem_cache(void)
}
early_initcall(alloc_dispatch_log_kmem_cache);
-static void pSeries_idle(void)
+static void pseries_lpar_idle(void)
{
/* This would call on the cpuidle framework, and the back-end pseries
* driver to go to idle states
@@ -362,10 +362,22 @@ static void pSeries_idle(void)
if (cpuidle_idle_call()) {
/* On error, execute default handler
* to go into low thread priority and possibly
- * low power mode.
+ * low power mode by ceding the processor to the hypervisor.
*/
- HMT_low();
- HMT_very_low();
+
+ /* Indicate to hypervisor that we are idle. */
+ get_lppaca()->idle = 1;
+
+ /*
+ * Yield the processor to the hypervisor. We return if
+ * an external interrupt occurs (interrupts are handled
+ * prior to returning here) or if a prod occurs from
+ * another processor. When returning here, external
+ * interrupts are enabled.
+ */
+ cede_processor();
+
+ get_lppaca()->idle = 0;
}
}
@@ -456,15 +468,14 @@ static void __init pSeries_setup_arch(void)
pSeries_nvram_init();
- if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
- ppc_md.power_save = pSeries_idle;
- }
-
- if (firmware_has_feature(FW_FEATURE_LPAR))
+ ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
- else
+ } else {
+ /* No special idle routine */
ppc_md.enable_pmcs = power4_enable_pmcs;
+ }
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
@@ -635,7 +646,7 @@ static int __init pseries_probe_fw_features(unsigned long node,
void *data)
{
const char *prop;
- unsigned long len;
+ int len;
static int hypertas_found;
static int vec5_found;
@@ -668,7 +679,7 @@ static int __init pseries_probe_fw_features(unsigned long node,
static int __init pSeries_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
+ const char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
if (dtype == NULL)
return 0;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index da183c5a103c..d8d6eeca56b0 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -116,6 +116,7 @@ config S390
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
@@ -227,11 +228,12 @@ config MARCH_Z196
not work on older machines.
config MARCH_ZEC12
- bool "IBM zEC12"
+ bool "IBM zBC12 and zEC12"
select HAVE_MARCH_ZEC12_FEATURES if 64BIT
help
- Select this to enable optimizations for IBM zEC12 (2827 series). The
- kernel will be slightly faster but will not work on older machines.
+ Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
+ 2827 series). The kernel will be slightly faster but will not work on
+ older machines.
endchoice
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b4dbade8ca24..fd104db9cea1 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/spinlock.h>
#include "crypt_s390.h"
#define AES_KEYLEN_128 1
@@ -32,10 +33,10 @@
#define AES_KEYLEN_256 4
static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
static char keylen_flag;
struct s390_aes_ctx {
- u8 iv[AES_BLOCK_SIZE];
u8 key[AES_MAX_KEY_SIZE];
long enc;
long dec;
@@ -56,8 +57,7 @@ struct pcc_param {
struct s390_xts_ctx {
u8 key[32];
- u8 xts_param[16];
- struct pcc_param pcc;
+ u8 pcc_key[32];
long enc;
long dec;
int key_len;
@@ -441,30 +441,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return aes_set_key(tfm, in_key, key_len);
}
-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
struct blkcipher_walk *walk)
{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[AES_MAX_KEY_SIZE];
+ } param;
if (!nbytes)
goto out;
- memcpy(param, walk->iv, AES_BLOCK_SIZE);
+ memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ memcpy(param.key, sctx->key, sctx->key_len);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_kmc(func, param, out, in, n);
+ ret = crypt_s390_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
- memcpy(walk->iv, param, AES_BLOCK_SIZE);
+ memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
out:
return ret;
@@ -481,7 +487,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
+ return cbc_aes_crypt(desc, sctx->enc, &walk);
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -495,7 +501,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
return fallback_blk_dec(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
+ return cbc_aes_crypt(desc, sctx->dec, &walk);
}
static struct crypto_alg cbc_aes_alg = {
@@ -586,7 +592,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_ctx->enc = KM_XTS_128_ENCRYPT;
xts_ctx->dec = KM_XTS_128_DECRYPT;
memcpy(xts_ctx->key + 16, in_key, 16);
- memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+ memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
break;
case 48:
xts_ctx->enc = 0;
@@ -597,7 +603,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_ctx->enc = KM_XTS_256_ENCRYPT;
xts_ctx->dec = KM_XTS_256_DECRYPT;
memcpy(xts_ctx->key, in_key, 32);
- memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+ memcpy(xts_ctx->pcc_key, in_key + 32, 32);
break;
default:
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -616,29 +622,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
unsigned int nbytes = walk->nbytes;
unsigned int n;
u8 *in, *out;
- void *param;
+ struct pcc_param pcc_param;
+ struct {
+ u8 key[32];
+ u8 init[16];
+ } xts_param;
if (!nbytes)
goto out;
- memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
- memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
- memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
- memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
- param = xts_ctx->pcc.key + offset;
- ret = crypt_s390_pcc(func, param);
+ memset(pcc_param.block, 0, sizeof(pcc_param.block));
+ memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
+ memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
+ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+ ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
if (ret < 0)
return -EIO;
- memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
- param = xts_ctx->key + offset;
+ memcpy(xts_param.key, xts_ctx->key, 32);
+ memcpy(xts_param.init, pcc_param.xts, 16);
do {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
- ret = crypt_s390_km(func, param, out, in, n);
+ ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -748,43 +758,70 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return aes_set_key(tfm, in_key, key_len);
}
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* only use complete blocks, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+ for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+ }
+ return n;
+}
+
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
- unsigned int i, n, nbytes;
- u8 buf[AES_BLOCK_SIZE];
- u8 *out, *in;
+ unsigned int n, nbytes;
+ u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
if (!walk->nbytes)
return ret;
- memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
while (nbytes >= AES_BLOCK_SIZE) {
- /* only use complete blocks, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
- nbytes & ~(AES_BLOCK_SIZE - 1);
- for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
- memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
- crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
- }
- ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
- if (ret < 0 || ret != n)
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = AES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, sctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
return -EIO;
+ }
if (n > AES_BLOCK_SIZE)
- memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+ memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ crypto_inc(ctrptr, AES_BLOCK_SIZE);
out += n;
in += n;
nbytes -= n;
}
ret = blkcipher_walk_done(desc, walk, nbytes);
}
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ } else {
+ if (!nbytes)
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ }
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
@@ -792,14 +829,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
- AES_BLOCK_SIZE, ctrblk);
+ AES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != AES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ crypto_inc(ctrbuf, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
}
- memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
return ret;
}
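
The AES-CTR rework makes the shared page-sized ctrblk safe for concurrent requests: take ctrblk_lock with a trylock, and if that fails fall back to a 16-byte on-stack counter so the request still makes progress one block at a time. A single-threaded sketch of that fallback shape; the toy trylock only models the decision, it is not an atomic lock:

#include <stdbool.h>
#include <stdio.h>

static bool ctrblk_busy;                 /* stands in for ctrblk_lock */
static char ctrblk[4096];               /* shared, page-sized counters */

static bool trylock(bool *l) { bool was = *l; *l = true; return !was; }
static void unlock(bool *l)  { *l = false; }

static void ctr_crypt(const char *label)
{
        char ctrbuf[16];                 /* one block, always available */
        char *ctrptr = ctrbuf;           /* slow path by default */

        if (trylock(&ctrblk_busy))
                ctrptr = ctrblk;         /* fast path: shared big buffer */

        printf("%s: using %s buffer\n", label,
               ctrptr == ctrblk ? "shared" : "on-stack");
        /* ... kmctr loop over ctrptr, one or many blocks per call ... */
        if (ctrptr == ctrblk)
                unlock(&ctrblk_busy);
}

int main(void)
{
        ctr_crypt("first");              /* gets the shared block */
        ctrblk_busy = true;              /* simulate contention */
        ctr_crypt("second");             /* falls back to the stack */
        return 0;
}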
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index bcca01c9989d..f2d6cccddcf8 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -25,6 +25,7 @@
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
struct s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
}
static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
- u8 *iv, struct blkcipher_walk *walk)
+ struct blkcipher_walk *walk)
{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[DES_BLOCK_SIZE];
+ u8 key[DES3_KEY_SIZE];
+ } param;
if (!nbytes)
goto out;
- memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.key, ctx->key, DES3_KEY_SIZE);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_kmc(func, iv, out, in, n);
+ ret = crypt_s390_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
- memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+ memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
out:
return ret;
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
}
static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
}
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
}
static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,83 @@ static struct crypto_alg cbc_des3_alg = {
}
};
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* align to block size, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+ for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+ }
+ return n;
+}
+
static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
- struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+ struct s390_des_ctx *ctx,
+ struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
- unsigned int i, n, nbytes;
- u8 buf[DES_BLOCK_SIZE];
- u8 *out, *in;
+ unsigned int n, nbytes;
+ u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
+
+ if (!walk->nbytes)
+ return ret;
- memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
while (nbytes >= DES_BLOCK_SIZE) {
- /* align to block size, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
- nbytes & ~(DES_BLOCK_SIZE - 1);
- for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
- memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
- DES_BLOCK_SIZE);
- crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
- }
- ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
- if (ret < 0 || ret != n)
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = DES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, ctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
return -EIO;
+ }
if (n > DES_BLOCK_SIZE)
- memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+ memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
- crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr, DES_BLOCK_SIZE);
out += n;
in += n;
nbytes -= n;
}
ret = blkcipher_walk_done(desc, walk, nbytes);
}
-
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ } else {
+ if (!nbytes)
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ }
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
- DES_BLOCK_SIZE, ctrblk);
+ DES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != DES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
- crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ crypto_inc(ctrbuf, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
}
- memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
return ret;
}
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 4d8604e311f3..7d4676758733 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr,
size -= offset;
p = addr + offset / BITS_PER_LONG;
if (bit) {
- set = __flo_word(0, *p & (~0UL << bit));
+ set = __flo_word(0, *p & (~0UL >> bit));
if (set >= size)
return size + offset;
if (set < BITS_PER_LONG)
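
The bitops.h fix flips the mask direction: find_next_bit_left() numbers bits from the most significant end, so skipping the first `bit` positions means clearing the top of the word with ~0UL >> bit, not the bottom with << bit. A demonstration, assuming a 64-bit unsigned long:

#include <stdio.h>

int main(void)
{
        unsigned long word = 1UL << 62;  /* position 1, counted from left */
        int bit = 2;                     /* resume the scan at position 2 */

        /* Old mask clears the BOTTOM bits; the skipped bit survives. */
        printf("<<: skipped bit still visible: %d\n",
               !!(word & (~0UL << bit)));                /* 1 */
        /* Fixed mask clears the TOP bits; the skipped bit is gone. */
        printf(">>: skipped bit masked off:    %d\n",
               !!(word & (~0UL >> bit)));                /* 0 */
        return 0;
}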
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f201af8be580..31b5ca8f8c3d 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -219,7 +219,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define to_ccwdev(n) container_of(n, struct ccw_device, dev)
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
-extern struct ccw_device *ccw_device_probe_console(void);
+extern struct ccw_device *ccw_device_probe_console(struct ccw_driver *);
extern void ccw_device_wait_idle(struct ccw_device *);
extern int ccw_device_force_console(struct ccw_device *);
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 6c32190dc73e..346b1c85ffb4 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -15,7 +15,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("0: brcl 0,0\n"
+ asm_volatile_goto("0: brcl 0,0\n"
".pushsection __jump_table, \"aw\"\n"
ASM_ALIGN "\n"
ASM_PTR " 0b, %l[label], %0\n"
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index bbf8141408cd..2bed4f02a558 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -142,9 +142,9 @@ struct _lowcore {
__u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */
/* Interrupt response block */
- __u8 irb[64]; /* 0x0300 */
+ __u8 irb[96]; /* 0x0300 */
- __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
+ __u8 pad_0x0360[0x0e00-0x0360]; /* 0x0360 */
/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -288,12 +288,13 @@ struct _lowcore {
__u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */
/* Interrupt response block. */
- __u8 irb[64]; /* 0x0400 */
+ __u8 irb[96]; /* 0x0400 */
+ __u8 pad_0x0460[0x0480-0x0460]; /* 0x0460 */
/* Per cpu primary space access list */
- __u32 paste[16]; /* 0x0440 */
+ __u32 paste[16]; /* 0x0480 */
- __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */
+ __u8 pad_0x04c0[0x0e00-0x04c0]; /* 0x04c0 */
/*
* 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d686684..6d6d92b4ea11 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -32,6 +32,7 @@ struct mmu_gather {
struct mm_struct *mm;
struct mmu_table_batch *batch;
unsigned int fullmm;
+ unsigned long start, end;
};
struct mmu_table_batch {
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
static inline void tlb_gather_mmu(struct mmu_gather *tlb,
struct mm_struct *mm,
- unsigned int full_mm_flush)
+ unsigned long start,
+ unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
tlb->batch = NULL;
if (tlb->fullmm)
__tlb_flush_mm(mm);
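
The mmu_gather change derives the full-flush decision from the range itself: a full-mm teardown passes start = 0 and end = -1, and !(start | (end + 1)) is true for exactly that pair. In miniature:

#include <stdio.h>

/* True only for the (0, ~0UL) pair that marks a full-mm teardown. */
static int is_fullmm(unsigned long start, unsigned long end)
{
        return !(start | (end + 1));
}

int main(void)
{
        printf("%d\n", is_fullmm(0, ~0UL));          /* 1: full flush */
        printf("%d\n", is_fullmm(0x1000, 0x2000));   /* 0: ranged flush */
        return 0;
}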
diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h
index a61d538756f2..471eb09184d4 100644
--- a/arch/s390/include/uapi/asm/statfs.h
+++ b/arch/s390/include/uapi/asm/statfs.h
@@ -35,11 +35,11 @@ struct statfs {
struct statfs64 {
unsigned int f_type;
unsigned int f_bsize;
- unsigned long f_blocks;
- unsigned long f_bfree;
- unsigned long f_bavail;
- unsigned long f_files;
- unsigned long f_ffree;
+ unsigned long long f_blocks;
+ unsigned long long f_bfree;
+ unsigned long long f_bavail;
+ unsigned long long f_files;
+ unsigned long long f_ffree;
__kernel_fsid_t f_fsid;
unsigned int f_namelen;
unsigned int f_frsize;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 4d5e6f8a7978..32bb7bf29397 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -265,6 +265,7 @@ sysc_sigpending:
tm __TI_flags+3(%r12),_TIF_SYSCALL
jno sysc_return
lm %r2,%r7,__PT_R2(%r11) # load svc arguments
+ l %r10,__TI_sysc_table(%r12) # 31 bit system call table
xr %r8,%r8 # svc 0 returns -ENOSYS
clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
jnl sysc_nr_ok # invalid svc number -> do svc 0
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 4c17eece707e..2e3befddecee 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -293,6 +293,7 @@ sysc_sigpending:
tm __TI_flags+7(%r12),_TIF_SYSCALL
jno sysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index b9e25ae2579c..d7c00507568a 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -59,7 +59,7 @@ ENTRY(startup_continue)
.quad 0 # cr12: tracing off
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
- .quad 0 # cr15: linkage stack operations
+ .quad .Llinkage_stack # cr15: linkage stack operations
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -67,12 +67,15 @@ ENTRY(startup_continue)
.Lparmaddr:
.quad PARMAREA
.align 64
-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
.long 0,0,0,0,0,0,0,0
+.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
.align 128
.Lduald:.rept 8
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
+.Llinkage_stack:
+ .long 0,0,0x89000000,0,0,0,0x8a000000,0
ENTRY(_ehead)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index a314c57f4e94..9677d935583c 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -314,7 +314,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* psw and gprs are stored on the stack
*/
if (addr == (addr_t) &dummy->regs.psw.mask &&
- ((data & ~PSW_MASK_USER) != psw_user_bits ||
+ (((data^psw_user_bits) & ~PSW_MASK_USER) ||
+ (((data^psw_user_bits) & PSW_MASK_ASC) &&
+ ((data|psw_user_bits) & PSW_MASK_ASC) == PSW_MASK_ASC) ||
((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
/* Invalid psw mask. */
return -EINVAL;
@@ -627,7 +629,10 @@ static int __poke_user_compat(struct task_struct *child,
*/
if (addr == (addr_t) &dummy32->regs.psw.mask) {
/* Build a 64 bit psw mask from 31 bit mask. */
- if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
+ if (((tmp^psw32_user_bits) & ~PSW32_MASK_USER) ||
+ (((tmp^psw32_user_bits) & PSW32_MASK_ASC) &&
+ ((tmp|psw32_user_bits) & PSW32_MASK_ASC)
+ == PSW32_MASK_ASC))
/* Invalid psw mask. */
return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
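
Both __poke_user() hunks switch to XOR-based validation: XOR the proposed PSW mask against the template, and any resulting bit outside the user-changeable field means a privileged bit was flipped; the extra ASC clause then restricts which address-space-control values may be set. The core idea, with hypothetical bit values standing in for psw_user_bits and PSW_MASK_USER:

#include <stdio.h>

#define TEMPLATE   0x0705u     /* hypothetical psw_user_bits */
#define USER_BITS  0x00ffu     /* hypothetical PSW_MASK_USER */

/* Valid iff the proposal differs from the template only in bits the
 * user is allowed to change. */
static int valid_user_mask(unsigned int data)
{
        return ((data ^ TEMPLATE) & ~USER_BITS) == 0;
}

int main(void)
{
        printf("%d\n", valid_user_mask(0x0742));  /* 1: only user bits differ */
        printf("%d\n", valid_user_mask(0x0f42));  /* 0: privileged bit flipped */
        return 0;
}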
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0a49095104c9..8ad9413148bf 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -998,6 +998,7 @@ static void __init setup_hwcaps(void)
strcpy(elf_platform, "z196");
break;
case 0x2827:
+ case 0x2828:
strcpy(elf_platform, "zEC12");
break;
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 4f977d0d25c2..14647fe09d0c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -933,7 +933,7 @@ static ssize_t show_idle_count(struct device *dev,
idle_count = ACCESS_ONCE(idle->idle_count);
if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -951,7 +951,7 @@ static ssize_t show_idle_time(struct device *dev,
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 3fb09359eda6..737d50caa4fe 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -190,7 +190,7 @@ cputime64_t s390_get_idle_time(int cpu)
sequence = ACCESS_ONCE(idle->sequence);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
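
The three idle-time hunks (smp.c and vtime.c) wrap the second read of ->sequence in ACCESS_ONCE(); without it, the compiler may reuse the first load, so the retry loop could exit early or spin on a stale value. The same seqcount-style read loop, sketched with C11 atomics providing the forced reload; idle_count stays a plain variable, as in the kernel:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int sequence;       /* odd while a write runs */
static unsigned long long idle_count;

static unsigned long long read_idle_count(void)
{
        unsigned int seq;
        unsigned long long count;

        do {
                seq = atomic_load(&sequence);   /* fresh load each pass */
                count = idle_count;
        } while ((seq & 1) ||                   /* writer in progress */
                 atomic_load(&sequence) != seq); /* and re-check after */
        return count;
}

int main(void)
{
        printf("%llu\n", read_idle_count());
        return 0;
}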
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 1c01a9912989..6acb24d606fd 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -130,7 +130,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
- int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
+ int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
trace_kvm_s390_handle_diag(vcpu, code);
switch (code) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c1c7c683fa26..698fb826e149 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -622,14 +622,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
kvm_s390_deliver_pending_interrupts(vcpu);
vcpu->arch.sie_block->icptcode = 0;
- preempt_disable();
- kvm_guest_enter();
- preempt_enable();
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
trace_kvm_s390_sie_enter(vcpu,
atomic_read(&vcpu->arch.sie_block->cpuflags));
+
+ /*
+ * PF_VCPU is checked in the fault handler, so no uaccess may
+ * occur between guest_enter and guest_exit.
+ */
+ preempt_disable();
+ kvm_guest_enter();
+ preempt_enable();
rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+ kvm_guest_exit();
+
+ VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+ vcpu->arch.sie_block->icptcode);
+ trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
+
if (rc) {
if (kvm_is_ucontrol(vcpu->kvm)) {
rc = SIE_INTERCEPT_UCONTROL;
@@ -639,10 +650,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
}
- VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
- vcpu->arch.sie_block->icptcode);
- trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
- kvm_guest_exit();
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
return rc;
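The reorder matters because the s390 fault handler attributes any fault taken with PF_VCPU set to the guest, so user-memory accesses (and anything that may fault on user pages) have to stay outside the window. Shape of the resulting critical section, as a sketch:

    preempt_disable();
    kvm_guest_enter();			/* sets PF_VCPU */
    preempt_enable();
    rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
    kvm_guest_exit();			/* clears PF_VCPU */
    /* tracing, VCPU_EVENT and uaccess are safe again from here on */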
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 50ea137a2d3c..1bf40ef57cf6 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to,
* contains the (negative) exception code.
*/
#ifdef CONFIG_64BIT
+
static unsigned long follow_table(struct mm_struct *mm,
unsigned long address, int write)
{
unsigned long *table = (unsigned long *)__pa(mm->pgd);
+ if (unlikely(address > mm->context.asce_limit - 1))
+ return -0x38UL;
switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
table = table + ((address >> 53) & 0x7ff);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 89ebae4008f2..eba15f18fd38 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,6 +69,7 @@ static void __init setup_zero_pages(void)
order = 2;
break;
case 0x2827: /* zEC12 */
+ case 0x2828: /* zEC12 */
default:
order = 5;
break;
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e9dfb0..27c50f4d90cb 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -12,6 +12,8 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
@@ -41,6 +43,14 @@ void __init cmma_init(void)
if (!cmma_flag)
return;
+ /*
+	 * Disable CMM for dumps; otherwise the tprot-based memory
+	 * detection can fail because of unstable pages.
+ */
+ if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ cmma_flag = 0;
+ return;
+ }
asm volatile(
" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
"0: la %0,0\n"
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 82f165f8078c..3bc6b7e43b24 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -243,7 +243,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
case BPF_S_LD_W_IND:
case BPF_S_LD_H_IND:
case BPF_S_LD_B_IND:
- case BPF_S_LDX_B_MSH:
case BPF_S_LD_IMM:
case BPF_S_LD_MEM:
case BPF_S_MISC_TXA:
@@ -335,14 +334,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* dr %r4,%r12 */
- EMIT2(0x1d4c);
+ /* dlr %r4,%r12 */
+ EMIT4(0xb997004c);
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
- /* m %r4,<d(K)>(%r13) */
- EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
- /* lr %r5,%r4 */
- EMIT2(0x1854);
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+ /* dl %r4,<d(K)>(%r13) */
+ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
break;
case BPF_S_ALU_MOD_X: /* A %= X */
jit->seen |= SEEN_XREG | SEEN_RET0;
@@ -352,16 +353,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* dr %r4,%r12 */
- EMIT2(0x1d4c);
+ /* dlr %r4,%r12 */
+ EMIT4(0xb997004c);
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
case BPF_S_ALU_MOD_K: /* A %= K */
+ if (K == 1) {
+ /* lhi %r5,0 */
+ EMIT4(0xa7580000);
+ break;
+ }
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* d %r4,<d(K)>(%r13) */
- EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
+ /* dl %r4,<d(K)>(%r13) */
+ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
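BPF treats A, X and K as unsigned 32-bit quantities, but the old `dr`/`d` opcodes divide signed and corrupt the result once A has bit 31 set; `dlr`/`dl` divide the unsigned 64-bit value held in an even/odd register pair, which is why %r4 is zeroed first. The K == 1 shortcuts simply avoid emitting a division whose result is already known. A C model of what the new sequence computes (register mapping assumed: A in %r5, X in %r12):

    u64 pair = (u64)A;		/* lhi %r4,0: high word of the pair = 0 */
    u32 quot = (u32)(pair / X);	/* dlr: quotient ends up in %r5 */
    u32 rem  = (u32)(pair % X);	/* dlr: remainder ends up in %r4 */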
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index ffeb17ce7f31..930783d2c99b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -440,7 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
switch (id.machine) {
case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
- case 0x2827: ops->cpu_type = "s390/zEC12"; break;
+ case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
default: return -ENODEV;
}
}
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index c8def8bc9020..91182e95b887 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -109,3 +109,6 @@ source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
+
+config NO_IOMEM
+ def_bool y
diff --git a/arch/score/Makefile b/arch/score/Makefile
index 974aefe86123..9e3e060290e0 100644
--- a/arch/score/Makefile
+++ b/arch/score/Makefile
@@ -20,8 +20,8 @@ cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \
#
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
-KBUILD_AFLAGS_MODULE += -mlong-calls
-KBUILD_CFLAGS_MODULE += -mlong-calls
+KBUILD_AFLAGS_MODULE +=
+KBUILD_CFLAGS_MODULE +=
LDFLAGS += --oformat elf32-littlescore
LDFLAGS_vmlinux += -G0 -static -nostdlib
diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h
index f909ac3144a4..961bd64015a8 100644
--- a/arch/score/include/asm/checksum.h
+++ b/arch/score/include/asm/checksum.h
@@ -184,48 +184,57 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
__wsum sum)
{
__asm__ __volatile__(
- ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t"
- ".set\tnoat\n\t"
- "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t"
- "sltu\t$1, %0, %5\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %6\t\t\t# csum\n\t"
- "sltu\t$1, %0, %6\n\t"
- "lw\t%1, 0(%2)\t\t\t# four words source address\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 4(%2)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 8(%2)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 12(%2)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 0(%3)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 4(%3)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 8(%3)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 12(%3)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "addu\t%0, $1\t\t\t# Add final carry\n\t"
- ".set\tnoat\n\t"
- ".set\tnoreorder"
+ ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t"
+ "add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t"
+ "cmp.c\t%5, %0\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %6\t\t\t# csum\n\t"
+ "cmp.c\t%6, %0\n\t"
+ "lw\t%1, [%2, 0]\t\t\t# four words source address\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "1:lw\t%1, [%2, 4]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%2,8]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%2, 12]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0,%1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%3, 0]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%3, 4]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%3, 8]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%3, 12]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:\n\t"
+ ".set\toptimize"
: "=r" (sum), "=r" (proto)
: "r" (saddr), "r" (daddr),
"0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
diff --git a/arch/score/include/asm/io.h b/arch/score/include/asm/io.h
index fbbfd7132e3b..574c8827abe2 100644
--- a/arch/score/include/asm/io.h
+++ b/arch/score/include/asm/io.h
@@ -5,5 +5,4 @@
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
-
#endif /* _ASM_SCORE_IO_H */
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h
index 059a61b7071b..716b3fd1d863 100644
--- a/arch/score/include/asm/pgalloc.h
+++ b/arch/score/include/asm/pgalloc.h
@@ -2,7 +2,7 @@
#define _ASM_SCORE_PGALLOC_H
#include <linux/mm.h>
-
+#include <linux/highmem.h>
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
index 7234ed09b7b7..befb87d30a89 100644
--- a/arch/score/kernel/entry.S
+++ b/arch/score/kernel/entry.S
@@ -264,7 +264,7 @@ resume_kernel:
disable_irq
lw r8, [r28, TI_PRE_COUNT]
cmpz.c r8
- bne r8, restore_all
+ bne restore_all
need_resched:
lw r8, [r28, TI_FLAGS]
andri.c r9, r8, _TIF_NEED_RESCHED
@@ -415,7 +415,7 @@ ENTRY(handle_sys)
sw r9, [r0, PT_EPC]
cmpi.c r27, __NR_syscalls # check syscall number
- bgeu illegal_syscall
+ bcs illegal_syscall
slli r8, r27, 2 # get syscall routine
la r11, sys_call_table
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index f4c6d02421d3..a1519ad3d49d 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -78,8 +78,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.reg0 = (unsigned long) childregs;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
- p->thread->reg12 = usp;
- p->thread->reg13 = arg;
+ p->thread.reg12 = usp;
+ p->thread.reg13 = arg;
p->thread.reg3 = (unsigned long) ret_from_kernel_thread;
} else {
*childregs = *current_pt_regs();
diff --git a/arch/score/kernel/vmlinux.lds.S b/arch/score/kernel/vmlinux.lds.S
index eebcbaa4e978..7274b5c4287e 100644
--- a/arch/score/kernel/vmlinux.lds.S
+++ b/arch/score/kernel/vmlinux.lds.S
@@ -49,6 +49,7 @@ SECTIONS
}
. = ALIGN(16);
+ _sdata = .; /* Start of data section */
RODATA
EXCEPTION_TABLE(16)
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index 13e9966464c2..e79fb6ebaa42 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -40,15 +40,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
/* arch/sh/kernel/return_address.c */
extern void *return_address(unsigned int);
-#define HAVE_ARCH_CALLER_ADDR
-
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 ((unsigned long)return_address(1))
-#define CALLER_ADDR2 ((unsigned long)return_address(2))
-#define CALLER_ADDR3 ((unsigned long)return_address(3))
-#define CALLER_ADDR4 ((unsigned long)return_address(4))
-#define CALLER_ADDR5 ((unsigned long)return_address(5))
-#define CALLER_ADDR6 ((unsigned long)return_address(6))
+#define ftrace_return_address(n) return_address(n)
#endif /* __ASSEMBLY__ */
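The seven per-level macros collapse into the single ftrace_return_address() hook that generic code consumes; architectures that don't provide it fall back to the compiler builtin. Roughly, on the generic side (a sketch, not part of this patch):

    #ifndef ftrace_return_address
    # define ftrace_return_address(n) __builtin_return_address(n)
    #endif
    #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))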
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d9f689..362192ed12fe 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}
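The mmu_gather API change passes the flushed range through instead of a boolean; a full-mm teardown is encoded by convention as start = 0, end = ~0UL, which the `!(start | (end + 1))` test recovers without a branch. A sketch of why it works:

    unsigned long start = 0, end = ~0UL;	/* "whole mm" encoding */
    int fullmm = !(start | (end + 1));	/* end + 1 wraps to 0, so true */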
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
index b959f5592604..8dfe645bcc4b 100644
--- a/arch/sh/kernel/dumpstack.c
+++ b/arch/sh/kernel/dumpstack.c
@@ -115,7 +115,7 @@ static int print_trace_stack(void *data, char *name)
*/
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
- printk(data);
+ printk("%s", (char *)data);
printk_address(addr, reliable);
}
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 38b313909ac9..adad46e41a1d 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -13,6 +13,7 @@
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 2a0a596ebf67..d77f2f6c7ff0 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_FLATMEM
+/* needed by the pfn_valid macro */
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+#endif
#define DECLARE_EXPORT(name) \
extern void name(void);EXPORT_SYMBOL(name)
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index 7b95f29e3174..3baff31e58cf 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
checksum.o strlen.o div64.o div64-generic.o
# Extracted from libgcc
-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
udiv_qrnnd.o
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 9ac9f1666339..03a1bc3c3dde 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,7 +25,7 @@ config SPARC
select RTC_DRV_M48T59
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
- select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL if SPARC64
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
@@ -77,6 +77,7 @@ config SPARC64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_C_RECORDMCOUNT
select NO_BOOTMEM
+ select ARCH_SUPPORTS_ATOMIC_RMW
config ARCH_DEFCONFIG
string
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 5080d16a832f..ec2e2e2aba7d 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -9,7 +9,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\n\t"
+ asm_volatile_goto("1:\n\t"
"nop\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 7619f2f792af..6663604a902a 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -24,7 +24,8 @@
/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
* The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
* The PROM resides in an area spanning 0xf0000000 to 0x100000000.
* The vmalloc area spans 0x100000000 to 0x200000000.
* Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
* 0x400000000.
*/
#define TLBTEMP_BASE _AC(0x0000000006000000,UL)
-#define TSBMAP_BASE _AC(0x0000000008000000,UL)
+#define TSBMAP_8K_BASE _AC(0x0000000008000000,UL)
+#define TSBMAP_4M_BASE _AC(0x0000000008400000,UL)
#define MODULES_VADDR _AC(0x0000000010000000,UL)
#define MODULES_LEN _AC(0x00000000e0000000,UL)
#define MODULES_END _AC(0x00000000f0000000,UL)
@@ -616,7 +618,7 @@ static inline unsigned long pte_present(pte_t pte)
}
#define pte_accessible pte_accessible
-static inline unsigned long pte_accessible(pte_t a)
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
return pte_val(a) & _PAGE_VALID;
}
@@ -806,7 +808,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
* SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
* and SUN4V pte layout, so this inline test is fine.
*/
- if (likely(mm != &init_mm) && pte_accessible(orig))
+ if (likely(mm != &init_mm) && pte_accessible(mm, orig))
tlb_batch_add(mm, addr, ptep, orig, fullmm);
}
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index f0d6a9700f4c..1a4bb971e06d 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -35,6 +35,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
{
}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
extern void flush_tlb_pending(void);
@@ -49,11 +51,6 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifndef CONFIG_SMP
-#define flush_tlb_kernel_range(start,end) \
-do { flush_tsb_kernel_range(start,end); \
- __flush_tlb_kernel_range(start,end); \
-} while (0)
-
static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
@@ -64,11 +61,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
-#define flush_tlb_kernel_range(start, end) \
-do { flush_tsb_kernel_range(start,end); \
- smp_flush_tlb_kernel_range(start, end); \
-} while (0)
-
#define global_flush_tlb_page(mm, vaddr) \
smp_flush_tlb_page(mm, vaddr)
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index e562d3caee57..ad7e178337f1 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
-#define __copy_to_user_inatomic ___copy_to_user
-#define __copy_from_user_inatomic ___copy_from_user
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
struct pt_regs;
extern unsigned long compute_effective_address(struct pt_regs *,
diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
index 961b87f99e69..f76389a32342 100644
--- a/arch/sparc/kernel/asm-offsets.c
+++ b/arch/sparc/kernel/asm-offsets.c
@@ -49,6 +49,8 @@ int foo(void)
DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
BLANK();
DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
+ BLANK();
+ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
/* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
return 0;
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 5ef48dab5636..252f8768aac6 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -842,9 +842,8 @@ void ldom_reboot(const char *boot_command)
if (boot_command && strlen(boot_command)) {
unsigned long len;
- strcpy(full_boot_str, "boot ");
- strlcpy(full_boot_str + strlen("boot "), boot_command,
- sizeof(full_boot_str + strlen("boot ")));
+ snprintf(full_boot_str, sizeof(full_boot_str), "boot %s",
+ boot_command);
len = strlen(full_boot_str);
if (reboot_data_supported) {
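The replaced call is a classic sizeof slip: `sizeof(full_boot_str + strlen("boot "))` measures a pointer expression (8 bytes on sparc64), not the remaining buffer, so strlcpy truncated the boot command to almost nothing. A minimal reproduction, buffer size assumed:

    char full_boot_str[128];
    size_t wrong = sizeof(full_boot_str + strlen("boot "));	/* == sizeof(char *) */
    size_t right = sizeof(full_boot_str) - strlen("boot ");	/* == 123 */

The single bounded snprintf() also drops the separate strcpy() and the manual offset arithmetic.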
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index e2a030045089..33c02b15f478 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -839,7 +839,7 @@ sys_sigreturn:
nop
call syscall_trace
- nop
+ mov 1, %o1
1:
/* We don't want to muck with user registers like a
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 0746e5e32b37..fde5a419cf27 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -25,11 +25,10 @@ kvmap_itlb:
*/
kvmap_itlb_4v:
-kvmap_itlb_nonlinear:
/* Catch kernel NULL pointer calls. */
sethi %hi(PAGE_SIZE), %g5
cmp %g4, %g5
- bleu,pn %xcc, kvmap_dtlb_longpath
+ blu,pn %xcc, kvmap_itlb_longpath
nop
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 54df554b82d9..fa4c900a0d1f 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp)
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
lp->hs_state != LDC_HS_OPEN)
- err = -EINVAL;
+ err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
else
err = start_handshake(lp);
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index baf4366e2d6a..906cbf0f8608 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -399,8 +399,8 @@ static void apb_fake_ranges(struct pci_dev *dev,
apb_calc_first_last(map, &first, &last);
res = bus->resource[1];
res->flags = IORESOURCE_MEM;
- region.start = (first << 21);
- region.end = (last << 21) + ((1 << 21) - 1);
+ region.start = (first << 29);
+ region.end = (last << 29) + ((1 << 29) - 1);
pcibios_bus_to_resource(dev, res, &region);
}
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index baebab215492..b9cc9763faf4 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -57,9 +57,12 @@ void arch_cpu_idle(void)
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
+ local_irq_enable();
} else {
unsigned long pstate;
+ local_irq_enable();
+
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
*/
@@ -81,7 +84,6 @@ void arch_cpu_idle(void)
: "=&r" (pstate)
: "i" (PSTATE_IE));
}
- local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 77539eda928c..8565ecd7d48a 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -150,7 +150,7 @@ void cpu_panic(void)
#define NUM_ROUNDS 64 /* magic value */
#define NUM_ITERS 5 /* likewise */
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];
#define DEBUG_TICK_SYNC 0
@@ -258,7 +258,7 @@ static void smp_synchronize_one_tick(int cpu)
go[MASTER] = 0;
membar_safe("#StoreLoad");
- spin_lock_irqsave(&itc_sync_lock, flags);
+ raw_spin_lock_irqsave(&itc_sync_lock, flags);
{
for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
while (!go[MASTER])
@@ -269,7 +269,7 @@ static void smp_synchronize_one_tick(int cpu)
membar_safe("#StoreLoad");
}
}
- spin_unlock_irqrestore(&itc_sync_lock, flags);
+ raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index f7c72b6efc27..d066eb18650c 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
SIGN1(sys32_select, compat_sys_select, %o0)
-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
+SIGN1(sys32_futex, compat_sys_futex, %o1)
SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 22a1098961f5..c79c687fbe1e 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -152,7 +152,7 @@ linux_syscall_trace32:
srl %i4, 0, %o4
srl %i1, 0, %o1
srl %i2, 0, %o2
- ba,pt %xcc, 2f
+ ba,pt %xcc, 5f
srl %i3, 0, %o3
linux_syscall_trace:
@@ -182,14 +182,15 @@ linux_sparc_syscall32:
srl %i1, 0, %o1 ! IEU0 Group
ldx [%g6 + TI_FLAGS], %l0 ! Load
- srl %i5, 0, %o5 ! IEU1
+ srl %i3, 0, %o3 ! IEU0
srl %i2, 0, %o2 ! IEU0 Group
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
bne,pn %icc, linux_syscall_trace32 ! CTI
mov %i0, %l5 ! IEU1
- call %l7 ! CTI Group brk forced
- srl %i3, 0, %o3 ! IEU0
- ba,a,pt %xcc, 3f
+5: call %l7 ! CTI Group brk forced
+ srl %i5, 0, %o5 ! IEU1
+ ba,pt %xcc, 3f
+ sra %o0, 0, %o0
/* Linux native system calls enter here... */
.align 32
@@ -217,7 +218,6 @@ linux_sparc_syscall:
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
- sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index 2e973a26fbda..3a43edb46aa3 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -131,7 +131,6 @@ startup_continue:
clr %l5
sethi %hi(num_kernel_image_mappings), %l6
lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
- add %l6, 1, %l6
mov 15, %l7
BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
@@ -224,7 +223,6 @@ niagara_lock_tlb:
clr %l5
sethi %hi(num_kernel_image_mappings), %l6
lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
- add %l6, 1, %l6
1:
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 8201c25e7669..4db8898199f7 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -163,17 +163,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn, unsigned int rd)
{
+ int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
- int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+ unsigned long addr;
if (insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd, from_kernel);
- return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+ addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd, from_kernel);
- return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+ addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
+
+ if (!from_kernel && test_thread_flag(TIF_32BIT))
+ addr &= 0xffffffff;
+
+ return addr;
}
/* This is just to make gcc think die_if_kernel does return... */
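Funneling both addressing modes through one exit lets the fix mask the result for compat tasks, whose effective addresses must wrap at 4GB even when the 64-bit registers hold junk in the upper half. For example (a sketch):

    /* rs1 = 0xffffffff, imm = 8: a 32-bit task must fault at 0x7 ... */
    unsigned long addr = 0xffffffffUL + 8;	/* 0x100000007 */
    if (!from_kernel && test_thread_flag(TIF_32BIT))
    	addr &= 0xffffffff;			/* ... not at 0x100000007 */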
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index 2c20ad63ddbf..30eee6e8a81b 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -236,6 +236,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
*/
VISEntryHalf
+ membar #Sync
alignaddr %o1, %g0, %g0
add %o1, (64 - 1), %o4
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 0c4e35e522fa..323335b9cd2b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -98,15 +98,6 @@ EXPORT_SYMBOL(___copy_from_user);
EXPORT_SYMBOL(___copy_in_user);
EXPORT_SYMBOL(__clear_user);
-/* RW semaphores */
-EXPORT_SYMBOL(__down_read);
-EXPORT_SYMBOL(__down_read_trylock);
-EXPORT_SYMBOL(__down_write);
-EXPORT_SYMBOL(__down_write_trylock);
-EXPORT_SYMBOL(__up_read);
-EXPORT_SYMBOL(__up_write);
-EXPORT_SYMBOL(__downgrade_write);
-
/* Atomic counter implementation. */
EXPORT_SYMBOL(atomic_add);
EXPORT_SYMBOL(atomic_add_ret);
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index aa4d55b0bdf0..5ce8f2f64604 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
case 0: fsr = *pfsr;
if (IR == -1) IR = 2;
/* fcc is always fcc0 */
- fsr &= ~0xc00; fsr |= (IR << 10); break;
+ fsr &= ~0xc00; fsr |= (IR << 10);
*pfsr = fsr;
break;
case 1: rd->s = IR; break;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 5062ff389e83..ea83f82464da 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -95,38 +95,51 @@ static unsigned int get_user_insn(unsigned long tpc)
pte_t *ptep, pte;
unsigned long pa;
u32 insn = 0;
- unsigned long pstate;
- if (pgd_none(*pgdp))
- goto outret;
+ if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
+ goto out;
pudp = pud_offset(pgdp, tpc);
- if (pud_none(*pudp))
- goto outret;
- pmdp = pmd_offset(pudp, tpc);
- if (pmd_none(*pmdp))
- goto outret;
-
- /* This disables preemption for us as well. */
- __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
- __asm__ __volatile__("wrpr %0, %1, %%pstate"
- : : "r" (pstate), "i" (PSTATE_IE));
- ptep = pte_offset_map(pmdp, tpc);
- pte = *ptep;
- if (!pte_present(pte))
+ if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
goto out;
- pa = (pte_pfn(pte) << PAGE_SHIFT);
- pa += (tpc & ~PAGE_MASK);
-
- /* Use phys bypass so we don't pollute dtlb/dcache. */
- __asm__ __volatile__("lduwa [%1] %2, %0"
- : "=r" (insn)
- : "r" (pa), "i" (ASI_PHYS_USE_EC));
+ /* This disables preemption for us as well. */
+ local_irq_disable();
+ pmdp = pmd_offset(pudp, tpc);
+ if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
+ goto out_irq_enable;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (pmd_trans_huge(*pmdp)) {
+ if (pmd_trans_splitting(*pmdp))
+ goto out_irq_enable;
+
+ pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
+ pa += tpc & ~HPAGE_MASK;
+
+ /* Use phys bypass so we don't pollute dtlb/dcache. */
+ __asm__ __volatile__("lduwa [%1] %2, %0"
+ : "=r" (insn)
+ : "r" (pa), "i" (ASI_PHYS_USE_EC));
+ } else
+#endif
+ {
+ ptep = pte_offset_map(pmdp, tpc);
+ pte = *ptep;
+ if (pte_present(pte)) {
+ pa = (pte_pfn(pte) << PAGE_SHIFT);
+ pa += (tpc & ~PAGE_MASK);
+
+ /* Use phys bypass so we don't pollute dtlb/dcache. */
+ __asm__ __volatile__("lduwa [%1] %2, %0"
+ : "=r" (insn)
+ : "r" (pa), "i" (ASI_PHYS_USE_EC));
+ }
+ pte_unmap(ptep);
+ }
+out_irq_enable:
+ local_irq_enable();
out:
- pte_unmap(ptep);
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
-outret:
return insn;
}
@@ -152,7 +165,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
}
static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
- unsigned int insn, int fault_code)
+ unsigned long fault_addr, unsigned int insn,
+ int fault_code)
{
unsigned long addr;
siginfo_t info;
@@ -160,10 +174,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
info.si_code = code;
info.si_signo = sig;
info.si_errno = 0;
- if (fault_code & FAULT_CODE_ITLB)
+ if (fault_code & FAULT_CODE_ITLB) {
addr = regs->tpc;
- else
- addr = compute_effective_address(regs, insn, 0);
+ } else {
+ /* If we were able to probe the faulting instruction, use it
+	 * to compute a precise fault address.  Otherwise use the
+	 * fault-time address, which may only have page granularity.
+ */
+ if (insn)
+ addr = compute_effective_address(regs, insn, 0);
+ else
+ addr = fault_addr;
+ }
info.si_addr = (void __user *) addr;
info.si_trapno = 0;
@@ -238,7 +260,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
/* The si_code was set to make clear whether
* this was a SEGV_MAPERR or SEGV_ACCERR fault.
*/
- do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
+ do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
return;
}
@@ -258,18 +280,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
show_regs(regs);
}
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
- unsigned long addr)
-{
- static int times;
-
- if (times++ < 10)
- printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
- "reports 64-bit fault address [%lx]\n",
- current->comm, current->pid, addr);
- show_regs(regs);
-}
-
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
@@ -298,10 +308,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
goto intr_or_no_mm;
}
}
- if (unlikely((address >> 32) != 0)) {
- bogus_32bit_fault_address(regs, address);
+ if (unlikely((address >> 32) != 0))
goto intr_or_no_mm;
- }
}
if (regs->tstate & TSTATE_PRIV) {
@@ -519,7 +527,7 @@ do_sigbus:
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
- do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
+ do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
/* Kernel mode? Handle exceptions or die */
if (regs->tstate & TSTATE_PRIV)
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
index 44aad32eeb4e..969f96450f69 100644
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out:
/* The things we do for performance... */
hypersparc_flush_cache_range:
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
#ifndef CONFIG_SMP
ld [%o0 + AOFF_mm_context], %g1
cmp %g1, -1
@@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out:
*/
/* Verified, my ass... */
hypersparc_flush_cache_page:
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
ld [%o0 + AOFF_mm_context], %g2
#ifndef CONFIG_SMP
cmp %g2, -1
@@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out:
sta %g5, [%g1] ASI_M_MMUREGS
hypersparc_flush_tlb_range:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
@@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out:
sta %g5, [%g1] ASI_M_MMUREGS
hypersparc_flush_tlb_page:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
andn %o1, (PAGE_SIZE - 1), %o1
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 04fd55a6e461..a751023dbdcd 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -350,6 +350,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
mm = vma->vm_mm;
+ /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
+ if (!pte_accessible(mm, pte))
+ return;
+
spin_lock_irqsave(&mm->context.lock, flags);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -2764,3 +2768,26 @@ void hugetlb_setup(struct pt_regs *regs)
}
}
#endif
+
+#ifdef CONFIG_SMP
+#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
+#else
+#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
+#endif
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
+ if (start < LOW_OBP_ADDRESS) {
+ flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
+ do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
+ }
+ if (end > HI_OBP_ADDRESS) {
+ flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
+ do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
+ }
+ } else {
+ flush_tsb_kernel_range(start, end);
+ do_flush_tlb_kernel_range(start, end);
+ }
+}
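flush_tlb_kernel_range() becomes an out-of-line function so it can avoid the OpenFirmware (OBP) window: tearing down translations the firmware still uses between LOW_OBP_ADDRESS and HI_OBP_ADDRESS can wedge the machine. The intent — a hedged sketch with a hypothetical do_flush() standing in for the TSB + TLB pair — is to flush only the pieces of the request that fall outside that window:

    if (start < LOW_OBP_ADDRESS)
    	do_flush(start, min(end, (unsigned long)LOW_OBP_ADDRESS));
    if (end > HI_OBP_ADDRESS)
    	do_flush(max(start, (unsigned long)HI_OBP_ADDRESS), end);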
diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
index c801c3953a00..5d2b88d39424 100644
--- a/arch/sparc/mm/swift.S
+++ b/arch/sparc/mm/swift.S
@@ -105,7 +105,7 @@ swift_flush_cache_mm_out:
.globl swift_flush_cache_range
swift_flush_cache_range:
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
sub %o2, %o1, %o2
sethi %hi(4096), %o3
cmp %o2, %o3
@@ -116,7 +116,7 @@ swift_flush_cache_range:
.globl swift_flush_cache_page
swift_flush_cache_page:
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
70:
ld [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -219,7 +219,7 @@ swift_flush_sig_insns:
.globl swift_flush_tlb_range
.globl swift_flush_tlb_all
swift_flush_tlb_range:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
swift_flush_tlb_mm:
ld [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -233,7 +233,7 @@ swift_flush_tlb_all_out:
.globl swift_flush_tlb_page
swift_flush_tlb_page:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
andn %o1, (PAGE_SIZE - 1), %o1
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 2cc3bce5ee91..71d99a6c75a7 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
mm->context.tsb_block[tsb_idx].tsb_nentries =
tsb_bytes / sizeof(struct tsb);
- base = TSBMAP_BASE;
+ switch (tsb_idx) {
+ case MM_TSB_BASE:
+ base = TSBMAP_8K_BASE;
+ break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ case MM_TSB_HUGE:
+ base = TSBMAP_4M_BASE;
+ break;
+#endif
+ default:
+ BUG();
+ }
+
tte = pgprot_val(PAGE_KERNEL_LOCKED);
tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
index 4e55e8f76648..bf10a345fa8b 100644
--- a/arch/sparc/mm/tsunami.S
+++ b/arch/sparc/mm/tsunami.S
@@ -24,7 +24,7 @@
/* Sliiick... */
tsunami_flush_cache_page:
tsunami_flush_cache_range:
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
tsunami_flush_cache_mm:
ld [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -46,7 +46,7 @@ tsunami_flush_sig_insns:
/* More slick stuff... */
tsunami_flush_tlb_range:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
tsunami_flush_tlb_mm:
ld [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -65,7 +65,7 @@ tsunami_flush_tlb_out:
/* This one can be done in a fine grained manner... */
tsunami_flush_tlb_page:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
andn %o1, (PAGE_SIZE - 1), %o1
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index bf8ee0613ae7..852257fcc82b 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -108,7 +108,7 @@ viking_mxcc_flush_page:
viking_flush_cache_page:
viking_flush_cache_range:
#ifndef CONFIG_SMP
- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
#endif
viking_flush_cache_mm:
#ifndef CONFIG_SMP
@@ -148,7 +148,7 @@ viking_flush_tlb_mm:
#endif
viking_flush_tlb_range:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
@@ -173,7 +173,7 @@ viking_flush_tlb_range:
#endif
viking_flush_tlb_page:
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
@@ -239,7 +239,7 @@ sun4dsmp_flush_tlb_range:
tst %g5
bne 3f
mov SRMMU_CTX_REG, %g1
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
@@ -265,7 +265,7 @@ sun4dsmp_flush_tlb_page:
tst %g5
bne 2f
mov SRMMU_CTX_REG, %g1
- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + VMA_VM_MM], %o0
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
and %o1, PAGE_MASK, %o1
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index d36a85ebb5e0..224fc0c71b8a 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -83,9 +83,9 @@ static void bpf_flush_icache(void *start_, void *end_)
#define BNE (F2(0, 2) | CONDNE)
#ifdef CONFIG_SPARC64
-#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20))
+#define BE_PTR (F2(0, 1) | CONDE | (2 << 20))
#else
-#define BNE_PTR BNE
+#define BE_PTR BE
#endif
#define SETHI(K, REG) \
@@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ALU_MUL_K: /* A *= K */
emit_alu_K(MUL, K);
break;
- case BPF_S_ALU_DIV_K: /* A /= K */
- emit_alu_K(MUL, K);
- emit_read_y(r_A);
+	case BPF_S_ALU_DIV_K: /* A /= K with K != 0 */
+ if (K == 1)
+ break;
+ emit_write_y(G0);
+#ifdef CONFIG_SPARC32
+ /* The Sparc v8 architecture requires
+ * three instructions between a %y
+ * register write and the first use.
+ */
+ emit_nop();
+ emit_nop();
+ emit_nop();
+#endif
+ emit_alu_K(DIV, K);
break;
case BPF_S_ALU_DIV_X: /* A /= X; */
emit_cmpi(r_X, 0);
@@ -589,7 +600,7 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ANC_IFINDEX:
emit_skb_loadptr(dev, r_A);
emit_cmpi(r_A, 0);
- emit_branch(BNE_PTR, cleanup_addr + 4);
+ emit_branch(BE_PTR, cleanup_addr + 4);
emit_nop();
emit_load32(r_A, struct net_device, ifindex, r_A);
break;
@@ -602,7 +613,7 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ANC_HATYPE:
emit_skb_loadptr(dev, r_A);
emit_cmpi(r_A, 0);
- emit_branch(BNE_PTR, cleanup_addr + 4);
+ emit_branch(BE_PTR, cleanup_addr + 4);
emit_nop();
emit_load16(r_A, struct net_device, type, r_A);
break;
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 78f1f2ded86c..ffd4493efc78 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -281,7 +281,6 @@ long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
u32 dummy, u32 low, u32 high);
long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
u32 dummy, u32 low, u32 high);
-long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);
long compat_sys_sync_file_range2(int fd, unsigned int flags,
u32 offset_lo, u32 offset_hi,
u32 nbytes_lo, u32 nbytes_hi);
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
index 63294f5a8efb..4f7ae39fa202 100644
--- a/arch/tile/include/asm/percpu.h
+++ b/arch/tile/include/asm/percpu.h
@@ -15,9 +15,37 @@
#ifndef _ASM_TILE_PERCPU_H
#define _ASM_TILE_PERCPU_H
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+ unsigned long tp;
+ register unsigned long *sp asm("sp");
+ asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+ return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
#include <asm-generic/percpu.h>
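The difference is visible in code like the following, where a preemption point may migrate the task to another CPU and the offset must be re-derived rather than served from a cached register value (sketch):

    unsigned long off = __my_cpu_offset;	/* reads "tp" */
    barrier();			/* potential preemption / migration */
    off = __my_cpu_offset;	/* must reload; the old value may be stale */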
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd1a8a1..29b0301c18aa 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 95feaa47a2fb..c70a234a3f8c 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -200,6 +200,7 @@ extern int os_unmap_memory(void *addr, int len);
extern int os_drop_memory(void *addr, int length);
extern int can_drop_memory(void);
extern void os_flush_stdout(void);
+extern int os_mincore(void *addr, unsigned long len);
/* execvp.c */
extern int execvp_noalloc(char *buf, const char *file, char *const argv[]);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index babe21826e3e..d8b78a03855c 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -13,7 +13,7 @@ clean-files :=
obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
physmem.o process.o ptrace.o reboot.o sigio.o \
signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \
- um_arch.o umid.o skas/
+ um_arch.o umid.o maccess.o skas/
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
obj-$(CONFIG_GPROF) += gprof_syms.o
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 829df49dee99..41ebbfebb333 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
+ size_t size;
int tmp;
- if (copy_from_user(buf, buffer, count))
+ size = min(count, sizeof(buf));
+ if (copy_from_user(buf, buffer, size))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0);
diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
new file mode 100644
index 000000000000..1f3d5c4910d1
--- /dev/null
+++ b/arch/um/kernel/maccess.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <os.h>
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+ void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
+
+ if ((unsigned long)src < PAGE_SIZE || size <= 0)
+ return -EFAULT;
+
+ if (os_mincore(psrc, size + src - psrc) <= 0)
+ return -EFAULT;
+
+ return __probe_kernel_read(dst, src, size);
+}
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index b8f34c9e53ae..67b9c8f5a89e 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -4,6 +4,7 @@
*/
#include <stdio.h>
+#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
@@ -232,6 +233,57 @@ out:
return ok;
}
+static int os_page_mincore(void *addr)
+{
+ char vec[2];
+ int ret;
+
+ ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
+ if (ret < 0) {
+ if (errno == ENOMEM || errno == EINVAL)
+ return 0;
+ else
+ return -errno;
+ }
+
+ return vec[0] & 1;
+}
+
+int os_mincore(void *addr, unsigned long len)
+{
+ char *vec;
+ int ret, i;
+
+ if (len <= UM_KERN_PAGE_SIZE)
+ return os_page_mincore(addr);
+
+ vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
+ if (!vec)
+ return -ENOMEM;
+
+ ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
+ if (ret < 0) {
+ if (errno == ENOMEM || errno == EINVAL)
+ ret = 0;
+ else
+ ret = -errno;
+
+ goto out;
+ }
+
+ for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
+ if (!(vec[i] & 1)) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ret = 1;
+out:
+ free(vec);
+ return ret;
+}
+
void init_new_thread_signals(void)
{
set_handler(SIGSEGV);
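Because UML runs as an ordinary host process, dereferencing a bad kernel pointer raises a real SIGSEGV on the host instead of a recoverable page fault, so the probe asks mincore() about residency first. Typical use, as a sketch:

    unsigned long word;

    if (probe_kernel_read(&word, (void *)addr, sizeof(word)) == 0)
    	pr_info("word at %lx = %lx\n", addr, word);
    else
    	pr_info("%lx is not safely readable\n", addr);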
diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
index de7dc5fdd58b..24e836023e6c 100644
--- a/arch/unicore32/mm/alignment.c
+++ b/arch/unicore32/mm/alignment.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/unaligned.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fe120da25625..3115eae96ad8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -121,6 +121,7 @@ config X86
select OLD_SIGACTION if X86_32
select COMPAT_OLD_SIGACTION if IA32_EMULATION
select RTC_LIB
+ select ARCH_SUPPORTS_ATOMIC_RMW
config INSTRUCTION_DECODER
def_bool y
@@ -207,6 +208,12 @@ config ARCH_HIBERNATION_POSSIBLE
config ARCH_SUSPEND_POSSIBLE
def_bool y
+config ARCH_WANT_HUGE_PMD_SHARE
+ def_bool y
+
+config ARCH_WANT_GENERAL_HUGETLB
+ def_bool y
+
config ZONE_DMA32
bool
default X86_64
@@ -951,10 +958,27 @@ config VM86
default y
depends on X86_32
---help---
- This option is required by programs like DOSEMU to run 16-bit legacy
- code on X86 processors. It also may be needed by software like
- XFree86 to initialize some video cards via BIOS. Disabling this
- option saves about 6k.
+ This option is required by programs like DOSEMU to run
+ 16-bit real mode legacy code on x86 processors. It also may
+ be needed by software like XFree86 to initialize some video
+ cards via BIOS. Disabling this option saves about 6K.
+
+config X86_16BIT
+ bool "Enable support for 16-bit segments" if EXPERT
+ default y
+ ---help---
+ This option is required by programs like Wine to run 16-bit
+ protected mode legacy code on x86 processors. Disabling
+ this option saves about 300 bytes on i386, or around 6K text
+	  plus 16K runtime memory on x86-64.
+
+config X86_ESPFIX32
+ def_bool y
+ depends on X86_16BIT && X86_32
+
+config X86_ESPFIX64
+ def_bool y
+ depends on X86_16BIT && X86_64
config TOSHIBA
tristate "Toshiba Laptop support"
@@ -1560,6 +1584,7 @@ config EFI
config EFI_STUB
bool "EFI stub support"
depends on EFI
+ select RELOCATABLE
---help---
This kernel feature allows a bzImage to be loaded directly
by EFI firmware without the use of a bootloader.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5c477260294f..412189d2bff9 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -31,6 +31,9 @@ ifeq ($(CONFIG_X86_32),y)
KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
+ # Don't autogenerate MMX or SSE instructions
+ KBUILD_CFLAGS += -mno-mmx -mno-sse
+
# Never want PIC in a 32-bit kernel, prevent breakage with GCC built
# with nonstandard options
KBUILD_CFLAGS += -fno-pic
@@ -57,8 +60,11 @@ else
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
+ # Don't autogenerate MMX or SSE instructions
+ KBUILD_CFLAGS += -mno-mmx -mno-sse
+
# Use -mpreferred-stack-boundary=3 if supported.
- KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
+ KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
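The kernel never saves FPU/SIMD state on kernel entry, so compiler-autogenerated MMX/SSE code would silently clobber user-space register state; hence the blanket -mno-mmx -mno-sse. Code that genuinely wants SIMD must bracket it explicitly — roughly, using the usual x86 helpers (a sketch):

    kernel_fpu_begin();	/* save user FPU state, allow SIMD */
    /* hand-written SSE/AVX sequence goes here */
    kernel_fpu_end();	/* restore user FPU state */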
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 379814bc41e3..6cf0111783d3 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
# How to compile the 16-bit code. Note we always compile for -march=i386,
# that way we can complain to the user if the CPU is insufficient.
-KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
-DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/code16gcc.h \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ -mno-mmx -mno-sse \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-unit-at-a-time)) \
$(call cc-option, -fno-stack-protector) \
$(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS += $(call cc-option, -m32)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 5ef205c5f37b..7194d9f094bc 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -12,6 +12,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
KBUILD_CFLAGS += $(cflags-y)
+KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index c205035a6b96..17177e80d466 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -865,6 +865,9 @@ fail:
* Because the x86 boot code expects to be passed a boot_params we
* need to create one ourselves (usually the bootloader would create
* one for us).
+ *
+ * The caller is responsible for filling out ->code32_start in the
+ * returned boot_params.
*/
struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
{
@@ -875,14 +878,15 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
struct efi_info *efi;
efi_loaded_image_t *image;
void *options;
- u32 load_options_size;
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
int options_size = 0;
efi_status_t status;
- unsigned long cmdline;
+ char *cmdline_ptr;
u16 *s2;
u8 *s1;
int i;
+ unsigned long ramdisk_addr;
+ unsigned long ramdisk_size;
sys_table = _table;
@@ -893,13 +897,14 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
status = efi_call_phys3(sys_table->boottime->handle_protocol,
handle, &proto, (void *)&image);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
+ efi_printk(sys_table, "Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
return NULL;
}
- status = low_alloc(0x4000, 1, (unsigned long *)&boot_params);
+ status = efi_low_alloc(sys_table, 0x4000, 1,
+ (unsigned long *)&boot_params);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc lowmem for boot params\n");
+ efi_printk(sys_table, "Failed to alloc lowmem for boot params\n");
return NULL;
}
@@ -921,45 +926,13 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
hdr->vid_mode = 0xffff;
hdr->boot_flag = 0xAA55;
- hdr->code32_start = (__u64)(unsigned long)image->image_base;
-
hdr->type_of_loader = 0x21;
/* Convert unicode cmdline to ascii */
- options = image->load_options;
- load_options_size = image->load_options_size / 2; /* ASCII */
- cmdline = 0;
- s2 = (u16 *)options;
-
- if (s2) {
- while (*s2 && *s2 != '\n' && options_size < load_options_size) {
- s2++;
- options_size++;
- }
-
- if (options_size) {
- if (options_size > hdr->cmdline_size)
- options_size = hdr->cmdline_size;
-
- options_size++; /* NUL termination */
-
- status = low_alloc(options_size, 1, &cmdline);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for cmdline\n");
- goto fail;
- }
-
- s1 = (u8 *)(unsigned long)cmdline;
- s2 = (u16 *)options;
-
- for (i = 0; i < options_size - 1; i++)
- *s1++ = *s2++;
-
- *s1 = '\0';
- }
- }
-
- hdr->cmd_line_ptr = cmdline;
+ cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size);
+ if (!cmdline_ptr)
+ goto fail;
+ hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
@@ -969,16 +942,20 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
memset(sdt, 0, sizeof(*sdt));
- status = handle_ramdisks(image, hdr);
+ status = handle_cmdline_files(sys_table, image,
+ (char *)(unsigned long)hdr->cmd_line_ptr,
+ "initrd=", hdr->initrd_addr_max,
+ &ramdisk_addr, &ramdisk_size);
if (status != EFI_SUCCESS)
goto fail2;
+ hdr->ramdisk_image = ramdisk_addr;
+ hdr->ramdisk_size = ramdisk_size;
return boot_params;
fail2:
- if (options_size)
- low_free(options_size, hdr->cmd_line_ptr);
+ efi_free(sys_table, options_size, hdr->cmd_line_ptr);
fail:
- low_free(0x4000, (unsigned long)boot_params);
+ efi_free(sys_table, 0x4000, (unsigned long)boot_params);
return NULL;
}
@@ -992,22 +969,24 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
efi_memory_desc_t *mem_map;
efi_status_t status;
__u32 desc_version;
+ bool called_exit = false;
u8 nr_entries;
int i;
size = sizeof(*mem_map) * 32;
again:
- size += sizeof(*mem_map);
+ size += sizeof(*mem_map) * 2;
_size = size;
- status = low_alloc(size, 1, (unsigned long *)&mem_map);
+ status = efi_low_alloc(sys_table, size, 1, (unsigned long *)&mem_map);
if (status != EFI_SUCCESS)
return status;
+get_map:
status = efi_call_phys5(sys_table->boottime->get_memory_map, &size,
mem_map, &key, &desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL) {
- low_free(_size, (unsigned long)mem_map);
+ efi_free(sys_table, _size, (unsigned long)mem_map);
goto again;
}
@@ -1029,8 +1008,20 @@ again:
/* Might as well exit boot services now */
status = efi_call_phys2(sys_table->boottime->exit_boot_services,
handle, key);
- if (status != EFI_SUCCESS)
- goto free_mem_map;
+ if (status != EFI_SUCCESS) {
+ /*
+ * ExitBootServices() will fail if any of the event
+ * handlers change the memory map. In which case, we
+ * must be prepared to retry, but only once so that
+ * we're guaranteed to exit on repeated failures instead
+ * of spinning forever.
+ */
+ if (called_exit)
+ goto free_mem_map;
+
+ called_exit = true;
+ goto get_map;
+ }
/* Historic? */
boot_params->alt_mem_k = 32 * 1024;
@@ -1097,44 +1088,10 @@ again:
return EFI_SUCCESS;
free_mem_map:
- low_free(_size, (unsigned long)mem_map);
+ efi_free(sys_table, _size, (unsigned long)mem_map);
return status;
}
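Per the UEFI spec, ExitBootServices() fails if an event handler changed the memory map after GetMemoryMap(), invalidating the map key; the correct response is to refetch the map and retry, and the `called_exit` flag caps that at one retry so broken firmware cannot spin us forever. Skeleton of the protocol, with get_memory_map()/exit_boot_services() as assumed wrappers:

    bool called_exit = false;

    for (;;) {
    	status = get_memory_map(&map, &size, &key);	/* fresh key */
    	if (status != EFI_SUCCESS)
    		break;
    	status = exit_boot_services(handle, key);
    	if (status == EFI_SUCCESS || called_exit)
    		break;			/* done, or give up after one retry */
    	called_exit = true;
    }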
-static efi_status_t relocate_kernel(struct setup_header *hdr)
-{
- unsigned long start, nr_pages;
- efi_status_t status;
-
- /*
- * The EFI firmware loader could have placed the kernel image
- * anywhere in memory, but the kernel has various restrictions
- * on the max physical address it can run at. Attempt to move
- * the kernel to boot_params.pref_address, or as low as
- * possible.
- */
- start = hdr->pref_address;
- nr_pages = round_up(hdr->init_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
-
- status = efi_call_phys4(sys_table->boottime->allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &start);
- if (status != EFI_SUCCESS) {
- status = low_alloc(hdr->init_size, hdr->kernel_alignment,
- &start);
- if (status != EFI_SUCCESS)
- efi_printk("Failed to alloc mem for kernel\n");
- }
-
- if (status == EFI_SUCCESS)
- memcpy((void *)start, (void *)(unsigned long)hdr->code32_start,
- hdr->init_size);
-
- hdr->pref_address = hdr->code32_start;
- hdr->code32_start = (__u32)start;
-
- return status;
-}
/*
* On success we return a pointer to a boot_params structure, and NULL
@@ -1163,14 +1120,15 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
EFI_LOADER_DATA, sizeof(*gdt),
(void **)&gdt);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for gdt structure\n");
+ efi_printk(sys_table, "Failed to alloc mem for gdt structure\n");
goto fail;
}
gdt->size = 0x800;
- status = low_alloc(gdt->size, 8, (unsigned long *)&gdt->address);
+ status = efi_low_alloc(sys_table, gdt->size, 8,
+ (unsigned long *)&gdt->address);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for gdt\n");
+ efi_printk(sys_table, "Failed to alloc mem for gdt\n");
goto fail;
}
@@ -1178,7 +1136,7 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
EFI_LOADER_DATA, sizeof(*idt),
(void **)&idt);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for idt structure\n");
+ efi_printk(sys_table, "Failed to alloc mem for idt structure\n");
goto fail;
}
@@ -1190,10 +1148,16 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
* address, relocate it.
*/
if (hdr->pref_address != hdr->code32_start) {
- status = relocate_kernel(hdr);
-
+ unsigned long bzimage_addr = hdr->code32_start;
+ status = efi_relocate_kernel(sys_table, &bzimage_addr,
+ hdr->init_size, hdr->init_size,
+ hdr->pref_address,
+ hdr->kernel_alignment);
if (status != EFI_SUCCESS)
goto fail;
+
+ hdr->pref_address = hdr->code32_start;
+ hdr->code32_start = bzimage_addr;
}
status = exit_boot(boot_params, handle);
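The exit_boot() change above retries ExitBootServices() exactly once, re-fetching the memory map first, because a firmware event handler may have invalidated the map key. A minimal userspace sketch of that bounded-retry shape; the names and the simulated firmware below are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static int map_version = 1;
static bool handler_pending = true;     /* one event handler will fire */

static void get_memory_map(int *key)
{
        *key = map_version;
}

static int exit_boot_services(int key)
{
        if (handler_pending) {          /* handler mutates the memory map */
                handler_pending = false;
                map_version++;
        }
        return key == map_version ? 0 : -1;
}

int main(void)
{
        bool called_exit = false;
        int key, status;

get_map:
        get_memory_map(&key);
        status = exit_boot_services(key);
        if (status != 0) {
                if (called_exit)
                        return 1;       /* bounded: give up after one retry */
                called_exit = true;
                goto get_map;
        }
        puts("boot services exited");
        return 0;
}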
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h
index e5b0a8f91c5f..cc78e2da7af9 100644
--- a/arch/x86/boot/compressed/eboot.h
+++ b/arch/x86/boot/compressed/eboot.h
@@ -10,8 +10,6 @@
#define SEG_GRANULARITY_4KB (1 << 0)
#define DESC_TYPE_CODE_DATA (1 << 0)
-
-#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
#define EFI_READ_CHUNK_SIZE (1024 * 1024)
#define EFI_CONSOLE_OUT_DEVICE_GUID \
@@ -40,6 +38,24 @@ struct efi_graphics_output_mode_info {
u32 pixels_per_scan_line;
} __packed;
+struct efi_graphics_output_protocol_mode_32 {
+ u32 max_mode;
+ u32 mode;
+ u32 info;
+ u32 size_of_info;
+ u64 frame_buffer_base;
+ u32 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_64 {
+ u32 max_mode;
+ u32 mode;
+ u64 info;
+ u64 size_of_info;
+ u64 frame_buffer_base;
+ u64 frame_buffer_size;
+} __packed;
+
struct efi_graphics_output_protocol_mode {
u32 max_mode;
u32 mode;
@@ -49,6 +65,20 @@ struct efi_graphics_output_protocol_mode {
unsigned long frame_buffer_size;
} __packed;
+struct efi_graphics_output_protocol_32 {
+ u32 query_mode;
+ u32 set_mode;
+ u32 blt;
+ u32 mode;
+};
+
+struct efi_graphics_output_protocol_64 {
+ u64 query_mode;
+ u64 set_mode;
+ u64 blt;
+ u64 mode;
+};
+
struct efi_graphics_output_protocol {
void *query_mode;
unsigned long set_mode;
@@ -56,16 +86,22 @@ struct efi_graphics_output_protocol {
struct efi_graphics_output_protocol_mode *mode;
};
+struct efi_uga_draw_protocol_32 {
+ u32 get_mode;
+ u32 set_mode;
+ u32 blt;
+};
+
+struct efi_uga_draw_protocol_64 {
+ u64 get_mode;
+ u64 set_mode;
+ u64 blt;
+};
+
struct efi_uga_draw_protocol {
void *get_mode;
void *set_mode;
void *blt;
};
-struct efi_simple_text_output_protocol {
- void *reset;
- void *output_string;
- void *test_string;
-};
-
#endif /* BOOT_COMPRESSED_EBOOT_H */
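The _32/_64 mirrors added above pin down field sizes that "void *" and "unsigned long" leave ABI-dependent, so the stub can parse firmware tables whose word size differs from the kernel's. A reduced, hypothetical sketch of picking a layout at runtime; gop_mode_32/64 are invented stand-ins, not the full protocol structs:

#include <stdint.h>
#include <stdio.h>

struct gop_mode_32 {
        uint32_t max_mode, mode;
        uint32_t info, size_of_info;    /* pointer-sized in 32-bit firmware */
};

struct gop_mode_64 {
        uint32_t max_mode, mode;
        uint64_t info, size_of_info;    /* pointer-sized in 64-bit firmware */
};

static uint64_t mode_info_addr(const void *raw, int is64)
{
        if (is64)
                return ((const struct gop_mode_64 *)raw)->info;
        return ((const struct gop_mode_32 *)raw)->info;
}

int main(void)
{
        struct gop_mode_32 m32 = { 1, 0, 0x1000, 48 };

        printf("info table @ 0x%llx\n",
               (unsigned long long)mode_info_addr(&m32, 0));
        return 0;
}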
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 1e3184f6072f..abb988a54c69 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -50,6 +50,13 @@ ENTRY(efi_pe_entry)
pushl %eax
pushl %esi
pushl %ecx
+
+ call reloc
+reloc:
+ popl %ecx
+ subl reloc, %ecx
+ movl %ecx, BP_code32_start(%eax)
+
sub $0x4, %esp
ENTRY(efi_stub_entry)
@@ -63,12 +70,7 @@ ENTRY(efi_stub_entry)
hlt
jmp 1b
2:
- call 3f
-3:
- popl %eax
- subl $3b, %eax
- subl BP_pref_address(%esi), %eax
- add BP_code32_start(%esi), %eax
+ movl BP_code32_start(%esi), %eax
leal preferred_addr(%eax), %eax
jmp *%eax
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 16f24e6dad79..92059b8f3f7b 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -217,6 +217,8 @@ ENTRY(efi_pe_entry)
cmpq $0,%rax
je 1f
mov %rax, %rdx
+ leaq startup_32(%rip), %rax
+ movl %eax, BP_code32_start(%rdx)
popq %rsi
popq %rdi
@@ -230,12 +232,7 @@ ENTRY(efi_stub_entry)
hlt
jmp 1b
2:
- call 3f
-3:
- popq %rax
- subq $3b, %rax
- subq BP_pref_address(%rsi), %rax
- add BP_code32_start(%esi), %eax
+ movl BP_code32_start(%esi), %eax
leaq preferred_addr(%rax), %rax
jmp *%rax
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 9ec06a1f6d61..425712462178 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -91,10 +91,9 @@ bs_die:
.section ".bsdata", "a"
bugger_off_msg:
- .ascii "Direct floppy boot is not supported. "
- .ascii "Use a boot loader program instead.\r\n"
+ .ascii "Use a boot loader.\r\n"
.ascii "\n"
- .ascii "Remove disk and press any key to reboot ...\r\n"
+ .ascii "Remove disk and press any key to reboot...\r\n"
.byte 0
#ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
#else
.word 0x8664 # x86-64
#endif
- .word 3 # nr_sections
+ .word 4 # nr_sections
.long 0 # TimeDateStamp
.long 0 # PointerToSymbolTable
.long 1 # NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
.word 0 # NumberOfLineNumbers
.long 0x60500020 # Characteristics (section flags)
+ #
+ # The offset & size fields are filled in by build.c.
+ #
+ .ascii ".bss"
+ .byte 0
+ .byte 0
+ .byte 0
+ .byte 0
+ .long 0
+ .long 0x0
+ .long 0 # Size of initialized data
+ # on disk
+ .long 0x0
+ .long 0 # PointerToRelocations
+ .long 0 # PointerToLineNumbers
+ .word 0 # NumberOfRelocations
+ .word 0 # NumberOfLineNumbers
+ .long 0xc8000080 # Characteristics (section flags)
+
#endif /* CONFIG_EFI_STUB */
# Kernel attributes; used by setup. This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 94c544650020..971a0ce062aa 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -141,7 +141,7 @@ static void usage(void)
#ifdef CONFIG_EFI_STUB
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
{
unsigned int pe_header;
unsigned short num_sections;
@@ -162,10 +162,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
put_unaligned_le32(size, section + 0x8);
/* section header vma field */
- put_unaligned_le32(offset, section + 0xc);
+ put_unaligned_le32(vma, section + 0xc);
/* section header 'size of initialised data' field */
- put_unaligned_le32(size, section + 0x10);
+ put_unaligned_le32(datasz, section + 0x10);
/* section header 'file offset' field */
put_unaligned_le32(offset, section + 0x14);
@@ -177,6 +177,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
}
}
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+ update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+}
+
static void update_pecoff_setup_and_reloc(unsigned int size)
{
u32 setup_offset = 0x200;
@@ -201,9 +206,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
pe_header = get_unaligned_le32(&buf[0x3c]);
- /* Size of image */
- put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
/*
* Size of code: Subtract the size of the first sector (512 bytes)
* which includes the header.
@@ -218,6 +220,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
update_pecoff_section_header(".text", text_start, text_sz);
}
+static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
+{
+ unsigned int pe_header;
+ unsigned int bss_sz = init_sz - file_sz;
+
+ pe_header = get_unaligned_le32(&buf[0x3c]);
+
+ /* Size of uninitialized data */
+ put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
+
+ /* Size of image */
+ put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+
+ update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
+}
+
#endif /* CONFIG_EFI_STUB */
@@ -268,6 +286,9 @@ int main(int argc, char ** argv)
int fd;
void *kernel;
u32 crc = 0xffffffffUL;
+#ifdef CONFIG_EFI_STUB
+ unsigned int init_sz;
+#endif
/* Defaults for old kernel */
#ifdef CONFIG_X86_32
@@ -338,7 +359,9 @@ int main(int argc, char ** argv)
put_unaligned_le32(sys_size, &buf[0x1f4]);
#ifdef CONFIG_EFI_STUB
- update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+ update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
+ init_sz = get_unaligned_le32(&buf[0x260]);
+ update_pecoff_bss(i + (sys_size * 16), init_sz);
#ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
efi_stub_entry -= 0x200;
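update_pecoff_bss() above describes the gap between the on-disk payload and hdr->init_size as uninitialized data, so a PE loader reserves the full runtime footprint. A standalone sketch of the same accounting, with invented sizes and put_le32() standing in for put_unaligned_le32():

#include <stdint.h>
#include <stdio.h>

static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

int main(void)
{
        uint8_t hdr[0x60] = { 0 };
        uint32_t file_sz = 0x500000;    /* bytes present in the file */
        uint32_t init_sz = 0x900000;    /* hdr->init_size: runtime need */
        uint32_t bss_sz  = init_sz - file_sz;

        put_le32(&hdr[0x24], bss_sz);   /* SizeOfUninitializedData */
        put_le32(&hdr[0x50], init_sz);  /* SizeOfImage */

        printf("bss=0x%x image=0x%x\n", bss_sz, init_sz);
        return 0;
}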
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index 586f41aac361..185fad49d86f 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -24,10 +24,6 @@
.align 16
.Lbswap_mask:
.octa 0x000102030405060708090a0b0c0d0e0f
-.Lpoly:
- .octa 0xc2000000000000000000000000000001
-.Ltwo_one:
- .octa 0x00000001000000000000000000000001
#define DATA %xmm0
#define SHASH %xmm1
@@ -134,28 +130,3 @@ ENTRY(clmul_ghash_update)
.Lupdate_just_ret:
ret
ENDPROC(clmul_ghash_update)
-
-/*
- * void clmul_ghash_setkey(be128 *shash, const u8 *key);
- *
- * Calculate hash_key << 1 mod poly
- */
-ENTRY(clmul_ghash_setkey)
- movaps .Lbswap_mask, BSWAP
- movups (%rsi), %xmm0
- PSHUFB_XMM BSWAP %xmm0
- movaps %xmm0, %xmm1
- psllq $1, %xmm0
- psrlq $63, %xmm1
- movaps %xmm1, %xmm2
- pslldq $8, %xmm1
- psrldq $8, %xmm2
- por %xmm1, %xmm0
- # reduction
- pshufd $0b00100100, %xmm2, %xmm1
- pcmpeqd .Ltwo_one, %xmm1
- pand .Lpoly, %xmm1
- pxor %xmm1, %xmm0
- movups %xmm0, (%rdi)
- ret
-ENDPROC(clmul_ghash_setkey)
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 6759dd1135be..d785cf2c529c 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -30,8 +30,6 @@ void clmul_ghash_mul(char *dst, const be128 *shash);
void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
const be128 *shash);
-void clmul_ghash_setkey(be128 *shash, const u8 *key);
-
struct ghash_async_ctx {
struct cryptd_ahash *cryptd_tfm;
};
@@ -58,13 +56,23 @@ static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+ be128 *x = (be128 *)key;
+ u64 a, b;
if (keylen != GHASH_BLOCK_SIZE) {
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- clmul_ghash_setkey(&ctx->shash, key);
+ /* perform multiplication by 'x' in GF(2^128) */
+ a = be64_to_cpu(x->a);
+ b = be64_to_cpu(x->b);
+
+ ctx->shash.a = (__be64)((b << 1) | (a >> 63));
+ ctx->shash.b = (__be64)((a << 1) | (b >> 63));
+
+ if (a >> 63)
+ ctx->shash.b ^= cpu_to_be64(0xc2);
return 0;
}
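The new C setkey above replaces the removed assembly: doubling the key ("multiplying by x") in GF(2^128) held as two 64-bit halves, and folding in the 0xc2 reduction term when the top bit shifts out. A standalone sketch of just that arithmetic; it skips the __be64 conversions the kernel performs around it:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t a = 0x8000000000000001ULL;     /* example key halves */
        uint64_t b = 0x0000000000000002ULL;
        uint64_t ra, rb;

        ra = (b << 1) | (a >> 63);              /* halves swap, cross-carry */
        rb = (a << 1) | (b >> 63);

        if (a >> 63)                            /* a bit shifted out: reduce */
                rb ^= 0xc2;

        printf("%016llx %016llx\n",
               (unsigned long long)ra, (unsigned long long)rb);
        return 0;
}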
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 6cbd8df348d2..9f5e71f06671 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
/* save number of bits */
bits[1] = cpu_to_be64(sctx->count[0] << 3);
- bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61;
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
/* Pad out to 112 mod 128 and append length */
index = sctx->count[0] & 0x7f;
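The sha512 one-liner above moves the OR inside cpu_to_be64(); applied after the swap, the low bits land in native byte order inside a big-endian word and corrupt the length block once count[0] reaches 2^61 bits. A sketch showing the divergence on a little-endian host, with __builtin_bswap64() standing in for cpu_to_be64():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t hi = 1, lo = 1ULL << 62;       /* count[1], count[0] */

        uint64_t wrong = __builtin_bswap64(hi << 3) | (lo >> 61);
        uint64_t right = __builtin_bswap64((hi << 3) | (lo >> 61));

        printf("wrong=%016llx right=%016llx\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}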
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index cf1a471a18a2..10adb41f162e 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -459,7 +459,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
+ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 653668d140f9..4a8cb8d7cbd5 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
- memset(&boot_params->olpc_ofw_header, 0,
+ memset(&boot_params->ext_ramdisk_image, 0,
(char *)&boot_params->efi_info -
- (char *)&boot_params->olpc_ofw_header);
+ (char *)&boot_params->ext_ramdisk_image);
memset(&boot_params->kbd_status, 0,
(char *)&boot_params->hdr -
(char *)&boot_params->kbd_status);
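sanitize_boot_params() clears byte ranges bounded by member addresses, so the start member matters: beginning the memset at ext_ramdisk_image instead of olpc_ofw_header preserves the fields in between. A sketch of the member-to-member memset idiom with an invented struct:

#include <stdio.h>
#include <string.h>

struct params {
        int keep_a;
        int scrub_b;    /* first field to clear */
        int scrub_c;
        int keep_d;     /* first field to preserve again */
};

int main(void)
{
        struct params p = { 1, 2, 3, 4 };

        /* length is the distance between the two members */
        memset(&p.scrub_b, 0,
               (char *)&p.keep_d - (char *)&p.scrub_b);

        printf("%d %d %d %d\n", p.keep_a, p.scrub_b, p.scrub_c, p.keep_d);
        return 0;
}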
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 46fc474fd819..f50de6951738 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -49,9 +49,15 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
int len, __wsum sum,
int *err_ptr)
{
+ __wsum ret;
+
might_sleep();
- return csum_partial_copy_generic((__force void *)src, dst,
- len, sum, err_ptr, NULL);
+ stac();
+ ret = csum_partial_copy_generic((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ clac();
+
+ return ret;
}
/*
@@ -176,10 +182,16 @@ static inline __wsum csum_and_copy_to_user(const void *src,
int len, __wsum sum,
int *err_ptr)
{
+ __wsum ret;
+
might_sleep();
- if (access_ok(VERIFY_WRITE, dst, len))
- return csum_partial_copy_generic(src, (__force void *)dst,
- len, sum, NULL, err_ptr);
+ if (access_ok(VERIFY_WRITE, dst, len)) {
+ stac();
+ ret = csum_partial_copy_generic(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+ clac();
+ return ret;
+ }
if (len)
*err_ptr = -EFAULT;
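The checksum helpers above gain stac()/clac() brackets: with SMAP enabled, user pages are untouchable unless EFLAGS.AC is set, and the window must close on every return path. A userspace stand-in for the shape of that bracketing; the stubs below only print:

#include <stdio.h>

static void stac(void) { puts("AC=1: user access allowed"); }
static void clac(void) { puts("AC=0: user access blocked"); }

static int do_copy(void) { puts("  copying..."); return 0; }

static int checked_copy(void)
{
        int ret;

        stac();                 /* open the access window */
        ret = do_copy();        /* only code allowed to touch user memory */
        clac();                 /* close it before returning */

        return ret;
}

int main(void) { return checked_copy(); }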
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index e99ac27f95b2..4af181dacf9e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -365,7 +365,7 @@ extern const char * const x86_power_flags[32];
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
- asm goto("1: jmp %l[t_no]\n"
+ asm_volatile_goto("1: jmp %l[t_no]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
index c09241659971..b4b38bacb404 100644
--- a/arch/x86/include/asm/dma-contiguous.h
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -4,7 +4,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
static inline void
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index cccd07fa5e3a..779c2efe2e97 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -29,7 +29,7 @@ extern void e820_setup_gap(void);
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
unsigned long start_addr, unsigned long long end_addr);
struct setup_data;
-extern void parse_e820_ext(struct setup_data *data);
+extern void parse_e820_ext(u64 phys_addr, u32 data_len);
#if defined(CONFIG_X86_64) || \
(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 60c89f30c727..7a76dc284166 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -94,7 +94,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
#endif /* CONFIG_X86_32 */
extern int add_efi_memmap;
-extern unsigned long x86_efi_facility;
+extern struct efi_scratch efi_scratch;
extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int efi_memblock_x86_reserve_range(void);
extern void efi_call_phys_prelog(void);
diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
new file mode 100644
index 000000000000..99efebb2f69d
--- /dev/null
+++ b/arch/x86/include/asm/espfix.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_X86_ESPFIX_H
+#define _ASM_X86_ESPFIX_H
+
+#ifdef CONFIG_X86_64
+
+#include <asm/percpu.h>
+
+DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
+DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
+
+extern void init_espfix_bsp(void);
+extern void init_espfix_ap(void);
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* _ASM_X86_ESPFIX_H */
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e25cc33ec54d..e72b2e41499e 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -295,12 +295,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. "m" is a random variable that should be in L1 */
- alternative_input(
- ASM_NOP8 ASM_NOP2,
- "emms\n\t" /* clear stack tags */
- "fildl %P[addr]", /* set F?P to defined value */
- X86_FEATURE_FXSAVE_LEAK,
- [addr] "m" (tsk->thread.fpu.has_fpu));
+ if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
+ asm volatile(
+ "fnclex\n\t"
+ "emms\n\t"
+ "fildl %P[addr]" /* set F?P to defined value */
+ : : [addr] "m" (tsk->thread.fpu.has_fpu));
+ }
return fpu_restore_checking(&tsk->thread.fpu);
}
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index a8091216963b..68c05398bba9 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+ ptep_clear_flush(vma, addr, ptep);
}
static inline int huge_pte_none(pte_t pte)
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index bba3cf88e624..0a8b519226b8 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
-#define INTERRUPT_RETURN iretq
+#define INTERRUPT_RETURN jmp native_iret
#define USERGS_SYSRET64 \
swapgs; \
sysretq;
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 3a16c1483b45..029766958e69 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -13,7 +13,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:"
+ asm_volatile_goto("1:"
STATIC_KEY_INITIAL_NOP
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3741c653767c..f7f20f7fac3c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -445,7 +445,7 @@ struct kvm_vcpu_arch {
bool nmi_injected; /* Trying to inject an NMI this entry */
struct mtrr_state_type mtrr_state;
- u32 pat;
+ u64 pat;
int switch_db_regs;
unsigned long db[KVM_NR_DB_REGS];
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index fa5f71e021d5..e6833c655e59 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -32,11 +32,20 @@
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
-#define MCACOD 0xffff /* MCA Error Code */
+
+/*
+ * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
+ * bits 15:0. But bit 12 is the 'F' bit, defined for corrected
+ * errors to indicate that errors are being filtered by hardware.
+ * We should mask out bit 12 when looking for specific signatures
+ * of uncorrected errors - so the F bit is deliberately skipped
+ * in this #define.
+ */
+#define MCACOD 0xefff /* MCA Error Code */
/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK 0xfff0
+#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
#define MCACOD_DATA 0x0134 /* Data Load */
#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
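With bit 12 removed from MCACOD, a signature compare matches whether or not the hardware set the 'F' filtering bit, while the old 0xffff mask missed filtered errors. A worked check using the values from the #defines above:

#include <stdint.h>
#include <stdio.h>

#define MCACOD          0xefff          /* skips bit 12 */
#define MCACOD_DATA     0x0134          /* data load error code */

int main(void)
{
        uint64_t plain    = 0x0134;                     /* F bit clear */
        uint64_t filtered = 0x0134 | (1 << 12);         /* F bit set */

        printf("new mask, plain:    %s\n",
               (plain & MCACOD) == MCACOD_DATA ? "match" : "miss");
        printf("new mask, filtered: %s\n",
               (filtered & MCACOD) == MCACOD_DATA ? "match" : "miss");
        printf("old mask, filtered: %s\n",
               (filtered & 0xffff) == MCACOD_DATA ? "match" : "miss");
        return 0;
}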
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index cdbf36776106..be12c534fd59 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -45,22 +45,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* Re-load page tables */
load_cr3(next->pgd);
- /* stop flush ipis for the previous mm */
+ /* Stop flush ipis for the previous mm */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
- /*
- * load the LDT, if the LDT is different:
- */
+ /* Load the LDT, if the LDT is different: */
if (unlikely(prev->context.ldt != next->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
- else {
+ else {
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
- if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
- /* We were in lazy tlb mode and leave_mm disabled
+ if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+ /*
+ * On established mms, the mm_cpumask is only changed
+ * from irq context, from ptep_clear_flush() while in
+ * lazy tlb mode, and here. Irqs are blocked during
+ * schedule, protecting us from simultaneous changes.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ /*
+ * We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1e672234c4ff..5460bf923e16 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -415,9 +415,16 @@ static inline int pte_present(pte_t a)
}
#define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
- return pte_flags(a) & _PAGE_PRESENT;
+ if (pte_flags(a) & _PAGE_PRESENT)
+ return true;
+
+ if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+ mm_tlb_flush_pending(mm))
+ return true;
+
+ return false;
}
static inline int pte_hidden(pte_t pte)
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 2d883440cb9a..b1609f2c524c 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -61,6 +61,8 @@ typedef struct { pteval_t pte; } pte_t;
#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
+#define ESPFIX_PGD_ENTRY _AC(-2, UL)
+#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
#define EARLY_DYNAMIC_PAGE_TABLES 64
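For reference, the arithmetic behind ESPFIX_BASE_ADDR, assuming the usual 4-level PGDIR_SHIFT of 39 and an LP64 host: PGD slot -2 puts the area at 0xffffff0000000000, one slot below the top of the address space.

#include <stdio.h>

int main(void)
{
        unsigned long pgdir_shift = 39;
        unsigned long base = (unsigned long)-2 << pgdir_shift;

        printf("ESPFIX_BASE_ADDR = 0x%lx\n", base);
        return 0;
}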
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 942a08623a1a..68e9f007cd4a 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -232,6 +232,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
#define ARCH_HAS_USER_SINGLE_STEP_INFO
+/*
+ * When hitting ptrace_stop(), we cannot return using SYSRET because
+ * that does not restore the full CPU state, only a minimal set. The
+ * ptracer can change arbitrary register values, which is usually okay
+ * because the usual ptrace stops run off the signal delivery path which
+ * forces IRET; however, ptrace_event() stops happen in arbitrary places
+ * in the kernel and don't force IRET path.
+ *
+ * So force IRET path after a ptrace stop.
+ */
+#define arch_ptrace_stop_needed(code, info) \
+({ \
+ set_thread_flag(TIF_NOTIFY_RESUME); \
+ false; \
+})
+
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
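arch_ptrace_stop_needed() above is a GCC statement expression: it performs a side effect (flagging the task so the exit path takes IRET) yet evaluates to false, so the generic code never treats it as a real stop condition. A userspace model of that shape, with invented names:

#include <stdbool.h>
#include <stdio.h>

static int notify_resume_flag;

#define arch_ptrace_stop_needed_sim()                   \
({                                                      \
        notify_resume_flag = 1; /* the side effect */   \
        false;                  /* the result */        \
})

int main(void)
{
        if (arch_ptrace_stop_needed_sim())
                puts("unreachable: never requests a special stop");

        printf("resume flag set: %d\n", notify_resume_flag);
        return 0;
}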
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index b7bf3505e1ec..2e327f114a1b 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -62,6 +62,8 @@ static inline void x86_ce4100_early_setup(void) { }
#ifndef _SETUP
+#include <asm/espfix.h>
+
/*
* This is set up by the setup-routine at boot-time
*/
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab5..e3ddd7db723f 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
#endif /* _ASM_X86_SPINLOCK_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 095b21507b6a..60bd2748a7c9 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { }
extern const struct cpumask *cpu_coregroup_mask(int cpu);
-#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+
+#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 6aef9fbc09b7..b913915e8e63 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
}
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
unsigned long pfn;
- int ret = 0;
+ int ret;
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
- if (unlikely(mfn >= machine_to_phys_nr)) {
- pfn = ~0;
- goto try_override;
- }
- pfn = 0;
+ if (unlikely(mfn >= machine_to_phys_nr))
+ return ~0;
+
/*
* The array access can fail (e.g., device space beyond end of RAM).
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
- /* ret might be < 0 if there are no entries in the m2p for mfn */
if (ret < 0)
- pfn = ~0;
- else if (get_phys_to_machine(pfn) != mfn)
+ return ~0;
+
+ return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+ unsigned long pfn;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (get_phys_to_machine(pfn) != mfn) {
/*
* If this appears to be a foreign mfn (because the pfn
* doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
* m2p_find_override_pfn returns ~0 if it doesn't find anything.
*/
pfn = m2p_find_override_pfn(mfn, ~0);
+ }
/*
* pfn is ~0 if there are no entries in the m2p for mfn or if the
diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h
index 7ea79c5fa1f2..492b29802f57 100644
--- a/arch/x86/include/asm/xor_avx.h
+++ b/arch/x86/include/asm/xor_avx.h
@@ -167,12 +167,12 @@ static struct xor_block_template xor_block_avx = {
#define AVX_XOR_SPEED \
do { \
- if (cpu_has_avx) \
+ if (cpu_has_avx && cpu_has_osxsave) \
xor_speed(&xor_block_avx); \
} while (0)
#define AVX_SELECT(FASTEST) \
- (cpu_has_avx ? &xor_block_avx : FASTEST)
+ (cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
#else
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 2af848dfa754..d3fd447ecbee 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -179,6 +179,7 @@
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
+#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 7bd3bd310106..111eb356dbea 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-y += syscall_$(BITS).o
obj-$(CONFIG_X86_64) += vsyscall_64.o
obj-$(CONFIG_X86_64) += vsyscall_emu_64.o
+obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index b44577bc9744..ec94e11807dc 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -48,9 +48,20 @@ int acpi_suspend_lowlevel(void)
#ifndef CONFIG_64BIT
native_store_gdt((struct desc_ptr *)&header->pmode_gdt);
+ /*
+ * We have to check that we can write back the value, and not
+ * just read it. At least on 90 nm Pentium M (Family 6, Model
+ * 13), reading an invalid MSR is not guaranteed to trap, see
+ * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
+ * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
+ * nm process with 512-KB L2 Cache Specification Update".
+ */
if (!rdmsr_safe(MSR_EFER,
&header->pmode_efer_low,
- &header->pmode_efer_high))
+ &header->pmode_efer_high) &&
+ !wrmsr_safe(MSR_EFER,
+ header->pmode_efer_low,
+ header->pmode_efer_high))
header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */
@@ -61,7 +72,10 @@ int acpi_suspend_lowlevel(void)
}
if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
&header->pmode_misc_en_low,
- &header->pmode_misc_en_high))
+ &header->pmode_misc_en_high) &&
+ !wrmsr_safe(MSR_IA32_MISC_ENABLE,
+ header->pmode_misc_en_low,
+ header->pmode_misc_en_high))
header->pmode_behavior |=
(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
header->realmode_flags = acpi_realmode_flags;
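The sleep-path change above only trusts an MSR for restore-on-wake if it survives a full read-then-write round trip, since on the affected parts a read can succeed where a write faults. A sketch of the round-trip test with simulated rdmsr_safe()/wrmsr_safe():

#include <stdbool.h>
#include <stdio.h>

static int rdmsr_safe_sim(unsigned msr, unsigned *lo, unsigned *hi)
{
        *lo = 0x1234; *hi = 0;          /* pretend the read succeeded */
        return 0;
}

static int wrmsr_safe_sim(unsigned msr, unsigned lo, unsigned hi)
{
        return -1;                      /* the write faults on this part */
}

int main(void)
{
        unsigned lo, hi;
        bool restore = false;

        if (!rdmsr_safe_sim(0xc0000080, &lo, &hi) &&
            !wrmsr_safe_sim(0xc0000080, lo, hi))
                restore = true;

        printf("restore EFER on wake: %s\n", restore ? "yes" : "no");
        return 0;
}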
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 3048ded1b598..59554dca96ec 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{}
};
@@ -27,6 +28,7 @@ EXPORT_SYMBOL(amd_nb_misc_ids);
static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{}
};
@@ -81,13 +83,20 @@ int amd_cache_northbridges(void)
next_northbridge(misc, amd_nb_misc_ids);
node_to_amd_nb(i)->link = link =
next_northbridge(link, amd_nb_link_ids);
- }
+ }
+ /* GART present only on Fam15h up to model 0fh */
if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
- boot_cpu_data.x86 == 0x15)
+ (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
amd_northbridges.flags |= AMD_NB_GART;
/*
+ * Check for L3 cache presence.
+ */
+ if (!cpuid_edx(0x80000006))
+ return 0;
+
+ /*
* Some CPU families support L3 Cache Index Disable. There are some
* limitations because of E382 and E388 on family 0x10.
*/
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 794f6eb54cd3..b32dbb411a9a 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -98,7 +98,7 @@ static int __init early_get_pnodeid(void)
break;
case UV3_HUB_PART_NUMBER:
case UV3_HUB_PART_NUMBER_X:
- uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+ uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
break;
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5013a48d1aff..ae177a014180 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -508,6 +508,16 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
}
#endif
+
+ /* F16h erratum 793, CVE-2013-6885 */
+ if (c->x86 == 0x16 && c->x86_model <= 0xf) {
+ u64 val;
+
+ rdmsrl(MSR_AMD64_LS_CFG, val);
+ if (!(val & BIT(15)))
+ wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15));
+ }
+
}
static const int amd_erratum_383[];
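The erratum workaround above is a plain read-modify-write that skips the MSR write when firmware already set the bit. The same shape, with a variable standing in for MSR_AMD64_LS_CFG:

#include <stdint.h>
#include <stdio.h>

#define BIT15 (1ULL << 15)

static uint64_t fake_msr = 0;           /* MSR_AMD64_LS_CFG stand-in */

int main(void)
{
        uint64_t val = fake_msr;        /* rdmsrl */

        if (!(val & BIT15)) {
                fake_msr = val | BIT15; /* wrmsrl, only when needed */
                puts("erratum workaround applied");
        } else {
                puts("already set by firmware");
        }
        return 0;
}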
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 22018f70a671..deeb48d9459b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
raw_local_save_flags(eflags);
BUG_ON(eflags & X86_EFLAGS_AC);
- if (cpu_has(c, X86_FEATURE_SMAP))
+ if (cpu_has(c, X86_FEATURE_SMAP)) {
+#ifdef CONFIG_X86_SMAP
set_in_cr4(X86_CR4_SMAP);
+#else
+ clear_in_cr4(X86_CR4_SMAP);
+#endif
+ }
}
/*
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 9b0c441c03f5..f187806dfc18 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -387,7 +387,8 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_PEBS);
}
- if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+ if (c->x86 == 6 && cpu_has_clflush &&
+ (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
#ifdef CONFIG_X86_64
@@ -627,7 +628,7 @@ static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
tlb_flushall_shift = 5;
break;
case 0x63a: /* Ivybridge */
- tlb_flushall_shift = 1;
+ tlb_flushall_shift = 2;
break;
default:
tlb_flushall_shift = 6;
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index fa72a39e5d46..3982357de5b0 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -510,8 +510,9 @@ generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
unsigned long *size, mtrr_type *type)
{
- unsigned int mask_lo, mask_hi, base_lo, base_hi;
- unsigned int tmp, hi;
+ u32 mask_lo, mask_hi, base_lo, base_hi;
+ unsigned int hi;
+ u64 tmp, mask;
/*
* get_mtrr doesn't need to update mtrr_state, also it could be called
@@ -532,18 +533,18 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
/* Work out the shifted address mask: */
- tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
- mask_lo = size_or_mask | tmp;
+ tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
+ mask = size_or_mask | tmp;
/* Expand tmp with high bits to all 1s: */
- hi = fls(tmp);
+ hi = fls64(tmp);
if (hi > 0) {
- tmp |= ~((1<<(hi - 1)) - 1);
+ tmp |= ~((1ULL<<(hi - 1)) - 1);
- if (tmp != mask_lo) {
+ if (tmp != mask) {
printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- mask_lo = tmp;
+ mask = tmp;
}
}
@@ -551,8 +552,8 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
* This works correctly if size is a power of two, i.e. a
* contiguous range:
*/
- *size = -mask_lo;
- *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
+ *size = -mask;
+ *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
*type = base_lo & 0xff;
out_put_cpu:
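generic_get_mtrr() above widens the shifted mask to 64 bits: with more than 44 physical address bits, the page-shifted mask overflows u32, and fls() on the truncated value extends the mask from the wrong bit. A worked example of the fls64()-based extension, with invented values:

#include <stdint.h>
#include <stdio.h>

static int fls64_sim(uint64_t x)        /* highest set bit, 1-based */
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
        /* page-shifted mask for a 46-bit physical address space */
        uint64_t tmp = 0x3ffffff00000ULL >> 12;
        int hi = fls64_sim(tmp);        /* 34; a u32 tmp would give 32 */

        tmp |= ~((1ULL << (hi - 1)) - 1);       /* fill high bits with 1s */
        printf("extended mask = 0x%llx\n", (unsigned long long)tmp);
        return 0;
}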
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 726bf963c227..ca22b73aaa25 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -305,7 +305,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,
return -EINVAL;
}
- if (base & size_or_mask || size & size_or_mask) {
+ if ((base | (base + size - 1)) >>
+ (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
pr_warning("mtrr: base or size exceeds the MTRR width\n");
return -EINVAL;
}
@@ -583,6 +584,7 @@ static struct syscore_ops mtrr_syscore_ops = {
int __initdata changed_by_mtrr_cleanup;
+#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
/**
* mtrr_bp_init - initialize mtrrs on the boot CPU
*
@@ -600,7 +602,7 @@ void __init mtrr_bp_init(void)
if (cpu_has_mtrr) {
mtrr_if = &generic_mtrr_ops;
- size_or_mask = 0xff000000; /* 36 bits */
+ size_or_mask = SIZE_OR_MASK_BITS(36);
size_and_mask = 0x00f00000;
phys_addr = 36;
@@ -619,7 +621,7 @@ void __init mtrr_bp_init(void)
boot_cpu_data.x86_mask == 0x4))
phys_addr = 36;
- size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
+ size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
size_and_mask = ~size_or_mask & 0xfffff00000ULL;
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
boot_cpu_data.x86 == 6) {
@@ -627,7 +629,7 @@ void __init mtrr_bp_init(void)
* VIA C* family have Intel style MTRRs,
* but don't support PAE
*/
- size_or_mask = 0xfff00000; /* 32 bits */
+ size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
phys_addr = 32;
}
@@ -637,21 +639,21 @@ void __init mtrr_bp_init(void)
if (cpu_has_k6_mtrr) {
/* Pre-Athlon (K6) AMD CPU MTRRs */
mtrr_if = mtrr_ops[X86_VENDOR_AMD];
- size_or_mask = 0xfff00000; /* 32 bits */
+ size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CENTAUR:
if (cpu_has_centaur_mcr) {
mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
- size_or_mask = 0xfff00000; /* 32 bits */
+ size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CYRIX:
if (cpu_has_cyrix_arr) {
mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
- size_or_mask = 0xfff00000; /* 32 bits */
+ size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
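What SIZE_OR_MASK_BITS() yields for the widths used above, assuming PAGE_SHIFT is 12 — the point of the macro is that the old 32-bit constants (0xff000000, 0xfff00000) lost their upper bits once size_or_mask became a 64-bit quantity:

#include <stdio.h>

#define PAGE_SHIFT 12
#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))

int main(void)
{
        printf("36 bits: 0x%016llx\n", SIZE_OR_MASK_BITS(36));
        printf("32 bits: 0x%016llx\n", SIZE_OR_MASK_BITS(32));
        return 0;
}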
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1025f3c99d20..123d9e2271dc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1165,6 +1165,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event_list[i]) {
+ if (i >= cpuc->n_events - cpuc->n_added)
+ --cpuc->n_added;
+
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, event);
@@ -1249,10 +1252,20 @@ void perf_events_lapic_init(void)
static int __kprobes
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
+ int ret;
+ u64 start_clock;
+ u64 finish_clock;
+
if (!atomic_read(&active_events))
return NMI_DONE;
- return x86_pmu.handle_irq(regs);
+ start_clock = local_clock();
+ ret = x86_pmu.handle_irq(regs);
+ finish_clock = local_clock();
+
+ perf_sample_event_took(finish_clock - start_clock);
+
+ return ret;
}
struct event_constraint emptyconstraint;
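The NMI-handler change above samples a clock around x86_pmu.handle_irq() and reports the delta, which the perf core can use to throttle the sampling rate. A userspace sketch of the bracketing, with clock_gettime() standing in for local_clock() and a stub for perf_sample_event_took():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t clock_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void perf_sample_event_took(uint64_t ns)
{
        printf("handler took %llu ns\n", (unsigned long long)ns);
}

static int handle_irq(void) { return 1; }       /* pretend work */

int main(void)
{
        uint64_t start = clock_ns();
        int ret = handle_irq();
        perf_sample_event_took(clock_ns() - start);
        return ret ? 0 : 1;
}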
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 5f0581e713c2..b46601ada813 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
+#include <linux/syscore_ops.h>
#include <asm/apic.h>
@@ -816,6 +817,18 @@ out:
return ret;
}
+static void ibs_eilvt_setup(void)
+{
+ /*
+ * Force LVT offset assignment for family 10h: The offsets are
+ * not assigned by the BIOS for this family, so the OS is
+ * responsible for doing it. If the OS assignment fails, fall
+ * back to BIOS settings and try to setup this.
+ */
+ if (boot_cpu_data.x86 == 0x10)
+ force_ibs_eilvt_setup();
+}
+
static inline int get_ibs_lvt_offset(void)
{
u64 val;
@@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
+#ifdef CONFIG_PM
+
+static int perf_ibs_suspend(void)
+{
+ clear_APIC_ibs(NULL);
+ return 0;
+}
+
+static void perf_ibs_resume(void)
+{
+ ibs_eilvt_setup();
+ setup_APIC_ibs(NULL);
+}
+
+static struct syscore_ops perf_ibs_syscore_ops = {
+ .resume = perf_ibs_resume,
+ .suspend = perf_ibs_suspend,
+};
+
+static void perf_ibs_pm_init(void)
+{
+ register_syscore_ops(&perf_ibs_syscore_ops);
+}
+
+#else
+
+static inline void perf_ibs_pm_init(void) { }
+
+#endif
+
static int __cpuinit
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
@@ -877,18 +920,12 @@ static __init int amd_ibs_init(void)
if (!caps)
return -ENODEV; /* ibs not supported by the cpu */
- /*
- * Force LVT offset assignment for family 10h: The offsets are
- * not assigned by the BIOS for this family, so the OS is
- * responsible for doing it. If the OS assignment fails, fall
- * back to BIOS settings and try to setup this.
- */
- if (boot_cpu_data.x86 == 0x10)
- force_ibs_eilvt_setup();
+ ibs_eilvt_setup();
if (!ibs_eilvt_valid())
goto out;
+ perf_ibs_pm_init();
get_online_cpus();
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a9e22073bd56..b45ac6affa9c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1199,6 +1199,15 @@ again:
intel_pmu_lbr_read();
/*
+ * CondChgd bit 63 doesn't mean any overflow status. Ignore
+ * and clear the bit.
+ */
+ if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+ if (!status)
+ goto done;
+ }
+
+ /*
* PEBS overflow sets bit 62 in the global status register
*/
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 52441a2af538..8aac56bda7dc 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
- INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
- INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
+ INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
+ INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
{ /* end: all zeroes */ },
};
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index b1581527a236..2fbad6b9f23c 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -52,8 +52,7 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d32abeabbda5..174da5fc5a7b 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -658,15 +658,18 @@ __init void e820_setup_gap(void)
* boot_params.e820_map, others are passed via SETUP_E820_EXT node of
* linked list of struct setup_data, which is parsed here.
*/
-void __init parse_e820_ext(struct setup_data *sdata)
+void __init parse_e820_ext(u64 phys_addr, u32 data_len)
{
int entries;
struct e820entry *extmap;
+ struct setup_data *sdata;
+ sdata = early_memremap(phys_addr, data_len);
entries = sdata->len / sizeof(struct e820entry);
extmap = (struct e820entry *)(sdata->data);
__append_e820_map(extmap, entries);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ early_iounmap(sdata, data_len);
printk(KERN_INFO "e820: extended physical RAM map:\n");
e820_print_map("extended");
}
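parse_e820_ext() now receives a physical address and maps the setup_data node only for the duration of the parse, since no permanent mapping exists that early. A sketch of that map/use/unmap discipline; the _sim helpers are invented, and the real early_memremap() does far more:

#include <stdint.h>
#include <stdio.h>

struct setup_data_sim {
        uint64_t next;
        uint32_t type, len;
        uint8_t data[16];
};

static struct setup_data_sim fake_node = { 0, 9, 16, "e820 entries" };

static void *early_memremap_sim(uint64_t phys, uint32_t len)
{
        (void)len;
        return (void *)(uintptr_t)phys; /* identity-map for the demo */
}

static void early_iounmap_sim(void *p, uint32_t len) { (void)p; (void)len; }

int main(void)
{
        uint64_t phys = (uintptr_t)&fake_node;
        struct setup_data_sim *sdata =
                early_memremap_sim(phys, sizeof(fake_node));

        printf("type=%u len=%u\n", sdata->type, sdata->len); /* use... */
        early_iounmap_sim(sdata, sizeof(fake_node));    /* ...then drop */
        return 0;
}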
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 94ab6b90dd3f..4f7c82cdd0f5 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -196,16 +196,21 @@ static void __init ati_bugs_contd(int num, int slot, int func)
static void __init intel_remapping_check(int num, int slot, int func)
{
u8 revision;
+ u16 device;
+ device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
/*
- * Revision 0x13 of this chipset supports irq remapping
- * but has an erratum that breaks its behavior, flag it as such
+ * Revisions <= 0x13 of any device id matched by this quirk
+ * have a problem draining interrupts when irq remapping is
+ * enabled, and should be flagged as broken. Additionally,
+ * revision 0x22 of device id 0x3405 has this problem.
*/
- if (revision == 0x13)
+ if (revision <= 0x13)
+ set_irq_remapping_broken();
+ else if (device == 0x3405 && revision == 0x22)
set_irq_remapping_broken();
-
}
#define QFLAG_APPLY_ONCE 0x1
@@ -239,6 +244,8 @@ static struct chipset early_qrk[] __initdata = {
PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
{ PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+ { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
+ PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{}
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 8f3e2dec1df3..5c38e2b298cd 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -434,8 +434,9 @@ sysenter_past_esp:
jnz sysenter_audit
sysenter_do_call:
cmpl $(NR_syscalls), %eax
- jae syscall_badsys
+ jae sysenter_badsys
call *sys_call_table(,%eax,4)
+sysenter_after_call:
movl %eax,PT_EAX(%esp)
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -516,6 +517,7 @@ ENTRY(system_call)
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
+syscall_after_call:
movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
LOCKDEP_SYS_EXIT
@@ -530,6 +532,7 @@ syscall_exit:
restore_all:
TRACE_IRQS_IRET
restore_all_notrace:
+#ifdef CONFIG_X86_ESPFIX32
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
# are returning to the kernel.
@@ -540,6 +543,7 @@ restore_all_notrace:
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
CFI_REMEMBER_STATE
je ldt_ss # returning to user-space with LDT SS
+#endif
restore_nocheck:
RESTORE_REGS 4 # skip orig_eax/error_code
irq_return:
@@ -552,13 +556,9 @@ ENTRY(iret_exc)
.previous
_ASM_EXTABLE(irq_return,iret_exc)
+#ifdef CONFIG_X86_ESPFIX32
CFI_RESTORE_STATE
ldt_ss:
- larl PT_OLDSS(%esp), %eax
- jnz restore_nocheck
- testl $0x00400000, %eax # returning to 32bit stack?
- jnz restore_nocheck # allright, normal return
-
#ifdef CONFIG_PARAVIRT
/*
* The kernel can't run on a non-flat stack if paravirt mode
@@ -600,6 +600,7 @@ ldt_ss:
lss (%esp), %esp /* switch to espfix segment */
CFI_ADJUST_CFA_OFFSET -8
jmp restore_nocheck
+#endif
CFI_ENDPROC
ENDPROC(system_call)
@@ -690,8 +691,13 @@ syscall_fault:
END(syscall_fault)
syscall_badsys:
- movl $-ENOSYS,PT_EAX(%esp)
- jmp resume_userspace
+ movl $-ENOSYS,%eax
+ jmp syscall_after_call
+END(syscall_badsys)
+
+sysenter_badsys:
+ movl $-ENOSYS,%eax
+ jmp sysenter_after_call
END(syscall_badsys)
CFI_ENDPROC
/*
@@ -707,6 +713,7 @@ END(syscall_badsys)
* the high word of the segment base from the GDT and switches to the
* normal stack and adjusts ESP with the matching offset.
*/
+#ifdef CONFIG_X86_ESPFIX32
/* fixup the stack */
mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
@@ -716,8 +723,10 @@ END(syscall_badsys)
pushl_cfi %eax
lss (%esp), %esp /* switch to the normal stack segment */
CFI_ADJUST_CFA_OFFSET -8
+#endif
.endm
.macro UNWIND_ESPFIX_STACK
+#ifdef CONFIG_X86_ESPFIX32
movl %ss, %eax
/* see if on espfix stack */
cmpw $__ESPFIX_SS, %ax
@@ -728,6 +737,7 @@ END(syscall_badsys)
/* switch to normal stack */
FIXUP_ESPFIX_STACK
27:
+#endif
.endm
/*
@@ -1075,7 +1085,7 @@ ENTRY(ftrace_caller)
pushl $0 /* Pass NULL as regs pointer */
movl 4*4(%esp), %eax
movl 0x4(%ebp), %edx
- leal function_trace_op, %ecx
+ movl function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax
.globl ftrace_call
@@ -1133,7 +1143,7 @@ ENTRY(ftrace_regs_caller)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
- leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
+ movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */
GLOBAL(ftrace_regs_call)
@@ -1335,11 +1345,13 @@ END(debug)
ENTRY(nmi)
RING0_INT_FRAME
ASM_CLAC
+#ifdef CONFIG_X86_ESPFIX32
pushl_cfi %eax
movl %ss, %eax
cmpw $__ESPFIX_SS, %ax
popl_cfi %eax
je nmi_espfix_stack
+#endif
cmpl $ia32_sysenter_target,(%esp)
je nmi_stack_fixup
pushl_cfi %eax
@@ -1379,6 +1391,7 @@ nmi_debug_stack_check:
FIX_STACK 24, nmi_stack_correct, 1
jmp nmi_stack_correct
+#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
/* We have a RING0_INT_FRAME here.
*
@@ -1400,6 +1413,7 @@ nmi_espfix_stack:
lss 12+4(%esp), %esp # back to espfix stack
CFI_ADJUST_CFA_OFFSET -24
jmp irq_return
+#endif
CFI_ENDPROC
END(nmi)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 727208941030..39ba6914bbc6 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -58,6 +58,7 @@
#include <asm/asm.h>
#include <asm/context_tracking.h>
#include <asm/smap.h>
+#include <asm/pgtable_types.h>
#include <linux/err.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
@@ -88,7 +89,7 @@ END(function_hook)
MCOUNT_SAVE_FRAME \skip
/* Load the ftrace_ops into the 3rd parameter */
- leaq function_trace_op, %rdx
+ movq function_trace_op(%rip), %rdx
/* Load ip into the first parameter */
movq RIP(%rsp), %rdi
@@ -1056,12 +1057,45 @@ restore_args:
irq_return:
INTERRUPT_RETURN
- _ASM_EXTABLE(irq_return, bad_iret)
-#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
+ /*
+ * Are we returning to a stack segment from the LDT? Note: in
+ * 64-bit mode SS:RSP on the exception stack is always valid.
+ */
+#ifdef CONFIG_X86_ESPFIX64
+ testb $4,(SS-RIP)(%rsp)
+ jnz native_irq_return_ldt
+#endif
+
+native_irq_return_iret:
iretq
- _ASM_EXTABLE(native_iret, bad_iret)
+ _ASM_EXTABLE(native_irq_return_iret, bad_iret)
+
+#ifdef CONFIG_X86_ESPFIX64
+native_irq_return_ldt:
+ pushq_cfi %rax
+ pushq_cfi %rdi
+ SWAPGS
+ movq PER_CPU_VAR(espfix_waddr),%rdi
+ movq %rax,(0*8)(%rdi) /* RAX */
+ movq (2*8)(%rsp),%rax /* RIP */
+ movq %rax,(1*8)(%rdi)
+ movq (3*8)(%rsp),%rax /* CS */
+ movq %rax,(2*8)(%rdi)
+ movq (4*8)(%rsp),%rax /* RFLAGS */
+ movq %rax,(3*8)(%rdi)
+ movq (6*8)(%rsp),%rax /* SS */
+ movq %rax,(5*8)(%rdi)
+ movq (5*8)(%rsp),%rax /* RSP */
+ movq %rax,(4*8)(%rdi)
+ andl $0xffff0000,%eax
+ popq_cfi %rdi
+ orq PER_CPU_VAR(espfix_stack),%rax
+ SWAPGS
+ movq %rax,%rsp
+ popq_cfi %rax
+ jmp native_irq_return_iret
#endif
.section .fixup,"ax"
@@ -1127,9 +1161,40 @@ ENTRY(retint_kernel)
call preempt_schedule_irq
jmp exit_intr
#endif
-
CFI_ENDPROC
END(common_interrupt)
+
+ /*
+ * If IRET takes a fault on the espfix stack, then we
+ * end up promoting it to a doublefault. In that case,
+ * modify the stack to make it look like we just entered
+ * the #GP handler from user space, similar to bad_iret.
+ */
+#ifdef CONFIG_X86_ESPFIX64
+ ALIGN
+__do_double_fault:
+ XCPT_FRAME 1 RDI+8
+ movq RSP(%rdi),%rax /* Trap on the espfix stack? */
+ sarq $PGDIR_SHIFT,%rax
+ cmpl $ESPFIX_PGD_ENTRY,%eax
+ jne do_double_fault /* No, just deliver the fault */
+ cmpl $__KERNEL_CS,CS(%rdi)
+ jne do_double_fault
+ movq RIP(%rdi),%rax
+ cmpq $native_irq_return_iret,%rax
+ jne do_double_fault /* This shouldn't happen... */
+ movq PER_CPU_VAR(kernel_stack),%rax
+ subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
+ movq %rax,RSP(%rdi)
+ movq $0,(%rax) /* Missing (lost) #GP error code */
+ movq $general_protection,RIP(%rdi)
+ retq
+ CFI_ENDPROC
+END(__do_double_fault)
+#else
+# define __do_double_fault do_double_fault
+#endif
+
/*
* End of kprobes section
*/
@@ -1298,7 +1363,7 @@ zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
-paranoiderrorentry double_fault do_double_fault
+paranoiderrorentry double_fault __do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
@@ -1585,7 +1650,7 @@ error_sti:
*/
error_kernelspace:
incl %ebx
- leaq irq_return(%rip),%rcx
+ leaq native_irq_return_iret(%rip),%rcx
cmpq %rcx,RIP+8(%rsp)
je error_swapgs
movl %ecx,%eax /* zero extend */
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
new file mode 100644
index 000000000000..94d857fb1033
--- /dev/null
+++ b/arch/x86/kernel/espfix_64.c
@@ -0,0 +1,208 @@
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 2014 Intel Corporation; author: H. Peter Anvin
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * The IRET instruction, when returning to a 16-bit segment, only
+ * restores the bottom 16 bits of the user space stack pointer. This
+ * causes some 16-bit software to break, but it also leaks kernel state
+ * to user space.
+ *
+ * This works around that by creating percpu "ministacks", each of which
+ * is mapped 2^16 times 64K apart. When we detect that the return SS is
+ * on the LDT, we copy the IRET frame to the ministack and use the
+ * relevant alias to return to userspace. The ministacks are mapped
+ * readonly, so if the IRET faults we promote #GP to #DF which is an IST
+ * vector and thus has its own stack; we then do the fixup in the #DF
+ * handler.
+ *
+ * This file sets up the ministacks and the related page tables. The
+ * actual ministack invocation is in entry_64.S.
+ */
+
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/setup.h>
+#include <asm/espfix.h>
+
+/*
+ * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
+ * it up to a cache line to avoid unnecessary sharing.
+ */
+#define ESPFIX_STACK_SIZE (8*8UL)
+#define ESPFIX_STACKS_PER_PAGE (PAGE_SIZE/ESPFIX_STACK_SIZE)
+
+/* There is address space for how many espfix pages? */
+#define ESPFIX_PAGE_SPACE (1UL << (PGDIR_SHIFT-PAGE_SHIFT-16))
+
+#define ESPFIX_MAX_CPUS (ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
+#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
+# error "Need more than one PGD for the ESPFIX hack"
+#endif
+
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+
+/* This contains the *bottom* address of the espfix stack */
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
+
+/* Initialization mutex - should this be a spinlock? */
+static DEFINE_MUTEX(espfix_init_mutex);
+
+/* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
+#define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
+static void *espfix_pages[ESPFIX_MAX_PAGES];
+
+static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
+ __aligned(PAGE_SIZE);
+
+static unsigned int page_random, slot_random;
+
+/*
+ * This returns the bottom address of the espfix stack for a specific CPU.
+ * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case
+ * we have to account for some amount of padding at the end of each page.
+ */
+static inline unsigned long espfix_base_addr(unsigned int cpu)
+{
+ unsigned long page, slot;
+ unsigned long addr;
+
+ page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
+ slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
+ addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
+ addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
+ addr += ESPFIX_BASE_ADDR;
+ return addr;
+}
+
+#define PTE_STRIDE (65536/PAGE_SIZE)
+#define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE)
+#define ESPFIX_PMD_CLONES PTRS_PER_PMD
+#define ESPFIX_PUD_CLONES (65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
+
+#define PGTABLE_PROT ((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)
+
+static void init_espfix_random(void)
+{
+ unsigned long rand;
+
+ /*
+ * This is run before the entropy pools are initialized,
+ * but this is hopefully better than nothing.
+ */
+ if (!arch_get_random_long(&rand)) {
+ /* The constant is an arbitrary large prime */
+ rdtscll(rand);
+ rand *= 0xc345c6b72fd16123UL;
+ }
+
+ slot_random = rand % ESPFIX_STACKS_PER_PAGE;
+ page_random = (rand / ESPFIX_STACKS_PER_PAGE)
+ & (ESPFIX_PAGE_SPACE - 1);
+}
+
+void __init init_espfix_bsp(void)
+{
+ pgd_t *pgd_p;
+ pteval_t ptemask;
+
+ ptemask = __supported_pte_mask;
+
+ /* Install the espfix pud into the kernel page directory */
+ pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
+ pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
+
+ /* Randomize the locations */
+ init_espfix_random();
+
+ /* The rest is the same as for any other processor */
+ init_espfix_ap();
+}
+
+void init_espfix_ap(void)
+{
+ unsigned int cpu, page;
+ unsigned long addr;
+ pud_t pud, *pud_p;
+ pmd_t pmd, *pmd_p;
+ pte_t pte, *pte_p;
+ int n;
+ void *stack_page;
+ pteval_t ptemask;
+
+ /* We only have to do this once... */
+ if (likely(this_cpu_read(espfix_stack)))
+ return; /* Already initialized */
+
+ cpu = smp_processor_id();
+ addr = espfix_base_addr(cpu);
+ page = cpu/ESPFIX_STACKS_PER_PAGE;
+
+ /* Did another CPU already set this up? */
+ stack_page = ACCESS_ONCE(espfix_pages[page]);
+ if (likely(stack_page))
+ goto done;
+
+ mutex_lock(&espfix_init_mutex);
+
+ /* Did we race on the lock? */
+ stack_page = ACCESS_ONCE(espfix_pages[page]);
+ if (stack_page)
+ goto unlock_done;
+
+ ptemask = __supported_pte_mask;
+
+ pud_p = &espfix_pud_page[pud_index(addr)];
+ pud = *pud_p;
+ if (!pud_present(pud)) {
+ pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
+ pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
+ paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+ for (n = 0; n < ESPFIX_PUD_CLONES; n++)
+ set_pud(&pud_p[n], pud);
+ }
+
+ pmd_p = pmd_offset(&pud, addr);
+ pmd = *pmd_p;
+ if (!pmd_present(pmd)) {
+ pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
+ pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
+ paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+ for (n = 0; n < ESPFIX_PMD_CLONES; n++)
+ set_pmd(&pmd_p[n], pmd);
+ }
+
+ pte_p = pte_offset_kernel(&pmd, addr);
+ stack_page = (void *)__get_free_page(GFP_KERNEL);
+ pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
+ for (n = 0; n < ESPFIX_PTE_CLONES; n++)
+ set_pte(&pte_p[n*PTE_STRIDE], pte);
+
+ /* Job is done for this CPU and any CPU which shares this page */
+ ACCESS_ONCE(espfix_pages[page]) = stack_page;
+
+unlock_done:
+ mutex_unlock(&espfix_init_mutex);
+done:
+ this_cpu_write(espfix_stack, addr);
+ this_cpu_write(espfix_waddr, (unsigned long)stack_page
+ + (addr & ~PAGE_MASK));
+}
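
The header comment above describes the ministack aliasing abstractly; the arithmetic in espfix_base_addr() is where it actually happens. As a rough illustration, the standalone userspace sketch below reproduces just the folding step: the low 16 bits (the part IRET restores) stay put, everything above them is shifted past the 16-bit alias field. ESPFIX_BASE is an assumed stand-in, not the kernel's constant, and the page/slot randomization from init_espfix_random() is omitted.

/*
 * Standalone sketch of the espfix address folding (not kernel code).
 * ESPFIX_BASE is an illustrative assumption; randomization is omitted.
 */
#include <stdio.h>

#define PAGE_SHIFT        12
#define ESPFIX_STACK_SIZE (8 * 8UL)                     /* 64 bytes */
#define STACKS_PER_PAGE   ((1UL << PAGE_SHIFT) / ESPFIX_STACK_SIZE)
#define ESPFIX_BASE       0xffffff0000000000UL          /* assumed base */

static unsigned long espfix_addr(unsigned int cpu)
{
        unsigned long page = cpu / STACKS_PER_PAGE;
        unsigned long slot = cpu % STACKS_PER_PAGE;
        unsigned long addr = (page << PAGE_SHIFT) + slot * ESPFIX_STACK_SIZE;

        /*
         * Keep the low 16 bits, shift the rest past the 16-bit alias
         * field so every 64K alias hits the same physical ministack.
         */
        addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
        return addr + ESPFIX_BASE;
}

int main(void)
{
        for (unsigned int cpu = 0; cpu < 4; cpu++)
                printf("cpu %u -> %#lx\n", cpu, espfix_addr(cpu));
        return 0;
}
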
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 42a392a9fd02..1ffc32dbe450 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
return addr >= start && addr < end;
}
-static int
-do_ftrace_mod_code(unsigned long ip, const void *new_code)
+static unsigned long text_ip_addr(unsigned long ip)
{
/*
* On x86_64, kernel text mappings are mapped read-only with
@@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code)
if (within(ip, (unsigned long)_text, (unsigned long)_etext))
ip = (unsigned long)__va(__pa_symbol(ip));
- return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
+ return ip;
}
static const unsigned char *ftrace_nop_replace(void)
@@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
return -EINVAL;
+ ip = text_ip_addr(ip);
+
/* replace the text with the new text */
- if (do_ftrace_mod_code(ip, new_code))
+ if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
return -EPERM;
sync_core();
@@ -221,33 +222,56 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return -EINVAL;
}
-int ftrace_update_ftrace_func(ftrace_func_t func)
+static unsigned long ftrace_update_func;
+
+static int update_ftrace_func(unsigned long ip, void *new)
{
- unsigned long ip = (unsigned long)(&ftrace_call);
- unsigned char old[MCOUNT_INSN_SIZE], *new;
+ unsigned char old[MCOUNT_INSN_SIZE];
int ret;
- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
- new = ftrace_call_replace(ip, (unsigned long)func);
+ memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
+
+ ftrace_update_func = ip;
+ /* Make sure the breakpoints see the ftrace_update_func update */
+ smp_wmb();
/* See comment above by declaration of modifying_ftrace_code */
atomic_inc(&modifying_ftrace_code);
ret = ftrace_modify_code(ip, old, new);
+ atomic_dec(&modifying_ftrace_code);
+
+ return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call);
+ unsigned char *new;
+ int ret;
+
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(ip, (unsigned long)func);
- ret = ftrace_modify_code(ip, old, new);
+ ret = update_ftrace_func(ip, new);
}
- atomic_dec(&modifying_ftrace_code);
-
return ret;
}
+static int is_ftrace_caller(unsigned long ip)
+{
+ if (ip == ftrace_update_func)
+ return 1;
+
+ return 0;
+}
+
/*
* A breakpoint was added to the code address we are about to
* modify, and this is the handle that will just skip over it.
@@ -257,10 +281,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
*/
int ftrace_int3_handler(struct pt_regs *regs)
{
+ unsigned long ip;
+
if (WARN_ON_ONCE(!regs))
return 0;
- if (!ftrace_location(regs->ip - 1))
+ ip = regs->ip - 1;
+ if (!ftrace_location(ip) && !is_ftrace_caller(ip))
return 0;
regs->ip += MCOUNT_INSN_SIZE - 1;
@@ -632,8 +659,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
ret = -EPERM;
goto out;
}
- run_sync();
out:
+ run_sync();
return ret;
fail_update:
@@ -665,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data)
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
-static int ftrace_mod_jmp(unsigned long ip,
- int old_offset, int new_offset)
+static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
- unsigned char code[MCOUNT_INSN_SIZE];
+ static union ftrace_code_union calc;
- if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
+ /* Jmp, not a call (the union field is named .e8 after the call opcode) */
+ calc.e8 = 0xe9;
+ calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
- if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
- return -EINVAL;
+ /*
+ * ftrace's external locks serialize access to the static variable.
+ */
+ return calc.code;
+}
- *(int *)(&code[1]) = new_offset;
+static int ftrace_mod_jmp(unsigned long ip, void *func)
+{
+ unsigned char *new;
- if (do_ftrace_mod_code(ip, &code))
- return -EPERM;
+ new = ftrace_jmp_replace(ip, (unsigned long)func);
- return 0;
+ return update_ftrace_func(ip, new);
}
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
- int old_offset, new_offset;
- old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
- new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
-
- return ftrace_mod_jmp(ip, old_offset, new_offset);
+ return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}
int ftrace_disable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
- int old_offset, new_offset;
-
- old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
- new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
- return ftrace_mod_jmp(ip, old_offset, new_offset);
+ return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* !CONFIG_DYNAMIC_FTRACE */
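
The rel32 offset math that ftrace_jmp_replace() relies on is compact enough to demonstrate in isolation. A userspace sketch follows; the union layout mirrors what the kernel's ftrace_code_union plausibly looks like and should be treated as an assumption rather than the exact definition.

/*
 * Userspace sketch of the 5-byte jmp encoding used by ftrace_jmp_replace().
 * The union layout is assumed to mirror the kernel's ftrace_code_union.
 */
#include <stdio.h>
#include <stdint.h>

#define MCOUNT_INSN_SIZE 5

union code_union {
        unsigned char code[MCOUNT_INSN_SIZE];
        struct {
                unsigned char e8;       /* 0xe8 = call, 0xe9 = jmp */
                int32_t offset;         /* rel32, relative to next insn */
        } __attribute__((packed));
};

int main(void)
{
        unsigned long ip = 0x1000, target = 0x2000;
        union code_union calc;

        calc.e8 = 0xe9;                                 /* jmp, not call */
        calc.offset = (int32_t)(target - (ip + MCOUNT_INSN_SIZE));

        for (int i = 0; i < MCOUNT_INSN_SIZE; i++)
                printf("%02x ", calc.code[i]);
        printf("\n");                   /* prints: e9 fb 0f 00 00 */
        return 0;
}
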
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 73afd11799ca..df63cae573e0 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -566,6 +566,10 @@ ENDPROC(early_idt_handlers)
/* This is global to keep gas from relaxing the jumps */
ENTRY(early_idt_handler)
cld
+
+ cmpl $2,(%esp) # X86_TRAP_NMI
+ je is_nmi # Ignore NMI
+
cmpl $2,%ss:early_recursion_flag
je hlt_loop
incl %ss:early_recursion_flag
@@ -616,8 +620,9 @@ ex_entry:
pop %edx
pop %ecx
pop %eax
- addl $8,%esp /* drop vector number and error code */
decl %ss:early_recursion_flag
+is_nmi:
+ addl $8,%esp /* drop vector number and error code */
iret
ENDPROC(early_idt_handler)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 321d65ebaffe..f2a9a2aa98f3 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -343,6 +343,9 @@ early_idt_handlers:
ENTRY(early_idt_handler)
cld
+ cmpl $2,(%rsp) # X86_TRAP_NMI
+ je is_nmi # Ignore NMI
+
cmpl $2,early_recursion_flag(%rip)
jz 1f
incl early_recursion_flag(%rip)
@@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
popq %rdx
popq %rcx
popq %rax
- addq $16,%rsp # drop vector number and error code
decl early_recursion_flag(%rip)
+is_nmi:
+ addq $16,%rsp # drop vector number and error code
INTERRUPT_RETURN
ENDPROC(early_idt_handler)
@@ -513,7 +517,7 @@ ENTRY(phys_base)
#include "../../x86/xen/xen-head.S"
.section .bss, "aw", @nobits
- .align L1_CACHE_BYTES
+ .align PAGE_SIZE
ENTRY(idt_table)
.skip IDT_ENTRIES * 16
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index cb339097b9ea..b03ff1842547 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
- if (use_eager_fpu())
- math_state_restore();
- else
+ if (use_eager_fpu()) {
+ /*
+ * For eager fpu, most of the time, tsk_used_math() is true.
+ * Restore the user math as we are done with the kernel usage.
+ * In a few instances, during thread exit, signal handling etc.,
+ * tsk_used_math() is false. Those few places will take proper
+ * actions, so we don't need to restore the math here.
+ */
+ if (likely(tsk_used_math(current)))
+ math_state_restore();
+ } else {
stts();
+ }
}
EXPORT_SYMBOL(__kernel_fpu_end);
@@ -116,7 +125,7 @@ static void __cpuinit mxcsr_feature_mask_init(void)
if (cpu_has_fxsr) {
memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
- asm volatile("fxsave %0" : : "m" (fx_scratch));
+ asm volatile("fxsave %0" : "+m" (fx_scratch));
mask = fx_scratch.mxcsr_mask;
if (mask == 0)
mask = 0x0000ffbf;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index ebc987398923..c37886d759cc 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -229,6 +229,11 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
}
}
+ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+ error = -EINVAL;
+ goto out_unlock;
+ }
+
fill_ldt(&ldt, &ldt_info);
if (oldmode)
ldt.avl = 0;
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index efdec7cd8e01..b516dfb411ec 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -430,7 +430,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
if (request_firmware(&fw, (const char *)fw_name, device)) {
- pr_err("failed to load file %s\n", fw_name);
+ pr_debug("failed to load file %s\n", fw_name);
goto out;
}
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 3f08f34f93eb..a1da6737ba5b 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
PATCH_SITE(pv_irq_ops, save_fl);
PATCH_SITE(pv_irq_ops, irq_enable);
PATCH_SITE(pv_irq_ops, irq_disable);
- PATCH_SITE(pv_cpu_ops, iret);
PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
PATCH_SITE(pv_cpu_ops, usergs_sysret32);
PATCH_SITE(pv_cpu_ops, usergs_sysret64);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 872079a67e4d..f7d0672481fd 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
flag |= __GFP_ZERO;
again:
page = NULL;
- if (!(flag & GFP_ATOMIC))
+ /* CMA can only be used in a context that permits sleeping */
+ if (flag & __GFP_WAIT)
page = dma_alloc_from_contiguous(dev, count, get_order(size));
+ /* fallback */
if (!page)
page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
if (!page)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 1ce8966f2488..48f439953436 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -378,9 +378,9 @@ static void amd_e400_idle(void)
* The switch back from broadcast mode needs to be
* called with interrupts disabled.
*/
- local_irq_disable();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
- local_irq_enable();
+ local_irq_disable();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+ local_irq_enable();
} else
default_idle();
}
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 04ee1e2e4c02..52dbf1e400dc 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev)
return;
pci_read_config_dword(nb_ht, 0x60, &val);
- node = val & 7;
+ node = pcibus_to_node(dev->bus) | (val & 7);
/*
* Some hardware may return an invalid node ID,
* so check it first:
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 76fa1e9a2b39..90fd1195f276 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -447,6 +447,22 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
},
},
+ { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
+ .callback = set_pci_reboot,
+ .ident = "Dell PowerEdge C6100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ },
+ },
+ { /* Some C6100 machines were shipped with vendor being 'Dell'. */
+ .callback = set_pci_reboot,
+ .ident = "Dell PowerEdge C6100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c
index 2a26819bb6a8..80eab01c1a68 100644
--- a/arch/x86/kernel/resource.c
+++ b/arch/x86/kernel/resource.c
@@ -37,10 +37,12 @@ static void remove_e820_regions(struct resource *avail)
void arch_remove_reservations(struct resource *avail)
{
- /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
+ /*
+ * Trim out BIOS area (high 2MB) and E820 regions. We do not remove
+ * the low 1MB unconditionally, as this area is needed for some ISA
+ * cards requiring a memory range, e.g. the i82365 PCMCIA controller.
+ */
if (avail->flags & IORESOURCE_MEM) {
- if (avail->start < BIOS_END)
- avail->start = BIOS_END;
resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
remove_e820_regions(avail);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 56f7fcfe7fa2..a3627ade4b15 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -426,25 +426,23 @@ static void __init reserve_initrd(void)
static void __init parse_setup_data(void)
{
struct setup_data *data;
- u64 pa_data;
+ u64 pa_data, pa_next;
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
- u32 data_len, map_len;
+ u32 data_len, map_len, data_type;
map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
(u64)sizeof(struct setup_data));
data = early_memremap(pa_data, map_len);
data_len = data->len + sizeof(struct setup_data);
- if (data_len > map_len) {
- early_iounmap(data, map_len);
- data = early_memremap(pa_data, data_len);
- map_len = data_len;
- }
+ data_type = data->type;
+ pa_next = data->next;
+ early_iounmap(data, map_len);
- switch (data->type) {
+ switch (data_type) {
case SETUP_E820_EXT:
- parse_e820_ext(data);
+ parse_e820_ext(pa_data, data_len);
break;
case SETUP_DTB:
add_dtb(pa_data);
@@ -452,8 +450,7 @@ static void __init parse_setup_data(void)
default:
break;
}
- pa_data = data->next;
- early_iounmap(data, map_len);
+ pa_data = pa_next;
}
}
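
The point of this rework is that each node's type, length, and next pointer are copied out while the node is still mapped, so nothing dereferences the mapping after early_iounmap(). A userspace sketch of that walk, with plain pointers standing in for physical addresses and the map/unmap pair reduced to stubs:

/*
 * Userspace sketch of the reworked setup_data walk: copy type/len/next
 * out of the mapped node, unmap, then dispatch. map()/unmap() are toy
 * stand-ins for early_memremap()/early_iounmap().
 */
#include <stdio.h>
#include <stdint.h>

struct setup_data {
        uint64_t next;          /* "physical" address of next node */
        uint32_t type;
        uint32_t len;
};

static struct setup_data *map(uint64_t pa) { return (void *)(uintptr_t)pa; }
static void unmap(struct setup_data *p) { (void)p; }

int main(void)
{
        struct setup_data b = { 0, 2, 16 }, a = { (uintptr_t)&b, 1, 8 };
        uint64_t pa_data = (uintptr_t)&a;

        while (pa_data) {
                struct setup_data *data = map(pa_data);
                uint32_t type = data->type;     /* copy out ... */
                uint64_t pa_next = data->next;  /* ... before unmapping */

                unmap(data);
                printf("node type %u at %#llx\n", type,
                       (unsigned long long)pa_data);
                pa_data = pa_next;      /* safe: no use of 'data' here */
        }
        return 0;
}
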
@@ -911,11 +908,11 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_EFI
if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
"EL32", 4)) {
- set_bit(EFI_BOOT, &x86_efi_facility);
+ set_bit(EFI_BOOT, &efi.flags);
} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
"EL64", 4)) {
- set_bit(EFI_BOOT, &x86_efi_facility);
- set_bit(EFI_64BIT, &x86_efi_facility);
+ set_bit(EFI_BOOT, &efi.flags);
+ set_bit(EFI_64BIT, &efi.flags);
}
if (efi_enabled(EFI_BOOT))
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 69562992e457..087ab2af381a 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -364,7 +364,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
/* Set up to return from userspace. */
restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
@@ -429,7 +429,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
@@ -496,7 +496,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
+ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
put_user_ex(0, &frame->uc.uc__pad0);
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bfd348e99369..fe862750583b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -265,6 +265,13 @@ notrace static void __cpuinit start_secondary(void *unused)
check_tsc_sync_target();
/*
+ * Enable the espfix hack for this CPU
+ */
+#ifdef CONFIG_X86_ESPFIX64
+ init_espfix_ap();
+#endif
+
+ /*
* We need to hold vector_lock so that the set of online cpus
* does not change while we are assigning vectors to cpus. Holding
* this lock ensures we don't half assign or remove an irq from a cpu.
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index dbded5aedb81..30277e27431a 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
*begin = new_begin;
}
} else {
- *begin = TASK_UNMAPPED_BASE;
+ *begin = current->mm->mmap_legacy_base;
*end = TASK_SIZE;
}
}
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9a907a67be8f..c52c07efe970 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -125,10 +125,10 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
if (!show_unhandled_signals)
return;
- pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
- level, current->comm, task_pid_nr(current),
- message, regs->ip, regs->cs,
- regs->sp, regs->ax, regs->si, regs->di);
+ printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+ level, current->comm, task_pid_nr(current),
+ message, regs->ip, regs->cs,
+ regs->sp, regs->ax, regs->si, regs->di);
}
static int addr_to_vsyscall_nr(unsigned long addr)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5953dcea752d..fb3fddc322f8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2209,6 +2209,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long cs;
+ int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
@@ -2218,6 +2219,9 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
+ /* Outer-privilege level return is not implemented */
+ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+ return X86EMUL_UNHANDLEABLE;
rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
return rc;
}
@@ -4207,7 +4211,10 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
case OpMem8:
ctxt->memop.bytes = 1;
if (ctxt->memop.type == OP_REG) {
- ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1);
+ int highbyte_regs = ctxt->rex_prefix == 0;
+
+ ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm,
+ highbyte_regs);
fetch_register_operand(&ctxt->memop);
}
goto mem_common;
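
To make the new em_ret_far() guard concrete: a far return pops IP and then CS, and a CS selector whose RPL (the low two bits) is numerically greater than the current privilege level would be an outer-privilege return, which the emulator now refuses. A minimal sketch of just that predicate; the function name and mode flag are illustrative, not the emulator's API:

/* Sketch of the privilege check added to em_ret_far(); names are
 * illustrative assumptions. */
#include <stdio.h>

static int ret_far_unhandleable(unsigned int cs_sel, unsigned int cpl,
                                int protected_mode)
{
        /* RPL lives in the low two selector bits */
        return protected_mode && (cs_sel & 3) > cpl;
}

int main(void)
{
        /* CPL 0 returning to an RPL-3 selector: outer-privilege, rejected */
        printf("%d\n", ret_far_unhandleable(0x1b, 0, 1));       /* 1 */
        /* same-privilege return is handled as before */
        printf("%d\n", ret_far_unhandleable(0x08, 0, 1));       /* 0 */
        return 0;
}
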
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 412a5aa0ef94..518d86471b76 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -37,6 +37,7 @@
#include "irq.h"
#include "i8254.h"
+#include "x86.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -349,6 +350,23 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
atomic_set(&ps->pending, 0);
ps->irq_ack = 1;
+ /*
+ * Do not allow the guest to program periodic timers with a small
+ * interval, since the hrtimers are not throttled by the host
+ * scheduler.
+ */
+ if (ps->is_periodic) {
+ s64 min_period = min_timer_period_us * 1000LL;
+
+ if (ps->period < min_period) {
+ pr_info_ratelimited(
+ "kvm: requested %lld ns "
+ "i8254 timer period limited to %lld ns\n",
+ ps->period, min_period);
+ ps->period = min_period;
+ }
+ }
+
hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
HRTIMER_MODE_ABS);
}
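
The clamp itself is plain arithmetic, shown below as a standalone sketch. min_timer_period_us defaults to 500 per the module parameter the x86.c hunk further down introduces; everything else here is illustrative.

/* Sketch of the periodic-timer clamp added to create_pit_timer(). */
#include <stdio.h>

static unsigned int min_timer_period_us = 500;  /* module param default */

static long long clamp_period(long long period_ns)
{
        long long min_period = min_timer_period_us * 1000LL;

        if (period_ns < min_period) {
                printf("requested %lld ns period limited to %lld ns\n",
                       period_ns, min_period);
                period_ns = min_period;
        }
        return period_ns;
}

int main(void)
{
        printf("%lld\n", clamp_period(100000));         /* raised to 500000 */
        printf("%lld\n", clamp_period(1000000));        /* left alone */
        return 0;
}
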
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 484bc874688b..3ec38cb56bd5 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -108,7 +108,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
vector = kvm_cpu_get_extint(v);
- if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
+ if (vector != -1)
return vector; /* PIC */
return kvm_get_apic_interrupt(v); /* APIC */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0eee2c8b64d1..681e4e251f00 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -71,9 +71,6 @@
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
-static unsigned int min_timer_period_us = 500;
-module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
-
static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
*((u32 *) (apic->regs + reg_off)) = val;
@@ -153,6 +150,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
+#define KVM_X2APIC_CID_BITS 0
+
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
@@ -190,7 +189,8 @@ static void recalculate_apic_map(struct kvm *kvm)
if (apic_x2apic_mode(apic)) {
new->ldr_bits = 32;
new->cid_shift = 16;
- new->cid_mask = new->lid_mask = 0xffff;
+ new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
+ new->lid_mask = 0xffff;
} else if (kvm_apic_sw_enabled(apic) &&
!new->cid_mask /* flat mode */ &&
kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
@@ -362,31 +362,90 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
- apic->irr_pending = false;
+ struct kvm_vcpu *vcpu;
+
+ vcpu = apic->vcpu;
+
apic_clear_vector(vec, apic->regs + APIC_IRR);
- if (apic_search_irr(apic) != -1)
- apic->irr_pending = true;
+ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+ /* try to update RVI */
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ else {
+ vec = apic_search_irr(apic);
+ apic->irr_pending = (vec != -1);
+ }
}
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
- if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
+ struct kvm_vcpu *vcpu;
+
+ if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
+ return;
+
+ vcpu = apic->vcpu;
+
+ /*
+ * With APIC virtualization enabled, all caching is disabled
+ * because the processor can modify ISR under the hood. Instead
+ * just set SVI.
+ */
+ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+ kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
+ else {
++apic->isr_count;
- BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+ BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+ /*
+ * ISR (in service register) bit is set when injecting an interrupt.
+ * The highest vector is injected. Thus the latest bit set matches
+ * the highest bit in ISR.
+ */
+ apic->highest_isr_cache = vec;
+ }
+}
+
+static inline int apic_find_highest_isr(struct kvm_lapic *apic)
+{
+ int result;
+
/*
- * ISR (in service register) bit is set when injecting an interrupt.
- * The highest vector is injected. Thus the latest bit set matches
- * the highest bit in ISR.
+ * Note that isr_count is always 1, and highest_isr_cache
+ * is always -1, with APIC virtualization enabled.
*/
- apic->highest_isr_cache = vec;
+ if (!apic->isr_count)
+ return -1;
+ if (likely(apic->highest_isr_cache != -1))
+ return apic->highest_isr_cache;
+
+ result = find_highest_vector(apic->regs + APIC_ISR);
+ ASSERT(result == -1 || result >= 16);
+
+ return result;
}
static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
- if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
+ struct kvm_vcpu *vcpu;
+ if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
+ return;
+
+ vcpu = apic->vcpu;
+
+ /*
+ * We do get here with APIC virtualization enabled if the guest
+ * uses the Hyper-V APIC enlightenment. In this case we may need
+ * to trigger a new interrupt delivery by writing the SVI field;
+ * on the other hand isr_count and highest_isr_cache are unused
+ * and must be left alone.
+ */
+ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+ kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
+ apic_find_highest_isr(apic));
+ else {
--apic->isr_count;
- BUG_ON(apic->isr_count < 0);
- apic->highest_isr_cache = -1;
+ BUG_ON(apic->isr_count < 0);
+ apic->highest_isr_cache = -1;
+ }
}
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
@@ -466,22 +525,6 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
-static inline int apic_find_highest_isr(struct kvm_lapic *apic)
-{
- int result;
-
- /* Note that isr_count is always 1 with vid enabled */
- if (!apic->isr_count)
- return -1;
- if (likely(apic->highest_isr_cache != -1))
- return apic->highest_isr_cache;
-
- result = find_highest_vector(apic->regs + APIC_ISR);
- ASSERT(result == -1 || result >= 16);
-
- return result;
-}
-
void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
{
struct kvm_lapic *apic = vcpu->arch.apic;
@@ -855,7 +898,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
ASSERT(apic != NULL);
/* if initial count is 0, current count should also be 0 */
- if (kvm_apic_get_reg(apic, APIC_TMICT) == 0)
+ if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
+ apic->lapic_timer.period == 0)
return 0;
remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
@@ -1360,8 +1404,12 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
return;
}
+ if (!kvm_vcpu_is_bsp(apic->vcpu))
+ value &= ~MSR_IA32_APICBASE_BSP;
+ vcpu->arch.apic_base = value;
+
/* update jump label if enable bit changes */
- if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
+ if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE)
static_key_slow_dec_deferred(&apic_hw_disabled);
else
@@ -1369,10 +1417,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
recalculate_apic_map(vcpu->kvm);
}
- if (!kvm_vcpu_is_bsp(apic->vcpu))
- value &= ~MSR_IA32_APICBASE_BSP;
-
- vcpu->arch.apic_base = value;
if ((old_value ^ value) & X2APIC_ENABLE) {
if (value & X2APIC_ENABLE) {
u32 id = kvm_apic_id(apic);
@@ -1621,6 +1665,13 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
if (vector == -1)
return -1;
+ /*
+ * We get here even with APIC virtualization enabled, if doing
+ * nested virtualization and L1 runs with the "acknowledge interrupt
+ * on exit" mode. Then we cannot inject the interrupt via RVI,
+ * because the processor would deliver it through the IDT.
+ */
+
apic_set_isr(vector, apic);
apic_update_ppr(apic);
apic_clear_irr(vector, apic);
@@ -1705,7 +1756,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
u32 data;
- void *vapic;
if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
@@ -1713,9 +1763,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
- data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
- kunmap_atomic(vapic);
+ kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32));
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
@@ -1751,7 +1800,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
u32 data, tpr;
int max_irr, max_isr;
struct kvm_lapic *apic = vcpu->arch.apic;
- void *vapic;
apic_sync_pv_eoi_to_guest(vcpu, apic);
@@ -1767,18 +1815,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
- *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
- kunmap_atomic(vapic);
+ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32));
}
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
- vcpu->arch.apic->vapic_addr = vapic_addr;
- if (vapic_addr)
+ if (vapic_addr) {
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.apic->vapic_cache,
+ vapic_addr, sizeof(u32)))
+ return -EINVAL;
__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
- else
+ } else {
__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
+ }
+
+ vcpu->arch.apic->vapic_addr = vapic_addr;
+ return 0;
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c730ac9fe801..c8b0d0d2da5c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -34,7 +34,7 @@ struct kvm_lapic {
*/
void *regs;
gpa_t vapic_addr;
- struct page *vapic_page;
+ struct gfn_to_hva_cache vapic_cache;
unsigned long pending_events;
unsigned int sipi_vector;
};
@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 004cc87b781c..711c649f80b7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2585,6 +2585,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
int emulate = 0;
gfn_t pseudo_gfn;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return 0;
+
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
if (iterator.level == level) {
mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
@@ -2748,6 +2751,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
bool ret = false;
u64 spte = 0ull;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return false;
+
if (!page_fault_can_be_fast(vcpu, error_code))
return false;
@@ -3139,6 +3145,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
struct kvm_shadow_walk_iterator iterator;
u64 spte = 0ull;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return spte;
+
walk_shadow_page_lockless_begin(vcpu);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
if (!is_shadow_present_pte(spte))
@@ -4329,6 +4338,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
u64 spte;
int nr_sptes = 0;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return nr_sptes;
+
walk_shadow_page_lockless_begin(vcpu);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
sptes[iterator.level-1] = spte;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index da20860b457a..7e6090e13237 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -423,6 +423,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
if (FNAME(gpte_changed)(vcpu, gw, top_level))
goto out_gpte_changed;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ goto out_gpte_changed;
+
for (shadow_walk_init(&it, vcpu, addr);
shadow_walk_okay(&it) && it.level > gw->level;
shadow_walk_next(&it)) {
@@ -671,6 +674,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
*/
mmu_topup_memory_caches(vcpu);
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+ WARN_ON(1);
+ return;
+ }
+
spin_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry(vcpu, gva, iterator) {
level = iterator.level;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a14a6eaf871d..765210d4d925 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2985,10 +2985,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
r = cr_interception(svm);
- if (irqchip_in_kernel(svm->vcpu.kvm)) {
- clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+ if (irqchip_in_kernel(svm->vcpu.kvm))
return r;
- }
if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
return r;
kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -3550,6 +3548,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
+ clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+
if (irr == -1)
return;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 260a91939555..7cdafb6dc705 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3399,15 +3399,22 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
var->limit = vmx_read_guest_seg_limit(vmx, seg);
var->selector = vmx_read_guest_seg_selector(vmx, seg);
ar = vmx_read_guest_seg_ar(vmx, seg);
+ var->unusable = (ar >> 16) & 1;
var->type = ar & 15;
var->s = (ar >> 4) & 1;
var->dpl = (ar >> 5) & 3;
- var->present = (ar >> 7) & 1;
+ /*
+ * Some userspaces do not preserve the unusable property. Since a
+ * usable segment has to be present according to the VMX spec, we can
+ * use the present property to work around that userspace bug by making
+ * an unusable segment always nonpresent. vmx_segment_access_rights()
+ * already marks a nonpresent segment as unusable.
+ */
+ var->present = !var->unusable;
var->avl = (ar >> 12) & 1;
var->l = (ar >> 13) & 1;
var->db = (ar >> 14) & 1;
var->g = (ar >> 15) & 1;
- var->unusable = (ar >> 16) & 1;
}
static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
@@ -7126,8 +7133,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
free_vpid(vmx);
- free_nested(vmx);
free_loaded_vmcs(vmx->loaded_vmcs);
+ free_nested(vmx);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
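
Returning to the vmx_get_segment() change above: the reorder matters because 'unusable' must be decoded before 'present' can be derived from it. The userspace sketch below mirrors that decoding order; the bit positions follow the hunk, while the struct is a simplified assumption, not kvm_segment.

/* Sketch of decoding a VMX access-rights word the way the reordered
 * vmx_get_segment() now does. */
#include <stdio.h>

struct seg {
        unsigned int type, s, dpl, present, avl, l, db, g, unusable;
};

static struct seg decode_ar(unsigned int ar)
{
        struct seg v;

        v.unusable = (ar >> 16) & 1;    /* read before deriving present */
        v.type = ar & 15;
        v.s   = (ar >> 4) & 1;
        v.dpl = (ar >> 5) & 3;
        v.present = !v.unusable;        /* amend userspace that drops it */
        v.avl = (ar >> 12) & 1;
        v.l   = (ar >> 13) & 1;
        v.db  = (ar >> 14) & 1;
        v.g   = (ar >> 15) & 1;
        return v;
}

int main(void)
{
        struct seg s = decode_ar(1 << 16);      /* unusable segment */
        printf("unusable=%u present=%u\n", s.unusable, s.present); /* 1 0 */
        return 0;
}
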
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e8ba99c34180..1be0a9e75d1f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -94,6 +94,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
static bool ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
+unsigned int min_timer_period_us = 500;
+module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
+
bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 kvm_max_guest_tsc_khz;
@@ -3138,8 +3141,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
- r = 0;
- kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
break;
}
case KVM_X86_SETUP_MCE: {
@@ -5539,36 +5541,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
!kvm_event_needs_reinjection(vcpu);
}
-static int vapic_enter(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
- struct page *page;
-
- if (!apic || !apic->vapic_addr)
- return 0;
-
- page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- if (is_error_page(page))
- return -EFAULT;
-
- vcpu->arch.apic->vapic_page = page;
- return 0;
-}
-
-static void vapic_exit(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
- int idx;
-
- if (!apic || !apic->vapic_addr)
- return;
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- kvm_release_page_dirty(apic->vapic_page);
- mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-}
-
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
int max_irr, tpr;
@@ -5889,11 +5861,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- r = vapic_enter(vcpu);
- if (r) {
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- return r;
- }
r = 1;
while (r > 0) {
@@ -5951,8 +5918,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- vapic_exit(vcpu);
-
return r;
}
@@ -6017,7 +5982,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
frag->len -= len;
}
- if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+ if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
vcpu->mmio_needed = 0;
if (vcpu->mmio_is_write)
return 1;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index e224f7a671b6..3186542f2fa3 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -124,5 +124,7 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
extern u64 host_xcr0;
+extern unsigned int min_timer_period_us;
+
extern struct static_key kvm_no_apic_vcpu;
#endif
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 25b7ae8d058a..7609e0e421ec 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -6,6 +6,7 @@
*/
#include <asm/checksum.h>
#include <linux/module.h>
+#include <asm/smap.h>
/**
* csum_partial_copy_from_user - Copy and checksum from user space.
@@ -52,8 +53,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
len -= 2;
}
}
+ stac();
isum = csum_partial_copy_generic((__force const void *)src,
dst, len, isum, errp, NULL);
+ clac();
if (unlikely(*errp))
goto out_err;
@@ -82,6 +85,8 @@ __wsum
csum_partial_copy_to_user(const void *src, void __user *dst,
int len, __wsum isum, int *errp)
{
+ __wsum ret;
+
might_sleep();
if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
@@ -105,8 +110,11 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
}
*errp = 0;
- return csum_partial_copy_generic(src, (void __force *)dst,
- len, isum, NULL, errp);
+ stac();
+ ret = csum_partial_copy_generic(src, (void __force *)dst,
+ len, isum, NULL, errp);
+ clac();
+ return ret;
}
EXPORT_SYMBOL(csum_partial_copy_to_user);
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 0002a3a33081..e04e67753238 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -30,11 +30,13 @@ struct pg_state {
unsigned long start_address;
unsigned long current_address;
const struct addr_marker *marker;
+ unsigned long lines;
};
struct addr_marker {
unsigned long start_address;
const char *name;
+ unsigned long max_lines;
};
/* indices for address_markers; keep sync'd w/ address_markers below */
@@ -45,6 +47,7 @@ enum address_markers_idx {
LOW_KERNEL_NR,
VMALLOC_START_NR,
VMEMMAP_START_NR,
+ ESPFIX_START_NR,
HIGH_KERNEL_NR,
MODULES_VADDR_NR,
MODULES_END_NR,
@@ -67,6 +70,7 @@ static struct addr_marker address_markers[] = {
{ PAGE_OFFSET, "Low Kernel Mapping" },
{ VMALLOC_START, "vmalloc() Area" },
{ VMEMMAP_START, "Vmemmap" },
+ { ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
{ __START_KERNEL_map, "High Kernel Mapping" },
{ MODULES_VADDR, "Modules" },
{ MODULES_END, "End Modules" },
@@ -163,7 +167,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
pgprot_t new_prot, int level)
{
pgprotval_t prot, cur;
- static const char units[] = "KMGTPE";
+ static const char units[] = "BKMGTPE";
/*
* If we have a "break" in the series, we need to flush the state that
@@ -178,6 +182,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
st->current_prot = new_prot;
st->level = level;
st->marker = address_markers;
+ st->lines = 0;
seq_printf(m, "---[ %s ]---\n", st->marker->name);
} else if (prot != cur || level != st->level ||
st->current_address >= st->marker[1].start_address) {
@@ -188,17 +193,21 @@ static void note_page(struct seq_file *m, struct pg_state *st,
/*
* Now print the actual finished series
*/
- seq_printf(m, "0x%0*lx-0x%0*lx ",
- width, st->start_address,
- width, st->current_address);
-
- delta = (st->current_address - st->start_address) >> 10;
- while (!(delta & 1023) && unit[1]) {
- delta >>= 10;
- unit++;
+ if (!st->marker->max_lines ||
+ st->lines < st->marker->max_lines) {
+ seq_printf(m, "0x%0*lx-0x%0*lx ",
+ width, st->start_address,
+ width, st->current_address);
+
+ delta = (st->current_address - st->start_address);
+ while (!(delta & 1023) && unit[1]) {
+ delta >>= 10;
+ unit++;
+ }
+ seq_printf(m, "%9lu%c ", delta, *unit);
+ printk_prot(m, st->current_prot, st->level);
}
- seq_printf(m, "%9lu%c ", delta, *unit);
- printk_prot(m, st->current_prot, st->level);
+ st->lines++;
/*
* We print markers for special areas of address space,
@@ -206,7 +215,15 @@ static void note_page(struct seq_file *m, struct pg_state *st,
* This helps in the interpretation.
*/
if (st->current_address >= st->marker[1].start_address) {
+ if (st->marker->max_lines &&
+ st->lines > st->marker->max_lines) {
+ unsigned long nskip =
+ st->lines - st->marker->max_lines;
+ seq_printf(m, "... %lu entr%s skipped ... \n",
+ nskip, nskip == 1 ? "y" : "ies");
+ }
st->marker++;
+ st->lines = 0;
seq_printf(m, "---[ %s ]---\n", st->marker->name);
}
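
The units string change ("KMGTPE" to "BKMGTPE") pairs with the loop now scaling a raw byte delta instead of a pre-shifted KiB delta. A quick standalone rendition of that scaling loop:

/* Sketch of the size-scaling loop from note_page() after the change:
 * delta starts in bytes, so units[] gains a leading 'B'. */
#include <stdio.h>

static void print_size(unsigned long delta)
{
        static const char units[] = "BKMGTPE";
        const char *unit = units;

        while (!(delta & 1023) && unit[1]) {    /* divisible by 1024? */
                delta >>= 10;
                unit++;
        }
        printf("%9lu%c\n", delta, *unit);
}

int main(void)
{
        print_size(4096);               /*        4K */
        print_size(2UL << 20);          /*        2M */
        print_size(12345);              /*    12345B */
        return 0;
}
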
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 654be4ae3047..c1e9e4cbbd76 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -989,6 +989,12 @@ static int fault_in_kernel_space(unsigned long address)
static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
+ if (!IS_ENABLED(CONFIG_X86_SMAP))
+ return false;
+
+ if (!static_cpu_has(X86_FEATURE_SMAP))
+ return false;
+
if (error_code & PF_USER)
return false;
@@ -1091,11 +1097,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
- if (static_cpu_has(X86_FEATURE_SMAP)) {
- if (unlikely(smap_violation(error_code, regs))) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
+ if (unlikely(smap_violation(error_code, regs))) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index ae1aa71d0115..7e73e8c69096 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -16,169 +16,6 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
-static unsigned long page_table_shareable(struct vm_area_struct *svma,
- struct vm_area_struct *vma,
- unsigned long addr, pgoff_t idx)
-{
- unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
- svma->vm_start;
- unsigned long sbase = saddr & PUD_MASK;
- unsigned long s_end = sbase + PUD_SIZE;
-
- /* Allow segments to share if only one is marked locked */
- unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
- unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
-
- /*
- * match the virtual addresses, permission and the alignment of the
- * page table page.
- */
- if (pmd_index(addr) != pmd_index(saddr) ||
- vm_flags != svm_flags ||
- sbase < svma->vm_start || svma->vm_end < s_end)
- return 0;
-
- return saddr;
-}
-
-static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
-{
- unsigned long base = addr & PUD_MASK;
- unsigned long end = base + PUD_SIZE;
-
- /*
- * check on proper vm_flags and page table alignment
- */
- if (vma->vm_flags & VM_MAYSHARE &&
- vma->vm_start <= base && end <= vma->vm_end)
- return 1;
- return 0;
-}
-
-/*
- * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
- * and returns the corresponding pte. While this is not necessary for the
- * !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_mutex section - otherwise
- * racing tasks could either miss the sharing (see huge_pte_offset) or select a
- * bad pmd for sharing.
- */
-static pte_t *
-huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
-{
- struct vm_area_struct *vma = find_vma(mm, addr);
- struct address_space *mapping = vma->vm_file->f_mapping;
- pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
- vma->vm_pgoff;
- struct vm_area_struct *svma;
- unsigned long saddr;
- pte_t *spte = NULL;
- pte_t *pte;
-
- if (!vma_shareable(vma, addr))
- return (pte_t *)pmd_alloc(mm, pud, addr);
-
- mutex_lock(&mapping->i_mmap_mutex);
- vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
- if (svma == vma)
- continue;
-
- saddr = page_table_shareable(svma, vma, addr, idx);
- if (saddr) {
- spte = huge_pte_offset(svma->vm_mm, saddr);
- if (spte) {
- get_page(virt_to_page(spte));
- break;
- }
- }
- }
-
- if (!spte)
- goto out;
-
- spin_lock(&mm->page_table_lock);
- if (pud_none(*pud))
- pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
- else
- put_page(virt_to_page(spte));
- spin_unlock(&mm->page_table_lock);
-out:
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
- mutex_unlock(&mapping->i_mmap_mutex);
- return pte;
-}
-
-/*
- * unmap huge page backed by shared pte.
- *
- * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
- * indicated by page_count > 1, unmap is achieved by clearing pud and
- * decrementing the ref count. If count == 1, the pte page is not shared.
- *
- * called with vma->vm_mm->page_table_lock held.
- *
- * returns: 1 successfully unmapped a shared pte page
- * 0 the underlying pte page is not shared, or it is the last user
- */
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
- pgd_t *pgd = pgd_offset(mm, *addr);
- pud_t *pud = pud_offset(pgd, *addr);
-
- BUG_ON(page_count(virt_to_page(ptep)) == 0);
- if (page_count(virt_to_page(ptep)) == 1)
- return 0;
-
- pud_clear(pud);
- put_page(virt_to_page(ptep));
- *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
- return 1;
-}
-
-pte_t *huge_pte_alloc(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pgd;
- pud_t *pud;
- pte_t *pte = NULL;
-
- pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
- if (pud) {
- if (sz == PUD_SIZE) {
- pte = (pte_t *)pud;
- } else {
- BUG_ON(sz != PMD_SIZE);
- if (pud_none(*pud))
- pte = huge_pmd_share(mm, addr, pud);
- else
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
- }
- }
- BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
-
- return pte;
-}
-
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd = NULL;
-
- pgd = pgd_offset(mm, addr);
- if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (pud_present(*pud)) {
- if (pud_large(*pud))
- return (pte_t *)pud;
- pmd = pmd_offset(pud, addr);
- }
- }
- return (pte_t *) pmd;
-}
-
#if 0 /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
@@ -240,30 +77,6 @@ int pud_huge(pud_t pud)
return !!(pud_val(pud) & _PAGE_PSE);
}
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pmd);
- if (page)
- page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
- return page;
-}
-
-struct page *
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pud);
- if (page)
- page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
- return page;
-}
-
#endif
/* x86_64 also uses this file */
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 1f34e9219775..7a5bf1b76e2f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -78,8 +78,8 @@ __ref void *alloc_low_pages(unsigned int num)
return __va(pfn << PAGE_SHIFT);
}
-/* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */
-#define INIT_PGT_BUF_SIZE (5 * PAGE_SIZE)
+/* need 3 4k pages for the initial PMD_SIZE mapping, 3 4k pages for 0-ISA_END_ADDRESS */
+#define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void __init early_alloc_pgt_buf(void)
{
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 9a1e6583910c..86c758de4b34 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
return err;
}
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg)
+{
+ unsigned long i;
+
+ for (i = 0; i < nr_pages; ++i)
+ if (pfn_valid(start_pfn + i) &&
+ !PageReserved(pfn_to_page(start_pfn + i)))
+ return 1;
+
+ WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
+
+ return 0;
+}
+
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
@@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
+ pfn = phys_addr >> PAGE_SHIFT;
last_pfn = last_addr >> PAGE_SHIFT;
- for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
- int is_ram = page_is_ram(pfn);
-
- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
- return NULL;
- WARN_ON_ONCE(is_ram);
- }
+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ __ioremap_check_ram) == 1)
+ return NULL;
/*
* Mappings have to be page-aligned
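
The rewritten RAM check delegates the per-pfn loop to walk_system_ram_range(), which calls the callback once per intersecting System RAM block and stops at the first nonzero return; when no RAM intersects the range at all the walker returns -1, which is why the caller above compares against 1 rather than just nonzero. A toy rendition of that contract, with walk_ram() as an assumed stand-in for the real walker:

/* Sketch of the walker/callback contract __ioremap_check_ram relies on. */
#include <stdio.h>

static int check_ram(unsigned long start_pfn, unsigned long nr_pages,
                     void *arg)
{
        unsigned long bad = *(unsigned long *)arg;

        for (unsigned long i = 0; i < nr_pages; i++)
                if (start_pfn + i == bad)
                        return 1;       /* usable RAM page: refuse ioremap */
        return 0;
}

static int walk_ram(unsigned long pfn, unsigned long n, void *arg,
                    int (*fn)(unsigned long, unsigned long, void *))
{
        /* toy stand-in: treats the whole range as one System RAM chunk;
         * the real walker returns -1 if no RAM intersects the range */
        return fn(pfn, n, arg);
}

int main(void)
{
        unsigned long bad = 0x120;

        printf("%d\n", walk_ram(0x100, 0x40, &bad, check_ram)); /* 1 */
        printf("%d\n", walk_ram(0x200, 0x40, &bad, check_ram)); /* 0 */
        return 0;
}
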
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 845df6835f9f..5c1ae28825cd 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -112,12 +112,14 @@ static unsigned long mmap_legacy_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ mm->mmap_legacy_base = mmap_legacy_base();
+ mm->mmap_base = mmap_base();
+
if (mmap_is_legacy()) {
- mm->mmap_base = mmap_legacy_base();
+ mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
- mm->mmap_base = mmap_base();
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 877b9a1b2152..01495755701b 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
push %r9; \
push SKBDATA; \
/* rsi already has offset */ \
- mov $SIZE,%ecx; /* size */ \
+ mov $SIZE,%edx; /* size */ \
call bpf_internal_load_pointer_neg_helper; \
test %rax,%rax; \
pop SKBDATA; \
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index f66b54086ce5..0c966fecfb8c 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -324,15 +324,21 @@ void bpf_jit_compile(struct sk_filter *fp)
EMIT2(0x89, 0xd0); /* mov %edx,%eax */
break;
case BPF_S_ALU_MOD_K: /* A %= K; */
+ if (K == 1) {
+ CLEAR_A();
+ break;
+ }
EMIT2(0x31, 0xd2); /* xor %edx,%edx */
EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
EMIT2(0xf7, 0xf1); /* div %ecx */
EMIT2(0x89, 0xd0); /* mov %edx,%eax */
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
- EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
- EMIT(K, 4);
- EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
+ EMIT2(0xf7, 0xf1); /* div %ecx */
break;
case BPF_S_ALU_AND_X:
seen |= SEEN_XREG;
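
The replacement of the reciprocal-divide sequence is easiest to read as the semantics the JIT now emits. Below is a C rendition of the two K-immediate cases; note the K == 1 fast paths emit no division at all, so A passes through unchanged (or is simply cleared for the modulo case).

/* C rendition of what the JITed code now computes for the two
 * K-immediate ALU cases; comments name the instructions emitted. */
#include <stdio.h>

static unsigned int alu_div_k(unsigned int A, unsigned int K)
{
        if (K == 1)             /* JIT emits nothing */
                return A;
        return A / K;           /* xor %edx,%edx; mov $K,%ecx; div %ecx */
}

static unsigned int alu_mod_k(unsigned int A, unsigned int K)
{
        if (K == 1)             /* JIT just clears A */
                return 0;
        return A % K;           /* same div; result taken from %edx */
}

int main(void)
{
        printf("%u %u\n", alu_div_k(100, 7), alu_mod_k(100, 7)); /* 14 2 */
        printf("%u %u\n", alu_div_k(100, 1), alu_mod_k(100, 1)); /* 100 0 */
        return 0;
}
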
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 94919e307f8e..2883f0840201 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -162,6 +162,10 @@ pcibios_align_resource(void *data, const struct resource *res,
return start;
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
+ } else if (res->flags & IORESOURCE_MEM) {
+ /* The low 1MB range is reserved for ISA cards */
+ if (start < BIOS_END)
+ start = BIOS_END;
}
return start;
}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index d2fbcedcf6ea..816e940b3998 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -60,34 +60,19 @@
static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
-struct efi __read_mostly efi = {
- .mps = EFI_INVALID_TABLE_ADDR,
- .acpi = EFI_INVALID_TABLE_ADDR,
- .acpi20 = EFI_INVALID_TABLE_ADDR,
- .smbios = EFI_INVALID_TABLE_ADDR,
- .sal_systab = EFI_INVALID_TABLE_ADDR,
- .boot_info = EFI_INVALID_TABLE_ADDR,
- .hcdp = EFI_INVALID_TABLE_ADDR,
- .uga = EFI_INVALID_TABLE_ADDR,
- .uv_systab = EFI_INVALID_TABLE_ADDR,
-};
-EXPORT_SYMBOL(efi);
-
struct efi_memory_map memmap;
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
-unsigned long x86_efi_facility;
+static __initdata efi_config_table_type_t arch_tables[] = {
+#ifdef CONFIG_X86_UV
+ {UV_SYSTEM_TABLE_GUID, "UVsystab", &efi.uv_systab},
+#endif
+ {NULL_GUID, NULL, 0},
+};
-/*
- * Returns 1 if 'facility' is enabled, 0 otherwise.
- */
-int efi_enabled(int facility)
-{
- return test_bit(facility, &x86_efi_facility) != 0;
-}
-EXPORT_SYMBOL(efi_enabled);
+u64 efi_setup; /* efi setup_data physical address */
static bool __initdata disable_runtime = false;
static int __init setup_noefi(char *arg)
@@ -397,6 +382,8 @@ int __init efi_memblock_x86_reserve_range(void)
memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
+ efi.memmap = &memmap;
+
return 0;
}
@@ -438,7 +425,7 @@ void __init efi_reserve_boot_services(void)
* - Not within any part of the kernel
* - Not the bios reserved area
*/
- if ((start+size >= __pa_symbol(_text)
+ if ((start + size > __pa_symbol(_text)
&& start <= __pa_symbol(_end)) ||
!e820_all_mapped(start, start+size, E820_RAM) ||
memblock_is_region_reserved(start, size)) {
@@ -454,7 +441,7 @@ void __init efi_reserve_boot_services(void)
void __init efi_unmap_memmap(void)
{
- clear_bit(EFI_MEMMAP, &x86_efi_facility);
+ clear_bit(EFI_MEMMAP, &efi.flags);
if (memmap.map) {
early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
memmap.map = NULL;
@@ -576,80 +563,6 @@ static int __init efi_systab_init(void *phys)
return 0;
}
-static int __init efi_config_init(u64 tables, int nr_tables)
-{
- void *config_tables, *tablep;
- int i, sz;
-
- if (efi_enabled(EFI_64BIT))
- sz = sizeof(efi_config_table_64_t);
- else
- sz = sizeof(efi_config_table_32_t);
-
- /*
- * Let's see what config tables the firmware passed to us.
- */
- config_tables = early_ioremap(tables, nr_tables * sz);
- if (config_tables == NULL) {
- pr_err("Could not map Configuration table!\n");
- return -ENOMEM;
- }
-
- tablep = config_tables;
- pr_info("");
- for (i = 0; i < efi.systab->nr_tables; i++) {
- efi_guid_t guid;
- unsigned long table;
-
- if (efi_enabled(EFI_64BIT)) {
- u64 table64;
- guid = ((efi_config_table_64_t *)tablep)->guid;
- table64 = ((efi_config_table_64_t *)tablep)->table;
- table = table64;
-#ifdef CONFIG_X86_32
- if (table64 >> 32) {
- pr_cont("\n");
- pr_err("Table located above 4GB, disabling EFI.\n");
- early_iounmap(config_tables,
- efi.systab->nr_tables * sz);
- return -EINVAL;
- }
-#endif
- } else {
- guid = ((efi_config_table_32_t *)tablep)->guid;
- table = ((efi_config_table_32_t *)tablep)->table;
- }
- if (!efi_guidcmp(guid, MPS_TABLE_GUID)) {
- efi.mps = table;
- pr_cont(" MPS=0x%lx ", table);
- } else if (!efi_guidcmp(guid, ACPI_20_TABLE_GUID)) {
- efi.acpi20 = table;
- pr_cont(" ACPI 2.0=0x%lx ", table);
- } else if (!efi_guidcmp(guid, ACPI_TABLE_GUID)) {
- efi.acpi = table;
- pr_cont(" ACPI=0x%lx ", table);
- } else if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID)) {
- efi.smbios = table;
- pr_cont(" SMBIOS=0x%lx ", table);
-#ifdef CONFIG_X86_UV
- } else if (!efi_guidcmp(guid, UV_SYSTEM_TABLE_GUID)) {
- efi.uv_systab = table;
- pr_cont(" UVsystab=0x%lx ", table);
-#endif
- } else if (!efi_guidcmp(guid, HCDP_TABLE_GUID)) {
- efi.hcdp = table;
- pr_cont(" HCDP=0x%lx ", table);
- } else if (!efi_guidcmp(guid, UGA_IO_PROTOCOL_GUID)) {
- efi.uga = table;
- pr_cont(" UGA=0x%lx ", table);
- }
- tablep += sz;
- }
- pr_cont("\n");
- early_iounmap(config_tables, efi.systab->nr_tables * sz);
- return 0;
-}
-
static int __init efi_runtime_init(void)
{
efi_runtime_services_t *runtime;
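The removed efi_config_init() above is replaced by a generic helper that takes the NULL_GUID-terminated arch_tables array added earlier in this patch. A sketch of the matching loop's shape, assuming it follows the generic drivers/firmware/efi implementation (kernel-context code, simplified):

/* One match rule per table the architecture cares about. */
typedef struct {
	efi_guid_t guid;	/* table GUID to look for */
	const char *name;	/* printed when matched */
	unsigned long *ptr;	/* where to store the table address */
} efi_config_table_type_t;

static void match_config_table(efi_guid_t *guid, unsigned long table,
			       efi_config_table_type_t *table_types)
{
	efi_config_table_type_t *t;

	for (t = table_types; efi_guidcmp(t->guid, NULL_GUID); t++) {
		if (!efi_guidcmp(*guid, t->guid)) {
			*t->ptr = table;	/* e.g. &efi.uv_systab */
			pr_cont(" %s=0x%lx ", t->name, table);
		}
	}
}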
@@ -725,7 +638,11 @@ void __init efi_init(void)
if (efi_systab_init(efi_phys.systab))
return;
- set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
+ efi.config_table = (unsigned long)efi.systab->tables;
+ efi.fw_vendor = (unsigned long)efi.systab->fw_vendor;
+ efi.runtime = (unsigned long)efi.systab->runtime;
/*
* Show what we know for posterity
@@ -743,10 +660,10 @@ void __init efi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
- if (efi_config_init(efi.systab->tables, efi.systab->nr_tables))
+ if (efi_config_init(arch_tables))
return;
- set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
/*
* Note: We currently don't support runtime services on an EFI
@@ -758,20 +675,13 @@ void __init efi_init(void)
else {
if (disable_runtime || efi_runtime_init())
return;
- set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}
if (efi_memmap_init())
return;
- set_bit(EFI_MEMMAP, &x86_efi_facility);
-
-#ifdef CONFIG_X86_32
- if (efi_is_native()) {
- x86_platform.get_wallclock = efi_get_time;
- x86_platform.set_wallclock = efi_set_rtc_mmss;
- }
-#endif
+ set_bit(EFI_MEMMAP, &efi.flags);
#if EFI_DEBUG
print_efi_memmap();
@@ -814,34 +724,6 @@ static void __init runtime_code_page_mkexec(void)
}
}
-/*
- * We can't ioremap data in EFI boot services RAM, because we've already mapped
- * it as RAM. So, look it up in the existing EFI memory map instead. Only
- * callable after efi_enter_virtual_mode and before efi_free_boot_services.
- */
-void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
-{
- void *p;
- if (WARN_ON(!memmap.map))
- return NULL;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- efi_memory_desc_t *md = p;
- u64 size = md->num_pages << EFI_PAGE_SHIFT;
- u64 end = md->phys_addr + size;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
- continue;
- if (!md->virt_addr)
- continue;
- if (phys_addr >= md->phys_addr && phys_addr < end) {
- phys_addr += md->virt_addr - md->phys_addr;
- return (__force void __iomem *)(unsigned long)phys_addr;
- }
- }
- return NULL;
-}
-
void efi_memory_uc(u64 addr, unsigned long size)
{
unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
@@ -910,10 +792,13 @@ void __init efi_enter_virtual_mode(void)
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
- continue;
+ if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+ continue;
+ }
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
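With x86_efi_facility gone, efi_enabled() moves out of arch code; the set_bit()/clear_bit() calls in this patch now operate on efi.flags. A sketch of the generic test, assuming it matches the include/linux/efi.h definition after the conversion:

static inline bool efi_enabled(int feature)
{
	return test_bit(feature, &efi.flags) != 0;	/* e.g. EFI_RUNTIME_SERVICES */
}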
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 88692871823f..9cac82588cbc 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/../../boot/code16gcc.h \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ -mno-mmx -mno-sse \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-unit-at-a-time)) \
$(call cc-option, -fno-stack-protector) \
$(call cc-option, -mpreferred-stack-boundary=2)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index aabfb8380a1c..de6d048d0305 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -357,3 +357,8 @@
348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
349 i386 kcmp sys_kcmp
350 i386 finit_module sys_finit_module
+# Backporting seccomp, skip a few ...
+# 351 i386 sched_setattr sys_sched_setattr
+# 352 i386 sched_getattr sys_sched_getattr
+# 353 i386 renameat2 sys_renameat2
+354 i386 seccomp sys_seccomp
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 38ae65dfd14f..960cbb025cab 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -212,10 +212,10 @@
203 common sched_setaffinity sys_sched_setaffinity
204 common sched_getaffinity sys_sched_getaffinity
205 64 set_thread_area
-206 common io_setup sys_io_setup
+206 64 io_setup sys_io_setup
207 common io_destroy sys_io_destroy
208 common io_getevents sys_io_getevents
-209 common io_submit sys_io_submit
+209 64 io_submit sys_io_submit
210 common io_cancel sys_io_cancel
211 64 get_thread_area
212 common lookup_dcookie sys_lookup_dcookie
@@ -320,6 +320,11 @@
311 64 process_vm_writev sys_process_vm_writev
312 common kcmp sys_kcmp
313 common finit_module sys_finit_module
+# Backporting seccomp, skip a few ...
+# 314 common sched_setattr sys_sched_setattr
+# 315 common sched_getattr sys_sched_getattr
+# 316 common renameat2 sys_renameat2
+317 common seccomp sys_seccomp
#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -356,3 +361,5 @@
540 x32 process_vm_writev compat_sys_process_vm_writev
541 x32 setsockopt compat_sys_setsockopt
542 x32 getsockopt compat_sys_getsockopt
+543 x32 io_setup compat_sys_io_setup
+544 x32 io_submit compat_sys_io_submit
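With the backported numbers above, userspace can reach the new syscall even on a libc without a seccomp() wrapper. A minimal sketch, assuming the numbers in these tables (354 on i386, 317 on x86-64) and SECCOMP_SET_MODE_STRICT == 0:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_seccomp
# ifdef __x86_64__
#  define __NR_seccomp 317	/* from syscall_64.tbl above */
# else
#  define __NR_seccomp 354	/* from syscall_32.tbl above */
# endif
#endif

#define SECCOMP_SET_MODE_STRICT 0

int main(void)
{
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_STRICT, 0, NULL) < 0)
		perror("seccomp");
	/* in strict mode only read/write/_exit/sigreturn remain usable */
	return 0;
}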
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 95fb2aa5927e..156344448d19 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -878,7 +878,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
- int ret = 0;
pfn = page_to_pfn(page);
if (!PageHighMem(page)) {
@@ -925,8 +924,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
* frontend pages while they are being shared with the backend,
* because mfn_to_pfn (that ends up being called by GUPF) will
* return the backend pfn rather than the frontend pfn. */
- ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
- if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (get_phys_to_machine(pfn) == mfn)
set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
return 0;
@@ -941,7 +940,6 @@ int m2p_remove_override(struct page *page,
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
- int ret = 0;
pfn = page_to_pfn(page);
mfn = get_phys_to_machine(pfn);
@@ -1019,8 +1017,8 @@ int m2p_remove_override(struct page *page,
* the original pfn causes mfn_to_pfn(mfn) to return the frontend
* pfn again. */
mfn &= ~FOREIGN_FRAME_BIT;
- ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
- if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
m2p_find_override(mfn) == NULL)
set_phys_to_machine(pfn, mfn);
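Both hunks replace an open-coded __get_user() on the M2P table with a helper. A sketch of its likely shape, assuming it is modelled on the mfn_to_pfn_no_overrides() added to asm/xen/page.h around this change (details may differ):

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
	unsigned long pfn;

	if (unlikely(mfn >= machine_to_phys_nr))
		return ~0UL;

	/* The M2P read can fault (e.g. device space beyond end of RAM);
	 * __get_user() keeps the access safe, and the callers' follow-up
	 * get_phys_to_machine() check filters out garbage results. */
	if (__get_user(pfn, &machine_to_phys_mapping[mfn]) < 0)
		return ~0UL;

	return pfn;
}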
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 94eac5c85cdc..0a9fb7a0b452 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
e820_add_region(start, end - start, type);
}
+static void __init xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+ struct e820entry *entry;
+ unsigned int i;
+
+ for (i = 0, entry = list; i < map_size; i++, entry++) {
+ if (entry->type == E820_UNUSABLE)
+ entry->type = E820_RAM;
+ }
+}
+
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
}
BUG_ON(rc);
+ /*
+ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+ * regions, so if we're using the machine memory map leave the
+ * region as RAM as it is in the pseudo-physical map.
+ *
+ * UNUSABLE regions in domUs are not handled and will need
+ * a patch in the future.
+ */
+ if (xen_initial_domain())
+ xen_ignore_unusable(map, memmap.nr_entries);
+
/* Make sure the Xen-supplied memory map is well-ordered. */
sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d99cae8147d1..570c9a5c4d3f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -245,6 +245,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
old memory can be recycled */
make_lowmem_page_readwrite(xen_initial_gdt);
+#ifdef CONFIG_X86_32
+ /*
+	 * Xen starts us with XEN_FLAT_RING1_DS, but Linux code
+ * expects __USER_DS
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+#endif
+
xen_filter_cpu_maps();
xen_setup_vcpu_info_placement();
}
@@ -667,8 +676,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc;
- rc = native_cpu_up(cpu, tidle);
- WARN_ON (xen_smp_intr_init(cpu));
+ /*
+ * xen_smp_intr_init() needs to run before native_cpu_up()
+ * so that IPI vectors are set up on the booting CPU before
+ * it is marked online in native_cpu_up().
+ */
+ rc = xen_smp_intr_init(cpu);
+ WARN_ON(rc);
+ if (!rc)
+ rc = native_cpu_up(cpu, tidle);
return rc;
}
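The invariant behind the reordering, as a sketch with hypothetical helper names (the real calls are xen_smp_intr_init() and native_cpu_up() above): a CPU must not become visible as online before its IPI vectors exist, or another CPU may send it an interrupt it cannot receive.

static int bring_up_cpu(unsigned int cpu)
{
	int rc;

	rc = setup_ipi_vectors(cpu);	/* must come first */
	if (rc)
		return rc;

	/* only now may the CPU be marked online and IPI-targetable */
	return boot_and_mark_online(cpu);
}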
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 3d88bfdf9e1c..13e8935e2eab 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -36,9 +36,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
-/* unused ns of stolen and blocked time */
+/* unused ns of stolen time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);
-static DEFINE_PER_CPU(u64, xen_residual_blocked);
/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
@@ -115,7 +114,7 @@ static void do_stolen_accounting(void)
{
struct vcpu_runstate_info state;
struct vcpu_runstate_info *snap;
- s64 blocked, runnable, offline, stolen;
+ s64 runnable, offline, stolen;
cputime_t ticks;
get_runstate_snapshot(&state);
@@ -125,7 +124,6 @@ static void do_stolen_accounting(void)
snap = &__get_cpu_var(xen_runstate_snapshot);
/* work out how much time the VCPU has not been runn*ing* */
- blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
@@ -141,17 +139,6 @@ static void do_stolen_accounting(void)
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
__this_cpu_write(xen_residual_stolen, stolen);
account_steal_ticks(ticks);
-
- /* Add the appropriate number of ticks of blocked time,
- including any left-overs from last time. */
- blocked += __this_cpu_read(xen_residual_blocked);
-
- if (blocked < 0)
- blocked = 0;
-
- ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
- __this_cpu_write(xen_residual_blocked, blocked);
- account_idle_ticks(ticks);
}
/* Get the TSC speed from Xen */
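For the stolen-time path that survives, the residual logic converts nanoseconds to ticks without losing the sub-tick remainder. A userspace sketch, assuming HZ=100 (the kernel uses iter_div_u64_rem() and a per-CPU residual rather than these globals):

#include <stdint.h>
#include <stdio.h>

#define NS_PER_TICK (1000000000ULL / 100)	/* assumed HZ=100 */

static uint64_t residual;	/* per-CPU in the kernel */

static uint64_t ns_to_ticks(uint64_t delta_ns)
{
	uint64_t total = delta_ns + residual;

	residual = total % NS_PER_TICK;	/* carry sub-tick left-over forward */
	return total / NS_PER_TICK;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)ns_to_ticks(25000000)); /* 2 (5ms carried) */
	printf("%llu\n", (unsigned long long)ns_to_ticks(25000000)); /* 3 (carry consumed) */
	return 0;
}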
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
deleted file mode 100644
index 36dc7a684397..000000000000
--- a/arch/xtensa/include/asm/ftrace.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/xtensa/include/asm/ftrace.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2013 Tensilica Inc.
- */
-#ifndef _XTENSA_FTRACE_H
-#define _XTENSA_FTRACE_H
-
-#include <asm/processor.h>
-
-#define HAVE_ARCH_CALLER_ADDR
-#define CALLER_ADDR0 ({ unsigned long a0, a1; \
- __asm__ __volatile__ ( \
- "mov %0, a0\n" \
- "mov %1, a1\n" \
- : "=r"(a0), "=r"(a1) : : ); \
- MAKE_PC_FROM_RA(a0, a1); })
-#ifdef CONFIG_FRAME_POINTER
-extern unsigned long return_address(unsigned level);
-#define CALLER_ADDR1 return_address(1)
-#define CALLER_ADDR2 return_address(2)
-#define CALLER_ADDR3 return_address(3)
-#else
-#define CALLER_ADDR1 (0)
-#define CALLER_ADDR2 (0)
-#define CALLER_ADDR3 (0)
-#endif
-
-#endif /* _XTENSA_FTRACE_H */
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 917488a0ab00..f2faa58f9a43 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -22,25 +22,37 @@ extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
static inline void spill_registers(void)
{
-
+#if XCHAL_NUM_AREGS > 16
__asm__ __volatile__ (
- "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
- "mov a12, a0\n\t"
- "rsr a13, sar\n\t"
- "xsr a14, ps\n\t"
- "movi a0, _spill_registers\n\t"
- "rsync\n\t"
- "callx0 a0\n\t"
- "mov a0, a12\n\t"
- "wsr a13, sar\n\t"
- "wsr a14, ps\n\t"
- : :
-#if defined(CONFIG_FRAME_POINTER)
- : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
+ " call12 1f\n"
+ " _j 2f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+ " _entry a1, 48\n"
+ " addi a12, a0, 3\n"
+#if XCHAL_NUM_AREGS > 32
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
+ " _entry a1, 48\n"
+ " mov a12, a0\n"
+ " .endr\n"
+#endif
+ " _entry a1, 48\n"
+#if XCHAL_NUM_AREGS % 12 == 0
+ " mov a8, a8\n"
+#elif XCHAL_NUM_AREGS % 12 == 4
+ " mov a12, a12\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+ " mov a4, a4\n"
+#endif
+ " retw\n"
+ "2:\n"
+ : : : "a12", "a13", "memory");
#else
- : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
+ __asm__ __volatile__ (
+ " mov a12, a12\n"
+ : : : "memory");
#endif
- "memory");
}
#endif /* _XTENSA_TRAPS_H */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 5082507d5631..aa7f9add7d77 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1912,6 +1912,43 @@ ENTRY(system_call)
ENDPROC(system_call)
+/*
+ * Macro to spill live registers on the kernel stack.
+ *
+ * Entry condition: ps.woe is set, ps.excm is cleared
+ * Exit condition: windowstart has single bit set
+ * May clobber: a12, a13
+ */
+ .macro spill_registers_kernel
+
+#if XCHAL_NUM_AREGS > 16
+ call12 1f
+ _j 2f
+ retw
+ .align 4
+1:
+ _entry a1, 48
+ addi a12, a0, 3
+#if XCHAL_NUM_AREGS > 32
+ .rept (XCHAL_NUM_AREGS - 32) / 12
+ _entry a1, 48
+ mov a12, a0
+ .endr
+#endif
+ _entry a1, 48
+#if XCHAL_NUM_AREGS % 12 == 0
+ mov a8, a8
+#elif XCHAL_NUM_AREGS % 12 == 4
+ mov a12, a12
+#elif XCHAL_NUM_AREGS % 12 == 8
+ mov a4, a4
+#endif
+ retw
+2:
+#else
+ mov a12, a12
+#endif
+ .endm
/*
* Task switch.
@@ -1924,21 +1961,20 @@ ENTRY(_switch_to)
entry a1, 16
- mov a12, a2 # preserve 'prev' (a2)
- mov a13, a3 # and 'next' (a3)
+ mov a10, a2 # preserve 'prev' (a2)
+ mov a11, a3 # and 'next' (a3)
l32i a4, a2, TASK_THREAD_INFO
l32i a5, a3, TASK_THREAD_INFO
- save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+ save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
- s32i a0, a12, THREAD_RA # save return address
- s32i a1, a12, THREAD_SP # save stack pointer
+ s32i a0, a10, THREAD_RA # save return address
+ s32i a1, a10, THREAD_SP # save stack pointer
/* Disable ints while we manipulate the stack pointer. */
- movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
- xsr a14, ps
+ rsil a14, LOCKLEVEL
rsr a3, excsave1
rsync
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
@@ -1953,7 +1989,7 @@ ENTRY(_switch_to)
/* Flush register file. */
- call0 _spill_registers # destroys a3, a4, and SAR
+ spill_registers_kernel
/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
@@ -1969,13 +2005,13 @@ ENTRY(_switch_to)
/* restore context of the task 'next' */
- l32i a0, a13, THREAD_RA # restore return address
- l32i a1, a13, THREAD_SP # restore stack pointer
+ l32i a0, a11, THREAD_RA # restore return address
+ l32i a1, a11, THREAD_SP # restore stack pointer
- load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+ load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
wsr a14, ps
- mov a2, a12 # return 'prev'
+ mov a2, a10 # return 'prev'
rsync
retw
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index ef12c0e6fa25..7d740ebbe198 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -68,6 +68,15 @@ _SetupMMU:
#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
initialize_mmu
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ rsr a2, excsave1
+ movi a3, 0x08000000
+ bgeu a2, a3, 1f
+ movi a3, 0xd0000000
+ add a2, a2, a3
+ wsr a2, excsave1
+1:
+#endif
#endif
.end no-absolute-literals
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 6dd25ecde3f5..ea9afb6904d7 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -152,8 +152,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
{
meminfo_t* mi;
mi = (meminfo_t*)(tag->data);
- initrd_start = (void*)(mi->start);
- initrd_end = (void*)(mi->end);
+ initrd_start = __va(mi->start);
+ initrd_end = __va(mi->end);
return 0;
}
@@ -164,14 +164,13 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
static int __init parse_tag_fdt(const bp_tag_t *tag)
{
- dtb_start = (void *)(tag->data[0]);
+ dtb_start = __va(tag->data[0]);
return 0;
}
__tagtable(BP_TAG_FDT, parse_tag_fdt);
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (void *)__va(start);
initrd_end = (void *)__va(end);
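The bug fixed in the two hunks above: the boot tags carry physical addresses, which were stored as if they were already kernel pointers. A minimal sketch of the distinction, assuming a simple linear kernel map with an illustrative base (xtensa's real __va() lives in asm/page.h):

#define PAGE_OFFSET	0xd0000000UL	/* assumed KSEG base, for illustration */

/* Turn a physical address from the bootloader into a usable pointer. */
#define __va(pa)	((void *)((unsigned long)(pa) + PAGE_OFFSET))

void *initrd_start;

static void parse_initrd_tag(unsigned long phys_start)
{
	initrd_start = __va(phys_start);	/* correct */
	/* initrd_start = (void *)phys_start;	   old bug: raw physical */
}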
@@ -223,6 +222,43 @@ static int __init parse_bootparam(const bp_tag_t* tag)
}
#ifdef CONFIG_OF
+bool __initdata dt_memory_scan = false;
+
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
+EXPORT_SYMBOL(xtensa_kio_paddr);
+
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ const __be32 *ranges;
+ int len;
+
+ if (depth > 1)
+ return 0;
+
+ if (!of_flat_dt_is_compatible(node, "simple-bus"))
+ return 0;
+
+ ranges = of_get_flat_dt_prop(node, "ranges", &len);
+ if (!ranges)
+ return 1;
+ if (len == 0)
+ return 1;
+
+ xtensa_kio_paddr = of_read_ulong(ranges+1, 1);
+ /* round down to nearest 256MB boundary */
+ xtensa_kio_paddr &= 0xf0000000;
+
+ return 1;
+}
+#else
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ return 1;
+}
+#endif
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 718eca1850bd..98b67d5f1514 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -341,7 +341,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sp = regs->areg[1];
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
sp = current->sas_ss_sp + current->sas_ss_size;
}
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 96ef8eeb064e..8a95204cbdae 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -195,7 +195,7 @@ void platform_calibrate_ccount(void)
* Ethernet -- OpenCores Ethernet MAC (ethoc driver)
*/
-static struct resource ethoc_res[] __initdata = {
+static struct resource ethoc_res[] = {
[0] = { /* register space */
.start = OETH_REGS_PADDR,
.end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
@@ -213,7 +213,7 @@ static struct resource ethoc_res[] __initdata = {
},
};
-static struct ethoc_platform_data ethoc_pdata __initdata = {
+static struct ethoc_platform_data ethoc_pdata = {
/*
* The MAC address for these boards is 00:50:c2:13:6f:xx.
* The last byte (here as zero) is read from the DIP switches on the
@@ -223,7 +223,7 @@ static struct ethoc_platform_data ethoc_pdata __initdata = {
.phy_id = -1,
};
-static struct platform_device ethoc_device __initdata = {
+static struct platform_device ethoc_device = {
.name = "ethoc",
.id = -1,
.num_resources = ARRAY_SIZE(ethoc_res),
@@ -237,13 +237,13 @@ static struct platform_device ethoc_device __initdata = {
* UART
*/
-static struct resource serial_resource __initdata = {
+static struct resource serial_resource = {
.start = DUART16552_PADDR,
.end = DUART16552_PADDR + 0x1f,
.flags = IORESOURCE_MEM,
};
-static struct plat_serial8250_port serial_platform_data[] __initdata = {
+static struct plat_serial8250_port serial_platform_data[] = {
[0] = {
.mapbase = DUART16552_PADDR,
.irq = DUART16552_INTNUM,
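All the __initdata drops in this file follow one rule: data handed to platform_device_register() outlives initcalls, so it must not sit in .init.data, which is freed at boot. A sketch of the correct pattern with hypothetical names:

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource demo_res = {	/* NOT __initdata: referenced forever */
	.start = 0xfd050000,
	.end   = 0xfd05001f,
	.flags = IORESOURCE_MEM,
};

static struct platform_device demo_dev = {
	.name          = "demo",
	.id            = -1,
	.num_resources = 1,
	.resource      = &demo_res,
};

static int __init demo_init(void)
{
	/* The driver core keeps &demo_dev; only this function may be __init. */
	return platform_device_register(&demo_dev);
}
device_initcall(demo_init);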
@@ -256,7 +256,7 @@ static struct plat_serial8250_port serial_platform_data[] __initdata = {
{ },
};
-static struct platform_device xtavnet_uart __initdata = {
+static struct platform_device xtavnet_uart = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {