author     Kevin Hilman <khilman@linaro.org>    2015-11-04 10:01:20 -0800
committer  Kevin Hilman <khilman@linaro.org>    2015-11-04 10:01:20 -0800
commit     49bd04cb11e57c4cd4d2c00feca898537dd6db1f (patch)
tree       846c594b2b1d2f203c24ff32569b6c615822a5dc
parent     260409578ecbf7ce1725b85f9c082312bff808c0 (diff)
parent     12ba397b828b7e7f5d28a73e9a2ee14470b7edc7 (diff)
download   linux-linaro-stable-v4.1/topic/rt.tar.gz

Merge tag 'v4.1.12-rt12' of git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel into v4.1/topic/rt
(branch: v4.1/topic/rt, tag: v4.1.12-rt12)
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-loopback2
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-sourcesink2
-rw-r--r--Documentation/HOWTO28
-rw-r--r--Documentation/devicetree/bindings/clock/keystone-pll.txt8
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt4
-rw-r--r--Documentation/input/alps.txt6
-rw-r--r--Documentation/usb/gadget-testing.txt7
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/Makefile8
-rw-r--r--arch/arm/boot/compressed/decompress.c2
-rw-r--r--arch/arm/boot/dts/dra7.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos3250-rinato.dts2
-rw-r--r--arch/arm/boot/dts/imx25-pdk.dts5
-rw-r--r--arch/arm/boot/dts/imx35.dtsi8
-rw-r--r--arch/arm/boot/dts/imx51-apf51dev.dts2
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts4
-rw-r--r--arch/arm/boot/dts/imx53-m53evk.dts4
-rw-r--r--arch/arm/boot/dts/imx53-qsb-common.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts4
-rw-r--r--arch/arm/boot/dts/imx53-tqma53.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-tx53.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-voipac-bsb.dts4
-rw-r--r--arch/arm/boot/dts/imx6qdl-rex.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi8
-rw-r--r--arch/arm/boot/dts/k2e-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2hk-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2l-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi3
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts2
-rw-r--r--arch/arm/boot/dts/omap4.dtsi3
-rw-r--r--arch/arm/boot/dts/omap5-uevm.dts4
-rw-r--r--arch/arm/boot/dts/omap5.dtsi3
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi2
-rw-r--r--arch/arm/kernel/kgdb.c8
-rw-r--r--arch/arm/kernel/signal.c15
-rw-r--r--arch/arm/kernel/smp.c5
-rw-r--r--arch/arm/kvm/arm.c2
-rw-r--r--arch/arm/kvm/interrupts_head.S6
-rw-r--r--arch/arm/kvm/mmu.c6
-rw-r--r--arch/arm/mach-bcm/Makefile2
-rw-r--r--arch/arm/mach-bcm/brcmstb.h19
-rw-r--r--arch/arm/mach-bcm/headsmp-brcmstb.S33
-rw-r--r--arch/arm/mach-bcm/platsmp-brcmstb.c4
-rw-r--r--arch/arm/mach-berlin/headsmp.S6
-rw-r--r--arch/arm/mach-berlin/platsmp.c3
-rw-r--r--arch/arm/mach-exynos/mcpm-exynos.c27
-rw-r--r--arch/arm/mach-exynos/regs-pmu.h6
-rw-r--r--arch/arm/mach-hisi/Makefile2
-rw-r--r--arch/arm/mach-hisi/core.h1
-rw-r--r--arch/arm/mach-hisi/headsmp.S16
-rw-r--r--arch/arm/mach-hisi/platsmp.c4
-rw-r--r--arch/arm/mach-imx/headsmp.S1
-rw-r--r--arch/arm/mach-mvebu/headsmp-a9.S1
-rw-r--r--arch/arm/mach-omap2/clockdomains7xx_data.c2
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c24
-rw-r--r--arch/arm/mach-orion5x/include/mach/irqs.h64
-rw-r--r--arch/arm/mach-orion5x/irq.c4
-rw-r--r--arch/arm/mach-prima2/headsmp.S1
-rw-r--r--arch/arm/mach-rockchip/core.h1
-rw-r--r--arch/arm/mach-rockchip/headsmp.S8
-rw-r--r--arch/arm/mach-rockchip/platsmp.c42
-rw-r--r--arch/arm/mach-shmobile/common.h1
-rw-r--r--arch/arm/mach-shmobile/headsmp-scu.S4
-rw-r--r--arch/arm/mach-shmobile/headsmp.S7
-rw-r--r--arch/arm/mach-shmobile/platsmp-apmu.c2
-rw-r--r--arch/arm/mach-socfpga/core.h1
-rw-r--r--arch/arm/mach-socfpga/headsmp.S5
-rw-r--r--arch/arm/mach-socfpga/platsmp.c2
-rw-r--r--arch/arm/mach-tegra/Makefile2
-rw-r--r--arch/arm/mach-tegra/headsmp.S12
-rw-r--r--arch/arm/mach-tegra/reset.c2
-rw-r--r--arch/arm/mach-tegra/reset.h1
-rw-r--r--arch/arm/mach-zynq/common.h2
-rw-r--r--arch/arm/mach-zynq/headsmp.S5
-rw-r--r--arch/arm/mach-zynq/platsmp.c5
-rw-r--r--arch/arm/mm/proc-v7.S16
-rw-r--r--arch/arm/vdso/Makefile18
-rw-r--r--arch/arm64/Kconfig22
-rw-r--r--arch/arm64/Makefile4
-rw-r--r--arch/arm64/include/asm/memory.h8
-rw-r--r--arch/arm64/include/asm/pgtable.h4
-rw-r--r--arch/arm64/kernel/debug-monitors.c21
-rw-r--r--arch/arm64/kernel/efi.c3
-rw-r--r--arch/arm64/kernel/entry-ftrace.S22
-rw-r--r--arch/arm64/kernel/fpsimd.c1
-rw-r--r--arch/arm64/kernel/head.S5
-rw-r--r--arch/arm64/kernel/insn.c6
-rw-r--r--arch/arm64/kernel/module.c2
-rw-r--r--arch/arm64/kernel/perf_event.c3
-rw-r--r--arch/arm64/kernel/signal32.c52
-rw-r--r--arch/arm64/kvm/hyp.S9
-rw-r--r--arch/arm64/kvm/inject_fault.c12
-rw-r--r--arch/arm64/mm/fault.c1
-rw-r--r--arch/m32r/boot/compressed/misc.c3
-rw-r--r--arch/m68k/include/asm/linkage.h30
-rw-r--r--arch/mips/ath79/setup.c1
-rw-r--r--arch/mips/boot/compressed/decompress.c4
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/dma-coherence.h10
-rw-r--r--arch/mips/include/asm/pgtable.h31
-rw-r--r--arch/mips/include/asm/stackframe.h25
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c5
-rw-r--r--arch/mips/kernel/relocate_kernel.S8
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/signal32.c2
-rw-r--r--arch/mips/kernel/traps.c13
-rw-r--r--arch/mips/kernel/unaligned.c2
-rw-r--r--arch/mips/lantiq/irq.c1
-rw-r--r--arch/mips/loongson/common/env.c3
-rw-r--r--arch/mips/math-emu/cp1emu.c24
-rw-r--r--arch/mips/mm/dma-default.c2
-rw-r--r--arch/mips/mti-malta/malta-time.c16
-rw-r--r--arch/mips/mti-sead3/sead3-time.c1
-rw-r--r--arch/mips/pistachio/time.c1
-rw-r--r--arch/mips/ralink/irq.c1
-rw-r--r--arch/parisc/kernel/irq.c8
-rw-r--r--arch/parisc/kernel/syscall.S2
-rw-r--r--arch/powerpc/boot/Makefile3
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h14
-rw-r--r--arch/powerpc/include/asm/rtas.h1
-rw-r--r--arch/powerpc/include/asm/switch_to.h1
-rw-r--r--arch/powerpc/kernel/eeh.c27
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/rtas.c17
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/kvm/book3s.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c18
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S2
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c3
-rw-r--r--arch/powerpc/platforms/powernv/pci.c5
-rw-r--r--arch/powerpc/platforms/pseries/ras.c3
-rw-r--r--arch/powerpc/platforms/pseries/setup.c5
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c5
-rw-r--r--arch/powerpc/sysdev/mpic_pasemi_msi.c6
-rw-r--r--arch/powerpc/sysdev/mpic_u3msi.c5
-rw-r--r--arch/powerpc/sysdev/ppc4xx_msi.c5
-rw-r--r--arch/s390/boot/compressed/Makefile2
-rw-r--r--arch/s390/boot/compressed/misc.c2
-rw-r--r--arch/s390/kernel/compat_signal.c27
-rw-r--r--arch/s390/kernel/setup.c15
-rw-r--r--arch/sh/boot/compressed/misc.c2
-rw-r--r--arch/sparc/crypto/aes_glue.c2
-rw-r--r--arch/sparc/crypto/camellia_glue.c1
-rw-r--r--arch/sparc/crypto/des_glue.c2
-rw-r--r--arch/sparc/include/asm/visasm.h16
-rw-r--r--arch/sparc/lib/NG4memcpy.S5
-rw-r--r--arch/sparc/lib/VISsave.S67
-rw-r--r--arch/sparc/lib/ksyms.c4
-rw-r--r--arch/tile/kernel/compat_signal.c2
-rw-r--r--arch/unicore32/boot/compressed/misc.c4
-rw-r--r--arch/x86/boot/compressed/misc.c3
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c1
-rw-r--r--arch/x86/include/asm/desc.h15
-rw-r--r--arch/x86/include/asm/mmu.h3
-rw-r--r--arch/x86/include/asm/mmu_context.h54
-rw-r--r--arch/x86/include/asm/preempt.h6
-rw-r--r--arch/x86/include/asm/sigcontext.h6
-rw-r--r--arch/x86/include/uapi/asm/sigcontext.h21
-rw-r--r--arch/x86/kernel/acpi/boot.c1
-rw-r--r--arch/x86/kernel/alternative.c5
-rw-r--r--arch/x86/kernel/apic/apic.c21
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c41
-rw-r--r--arch/x86/kernel/cpu/perf_event.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c5
-rw-r--r--arch/x86/kernel/crash.c7
-rw-r--r--arch/x86/kernel/entry_64.S300
-rw-r--r--arch/x86/kernel/ldt.c262
-rw-r--r--arch/x86/kernel/nmi.c123
-rw-r--r--arch/x86/kernel/paravirt.c16
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/process_64.c56
-rw-r--r--arch/x86/kernel/signal.c26
-rw-r--r--arch/x86/kernel/step.c8
-rw-r--r--arch/x86/kernel/tsc.c17
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu.c45
-rw-r--r--arch/x86/kvm/svm.c2
-rw-r--r--arch/x86/kvm/vmx.c2
-rw-r--r--arch/x86/kvm/x86.c2
-rw-r--r--arch/x86/math-emu/fpu_entry.c3
-rw-r--r--arch/x86/math-emu/fpu_system.h21
-rw-r--r--arch/x86/math-emu/get_address.c3
-rw-r--r--arch/x86/mm/init_32.c1
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/platform/efi/efi.c67
-rw-r--r--arch/x86/power/cpu.c3
-rw-r--r--arch/x86/xen/Kconfig4
-rw-r--r--arch/x86/xen/Makefile4
-rw-r--r--arch/x86/xen/enlighten.c63
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--arch/xtensa/include/asm/traps.h29
-rw-r--r--arch/xtensa/kernel/entry.S7
-rw-r--r--block/blk-mq-sysfs.c25
-rw-r--r--block/blk-settings.c4
-rw-r--r--crypto/ahash.c3
-rw-r--r--drivers/acpi/acpi_pnp.c1
-rw-r--r--drivers/acpi/pci_link.c16
-rw-r--r--drivers/ata/ahci.c13
-rw-r--r--drivers/ata/libata-core.c26
-rw-r--r--drivers/ata/libata-eh.c105
-rw-r--r--drivers/ata/libata-scsi.c21
-rw-r--r--drivers/ata/libata.h6
-rw-r--r--drivers/ata/pata_jmicron.c12
-rw-r--r--drivers/auxdisplay/ks0108.c1
-rw-r--r--drivers/base/cacheinfo.c10
-rw-r--r--drivers/base/devres.c4
-rw-r--r--drivers/base/node.c10
-rw-r--r--drivers/base/platform.c8
-rw-r--r--drivers/base/power/clock_ops.c4
-rw-r--r--drivers/base/property.c5
-rw-r--r--drivers/base/regmap/regcache-rbtree.c19
-rw-r--r--drivers/base/regmap/regmap-debugfs.c5
-rw-r--r--drivers/block/rbd.c23
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/block/zram/zcomp.c12
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/char/i8k.c18
-rw-r--r--drivers/clk/keystone/pll.c20
-rw-r--r--drivers/clk/pistachio/clk-pistachio.c19
-rw-r--r--drivers/clk/pistachio/clk-pll.c5
-rw-r--r--drivers/clk/pxa/clk-pxa25x.c2
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c2
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c4
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c2
-rw-r--r--drivers/clk/samsung/clk-exynos4.c2
-rw-r--r--drivers/clk/samsung/clk-s5pv210.c2
-rw-r--r--drivers/clk/ti/clk-3xxx.c2
-rw-r--r--drivers/clk/versatile/clk-sp810.c4
-rw-r--r--drivers/cpufreq/cpufreq-dt.c3
-rw-r--r--drivers/cpufreq/intel_pstate.c10
-rw-r--r--drivers/crypto/caam/caamhash.c7
-rw-r--r--drivers/crypto/ixp4xx_crypto.c1
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c6
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c7
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c17
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c70
-rw-r--r--drivers/crypto/nx/nx-sha256.c126
-rw-r--r--drivers/crypto/nx/nx-sha512.c129
-rw-r--r--drivers/crypto/nx/nx.c71
-rw-r--r--drivers/crypto/nx/nx.h16
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c24
-rw-r--r--drivers/crypto/vmx/aes.c3
-rw-r--r--drivers/crypto/vmx/aes_cbc.c3
-rw-r--r--drivers/crypto/vmx/aes_ctr.c3
-rw-r--r--drivers/crypto/vmx/ghash.c4
-rw-r--r--drivers/crypto/vmx/ghashp8-ppc.pl6
-rw-r--r--drivers/dma/at_xdmac.c7
-rw-r--r--drivers/dma/dw/core.c4
-rw-r--r--drivers/dma/pl330.c3
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c88
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c9
-rw-r--r--drivers/gpu/drm/drm_lock.c6
-rw-r--r--drivers/gpu/drm/drm_sysfs.c12
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c5
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c12
-rw-r--r--drivers/gpu/drm/i915/intel_display.c29
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c40
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c9
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c24
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c80
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c8
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c62
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c159
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c63
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/hid/hid-cp2112.c10
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-uclogic.c2
-rw-r--r--drivers/hid/usbhid/hid-core.c2
-rw-r--r--drivers/hwmon/nct6775.c16
-rw-r--r--drivers/hwmon/nct7904.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c33
-rw-r--r--drivers/i2c/busses/i2c-rcar.c7
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c8
-rw-r--r--drivers/iio/gyro/Kconfig3
-rw-r--r--drivers/iio/imu/adis16400_core.c2
-rw-r--r--drivers/iio/imu/adis16480.c39
-rw-r--r--drivers/iio/industrialio-buffer.c2
-rw-r--r--drivers/iio/industrialio-event.c2
-rw-r--r--drivers/infiniband/core/uverbs.h3
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c10
-rw-r--r--drivers/infiniband/core/uverbs_main.c43
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c6
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c7
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c5
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c12
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c8
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c47
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c12
-rw-r--r--drivers/input/evdev.c13
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c2
-rw-r--r--drivers/input/mouse/alps.c8
-rw-r--r--drivers/iommu/fsl_pamu.c26
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/io-pgtable-arm.c17
-rw-r--r--drivers/iommu/tegra-smmu.c9
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c24
-rw-r--r--drivers/irqchip/irq-crossbar.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c6
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c11
-rw-r--r--drivers/leds/led-class.c7
-rw-r--r--drivers/macintosh/windfarm_core.c2
-rw-r--r--drivers/md/bitmap.c3
-rw-r--r--drivers/md/dm-cache-policy-cleaner.c2
-rw-r--r--drivers/md/dm-crypt.c17
-rw-r--r--drivers/md/dm-raid.c3
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm.c33
-rw-r--r--drivers/md/md.c4
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c12
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c37
-rw-r--r--drivers/md/persistent-data/dm-btree.c7
-rw-r--r--drivers/md/raid0.c75
-rw-r--r--drivers/md/raid1.c10
-rw-r--r--drivers/md/raid10.c5
-rw-r--r--drivers/md/raid5.c34
-rw-r--r--drivers/md/raid5.h3
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c16
-rw-r--r--drivers/media/platform/omap3isp/isp.c4
-rw-r--r--drivers/media/rc/rc-main.c3
-rw-r--r--drivers/memory/tegra/tegra114.c1
-rw-r--r--drivers/memory/tegra/tegra124.c1
-rw-r--r--drivers/memory/tegra/tegra30.c1
-rw-r--r--drivers/mfd/arizona-core.c14
-rw-r--r--drivers/mfd/max77843.c2
-rw-r--r--drivers/misc/cxl/pci.c21
-rw-r--r--drivers/mmc/core/core.c6
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/host/dw_mmc.c109
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c50
-rw-r--r--drivers/mmc/host/sdhci-pci.c1
-rw-r--r--drivers/mmc/host/sdhci.c103
-rw-r--r--drivers/mmc/host/sdhci.h8
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c3
-rw-r--r--drivers/mtd/nand/sunxi_nand.c27
-rw-r--r--drivers/mtd/ubi/io.c5
-rw-r--r--drivers/mtd/ubi/vtbl.c1
-rw-r--r--drivers/mtd/ubi/wl.c1
-rw-r--r--drivers/net/bonding/bond_main.c21
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c24
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c96
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c24
-rw-r--r--drivers/net/dsa/bcm_sf2.c30
-rw-r--r--drivers/net/dsa/bcm_sf2.h4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c49
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/phy/fixed_phy.c8
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/usb/usbnet.c7
-rw-r--r--drivers/net/virtio_net.c3
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c1
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c22
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c8
-rw-r--r--drivers/net/wireless/rtlwifi/core.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/reg.h1
-rw-r--r--drivers/net/xen-netback/netback.c6
-rw-r--r--drivers/net/xen-netfront.c3
-rw-r--r--drivers/nfc/st21nfca/st21nfca.c11
-rw-r--r--drivers/of/address.c6
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/of_mdio.c19
-rw-r--r--drivers/parisc/lba_pci.c7
-rw-r--r--drivers/pci/access.c42
-rw-r--r--drivers/pci/bus.c2
-rw-r--r--drivers/pci/quirks.c46
-rw-r--r--drivers/pcmcia/sa1100_generic.c1
-rw-r--r--drivers/pcmcia/sa11xx_base.c3
-rw-r--r--drivers/phy/phy-twl4030-usb.c29
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx25.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91.c15
-rw-r--r--drivers/platform/x86/hp-wmi.c31
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/toshiba_acpi.c8
-rw-r--r--drivers/power/avs/Kconfig2
-rw-r--r--drivers/regulator/pbias-regulator.c5
-rw-r--r--drivers/rtc/rtc-abx80x.c2
-rw-r--r--drivers/rtc/rtc-s3c.c24
-rw-r--r--drivers/rtc/rtc-s5m.c10
-rw-r--r--drivers/s390/char/sclp_early.c1
-rw-r--r--drivers/scsi/3w-9xxx.c28
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/ipr.c30
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/libfc/fc_exch.c8
-rw-r--r--drivers/scsi/libfc/fc_fcp.c19
-rw-r--r--drivers/scsi/libiscsi.c25
-rw-r--r--drivers/scsi/scsi_error.c42
-rw-r--r--drivers/scsi/scsi_pm.c22
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/spi/spi-bcm2835.c28
-rw-r--r--drivers/spi/spi-bitbang-txrx.h4
-rw-r--r--drivers/spi/spi-dw-mmio.c3
-rw-r--r--drivers/spi/spi-dw.c4
-rw-r--r--drivers/spi/spi-dw.h35
-rw-r--r--drivers/spi/spi-img-spfi.c59
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-sh-msiof.c15
-rw-r--r--drivers/spi/spi-xilinx.c20
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c4
-rw-r--r--drivers/spi/spi.c3
-rw-r--r--drivers/spi/spidev.c3
-rw-r--r--drivers/staging/android/ion/ion.c6
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c16
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c70
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c2
-rw-r--r--drivers/staging/speakup/fakekey.c1
-rw-r--r--drivers/staging/vt6655/device_main.c5
-rw-r--r--drivers/target/iscsi/iscsi_target.c37
-rw-r--r--drivers/target/iscsi/iscsi_target.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c14
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c8
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c15
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h2
-rw-r--r--drivers/target/target_core_pr.c3
-rw-r--r--drivers/target/target_core_ua.c8
-rw-r--r--drivers/target/target_core_xcopy.c6
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/tty/n_tty.c15
-rw-r--r--drivers/tty/serial/8250/8250_core.c8
-rw-r--r--drivers/tty/serial/8250/8250_pci.c82
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c11
-rw-r--r--drivers/tty/serial/amba-pl011.c4
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/men_z135_uart.c9
-rw-r--r--drivers/tty/serial/samsung.c47
-rw-r--r--drivers/tty/serial/samsung.h1
-rw-r--r--drivers/tty/tty_io.c31
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c2
-rw-r--r--drivers/usb/chipidea/core.c13
-rw-r--r--drivers/usb/chipidea/host.c7
-rw-r--r--drivers/usb/chipidea/host.h6
-rw-r--r--drivers/usb/chipidea/udc.c84
-rw-r--r--drivers/usb/core/config.c5
-rw-r--r--drivers/usb/core/quirks.c13
-rw-r--r--drivers/usb/dwc3/ep0.c12
-rw-r--r--drivers/usb/gadget/function/f_uac2.c35
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/udc/udc-core.c1
-rw-r--r--drivers/usb/host/ehci-sysfs.c8
-rw-r--r--drivers/usb/host/xhci-mem.c14
-rw-r--r--drivers/usb/host/xhci-ring.c81
-rw-r--r--drivers/usb/host/xhci.c15
-rw-r--r--drivers/usb/misc/chaoskey.c2
-rw-r--r--drivers/usb/musb/musb_cppi41.c6
-rw-r--r--drivers/usb/musb/musb_dsps.c7
-rw-r--r--drivers/usb/phy/phy-generic.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h8
-rw-r--r--drivers/usb/serial/option.c26
-rw-r--r--drivers/usb/serial/pl2303.c35
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/symbolserial.c6
-rw-r--r--drivers/usb/serial/whiteheat.c31
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/watchdog/sunxi_wdt.c2
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/preempt.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c4
-rw-r--r--fs/block_dev.c7
-rw-r--r--fs/btrfs/backref.c8
-rw-r--r--fs/btrfs/extent_io.c80
-rw-r--r--fs/btrfs/inode.c3
-rw-r--r--fs/btrfs/ioctl.c5
-rw-r--r--fs/btrfs/transaction.c3
-rw-r--r--fs/btrfs/volumes.h8
-rw-r--r--fs/ceph/super.c2
-rw-r--r--fs/cifs/cifsencrypt.c53
-rw-r--r--fs/cifs/cifsfs.c6
-rw-r--r--fs/cifs/inode.c34
-rw-r--r--fs/cifs/ioctl.c6
-rw-r--r--fs/cifs/smb2ops.c8
-rw-r--r--fs/cifs/smb2pdu.c76
-rw-r--r--fs/coredump.c46
-rw-r--r--fs/dcache.c12
-rw-r--r--fs/ecryptfs/dentry.c16
-rw-r--r--fs/ext4/super.c34
-rw-r--r--fs/gfs2/super.c6
-rw-r--r--fs/hfs/bnode.c9
-rw-r--r--fs/hfs/brec.c20
-rw-r--r--fs/hfs/super.c4
-rw-r--r--fs/hfsplus/bnode.c3
-rw-r--r--fs/hfsplus/options.c4
-rw-r--r--fs/hostfs/hostfs_kern.c2
-rw-r--r--fs/hpfs/namei.c25
-rw-r--r--fs/internal.h1
-rw-r--r--fs/jbd2/checkpoint.c39
-rw-r--r--fs/jbd2/commit.c2
-rw-r--r--fs/jbd2/journal.c11
-rw-r--r--fs/libfs.c2
-rw-r--r--fs/locks.c38
-rw-r--r--fs/namei.c39
-rw-r--r--fs/nfs/filelayout/filelayout.c31
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c5
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c7
-rw-r--r--fs/nfs/inode.c15
-rw-r--r--fs/nfs/nfs4proc.c36
-rw-r--r--fs/nfs/pagelist.c6
-rw-r--r--fs/nfs/pnfs_nfs.c33
-rw-r--r--fs/nfs/read.c3
-rw-r--r--fs/nfs/write.c20
-rw-r--r--fs/nfsd/blocklayout.c8
-rw-r--r--fs/nfsd/nfs4state.c185
-rw-r--r--fs/nfsd/nfs4xdr.c56
-rw-r--r--fs/notify/mark.c30
-rw-r--r--fs/ocfs2/aops.c4
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c9
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c8
-rw-r--r--fs/ocfs2/dlmglue.c10
-rw-r--r--fs/ocfs2/super.c4
-rw-r--r--fs/open.c49
-rw-r--r--fs/overlayfs/inode.c25
-rw-r--r--fs/overlayfs/overlayfs.h1
-rw-r--r--fs/overlayfs/super.c7
-rw-r--r--fs/reiserfs/super.c8
-rw-r--r--fs/signalfd.c5
-rw-r--r--fs/ubifs/xattr.c3
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h11
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c3
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c10
-rw-r--r--fs/xfs/xfs_inode.c68
-rw-r--r--fs/xfs/xfs_inode.h85
-rw-r--r--fs/xfs/xfs_super.c4
-rw-r--r--include/asm-generic/preempt.h5
-rw-r--r--include/drm/drm_dp_mst_helper.h3
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/linux/acpi.h2
-rw-r--r--include/linux/ata.h18
-rw-r--r--include/linux/bottom_half.h32
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/fs.h32
-rw-r--r--include/linux/highmem.h4
-rw-r--r--include/linux/iio/iio.h17
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/jbd2.h3
-rw-r--r--include/linux/mm.h28
-rw-r--r--include/linux/mm_types.h9
-rw-r--r--include/linux/mmc/core.h1
-rw-r--r--include/linux/mtd/nand.h10
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/preempt.h5
-rw-r--r--include/linux/preempt_mask.h20
-rw-r--r--include/linux/sched.h6
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/seq_file.h35
-rw-r--r--include/linux/skbuff.h17
-rw-r--r--include/linux/sunrpc/xprtsock.h3
-rw-r--r--include/net/act_api.h8
-rw-r--r--include/net/af_unix.h6
-rw-r--r--include/net/ip.h1
-rw-r--r--include/net/netfilter/nf_queue.h2
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/sock.h8
-rw-r--r--include/scsi/scsi_eh.h1
-rw-r--r--include/soc/tegra/mc.h1
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/trace/events/hist.h2
-rw-r--r--include/trace/events/sched.h30
-rw-r--r--include/trace/events/sunrpc.h21
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/xen/interface/sched.h8
-rw-r--r--ipc/mqueue.c5
-rw-r--r--ipc/msg.c14
-rw-r--r--ipc/sem.c41
-rw-r--r--ipc/shm.c13
-rw-r--r--ipc/util.c8
-rw-r--r--kernel/cgroup.c7
-rw-r--r--kernel/cpuset.c2
-rw-r--r--kernel/events/core.c99
-rw-r--r--kernel/events/internal.h10
-rw-r--r--kernel/events/ring_buffer.c37
-rw-r--r--kernel/fork.c28
-rw-r--r--kernel/irq/chip.c19
-rw-r--r--kernel/irq/manage.c158
-rw-r--r--kernel/irq/proc.c19
-rw-r--r--kernel/sched/core.c47
-rw-r--r--kernel/sched/fair.c25
-rw-r--r--kernel/sched/sched.h5
-rw-r--r--kernel/sched/work-simple.c1
-rw-r--r--kernel/signal.c13
-rw-r--r--kernel/softirq.c38
-rw-r--r--kernel/time/timekeeping.c2
-rw-r--r--kernel/trace/latency_hist.c4
-rw-r--r--kernel/trace/trace_irqsoff.c4
-rw-r--r--kernel/trace/trace_sched_switch.c2
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--kernel/workqueue.c8
-rw-r--r--lib/decompress_bunzip2.c6
-rw-r--r--lib/decompress_inflate.c31
-rw-r--r--lib/decompress_unlz4.c6
-rw-r--r--lib/decompress_unlzma.c7
-rw-r--r--lib/decompress_unlzo.c13
-rw-r--r--lib/decompress_unxz.c12
-rw-r--r--lib/iommu-common.c6
-rw-r--r--lib/rhashtable.c4
-rw-r--r--localversion-rt2
-rw-r--r--mm/hugetlb.c8
-rw-r--r--mm/memcontrol.c1
-rw-r--r--mm/memory-failure.c15
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/page_alloc.c7
-rw-r--r--mm/slab.c17
-rw-r--r--mm/slub.c2
-rw-r--r--mm/vmscan.c16
-rw-r--r--net/9p/client.c2
-rw-r--r--net/batman-adv/distributed-arp-table.c7
-rw-r--r--net/batman-adv/multicast.c81
-rw-r--r--net/batman-adv/network-coding.c7
-rw-r--r--net/batman-adv/originator.c5
-rw-r--r--net/batman-adv/soft-interface.c10
-rw-r--r--net/batman-adv/translation-table.c37
-rw-r--r--net/batman-adv/types.h15
-rw-r--r--net/bluetooth/smp.c8
-rw-r--r--net/bridge/br_forward.c1
-rw-r--r--net/bridge/br_mdb.c3
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netlink.c14
-rw-r--r--net/ceph/ceph_common.c7
-rw-r--r--net/core/datagram.c57
-rw-r--r--net/core/dev.c47
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/fib_rules.c14
-rw-r--r--net/core/filter.c6
-rw-r--r--net/core/pktgen.c4
-rw-r--r--net/core/request_sock.c8
-rw-r--r--net/core/rtnetlink.c187
-rw-r--r--net/core/skbuff.c11
-rw-r--r--net/core/sock_diag.c3
-rw-r--r--net/dsa/slave.c3
-rw-r--r--net/ipv4/datagram.c16
-rw-r--r--net/ipv4/fib_trie.c6
-rw-r--r--net/ipv4/inet_connection_sock.c29
-rw-r--r--net/ipv4/ip_fragment.c7
-rw-r--r--net/ipv4/ip_tunnel.c8
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/udp.c13
-rw-r--r--net/ipv6/datagram.c20
-rw-r--r--net/ipv6/exthdrs_offload.c2
-rw-r--r--net/ipv6/ip6_gre.c1
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/route.c201
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/l2tp/l2tp_core.c11
-rw-r--r--net/mac80211/rc80211_minstrel.c11
-rw-r--r--net/mac80211/tx.c3
-rw-r--r--net/netfilter/core.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c16
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c78
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c34
-rw-r--r--net/netfilter/nf_conntrack_expect.c3
-rw-r--r--net/netfilter/nf_conntrack_netlink.c5
-rw-r--r--net/netfilter/nf_internals.h1
-rw-r--r--net/netfilter/nf_log.c9
-rw-r--r--net/netfilter/nf_queue.c17
-rw-r--r--net/netfilter/nf_tables_core.c7
-rw-r--r--net/netfilter/nfnetlink.c8
-rw-r--r--net/netfilter/nfnetlink_queue_core.c24
-rw-r--r--net/netfilter/nft_compat.c24
-rw-r--r--net/netlink/af_netlink.c180
-rw-r--r--net/netlink/af_netlink.h10
-rw-r--r--net/nfc/nci/hci.c2
-rw-r--r--net/openvswitch/datapath.c4
-rw-r--r--net/openvswitch/flow_table.c26
-rw-r--r--net/openvswitch/flow_table.h2
-rw-r--r--net/packet/af_packet.c11
-rw-r--r--net/rds/info.c2
-rw-r--r--net/sched/act_api.c11
-rw-r--r--net/sched/act_bpf.c50
-rw-r--r--net/sched/act_mirred.c1
-rw-r--r--net/sched/cls_bpf.c2
-rw-r--r--net/sched/cls_flow.c5
-rw-r--r--net/sched/cls_fw.c30
-rw-r--r--net/sched/cls_u32.c13
-rw-r--r--net/sched/sch_fq_codel.c2
-rw-r--r--net/sctp/protocol.c64
-rw-r--r--net/sunrpc/xprt.c6
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c6
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c10
-rw-r--r--net/sunrpc/xprtsock.c21
-rw-r--r--net/tipc/msg.h4
-rw-r--r--net/tipc/socket.c1
-rw-r--r--net/unix/af_unix.c16
-rwxr-xr-xscripts/kconfig/streamline_config.pl2
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--sound/arm/Kconfig15
-rw-r--r--sound/firewire/amdtp.c5
-rw-r--r--sound/firewire/amdtp.h2
-rw-r--r--sound/firewire/fireworks/fireworks.c8
-rw-r--r--sound/firewire/fireworks/fireworks.h1
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c9
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_generic.c11
-rw-r--r--sound/pci/hda/patch_cirrus.c5
-rw-r--r--sound/pci/hda/patch_conexant.c23
-rw-r--r--sound/pci/hda/patch_realtek.c44
-rw-r--r--sound/pci/hda/patch_sigmatel.c6
-rw-r--r--sound/soc/au1x/db1200.c4
-rw-r--r--sound/soc/codecs/adav80x.c1
-rw-r--r--sound/soc/codecs/arizona.c49
-rw-r--r--sound/soc/codecs/arizona.h1
-rw-r--r--sound/soc/codecs/pcm1681.c2
-rw-r--r--sound/soc/codecs/rt5640.c40
-rw-r--r--sound/soc/codecs/sgtl5000.c4
-rw-r--r--sound/soc/codecs/ssm4567.c8
-rw-r--r--sound/soc/dwc/designware_i2s.c4
-rw-r--r--sound/soc/intel/atom/sst/sst_drv_interface.c14
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c4
-rw-r--r--sound/soc/samsung/arndale_rt5631.c10
-rw-r--r--sound/soc/soc-dapm.c21
-rw-r--r--sound/synth/emux/emux_oss.c3
-rw-r--r--sound/usb/card.c2
-rw-r--r--sound/usb/mixer.c2
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--tools/lib/traceevent/event-parse.c23
-rw-r--r--tools/perf/arch/alpha/Build1
-rw-r--r--tools/perf/arch/mips/Build1
-rw-r--r--tools/perf/arch/parisc/Build1
-rw-r--r--tools/perf/builtin-stat.c5
-rw-r--r--tools/perf/util/header.c4
-rw-r--r--tools/perf/util/hist.c3
-rw-r--r--tools/perf/util/symbol-elf.c35
-rw-r--r--virt/kvm/eventfd.c124
-rw-r--r--virt/kvm/kvm_main.c19
793 files changed, 7570 insertions(+), 4263 deletions(-)
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-loopback b/Documentation/ABI/testing/configfs-usb-gadget-loopback
index 9aae5bf..06beefbc 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-loopback
+++ b/Documentation/ABI/testing/configfs-usb-gadget-loopback
@@ -5,4 +5,4 @@ Description:
The attributes:
qlen - depth of loopback queue
- bulk_buflen - buffer length
+ buflen - buffer length
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
index 29477c3..bc7ff73 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
+++ b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
@@ -9,4 +9,4 @@ Description:
isoc_maxpacket - 0 - 1023 (fs), 0 - 1024 (hs/ss)
isoc_mult - 0..2 (hs/ss only)
isoc_maxburst - 0..15 (ss only)
- qlen - buffer length
+ buflen - buffer length
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 93aa860..21152d3 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -218,16 +218,16 @@ The development process
Linux kernel development process currently consists of a few different
main kernel "branches" and lots of different subsystem-specific kernel
branches. These different branches are:
- - main 3.x kernel tree
- - 3.x.y -stable kernel tree
- - 3.x -git kernel patches
+ - main 4.x kernel tree
+ - 4.x.y -stable kernel tree
+ - 4.x -git kernel patches
- subsystem specific kernel trees and patches
- - the 3.x -next kernel tree for integration tests
+ - the 4.x -next kernel tree for integration tests
-3.x kernel tree
+4.x kernel tree
-----------------
-3.x kernels are maintained by Linus Torvalds, and can be found on
-kernel.org in the pub/linux/kernel/v3.x/ directory. Its development
+4.x kernels are maintained by Linus Torvalds, and can be found on
+kernel.org in the pub/linux/kernel/v4.x/ directory. Its development
process is as follows:
- As soon as a new kernel is released a two weeks window is open,
during this period of time maintainers can submit big diffs to
@@ -262,20 +262,20 @@ mailing list about kernel releases:
released according to perceived bug status, not according to a
preconceived timeline."
-3.x.y -stable kernel tree
+4.x.y -stable kernel tree
---------------------------
Kernels with 3-part versions are -stable kernels. They contain
relatively small and critical fixes for security problems or significant
-regressions discovered in a given 3.x kernel.
+regressions discovered in a given 4.x kernel.
This is the recommended branch for users who want the most recent stable
kernel and are not interested in helping test development/experimental
versions.
-If no 3.x.y kernel is available, then the highest numbered 3.x
+If no 4.x.y kernel is available, then the highest numbered 4.x
kernel is the current stable kernel.
-3.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+4.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
are released as needs dictate. The normal release period is approximately
two weeks, but it can be longer if there are no pressing problems. A
security-related problem, instead, can cause a release to happen almost
@@ -285,7 +285,7 @@ The file Documentation/stable_kernel_rules.txt in the kernel tree
documents what kinds of changes are acceptable for the -stable tree, and
how the release process works.
-3.x -git patches
+4.x -git patches
------------------
These are daily snapshots of Linus' kernel tree which are managed in a
git repository (hence the name.) These patches are usually released
@@ -317,9 +317,9 @@ revisions to it, and maintainers can mark patches as under review,
accepted, or rejected. Most of these patchwork sites are listed at
http://patchwork.kernel.org/.
-3.x -next kernel tree for integration tests
+4.x -next kernel tree for integration tests
---------------------------------------------
-Before updates from subsystem trees are merged into the mainline 3.x
+Before updates from subsystem trees are merged into the mainline 4.x
tree, they need to be integration-tested. For this purpose, a special
testing repository exists into which virtually all subsystem trees are
pulled on an almost daily basis:
diff --git a/Documentation/devicetree/bindings/clock/keystone-pll.txt b/Documentation/devicetree/bindings/clock/keystone-pll.txt
index 225990f..47570d2 100644
--- a/Documentation/devicetree/bindings/clock/keystone-pll.txt
+++ b/Documentation/devicetree/bindings/clock/keystone-pll.txt
@@ -15,8 +15,8 @@ Required properties:
- compatible : shall be "ti,keystone,main-pll-clock" or "ti,keystone,pll-clock"
- clocks : parent clock phandle
- reg - pll control0 and pll multipler registers
-- reg-names : control and multiplier. The multiplier is applicable only for
- main pll clock
+- reg-names : control, multiplier and post-divider. The multiplier and
+ post-divider registers are applicable only for main pll clock
- fixed-postdiv : fixed post divider value. If absent, use clkod register bits
for postdiv
@@ -25,8 +25,8 @@ Example:
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
- reg = <0x02620350 4>, <0x02310110 4>;
- reg-names = "control", "multiplier";
+ reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+ reg-names = "control", "multiplier", "post-divider";
fixed-postdiv = <2>;
};
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 41b3f3f..5d88f37 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -25,7 +25,11 @@ The following properties are common to the Ethernet controllers:
flow control thresholds.
- tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
is used for components that can have configurable fifo sizes.
+- managed: string, specifies the PHY management type. Supported values are:
+ "auto", "in-band-status". "auto" is the default, it usess MDIO for
+ management if fixed-link is not specified.
Child nodes of the Ethernet controller are typically the individual PHY devices
connected via the MDIO bus (sometimes the MDIO bus controller is separate).
They are described in the phy.txt file in this same directory.
+For non-MDIO PHY management see fixed-link.txt.
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index c86f2f1..1fec113 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
byte 5: 0 z6 z5 z4 z3 z2 z1 z0
Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
Dualpoint device -- interleaved packet format
---------------------------------------------
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index f45b2bf..820664a 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -237,9 +237,7 @@ Testing the LOOPBACK function
-----------------------------
device: run the gadget
-host: test-usb
-
-http://www.linux-usb.org/usbtest/testusb.c
+host: test-usb (tools/usb/testusb.c)
8. MASS STORAGE function
========================
@@ -588,9 +586,8 @@ Testing the SOURCESINK function
-------------------------------
device: run the gadget
-host: test-usb
+host: test-usb (tools/usb/testusb.c)
-http://www.linux-usb.org/usbtest/testusb.c
16. UAC1 function
=================
diff --git a/Makefile b/Makefile
index 068dd69..2320f19 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 1
-SUBLEVEL = 5
+SUBLEVEL = 12
EXTRAVERSION =
NAME = Series 4800
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index bfaeed7..e16a259 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -539,6 +539,7 @@ config ARCH_ORION5X
select MVEBU_MBUS
select PCI
select PLAT_ORION_LEGACY
+ select MULTI_IRQ_HANDLER
help
Support for the following Marvell Orion 5x series SoCs:
Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182),
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 985227c..47f10e7 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -50,6 +50,14 @@ AS += -EL
LD += -EL
endif
+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
+
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index bd245d3..a0765e7 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -57,5 +57,5 @@ extern char * strstr(const char * s1, const char *s2);
int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
{
- return decompress(input, len, NULL, NULL, output, NULL, error);
+ return __decompress(input, len, NULL, NULL, output, 0, NULL, error);
}
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index f03a091..dfcc0dd 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -116,7 +116,7 @@
ranges = <0 0x2000 0x2000>;
scm_conf: scm_conf@0 {
- compatible = "syscon";
+ compatible = "syscon", "simple-bus";
reg = <0x0 0x1400>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index 0b99068..75aba40 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -181,7 +181,7 @@
display-timings {
timing-0 {
- clock-frequency = <0>;
+ clock-frequency = <4600000>;
hactive = <320>;
vactive = <320>;
hfront-porch = <1>;
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index dd45e69..9351296 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -10,6 +10,7 @@
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include "imx25.dtsi"
@@ -114,8 +115,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio2 1 0>;
- wp-gpios = <&gpio2 0 0>;
+ cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index b6478e9..e6540b5 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -286,8 +286,8 @@
can1: can@53fe4000 {
compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
reg = <0x53fe4000 0x1000>;
- clocks = <&clks 33>;
- clock-names = "ipg";
+ clocks = <&clks 33>, <&clks 33>;
+ clock-names = "ipg", "per";
interrupts = <43>;
status = "disabled";
};
@@ -295,8 +295,8 @@
can2: can@53fe8000 {
compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
reg = <0x53fe8000 0x1000>;
- clocks = <&clks 34>;
- clock-names = "ipg";
+ clocks = <&clks 34>, <&clks 34>;
+ clock-names = "ipg", "per";
interrupts = <44>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
index 93d3ea1..0f3fe29 100644
--- a/arch/arm/boot/dts/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/imx51-apf51dev.dts
@@ -98,7 +98,7 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
bus-width = <4>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index e9337ad..3bc1883 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -103,8 +103,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio1 1 0>;
- wp-gpios = <&gpio1 9 0>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d0e0f57..53f4088 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -124,8 +124,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio1 1 0>;
- wp-gpios = <&gpio1 9 0>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 181ae5e..1f55187 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -147,8 +147,8 @@
&esdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc3>;
- cd-gpios = <&gpio3 11 0>;
- wp-gpios = <&gpio3 12 0>;
+ cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
bus-width = <8>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 1d32557..fc89ce1 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -41,8 +41,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio3 13 0>;
- wp-gpios = <&gpio4 11 0>;
+ cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index 4f1f0e2..e03373a 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -41,8 +41,8 @@
pinctrl-0 = <&pinctrl_esdhc2>,
<&pinctrl_esdhc2_cdwp>;
vmmc-supply = <&reg_3p3v>;
- wp-gpios = <&gpio1 2 0>;
- cd-gpios = <&gpio1 4 0>;
+ wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index 704bd72..d3e50b2 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -183,7 +183,7 @@
};
&esdhc1 {
- cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
@@ -191,7 +191,7 @@
};
&esdhc2 {
- cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc2>;
diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
index c17d3ad..fc51b87 100644
--- a/arch/arm/boot/dts/imx53-voipac-bsb.dts
+++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
@@ -119,8 +119,8 @@
&esdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc2>;
- cd-gpios = <&gpio3 25 0>;
- wp-gpios = <&gpio2 19 0>;
+ cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 488a640..394a4ac 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -35,7 +35,6 @@
compatible = "regulator-fixed";
reg = <1>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbh1>;
regulator-name = "usbh1_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -47,7 +46,6 @@
compatible = "regulator-fixed";
reg = <2>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbotg>;
regulator-name = "usb_otg_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index f74a8de..38c7860 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -153,10 +153,10 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
<&clks IMX6QDL_CLK_LVDS1_GATE>,
<&clks IMX6QDL_CLK_PCIE_REF_125M>;
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
index 4773d6a..d56d68f 100644
--- a/arch/arm/boot/dts/k2e-clocks.dtsi
+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
@@ -13,9 +13,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
- reg = <0x02620350 4>, <0x02310110 4>;
- reg-names = "control", "multiplier";
- fixed-postdiv = <2>;
+ reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+ reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
index d5adee3..af9b719 100644
--- a/arch/arm/boot/dts/k2hk-clocks.dtsi
+++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
- reg = <0x02620350 4>, <0x02310110 4>;
- reg-names = "control", "multiplier";
- fixed-postdiv = <2>;
+ reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+ reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
index eb1e3e2..ef8464b 100644
--- a/arch/arm/boot/dts/k2l-clocks.dtsi
+++ b/arch/arm/boot/dts/k2l-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
- reg = <0x02620350 4>, <0x02310110 4>;
- reg-names = "control", "multiplier";
- fixed-postdiv = <2>;
+ reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+ reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 11a7963..2390f38 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -51,7 +51,8 @@
};
scm_conf: scm_conf@270 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x270 0x240>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a547411..67659a0 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -202,7 +202,7 @@
tfp410_pins: pinmux_tfp410_pins {
pinctrl-single,pins = <
- 0x194 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
+ 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
>;
};
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index f884d6a..84be9da 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -191,7 +191,8 @@
};
omap4_padconf_global: omap4_padconf_global@5a0 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x5a0 0x170>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 74777a6..1b958e9 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -174,8 +174,8 @@
i2c5_pins: pinmux_i2c5_pins {
pinctrl-single,pins = <
- 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
- 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
+ 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
+ 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
>;
};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 7d24ae0..874a26f 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -180,7 +180,8 @@
};
omap5_padconf_global: omap5_padconf_global@5a0 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x5a0 0xec>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 165968d..8eca587 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -584,7 +584,7 @@
compatible = "rockchip,rk3288-wdt", "snps,dw-wdt";
reg = <0xff800000 0x100>;
clocks = <&cru PCLK_WDT>;
- interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a6ad93c..fd9eefc 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -259,15 +259,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (err)
return err;
- patch_text((void *)bpt->bpt_addr,
- *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr,
+ *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
return err;
}
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
- patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
return 0;
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 365934f..25bd12e 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -343,12 +343,17 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
*/
thumb = handler & 1;
-#if __LINUX_ARM_ARCH__ >= 7
+#if __LINUX_ARM_ARCH__ >= 6
/*
- * Clear the If-Then Thumb-2 execution state
- * ARM spec requires this to be all 000s in ARM mode
- * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
- * signal transition without this.
+ * Clear the If-Then Thumb-2 execution state. ARM spec
+ * requires this to be all 000s in ARM mode. Snapdragon
+ * S4/Krait misbehaves on a Thumb=>ARM signal transition
+ * without this.
+ *
+ * We must do this whenever we are running on a Thumb-2
+ * capable CPU, which includes ARMv6T2. However, we elect
+ * to do this whenever we're on an ARMv6 or later CPU for
+ * simplicity.
*/
cpsr &= ~PSR_IT_MASK;
#endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index f11d825..e561aef 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -213,8 +213,6 @@ int __cpu_disable(void)
flush_cache_louis();
local_flush_tlb_all();
- clear_tasks_mm_cpumask(cpu);
-
return 0;
}
@@ -230,6 +228,9 @@ void __cpu_die(unsigned int cpu)
pr_err("CPU%u: cpu didn't die\n", cpu);
return;
}
+
+ clear_tasks_mm_cpumask(cpu);
+
pr_notice("CPU%u: shutdown\n", cpu);
/*
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index adb1988..0c9014d 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -450,7 +450,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Map the VGIC hardware resources before running a vcpu the first
* time on this VM.
*/
- if (unlikely(!vgic_ready(kvm))) {
+ if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
ret = kvm_vgic_map_resources(kvm);
if (ret)
return ret;
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 48efe2e..58048b3 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -518,8 +518,7 @@ ARM_BE8(rev r6, r6 )
mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
- bic r2, #1 @ Clear ENABLE
- mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
isb
mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
@@ -532,6 +531,9 @@ ARM_BE8(rev r6, r6 )
mcrr p15, 4, r2, r2, c14 @ CNTVOFF
1:
+ mov r2, #0 @ Clear ENABLE
+ mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 1d5accb..191dcfa 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1790,8 +1790,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (vma->vm_flags & VM_PFNMAP) {
gpa_t gpa = mem->guest_phys_addr +
(vm_start - mem->userspace_addr);
- phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
- vm_start - vma->vm_start;
+ phys_addr_t pa;
+
+ pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
diff --git a/arch/arm/mach-bcm/Makefile b/arch/arm/mach-bcm/Makefile
index 4c38674..54d274d 100644
--- a/arch/arm/mach-bcm/Makefile
+++ b/arch/arm/mach-bcm/Makefile
@@ -43,5 +43,5 @@ obj-$(CONFIG_ARCH_BCM_63XX) := bcm63xx.o
ifeq ($(CONFIG_ARCH_BRCMSTB),y)
CFLAGS_platsmp-brcmstb.o += -march=armv7-a
obj-y += brcmstb.o
-obj-$(CONFIG_SMP) += headsmp-brcmstb.o platsmp-brcmstb.o
+obj-$(CONFIG_SMP) += platsmp-brcmstb.o
endif
diff --git a/arch/arm/mach-bcm/brcmstb.h b/arch/arm/mach-bcm/brcmstb.h
deleted file mode 100644
index ec0c3d1..0000000
--- a/arch/arm/mach-bcm/brcmstb.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (C) 2013-2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __BRCMSTB_H__
-#define __BRCMSTB_H__
-
-void brcmstb_secondary_startup(void);
-
-#endif /* __BRCMSTB_H__ */
diff --git a/arch/arm/mach-bcm/headsmp-brcmstb.S b/arch/arm/mach-bcm/headsmp-brcmstb.S
deleted file mode 100644
index 199c1ea..0000000
--- a/arch/arm/mach-bcm/headsmp-brcmstb.S
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * SMP boot code for secondary CPUs
- * Based on arch/arm/mach-tegra/headsmp.S
- *
- * Copyright (C) 2010 NVIDIA, Inc.
- * Copyright (C) 2013-2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <asm/assembler.h>
-#include <linux/linkage.h>
-#include <linux/init.h>
-
- .section ".text.head", "ax"
-
-ENTRY(brcmstb_secondary_startup)
- /*
- * Ensure CPU is in a sane state by disabling all IRQs and switching
- * into SVC mode.
- */
- setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r0
-
- bl v7_invalidate_l1
- b secondary_startup
-ENDPROC(brcmstb_secondary_startup)
diff --git a/arch/arm/mach-bcm/platsmp-brcmstb.c b/arch/arm/mach-bcm/platsmp-brcmstb.c
index e209e6f..44d6bddf 100644
--- a/arch/arm/mach-bcm/platsmp-brcmstb.c
+++ b/arch/arm/mach-bcm/platsmp-brcmstb.c
@@ -30,8 +30,6 @@
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
-#include "brcmstb.h"
-
enum {
ZONE_MAN_CLKEN_MASK = BIT(0),
ZONE_MAN_RESET_CNTL_MASK = BIT(1),
@@ -153,7 +151,7 @@ static void brcmstb_cpu_boot(u32 cpu)
* Set the reset vector to point to the secondary_startup
* routine
*/
- cpu_set_boot_addr(cpu, virt_to_phys(brcmstb_secondary_startup));
+ cpu_set_boot_addr(cpu, virt_to_phys(secondary_startup));
/* Unhalt the cpu */
cpu_rst_cfg_set(cpu, 0);
diff --git a/arch/arm/mach-berlin/headsmp.S b/arch/arm/mach-berlin/headsmp.S
index 4a4c56a..dc82a34 100644
--- a/arch/arm/mach-berlin/headsmp.S
+++ b/arch/arm/mach-berlin/headsmp.S
@@ -12,12 +12,6 @@
#include <linux/init.h>
#include <asm/assembler.h>
-ENTRY(berlin_secondary_startup)
- ARM_BE8(setend be)
- bl v7_invalidate_l1
- b secondary_startup
-ENDPROC(berlin_secondary_startup)
-
/*
* If the following instruction is set in the reset exception vector, CPUs
* will fetch the value of the software reset address vector when being
diff --git a/arch/arm/mach-berlin/platsmp.c b/arch/arm/mach-berlin/platsmp.c
index 702e798..34a3753 100644
--- a/arch/arm/mach-berlin/platsmp.c
+++ b/arch/arm/mach-berlin/platsmp.c
@@ -22,7 +22,6 @@
#define RESET_VECT 0x00
#define SW_RESET_ADDR 0x94
-extern void berlin_secondary_startup(void);
extern u32 boot_inst;
static void __iomem *cpu_ctrl;
@@ -85,7 +84,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus)
* Write the secondary startup address into the SW reset address
* vector. This is used by boot_inst.
*/
- writel(virt_to_phys(berlin_secondary_startup), vectors_base + SW_RESET_ADDR);
+ writel(virt_to_phys(secondary_startup), vectors_base + SW_RESET_ADDR);
iounmap(vectors_base);
unmap_scu:
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 9bdf547..5697819 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -20,6 +20,7 @@
#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/mcpm.h>
+#include <asm/smp_plat.h>
#include "regs-pmu.h"
#include "common.h"
@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
cluster >= EXYNOS5420_NR_CLUSTERS)
return -EINVAL;
- exynos_cpu_power_up(cpunr);
+ if (!exynos_cpu_power_state(cpunr)) {
+ exynos_cpu_power_up(cpunr);
+
+ /*
+ * This assumes the cluster number of the big cores (Cortex-A15)
+ * is 0 and that of the LITTLE cores (Cortex-A7) is 1.
+ * When the system was booted from a LITTLE core, the LITTLE
+ * cores must be reset while being powered up.
+ */
+ if (cluster &&
+ cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+ /*
+ * Before resetting the LITTLE cores, wait until the
+ * SPARE2 register reads 1: the iROM init code sets
+ * this register once its own initialization is
+ * complete.
+ */
+ while (!pmu_raw_readl(S5P_PMU_SPARE2))
+ udelay(10);
+
+ pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
+ EXYNOS_SWRESET);
+ }
+ }
+
return 0;
}
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index b761433..fba9068 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
#define SPREAD_ENABLE 0xF
#define SPREAD_USE_STANDWFI 0xF
+#define EXYNOS5420_KFC_CORE_RESET0 BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0 BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr) \
+ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
#define EXYNOS5420_BB_CON1 0x0784
#define EXYNOS5420_BB_SEL_EN BIT(31)
#define EXYNOS5420_BB_PMOS_EN BIT(7)
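
The EXYNOS5420_KFC_CORE_RESET() macro added above folds the core and ETM reset bits together and shifts both by the core number, so one write to EXYNOS_SWRESET asserts both resets for a given KFC (LITTLE) core. A self-contained sketch that just prints the resulting masks (the loop bound of four cores is an assumption for illustration):

/* Standalone illustration of the reset-mask macro added above. */
#include <stdio.h>

#define BIT(n)				(1U << (n))
#define EXYNOS5420_KFC_CORE_RESET0	BIT(8)
#define EXYNOS5420_KFC_ETM_RESET0	BIT(20)
#define EXYNOS5420_KFC_CORE_RESET(_nr)	\
	((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		printf("KFC core %d reset mask: 0x%08x\n",
		       cpu, EXYNOS5420_KFC_CORE_RESET(cpu));
	return 0;
}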
diff --git a/arch/arm/mach-hisi/Makefile b/arch/arm/mach-hisi/Makefile
index 6b7b303..659db19 100644
--- a/arch/arm/mach-hisi/Makefile
+++ b/arch/arm/mach-hisi/Makefile
@@ -6,4 +6,4 @@ CFLAGS_platmcpm.o := -march=armv7-a
obj-y += hisilicon.o
obj-$(CONFIG_MCPM) += platmcpm.o
-obj-$(CONFIG_SMP) += platsmp.o hotplug.o headsmp.o
+obj-$(CONFIG_SMP) += platsmp.o hotplug.o
diff --git a/arch/arm/mach-hisi/core.h b/arch/arm/mach-hisi/core.h
index 92a682d..c7648ef 100644
--- a/arch/arm/mach-hisi/core.h
+++ b/arch/arm/mach-hisi/core.h
@@ -12,7 +12,6 @@ extern void hi3xxx_cpu_die(unsigned int cpu);
extern int hi3xxx_cpu_kill(unsigned int cpu);
extern void hi3xxx_set_cpu(int cpu, bool enable);
-extern void hisi_secondary_startup(void);
extern struct smp_operations hix5hd2_smp_ops;
extern void hix5hd2_set_cpu(int cpu, bool enable);
extern void hix5hd2_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-hisi/headsmp.S b/arch/arm/mach-hisi/headsmp.S
deleted file mode 100644
index 81e35b1..0000000
--- a/arch/arm/mach-hisi/headsmp.S
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2014 Hisilicon Limited.
- * Copyright (c) 2014 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-
- __CPUINIT
-
-ENTRY(hisi_secondary_startup)
- bl v7_invalidate_l1
- b secondary_startup
diff --git a/arch/arm/mach-hisi/platsmp.c b/arch/arm/mach-hisi/platsmp.c
index 8880c8e..5174412 100644
--- a/arch/arm/mach-hisi/platsmp.c
+++ b/arch/arm/mach-hisi/platsmp.c
@@ -118,7 +118,7 @@ static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
phys_addr_t jumpaddr;
- jumpaddr = virt_to_phys(hisi_secondary_startup);
+ jumpaddr = virt_to_phys(secondary_startup);
hix5hd2_set_scu_boot_addr(HIX5HD2_BOOT_ADDRESS, jumpaddr);
hix5hd2_set_cpu(cpu, true);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
@@ -156,7 +156,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
struct device_node *node;
- jumpaddr = virt_to_phys(hisi_secondary_startup);
+ jumpaddr = virt_to_phys(secondary_startup);
hip01_set_boot_addr(HIP01_BOOT_ADDRESS, jumpaddr);
node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S
index de5047c..b5e9768 100644
--- a/arch/arm/mach-imx/headsmp.S
+++ b/arch/arm/mach-imx/headsmp.S
@@ -25,7 +25,6 @@ diag_reg_offset:
.endm
ENTRY(v7_secondary_startup)
- bl v7_invalidate_l1
set_diag_reg
b secondary_startup
ENDPROC(v7_secondary_startup)
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
index 08d5ed4..48e4c4b 100644
--- a/arch/arm/mach-mvebu/headsmp-a9.S
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -21,7 +21,6 @@
ENTRY(mvebu_cortex_a9_secondary_startup)
ARM_BE8(setend be)
- bl v7_invalidate_l1
bl armada_38x_scu_power_up
b secondary_startup
ENDPROC(mvebu_cortex_a9_secondary_startup)
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index 57d5df0..7581e03 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -331,7 +331,7 @@ static struct clockdomain l4per2_7xx_clkdm = {
.dep_bit = DRA7XX_L4PER2_STATDEP_SHIFT,
.wkdep_srcs = l4per2_wkup_sleep_deps,
.sleepdep_srcs = l4per2_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain mpu0_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 3b56722..6833df4 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
.irq_mask = wakeupgen_mask,
.irq_unmask = wakeupgen_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 752969f..5286e77 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
* registers. This address is needed early so the OCP registers that
* are part of the device's address space can be ioremapped properly.
*
+ * If SYSC access is not needed, the registers will not be remapped
+ * and non-availability of MPU access is not treated as an error.
+ *
* Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
* -ENXIO on absent or invalid register target address space.
*/
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
_save_mpu_port_index(oh);
+ /* if we don't need sysc access we don't need to ioremap */
+ if (!oh->class->sysc)
+ return 0;
+
+ /* we can't continue without MPU PORT if we need sysc access */
if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
return -ENXIO;
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
oh->name);
/* Extract the IO space from device tree blob */
- if (!np)
+ if (!np) {
+ pr_err("omap_hwmod: %s: no dt node\n", oh->name);
return -ENXIO;
+ }
va_start = of_iomap(np, index + oh->mpu_rt_idx);
} else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
oh->name, np->name);
}
- if (oh->class->sysc) {
- r = _init_mpu_rt_base(oh, NULL, index, np);
- if (r < 0) {
- WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
- oh->name);
- return 0;
- }
+ r = _init_mpu_rt_base(oh, NULL, index, np);
+ if (r < 0) {
+ WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+ oh->name);
+ return 0;
}
r = _init_clocks(oh, NULL);
diff --git a/arch/arm/mach-orion5x/include/mach/irqs.h b/arch/arm/mach-orion5x/include/mach/irqs.h
index a6fa9d8..2431d99 100644
--- a/arch/arm/mach-orion5x/include/mach/irqs.h
+++ b/arch/arm/mach-orion5x/include/mach/irqs.h
@@ -16,42 +16,42 @@
/*
* Orion Main Interrupt Controller
*/
-#define IRQ_ORION5X_BRIDGE 0
-#define IRQ_ORION5X_DOORBELL_H2C 1
-#define IRQ_ORION5X_DOORBELL_C2H 2
-#define IRQ_ORION5X_UART0 3
-#define IRQ_ORION5X_UART1 4
-#define IRQ_ORION5X_I2C 5
-#define IRQ_ORION5X_GPIO_0_7 6
-#define IRQ_ORION5X_GPIO_8_15 7
-#define IRQ_ORION5X_GPIO_16_23 8
-#define IRQ_ORION5X_GPIO_24_31 9
-#define IRQ_ORION5X_PCIE0_ERR 10
-#define IRQ_ORION5X_PCIE0_INT 11
-#define IRQ_ORION5X_USB1_CTRL 12
-#define IRQ_ORION5X_DEV_BUS_ERR 14
-#define IRQ_ORION5X_PCI_ERR 15
-#define IRQ_ORION5X_USB_BR_ERR 16
-#define IRQ_ORION5X_USB0_CTRL 17
-#define IRQ_ORION5X_ETH_RX 18
-#define IRQ_ORION5X_ETH_TX 19
-#define IRQ_ORION5X_ETH_MISC 20
-#define IRQ_ORION5X_ETH_SUM 21
-#define IRQ_ORION5X_ETH_ERR 22
-#define IRQ_ORION5X_IDMA_ERR 23
-#define IRQ_ORION5X_IDMA_0 24
-#define IRQ_ORION5X_IDMA_1 25
-#define IRQ_ORION5X_IDMA_2 26
-#define IRQ_ORION5X_IDMA_3 27
-#define IRQ_ORION5X_CESA 28
-#define IRQ_ORION5X_SATA 29
-#define IRQ_ORION5X_XOR0 30
-#define IRQ_ORION5X_XOR1 31
+#define IRQ_ORION5X_BRIDGE (1 + 0)
+#define IRQ_ORION5X_DOORBELL_H2C (1 + 1)
+#define IRQ_ORION5X_DOORBELL_C2H (1 + 2)
+#define IRQ_ORION5X_UART0 (1 + 3)
+#define IRQ_ORION5X_UART1 (1 + 4)
+#define IRQ_ORION5X_I2C (1 + 5)
+#define IRQ_ORION5X_GPIO_0_7 (1 + 6)
+#define IRQ_ORION5X_GPIO_8_15 (1 + 7)
+#define IRQ_ORION5X_GPIO_16_23 (1 + 8)
+#define IRQ_ORION5X_GPIO_24_31 (1 + 9)
+#define IRQ_ORION5X_PCIE0_ERR (1 + 10)
+#define IRQ_ORION5X_PCIE0_INT (1 + 11)
+#define IRQ_ORION5X_USB1_CTRL (1 + 12)
+#define IRQ_ORION5X_DEV_BUS_ERR (1 + 14)
+#define IRQ_ORION5X_PCI_ERR (1 + 15)
+#define IRQ_ORION5X_USB_BR_ERR (1 + 16)
+#define IRQ_ORION5X_USB0_CTRL (1 + 17)
+#define IRQ_ORION5X_ETH_RX (1 + 18)
+#define IRQ_ORION5X_ETH_TX (1 + 19)
+#define IRQ_ORION5X_ETH_MISC (1 + 20)
+#define IRQ_ORION5X_ETH_SUM (1 + 21)
+#define IRQ_ORION5X_ETH_ERR (1 + 22)
+#define IRQ_ORION5X_IDMA_ERR (1 + 23)
+#define IRQ_ORION5X_IDMA_0 (1 + 24)
+#define IRQ_ORION5X_IDMA_1 (1 + 25)
+#define IRQ_ORION5X_IDMA_2 (1 + 26)
+#define IRQ_ORION5X_IDMA_3 (1 + 27)
+#define IRQ_ORION5X_CESA (1 + 28)
+#define IRQ_ORION5X_SATA (1 + 29)
+#define IRQ_ORION5X_XOR0 (1 + 30)
+#define IRQ_ORION5X_XOR1 (1 + 31)
/*
* Orion General Purpose Pins
*/
-#define IRQ_ORION5X_GPIO_START 32
+#define IRQ_ORION5X_GPIO_START 33
#define NR_GPIO_IRQS 32
#define NR_IRQS (IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS)
diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c
index cd4bac4..086ecb8 100644
--- a/arch/arm/mach-orion5x/irq.c
+++ b/arch/arm/mach-orion5x/irq.c
@@ -42,7 +42,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs)
stat = readl_relaxed(MAIN_IRQ_CAUSE);
stat &= readl_relaxed(MAIN_IRQ_MASK);
if (stat) {
- unsigned int hwirq = __fls(stat);
+ unsigned int hwirq = 1 + __fls(stat);
handle_IRQ(hwirq, regs);
return;
}
@@ -51,7 +51,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs)
void __init orion5x_init_irq(void)
{
- orion_irq_init(0, MAIN_IRQ_MASK);
+ orion_irq_init(1, MAIN_IRQ_MASK);
#ifdef CONFIG_MULTI_IRQ_HANDLER
set_handle_irq(orion5x_legacy_handle_irq);
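
The renumbering above moves every Orion5x main-controller source up by one so that none of them lands on IRQ 0, which Linux treats as "no interrupt"; the legacy handler compensates with 1 + __fls(stat). A userspace sketch of that mapping, with __fls open-coded via a GCC builtin (the pending-source values are made up):

/* Sketch of the cause-bit -> Linux IRQ mapping used above (offset by 1). */
#include <stdio.h>

static unsigned int my_fls(unsigned int x)	/* index of highest set bit */
{
	return 31 - __builtin_clz(x);
}

int main(void)
{
	unsigned int stat = (1u << 3) | (1u << 17);	/* UART0 + USB0_CTRL pending */
	unsigned int hwirq = 1 + my_fls(stat);		/* highest source wins */

	printf("dispatching IRQ %u (USB0_CTRL is 1 + 17 = 18)\n", hwirq);
	return 0;
}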
diff --git a/arch/arm/mach-prima2/headsmp.S b/arch/arm/mach-prima2/headsmp.S
index d86fe33..209d9fc 100644
--- a/arch/arm/mach-prima2/headsmp.S
+++ b/arch/arm/mach-prima2/headsmp.S
@@ -15,7 +15,6 @@
* ready for them to initialise.
*/
ENTRY(sirfsoc_secondary_startup)
- bl v7_invalidate_l1
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #15
adr r4, 1f
diff --git a/arch/arm/mach-rockchip/core.h b/arch/arm/mach-rockchip/core.h
index 39bca96..492c048 100644
--- a/arch/arm/mach-rockchip/core.h
+++ b/arch/arm/mach-rockchip/core.h
@@ -17,4 +17,3 @@ extern char rockchip_secondary_trampoline;
extern char rockchip_secondary_trampoline_end;
extern unsigned long rockchip_boot_fn;
-extern void rockchip_secondary_startup(void);
diff --git a/arch/arm/mach-rockchip/headsmp.S b/arch/arm/mach-rockchip/headsmp.S
index 46c22de..d69708b 100644
--- a/arch/arm/mach-rockchip/headsmp.S
+++ b/arch/arm/mach-rockchip/headsmp.S
@@ -15,14 +15,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
-ENTRY(rockchip_secondary_startup)
- mrc p15, 0, r0, c0, c0, 0 @ read main ID register
- ldr r1, =0x00000c09 @ Cortex-A9 primary part number
- teq r0, r1
- beq v7_invalidate_l1
- b secondary_startup
-ENDPROC(rockchip_secondary_startup)
-
ENTRY(rockchip_secondary_trampoline)
ldr pc, 1f
ENDPROC(rockchip_secondary_trampoline)
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
index 5b4ca3c..611a5f9 100644
--- a/arch/arm/mach-rockchip/platsmp.c
+++ b/arch/arm/mach-rockchip/platsmp.c
@@ -72,29 +72,22 @@ static struct reset_control *rockchip_get_core_reset(int cpu)
static int pmu_set_power_domain(int pd, bool on)
{
u32 val = (on) ? 0 : BIT(pd);
+ struct reset_control *rstc = rockchip_get_core_reset(pd);
int ret;
+ if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
+ pr_err("%s: could not get reset control for core %d\n",
+ __func__, pd);
+ return PTR_ERR(rstc);
+ }
+
/*
* We need to soft reset the cpu when we turn off the cpu power domain,
* or else the active processors might be stalled when the individual
* processor is powered down.
*/
- if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
- struct reset_control *rstc = rockchip_get_core_reset(pd);
-
- if (IS_ERR(rstc)) {
- pr_err("%s: could not get reset control for core %d\n",
- __func__, pd);
- return PTR_ERR(rstc);
- }
-
- if (on)
- reset_control_deassert(rstc);
- else
- reset_control_assert(rstc);
-
- reset_control_put(rstc);
- }
+ if (!IS_ERR(rstc) && !on)
+ reset_control_assert(rstc);
ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
if (ret < 0) {
@@ -112,6 +105,12 @@ static int pmu_set_power_domain(int pd, bool on)
}
}
+ if (!IS_ERR(rstc)) {
+ if (on)
+ reset_control_deassert(rstc);
+ reset_control_put(rstc);
+ }
+
return 0;
}
@@ -147,10 +146,13 @@ static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
* the mailbox:
* sram_base_addr + 4: 0xdeadbeaf
* sram_base_addr + 8: start address for pc
+ * CPU0 needs to wait until the other CPUs have entered the
+ * wfe state. The wait time is affected by many factors
+ * (e.g. cpu frequency, bootrom frequency, sram frequency, ...).
* */
- udelay(10);
- writel(virt_to_phys(rockchip_secondary_startup),
- sram_base_addr + 8);
+ mdelay(1); /* ensure the CPUs other than cpu0 have started up */
+
+ writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
writel(0xDEADBEAF, sram_base_addr + 4);
dsb_sev();
}
@@ -189,7 +191,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
}
/* set the boot function for the sram code */
- rockchip_boot_fn = virt_to_phys(rockchip_secondary_startup);
+ rockchip_boot_fn = virt_to_phys(secondary_startup);
/* copy the trampoline to sram, that runs during startup of the core */
memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index afc60ba..476092b 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -14,7 +14,6 @@ extern void shmobile_smp_sleep(void);
extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
unsigned long arg);
extern int shmobile_smp_cpu_disable(unsigned int cpu);
-extern void shmobile_invalidate_start(void);
extern void shmobile_boot_scu(void);
extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
index 69df8bf..fa5248c 100644
--- a/arch/arm/mach-shmobile/headsmp-scu.S
+++ b/arch/arm/mach-shmobile/headsmp-scu.S
@@ -22,7 +22,7 @@
* Boot code for secondary CPUs.
*
* First we turn on L1 cache coherency for our CPU. Then we jump to
- * shmobile_invalidate_start that invalidates the cache and hands over control
+ * secondary_startup that invalidates the cache and hands over control
* to the common ARM startup code.
*/
ENTRY(shmobile_boot_scu)
@@ -36,7 +36,7 @@ ENTRY(shmobile_boot_scu)
bic r2, r2, r3 @ Clear bits of our CPU (Run Mode)
str r2, [r0, #8] @ write back
- b shmobile_invalidate_start
+ b secondary_startup
ENDPROC(shmobile_boot_scu)
.text
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 50c4915..330c1fc 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -16,13 +16,6 @@
#include <asm/assembler.h>
#include <asm/memory.h>
-#ifdef CONFIG_SMP
-ENTRY(shmobile_invalidate_start)
- bl v7_invalidate_l1
- b secondary_startup
-ENDPROC(shmobile_invalidate_start)
-#endif
-
/*
* Reset vector for secondary CPUs.
* This will be mapped at address 0 by SBAR register.
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
index f483b56..b0790fc 100644
--- a/arch/arm/mach-shmobile/platsmp-apmu.c
+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
@@ -133,7 +133,7 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
/* For this particular CPU register boot vector */
- shmobile_smp_hook(cpu, virt_to_phys(shmobile_invalidate_start), 0);
+ shmobile_smp_hook(cpu, virt_to_phys(secondary_startup), 0);
return apmu_wrap(cpu, apmu_power_on);
}
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index a0f3b1c..767c09e 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -31,7 +31,6 @@
#define RSTMGR_MPUMODRST_CPU1 0x2 /* CPU1 Reset */
-extern void socfpga_secondary_startup(void);
extern void __iomem *socfpga_scu_base_addr;
extern void socfpga_init_clocks(void);
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
index f65ea0a..5bb0164 100644
--- a/arch/arm/mach-socfpga/headsmp.S
+++ b/arch/arm/mach-socfpga/headsmp.S
@@ -30,8 +30,3 @@ ENTRY(secondary_trampoline)
1: .long .
.long socfpga_cpu1start_addr
ENTRY(secondary_trampoline_end)
-
-ENTRY(socfpga_secondary_startup)
- bl v7_invalidate_l1
- b secondary_startup
-ENDPROC(socfpga_secondary_startup)
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
index c64d89b..79c5336 100644
--- a/arch/arm/mach-socfpga/platsmp.c
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -40,7 +40,7 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
- writel(virt_to_phys(socfpga_secondary_startup),
+ writel(virt_to_phys(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
flush_cache_all();
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index e48a744..fffad24 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -19,7 +19,7 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += pm-tegra30.o
ifeq ($(CONFIG_CPU_IDLE),y)
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += cpuidle-tegra30.o
endif
-obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += sleep-tegra30.o
diff --git a/arch/arm/mach-tegra/headsmp.S b/arch/arm/mach-tegra/headsmp.S
deleted file mode 100644
index 2072e73..0000000
--- a/arch/arm/mach-tegra/headsmp.S
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-#include "sleep.h"
-
- .section ".text.head", "ax"
-
-ENTRY(tegra_secondary_startup)
- check_cpu_part_num 0xc09, r8, r9
- bleq v7_invalidate_l1
- b secondary_startup
-ENDPROC(tegra_secondary_startup)
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
index 894c5c4..6fd9db5 100644
--- a/arch/arm/mach-tegra/reset.c
+++ b/arch/arm/mach-tegra/reset.c
@@ -94,7 +94,7 @@ void __init tegra_cpu_reset_handler_init(void)
__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
*((u32 *)cpu_possible_mask);
__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] =
- virt_to_phys((void *)tegra_secondary_startup);
+ virt_to_phys((void *)secondary_startup);
#endif
#ifdef CONFIG_PM_SLEEP
diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h
index 29c3dec..9c479c7 100644
--- a/arch/arm/mach-tegra/reset.h
+++ b/arch/arm/mach-tegra/reset.h
@@ -37,7 +37,6 @@ void __tegra_cpu_reset_handler_start(void);
void __tegra_cpu_reset_handler(void);
void __tegra20_cpu1_resettable_status_offset(void);
void __tegra_cpu_reset_handler_end(void);
-void tegra_secondary_startup(void);
#ifdef CONFIG_PM_SLEEP
#define tegra_cpu_lp1_mask \
diff --git a/arch/arm/mach-zynq/common.h b/arch/arm/mach-zynq/common.h
index 382c60e..7038cae 100644
--- a/arch/arm/mach-zynq/common.h
+++ b/arch/arm/mach-zynq/common.h
@@ -17,8 +17,6 @@
#ifndef __MACH_ZYNQ_COMMON_H__
#define __MACH_ZYNQ_COMMON_H__
-void zynq_secondary_startup(void);
-
extern int zynq_slcr_init(void);
extern int zynq_early_slcr_init(void);
extern void zynq_slcr_system_reset(void);
diff --git a/arch/arm/mach-zynq/headsmp.S b/arch/arm/mach-zynq/headsmp.S
index dd8c071..045c727 100644
--- a/arch/arm/mach-zynq/headsmp.S
+++ b/arch/arm/mach-zynq/headsmp.S
@@ -22,8 +22,3 @@ zynq_secondary_trampoline_jump:
.globl zynq_secondary_trampoline_end
zynq_secondary_trampoline_end:
ENDPROC(zynq_secondary_trampoline)
-
-ENTRY(zynq_secondary_startup)
- bl v7_invalidate_l1
- b secondary_startup
-ENDPROC(zynq_secondary_startup)
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index 52d768f..f66816c 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -87,10 +87,9 @@ int zynq_cpun_start(u32 address, int cpu)
}
EXPORT_SYMBOL(zynq_cpun_start);
-static int zynq_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
+static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- return zynq_cpun_start(virt_to_phys(zynq_secondary_startup), cpu);
+ return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
}
/*
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3d1054f..7911f14 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -268,7 +268,10 @@ __v7_ca15mp_setup:
__v7_b15mp_setup:
__v7_ca17mp_setup:
mov r10, #0
-1:
+1: adr r12, __v7_setup_stack @ the local stack
+ stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
+ bl v7_invalidate_l1
+ ldmia r12, {r0-r5, lr}
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
@@ -277,7 +280,7 @@ __v7_ca17mp_setup:
orreq r0, r0, r10 @ Enable CPU-specific SMP bits
mcreq p15, 0, r0, c1, c0, 1
#endif
- b __v7_setup
+ b __v7_setup_cont
__v7_pj4b_setup:
#ifdef CONFIG_CPU_PJ4B
@@ -335,10 +338,11 @@ __v7_pj4b_setup:
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
- stmia r12, {r0-r5, r7, r9, r11, lr}
- bl v7_flush_dcache_louis
- ldmia r12, {r0-r5, r7, r9, r11, lr}
+ stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
+ bl v7_invalidate_l1
+ ldmia r12, {r0-r5, lr}
+__v7_setup_cont:
mrc p15, 0, r0, c0, c0, 0 @ read main ID register
and r10, r0, #0xff000000 @ ARM?
teq r10, #0x41000000
@@ -460,7 +464,7 @@ ENDPROC(__v7_setup)
.align 2
__v7_setup_stack:
- .space 4 * 11 @ 11 registers
+ .space 4 * 7 @ 7 registers
__INITDATA
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 8aa79105..1160434 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -6,9 +6,15 @@ obj-vdso := vgettimeofday.o datapage.o
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.so.raw vdso.lds
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-ccflags-y := -shared -fPIC -fno-common -fno-builtin -fno-stack-protector
-ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 -DDISABLE_BRANCH_PROFILING
-ccflags-y += -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector
+ccflags-y += -DDISABLE_BRANCH_PROFILING
+
+VDSO_LDFLAGS := -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
+VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
+VDSO_LDFLAGS += -nostdlib -shared
+VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
@@ -40,10 +46,8 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# Actual build commands
quiet_cmd_vdsold = VDSO $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) \
- $(call cc-ldoption, -Wl$(comma)--build-id) \
- -Wl,-Bsymbolic -Wl,-z,max-page-size=4096 \
- -Wl,-z,common-page-size=4096 -o $@
+ cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
+ -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d555ed3..09a4125 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -103,6 +103,10 @@ config NO_IOPORT_MAP
config STACKTRACE_SUPPORT
def_bool y
+config ILLEGAL_POINTER_VALUE
+ hex
+ default 0xdead000000000000
+
config LOCKDEP_SUPPORT
def_bool y
@@ -411,6 +415,22 @@ config ARM64_ERRATUM_845719
If unsure, say Y.
+config ARM64_ERRATUM_843419
+ bool "Cortex-A53: 843419: A load or store might access an incorrect address"
+ depends on MODULES
+ default y
+ help
+ This option builds kernel modules using the large memory model in
+ order to avoid the use of the ADRP instruction, which can cause
+ a subsequent memory access to use an incorrect address on Cortex-A53
+ parts up to r0p4.
+
+ Note that the kernel itself must be linked with a version of ld
+ which fixes potentially affected ADRP instructions through the
+ use of veneers.
+
+ If unsure, say Y.
+
endmenu
@@ -581,7 +601,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64"
- depends on ARM64 && OF
+ depends on ARM64 && OF && !PREEMPT_RT_FULL
select SWIOTLB_XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 4d2a925..3258174 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -30,6 +30,10 @@ endif
CHECKFLAGS += -D__aarch64__
+ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+KBUILD_CFLAGS_MODULE += -mcmodel=large
+endif
+
# Default value
head-y := arch/arm64/kernel/head.o
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index f800d45..44a59c2 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -114,6 +114,14 @@ extern phys_addr_t memstart_addr;
#define PHYS_OFFSET ({ memstart_addr; })
/*
+ * The maximum physical address that the linear direct mapping
+ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
+ * a 2's complement signed quantity and negated to derive the
+ * maximum size of the linear mapping.)
+ */
+#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; })
+
+/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*
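
MAX_MEMBLOCK_ADDR above works because -PAGE_OFFSET, taken as an unsigned 64-bit quantity, is exactly the size of the linear mapping. A worked example with assumed values (a 39-bit VA PAGE_OFFSET and a memstart_addr of 0x80000000, neither taken from the patch):

/*
 * Illustration of the MAX_MEMBLOCK_ADDR arithmetic. PAGE_OFFSET and
 * memstart_addr below are assumed example values, not from the patch.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t page_offset   = 0xffffffc000000000ULL;	/* 39-bit VA split */
	uint64_t memstart_addr = 0x0000000080000000ULL;	/* assumed RAM base */

	uint64_t linear_size = 0 - page_offset;		/* 2^64 - PAGE_OFFSET */
	uint64_t max_addr    = memstart_addr - page_offset - 1;

	printf("linear map size: 0x%" PRIx64 " bytes (%" PRIu64 " GiB)\n",
	       linear_size, linear_size >> 30);
	printf("MAX_MEMBLOCK_ADDR: 0x%" PRIx64 "\n", max_addr);
	return 0;
}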
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 56283f8..cf73194 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -80,7 +80,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index b056369..70654d84 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
* Use reader/writer locks instead of plain spinlock.
*/
static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
void register_break_hook(struct break_hook *hook)
{
- write_lock(&break_hook_lock);
- list_add(&hook->node, &break_hook);
- write_unlock(&break_hook_lock);
+ spin_lock(&break_hook_lock);
+ list_add_rcu(&hook->node, &break_hook);
+ spin_unlock(&break_hook_lock);
}
void unregister_break_hook(struct break_hook *hook)
{
- write_lock(&break_hook_lock);
- list_del(&hook->node);
- write_unlock(&break_hook_lock);
+ spin_lock(&break_hook_lock);
+ list_del_rcu(&hook->node);
+ spin_unlock(&break_hook_lock);
+ synchronize_rcu();
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
struct break_hook *hook;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
- read_lock(&break_hook_lock);
- list_for_each_entry(hook, &break_hook, node)
+ rcu_read_lock();
+ list_for_each_entry_rcu(hook, &break_hook, node)
if ((esr & hook->esr_mask) == hook->esr_val)
fn = hook->fn;
- read_unlock(&break_hook_lock);
+ rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 352962b..5170fd5 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -257,7 +257,8 @@ static bool __init efi_virtmap_init(void)
*/
if (!is_normal_ram(md))
prot = __pgprot(PROT_DEVICE_nGnRE);
- else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+ else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ !PAGE_ALIGNED(md->phys_addr))
prot = PAGE_KERNEL_EXEC;
else
prot = PAGE_KERNEL;
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 08cafc5..0f03a8f 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
ENDPROC(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* save return value regs */
+ .macro save_return_regs
+ sub sp, sp, #64
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #16]
+ stp x4, x5, [sp, #32]
+ stp x6, x7, [sp, #48]
+ .endm
+
+ /* restore return value regs */
+ .macro restore_return_regs
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #48]
+ add sp, sp, #64
+ .endm
+
/*
* void ftrace_graph_caller(void)
*
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
* only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
*/
ENTRY(return_to_handler)
- str x0, [sp, #-16]!
+ save_return_regs
mov x0, x29 // parent's fp
bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
mov x30, x0 // restore the original return address
- ldr x0, [sp], #16
+ restore_return_regs
ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 3dca156..c31e59f 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -157,6 +157,7 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+ fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 19f915e..36aa31f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -565,6 +565,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr hstr_el2, xzr // Disable CP15 traps to EL2
#endif
+ /* EL2 debug */
+ mrs x0, pmcr_el0 // Disable debug access traps
+ ubfx x0, x0, #11, #5 // to EL2 and allow access to
+ msr mdcr_el2, x0 // all PMU counters from EL1
+
/* Stage-2 translation */
msr vttbr_el2, xzr
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 9249020..30eb88e 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -77,7 +77,7 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
}
}
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap)
{
@@ -124,13 +124,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
unsigned long flags = 0;
int ret;
- spin_lock_irqsave(&patch_lock, flags);
+ raw_spin_lock_irqsave(&patch_lock, flags);
waddr = patch_map(addr, FIX_TEXT_POKE0);
ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
patch_unmap(FIX_TEXT_POKE0);
- spin_unlock_irqrestore(&patch_lock, flags);
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
return ret;
}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 67bf410..876eb8d 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
AARCH64_INSN_IMM_ADR);
break;
+#ifndef CONFIG_ARM64_ERRATUM_843419
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
AARCH64_INSN_IMM_ADR);
break;
+#endif
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 702591f..fd26e57 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1318,7 +1318,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
/* Don't bother with PPIs; they're already affine */
irq = platform_get_irq(pdev, 0);
if (irq >= 0 && irq_is_percpu(irq))
- return 0;
+ goto out;
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
@@ -1355,6 +1355,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
else
kfree(irqs);
+out:
cpu_pmu->plat_device = pdev;
return 0;
}
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index d26fcd4..c58aee0 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
* Other callers might not initialize the si_lsb field,
* so check explicitely for the right codes here.
*/
- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+ if (from->si_signo == SIGBUS &&
+ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE))
@@ -213,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
/*
* VFP save/restore code.
+ *
+ * We have to be careful with endianness, since the fpsimd context-switch
+ * code operates on 128-bit (Q) register values whereas the compat ABI
+ * uses an array of 64-bit (D) registers. Consequently, we need to swap
+ * the two halves of each Q register when running on a big-endian CPU.
*/
+union __fpsimd_vreg {
+ __uint128_t raw;
+ struct {
+#ifdef __AARCH64EB__
+ u64 hi;
+ u64 lo;
+#else
+ u64 lo;
+ u64 hi;
+#endif
+ };
+};
+
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{
struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr, fpexc;
- int err = 0;
+ int i, err = 0;
/*
* Save the hardware registers to the fpsimd_state structure.
@@ -236,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
/*
* Now copy the FP registers. Since the registers are packed,
* we can copy the prefix we want (V0-V15) as it is.
- * FIXME: Won't work if big endian.
*/
- err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
- sizeof(frame->ufp.fpregs));
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg = {
+ .raw = fpsimd->vregs[i >> 1],
+ };
+
+ __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ }
/* Create an AArch32 fpscr from the fpsr and the fpcr. */
fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
@@ -264,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr;
- int err = 0;
+ int i, err = 0;
__get_user_error(magic, &frame->magic, err);
__get_user_error(size, &frame->size, err);
@@ -274,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
- /*
- * Copy the FP registers into the start of the fpsimd_state.
- * FIXME: Won't work if big endian.
- */
- err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
- sizeof(frame->ufp.fpregs));
+ /* Copy the FP registers into the start of the fpsimd_state. */
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg;
+
+ __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ fpsimd.vregs[i >> 1] = vreg.raw;
+ }
/* Extract the fpsr and the fpcr from the fpscr */
__get_user_error(fpscr, &frame->ufp.fpscr, err);
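
The compat VFP code now splits each 128-bit Q register into two 64-bit D halves through a union whose member order depends on endianness. A userspace sketch of the same idea, showing the little-endian member order (on a big-endian build hi and lo swap, as in the #ifdef above):

/*
 * Sketch of the __fpsimd_vreg split. Requires a compiler with
 * __uint128_t (e.g. GCC/Clang on a 64-bit host); the little-endian
 * member order shown here matches the #else branch above.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

union fpsimd_vreg {
	__uint128_t raw;
	struct {
		uint64_t lo;	/* D[2n]   on little-endian */
		uint64_t hi;	/* D[2n+1] on little-endian */
	};
};

int main(void)
{
	union fpsimd_vreg vreg;

	vreg.raw = ((__uint128_t)0x1122334455667788ULL << 64) | 0x99aabbccddeeff00ULL;
	printf("lo = 0x%016" PRIx64 ", hi = 0x%016" PRIx64 "\n", vreg.lo, vreg.hi);
	return 0;
}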
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 5befd01..64f9e60 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -844,8 +844,6 @@
mrs x3, cntv_ctl_el0
and x3, x3, #3
str w3, [x0, #VCPU_TIMER_CNTV_CTL]
- bic x3, x3, #1 // Clear Enable
- msr cntv_ctl_el0, x3
isb
@@ -853,6 +851,9 @@
str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
1:
+ // Disable the virtual timer
+ msr cntv_ctl_el0, xzr
+
// Allow physical timer/counter access for the host
mrs x2, cnthctl_el2
orr x2, x2, #3
@@ -947,13 +948,15 @@ ENTRY(__kvm_vcpu_run)
// Guest context
add x2, x0, #VCPU_CONTEXT
+ // We must restore the 32-bit state before the sysregs, thanks
+ // to Cortex-A57 erratum #852523.
+ restore_guest_32bit_state
bl __restore_sysregs
bl __restore_fpsimd
skip_debug_state x3, 1f
bl __restore_debug
1:
- restore_guest_32bit_state
restore_guest_regs
// That's it, no more messing around.
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index f02530e..85c5715 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_abt32(vcpu, false, addr);
-
- inject_abt64(vcpu, false, addr);
+ else
+ inject_abt64(vcpu, false, addr);
}
/**
@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_abt32(vcpu, true, addr);
-
- inject_abt64(vcpu, true, addr);
+ else
+ inject_abt64(vcpu, true, addr);
}
/**
@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_undef32(vcpu);
-
- inject_undef64(vcpu);
+ else
+ inject_undef64(vcpu);
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0948d32..1fdbb37 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -279,6 +279,7 @@ retry:
* starvation.
*/
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
index 28a0952..3a76927 100644
--- a/arch/m32r/boot/compressed/misc.c
+++ b/arch/m32r/boot/compressed/misc.c
@@ -86,6 +86,7 @@ decompress_kernel(int mmu_on, unsigned char *zimage_data,
free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE;
puts("\nDecompressing Linux... ");
- decompress(input_data, input_len, NULL, NULL, output_data, NULL, error);
+ __decompress(input_data, input_len, NULL, NULL, output_data, 0,
+ NULL, error);
puts("done.\nBooting the kernel.\n");
}
diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
index 5a822bb..066e74f 100644
--- a/arch/m68k/include/asm/linkage.h
+++ b/arch/m68k/include/asm/linkage.h
@@ -4,4 +4,34 @@
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+ __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+ __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+ __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5), "m" (arg6))
+
#endif
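
These macros keep gcc from treating the caller-owned argument slots as scratch space. A hypothetical usage sketch follows; sys_demo_add and its arguments are invented for illustration, and the two-argument macros are repeated locally so the example compiles on its own:

/*
 * Hypothetical usage of asmlinkage_protect(); the function and its
 * arguments are made up, only the macro pattern mirrors the header above.
 */
#include <stdio.h>

#define asmlinkage_protect(n, ret, args...) \
	__asmlinkage_protect##n(ret, ##args)
#define __asmlinkage_protect_n(ret, args...) \
	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
#define __asmlinkage_protect2(ret, arg1, arg2) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))

long sys_demo_add(long a, long b)
{
	long ret = a + b;

	/* keep a and b "live" so gcc won't reuse their stack slots */
	asmlinkage_protect(2, ret, a, b);
	return ret;
}

int main(void)
{
	printf("%ld\n", sys_demo_add(2, 3));
	return 0;
}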
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 7fc8397..fd2a36a 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -186,6 +186,7 @@ int get_c0_perfcount_int(void)
{
return ATH79_MISC_IRQ(5);
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index 5483106..080cd53 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -111,8 +111,8 @@ void decompress_kernel(unsigned long boot_heap_start)
puts("\n");
/* Decompress the kernel with according algorithm */
- decompress((char *)zimage_start, zimage_size, 0, 0,
- (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error);
+ __decompress((char *)zimage_start, zimage_size, 0, 0,
+ (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
/* FIXME: should we flush cache here? */
puts("Now, booting the kernel...\n");
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b57..0000000
--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 819af9d..70f6e7f 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
+#ifdef CONFIG_SMP
+ /*
+ * For SMP, multiple CPUs can race, so we need to do
+ * this atomically.
+ */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+ unsigned long page_global = _PAGE_GLOBAL;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ " .set push\n"
+ " .set noreorder\n"
+ "1: " LL_INSN " %[tmp], %[buddy]\n"
+ " bnez %[tmp], 2f\n"
+ " or %[tmp], %[tmp], %[global]\n"
+ " " SC_INSN " %[tmp], %[buddy]\n"
+ " beqz %[tmp], 1b\n"
+ " nop\n"
+ "2:\n"
+ " .set pop"
+ : [buddy] "+m" (buddy->pte),
+ [tmp] "=&r" (tmp)
+ : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
}
#endif
}
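
The ll/sc loop above sets _PAGE_GLOBAL in the buddy PTE only when the buddy is already non-none, retrying if another CPU races. A portable C11 compare-and-swap sketch of the same test-modify-retry pattern (the PAGE_GLOBAL value here is a stand-in, not the real MIPS bit):

/*
 * C11 compare-and-swap analogue of the ll/sc loop above: OR in
 * PAGE_GLOBAL only when the buddy PTE is already non-zero, retrying
 * if another CPU updated it in between. Illustrative sketch only.
 */
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_GLOBAL 0x1UL	/* stand-in value for illustration */

static void buddy_set_global(_Atomic unsigned long *buddy)
{
	unsigned long old = atomic_load(buddy);

	while (old != 0 &&
	       !atomic_compare_exchange_weak(buddy, &old, old | PAGE_GLOBAL))
		;	/* 'old' was reloaded by the failed CAS; retry */
}

int main(void)
{
	_Atomic unsigned long buddy = 0xabc0;

	buddy_set_global(&buddy);
	printf("buddy pte: 0x%lx\n", (unsigned long)buddy);
	return 0;
}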
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d93..a71da57 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
.set noreorder
bltz k0, 8f
move k1, sp
+#ifdef CONFIG_EVA
+ /*
+ * Flush interAptiv's Return Prediction Stack (RPS) by writing
+ * EntryHi. Toggling Config7.RPS is slower and less portable.
+ *
+ * The RPS isn't automatically flushed when exceptions are
+ * taken, which can result in kernel mode speculative accesses
+ * to user addresses if the RPS mispredicts. That's harmless
+ * when user and kernel share the same address space, but with
+ * EVA the same user segments may be unmapped to kernel mode,
+ * even containing sensitive MMIO regions or invalid memory.
+ *
+ * This can happen when the kernel sets the return address to
+ * ret_from_* and jr's to the exception handler, which looks
+ * more like a tail call than a function call. If nested calls
+ * don't evict the last user address in the RPS, it will
+ * mispredict the return and fetch from a user controlled
+ * address into the icache.
+ *
+ * More recent EVA-capable cores with MAAR to restrict
+ * speculative accesses aren't affected.
+ */
+ MFC0 k0, CP0_ENTRYHI
+ MTC0 k0, CP0_ENTRYHI
+#endif
.set reorder
/* Called from user mode, new stack. */
get_saved_sp
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 3e4491a..789d7bf 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
- cpumask_t mask;
+ cpumask_t allowed, mask;
int retval;
struct task_struct *p;
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;
- cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+ cpumask_and(&mask, &allowed, cpu_active_mask);
out_unlock:
read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 74bab9d..c6bbf21 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
process_entry:
PTR_L s2, (s0)
- PTR_ADD s0, s0, SZREG
+ PTR_ADDIU s0, s0, SZREG
/*
* In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
/* copy page word by word */
REG_L s5, (s2)
REG_S s5, (s4)
- PTR_ADD s4, s4, SZREG
- PTR_ADD s2, s2, SZREG
- LONG_SUB s6, s6, 1
+ PTR_ADDIU s4, s4, SZREG
+ PTR_ADDIU s2, s2, SZREG
+ LONG_ADDIU s6, s6, -1
beq s6, zero, process_entry
b copy_word
b process_entry
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index ad4d4463..a6f6b76 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,7 +80,7 @@ syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_64_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 446cc65..4b20106 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_N32_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705..5d7f263 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d2d1c19..5f5f44e 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
+ mm_segment_t old_fs = get_fs();
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
prepare_frametrace(&regs);
}
}
+ /*
+ * show_stack() deals exclusively with kernel mode, so be sure to access
+ * the stack in the kernel (not user) address space.
+ */
+ set_fs(KERNEL_DS);
show_stacktrace(task, &regs);
+ set_fs(old_fs);
}
static void show_code(unsigned int __user *pc)
@@ -1518,6 +1525,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;
+ mm_segment_t old_fs = get_fs();
prev_state = exception_enter();
show_regs(regs);
@@ -1539,8 +1547,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
dump_tlb_all();
}
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+
show_code((unsigned int __user *) regs->cp0_epc);
+ set_fs(old_fs);
+
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index af84bef..eb3efd1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -438,7 +438,7 @@ do { \
: "memory"); \
} while(0)
-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 6ab1057..d01ade6 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
{
return ltq_perfcount_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index 22f04ca..2efb18a 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -64,6 +64,9 @@ void __init prom_init_env(void)
}
if (memsize == 0)
memsize = 256;
+
+ loongson_sysconf.nr_uarts = 1;
+
pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
#else
struct boot_params *boot_p;
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 6983fcd..2b95e34 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1137,7 +1137,7 @@ emul:
break;
case mfhc_op:
- if (!cpu_has_mips_r2)
+ if (!cpu_has_mips_r2_r6)
goto sigill;
/* copregister rd -> gpr[rt] */
@@ -1148,7 +1148,7 @@ emul:
break;
case mthc_op:
- if (!cpu_has_mips_r2)
+ if (!cpu_has_mips_r2_r6)
goto sigill;
/* copregister rd <- gpr[rt] */
@@ -1181,6 +1181,24 @@ emul:
}
break;
+ case bc1eqz_op:
+ case bc1nez_op:
+ if (!cpu_has_mips_r6 || delay_slot(xcp))
+ return SIGILL;
+
+ cond = likely = 0;
+ switch (MIPSInst_RS(ir)) {
+ case bc1eqz_op:
+ if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
+ cond = 1;
+ break;
+ case bc1nez_op:
+ if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
+ cond = 1;
+ break;
+ }
+ goto branch_common;
+
case bc_op:
if (delay_slot(xcp))
return SIGILL;
@@ -1207,7 +1225,7 @@ emul:
case bct_op:
break;
}
-
+branch_common:
set_delay_slot(xcp);
if (cond) {
/*
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 609d124..371eec1 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
dma_flag = __GFP_DMA;
else
#endif
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 185e682..a7f7d9f 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -148,6 +148,7 @@ int get_c0_perfcount_int(void)
return mips_cpu_perf_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
@@ -165,14 +166,17 @@ unsigned int get_c0_compare_int(void)
static void __init init_rtc(void)
{
- /* stop the clock whilst setting it up */
- CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+ unsigned char freq, ctrl;
- /* 32KHz time base */
- CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+ /* Set 32KHz time base if not already set */
+ freq = CMOS_READ(RTC_FREQ_SELECT);
+ if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
- /* start the clock */
- CMOS_WRITE(RTC_24H, RTC_CONTROL);
+ /* Ensure SET bit is clear so RTC can run */
+ ctrl = CMOS_READ(RTC_CONTROL);
+ if (ctrl & RTC_SET)
+ CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
}
void __init plat_time_init(void)
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index e1d6989..a120b7a 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
return -1;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 67889fc..ab73f6f 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
{
return gic_get_c0_perfcount_int();
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
void __init plat_time_init(void)
{
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 7cf91b9..199ace4 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
{
return rt_perfcount_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index f3191db..c0eab24 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -507,8 +507,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
struct pt_regs *old_regs;
unsigned long eirr_val;
int irq, cpu = smp_processor_id();
-#ifdef CONFIG_SMP
struct irq_desc *desc;
+#ifdef CONFIG_SMP
cpumask_t dest;
#endif
@@ -521,8 +521,12 @@ void do_cpu_irq_mask(struct pt_regs *regs)
goto set_out;
irq = eirr_to_irq(eirr_val);
-#ifdef CONFIG_SMP
+ /* Filter out spurious interrupts, mostly from serial port at bootup */
desc = irq_to_desc(irq);
+ if (unlikely(!desc->action))
+ goto set_out;
+
+#ifdef CONFIG_SMP
cpumask_copy(&dest, desc->irq_data.affinity);
if (irqd_is_per_cpu(&desc->irq_data) &&
!cpumask_test_cpu(smp_processor_id(), &dest)) {
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 7ef22e3..0b8d26d 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -821,7 +821,7 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
19: ldd,ma 0(%sr3,%r26), %r29
- sub,= %r29, %r25, %r0
+ sub,*= %r29, %r25, %r0
b,n cas2_end
20: std,ma %r24, 0(%sr3,%r26)
copy %r0, %r28
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 73eddda..4eec430 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -28,6 +28,9 @@ BOOTCFLAGS += -m64
endif
ifdef CONFIG_CPU_BIG_ENDIAN
BOOTCFLAGS += -mbig-endian
+else
+BOOTCFLAGS += -mlittle-endian
+BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
endif
BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 43e6ad4..88d27e3 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -135,7 +135,19 @@
#define pte_iterate_hashed_end() } while(0)
#ifdef CONFIG_PPC_HAS_HASH_64K
-#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr)
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
+#define pte_pagesize_index(mm, addr, pte) \
+ ({ \
+ unsigned int psize; \
+ if (is_kernel_addr(addr)) \
+ psize = MMU_PAGE_4K; \
+ else \
+ psize = get_slice_psize(mm, addr); \
+ psize; \
+ })
#else
#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
#endif
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 7a4ede1..b77ef36 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -343,6 +343,7 @@ extern void rtas_power_off(void);
extern void rtas_halt(void);
extern void rtas_os_term(char *str);
extern int rtas_get_sensor(int sensor, int index, int *state);
+extern int rtas_get_sensor_fast(int sensor, int index, int *state);
extern int rtas_get_power_level(int powerdomain, int *level);
extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
extern bool rtas_indicator_present(int token, int *maxindex);
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 58abeda..15cca17 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {}
extern void enable_kernel_fp(void);
extern void enable_kernel_altivec(void);
+extern void enable_kernel_vsx(void);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 9ee61d1..cb565ad 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -310,11 +310,26 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
if (!(pe->type & EEH_PE_PHB)) {
if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
+
+ /*
+ * The config space of some PCI devices can't be accessed
+ * when their PEs are in frozen state. Otherwise, fenced
+ * PHB might be seen. Those PEs are identified with flag
+ * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED
+ * is set automatically when the PE is put to EEH_PE_ISOLATED.
+ *
+ * Restoring BARs possibly triggers PCI config access in
+ * (OPAL) firmware and then causes fenced PHB. If the
+ * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
+ * pointless to restore BARs and dump config space.
+ */
eeh_ops->configure_bridge(pe);
- eeh_pe_restore_bars(pe);
+ if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
+ eeh_pe_restore_bars(pe);
- pci_regs_buf[0] = 0;
- eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
+ pci_regs_buf[0] = 0;
+ eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
+ }
}
eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
@@ -1118,9 +1133,6 @@ void eeh_add_device_late(struct pci_dev *dev)
return;
}
- if (eeh_has_flag(EEH_PROBE_MODE_DEV))
- eeh_ops->probe(pdn, NULL);
-
/*
* The EEH cache might not be removed correctly because of
* unbalanced kref to the device during unplug time, which
@@ -1144,6 +1156,9 @@ void eeh_add_device_late(struct pci_dev *dev)
dev->dev.archdata.edev = NULL;
}
+ if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+ eeh_ops->probe(pdn, NULL);
+
edev->pdev = dev;
dev->dev.archdata.edev = edev;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index febb50d..0596373 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
-#if 0
-/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
WARN_ON(preemptible());
@@ -220,7 +218,6 @@ void enable_kernel_vsx(void)
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
-#endif
void giveup_vsx(struct task_struct *tsk)
{
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 7a488c1..caffb10 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state)
}
EXPORT_SYMBOL(rtas_get_sensor);
+int rtas_get_sensor_fast(int sensor, int index, int *state)
+{
+ int token = rtas_token("get-sensor-state");
+ int rc;
+
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -ENOENT;
+
+ rc = rtas_call(token, 2, 2, state, sensor, index);
+ WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
+ rc <= RTAS_EXTENDED_DELAY_MAX));
+
+ if (rc < 0)
+ return rtas_error_rc(rc);
+ return rc;
+}
+
bool rtas_indicator_present(int token, int *maxindex)
{
int proplen, count, i;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3a831a..da50e0c 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 453a8a4..964c0ce 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -826,12 +826,15 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
unsigned long size = kvmppc_get_gpr(vcpu, 4);
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
u64 buf;
+ int srcu_idx;
int ret;
if (!is_power_of_2(size) || (size > sizeof(buf)))
return H_TOO_HARD;
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (ret != 0)
return H_TOO_HARD;
@@ -866,6 +869,7 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
unsigned long val = kvmppc_get_gpr(vcpu, 6);
u64 buf;
+ int srcu_idx;
int ret;
switch (size) {
@@ -889,7 +893,9 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
return H_TOO_HARD;
}
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (ret != 0)
return H_TOO_HARD;
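As an illustrative aside (not part of the patch): both hunks above wrap the MMIO bus access in an SRCU read-side section because kvm_io_bus_read()/kvm_io_bus_write() walk tables protected by kvm->srcu. A minimal sketch of the same shape, using a hypothetical helper name (read_mmio_word is invented for illustration):

#include <linux/srcu.h>
#include <linux/kvm_host.h>

/* Pin the bus tables for the duration of the access, then release. */
static int read_mmio_word(struct kvm_vcpu *vcpu, gpa_t addr, u32 *out)
{
	int srcu_idx, ret;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, sizeof(*out), out);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	return ret;
}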
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3e9087f..1b5e411 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2177,7 +2177,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vc->runner = vcpu;
if (n_ceded == vc->n_runnable) {
kvmppc_vcore_blocked(vc);
- } else if (should_resched()) {
+ } else if (need_resched()) {
vc->vcore_state = VCORE_PREEMPT;
/* Let something else run */
cond_resched_lock(&vc->lock);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index b027a89..c6d601c 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -421,14 +421,20 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
v = pte & ~HPTE_V_HVLOCK;
if (v & HPTE_V_VALID) {
- u64 pte1;
-
- pte1 = be64_to_cpu(hpte[1]);
hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
- rb = compute_tlbie_rb(v, pte1, pte_index);
+ rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
- /* Read PTE low word after tlbie to get final R/C values */
- remove_revmap_chain(kvm, pte_index, rev, v, pte1);
+ /*
+ * The reference (R) and change (C) bits in a HPT
+ * entry can be set by hardware at any time up until
+ * the HPTE is invalidated and the TLB invalidation
+ * sequence has completed. This means that when
+ * removing a HPTE, we need to re-read the HPTE after
+ * the invalidation sequence has completed in order to
+ * obtain reliable values of R and C.
+ */
+ remove_revmap_chain(kvm, pte_index, rev, v,
+ be64_to_cpu(hpte[1]));
}
r = rev->guest_rpte & ~HPTE_GR_RESERVED;
note_hpte_modification(kvm, rev);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 4d70df2..ffd98b2 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1127,6 +1127,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
bne 3f
lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
beq 4f
b guest_exit_cont
3:
@@ -1170,6 +1171,7 @@ mc_cont:
bl kvmhv_accumulate_time
#endif
+ mr r3, r12
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
nop
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 43dafb9..4d87122 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
BUG_ON(index >= 4096);
vpn = hpt_vpn(ea, vsid, ssize);
- hash = hpt_hash(vpn, shift, ssize);
hpte_slot_array = get_hpte_slot_array(pmdp);
if (psize == MMU_PAGE_4K) {
/*
@@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
valid = hpte_valid(hpte_slot_array, index);
if (valid) {
/* update the hpte bits */
+ hash = hpt_hash(vpn, shift, ssize);
hidx = hpte_hash_index(hpte_slot_array, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
@@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
if (!valid) {
unsigned long hpte_group;
+ hash = hpt_hash(vpn, shift, ssize);
/* insert new entry */
pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
new_pmd |= _PAGE_HASHPTE;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index bca2aeb..3ff29cf 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -99,6 +99,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
+ irq_hw_number_t hwirq;
if (WARN_ON(!phb))
return;
@@ -106,10 +107,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&phb->msi_bmp,
- virq_to_hw(entry->irq) - phb->msi_base, 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
}
}
#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 02e4a17..3b6647e 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -189,7 +189,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
int state;
int critical;
- status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
+ status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
+ &state);
if (state > 3)
critical = 1; /* Time Critical */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index df6a704..e6e8b24 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -268,6 +268,11 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
eeh_dev_init(PCI_DN(np), pci->phb);
}
break;
+ case OF_RECONFIG_DETACH_NODE:
+ pci = PCI_DN(np);
+ if (pci)
+ list_del(&pci->list);
+ break;
default:
err = NOTIFY_DONE;
break;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index f086c6f..fd16cb5 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -128,15 +128,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
struct fsl_msi *msi_data;
+ irq_hw_number_t hwirq;
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
return;
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index a3f660e..89496cf 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -65,6 +65,7 @@ static struct irq_chip mpic_pasemi_msi_chip = {
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;
pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
@@ -72,10 +73,11 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+ hwirq, ALLOC_CHUNK);
}
return;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index b2cef18..13a34b2 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -107,15 +107,16 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}
return;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 6e2e6aa..02a137d 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -124,16 +124,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+ irq_hw_number_t hwirq;
dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
list_for_each_entry(entry, &dev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
}
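As an illustrative aside (not part of the patch): the four MSI teardown hunks above (pnv, fsl, pasemi, u3msi, ppc4xx) all make the same ordering fix, since virq_to_hw() is only meaningful while the virq mapping still exists. The sketch below condenses that common shape; the function name and the bitmap argument are placeholders, not code from the patch.

#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <asm/msi_bitmap.h>

/*
 * Capture everything derived from the virq before the mapping is
 * disposed, then free the hwirq bitmap slot last.
 */
static void example_teardown_msi_irqs(struct pci_dev *pdev,
				      struct msi_bitmap *bmp)
{
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		hwirq = virq_to_hw(entry->irq);	/* still valid here */
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);	/* virq is dead after this */
		msi_bitmap_free_hwirqs(bmp, hwirq, 1);
	}
}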
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index d478811..fac6ac9 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 42506b3..4da604e 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -167,7 +167,7 @@ unsigned long decompress_kernel(void)
#endif
puts("Uncompressing Linux... ");
- decompress(input_data, input_len, NULL, NULL, output, NULL, error);
+ __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
puts("Ok, booting the kernel.\n");
return (unsigned long) output;
}
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index fe8d692..c78ba51 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -48,6 +48,19 @@ typedef struct
struct ucontext32 uc;
} rt_sigframe32;
+static inline void sigset_to_sigset32(unsigned long *set64,
+ compat_sigset_word *set32)
+{
+ set32[0] = (compat_sigset_word) set64[0];
+ set32[1] = (compat_sigset_word)(set64[0] >> 32);
+}
+
+static inline void sigset32_to_sigset(compat_sigset_word *set32,
+ unsigned long *set64)
+{
+ set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
+}
+
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -303,10 +316,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;
- if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+ if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
@@ -323,10 +338,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
@@ -397,7 +414,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
/* Create struct sigcontext32 on the signal stack */
- memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
+ sigset_to_sigset32(set->sig, sc.oldmask);
sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
return -EFAULT;
@@ -458,6 +475,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
+ compat_sigset_t cset;
rt_sigframe32 __user *frame;
unsigned long restorer;
size_t frame_size;
@@ -505,11 +523,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
store_sigregs();
/* Create ucontext on the signal stack. */
+ sigset_to_sigset32(set->sig, cset.sig);
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs32(regs, &frame->uc.uc_mcontext) ||
- __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
+ __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7262fe4..1942f22 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -683,7 +683,7 @@ static void __init setup_memory(void)
/*
* Setup hardware capabilities.
*/
-static void __init setup_hwcaps(void)
+static int __init setup_hwcaps(void)
{
static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
struct cpuid cpu_id;
@@ -749,9 +749,11 @@ static void __init setup_hwcaps(void)
elf_hwcap |= HWCAP_S390_TE;
/*
- * Vector extension HWCAP_S390_VXRS is bit 11.
+ * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
+ * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
+ * instead of facility bit 129.
*/
- if (test_facility(129))
+ if (MACHINE_HAS_VX)
elf_hwcap |= HWCAP_S390_VXRS;
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
@@ -788,7 +790,9 @@ static void __init setup_hwcaps(void)
strcpy(elf_platform, "z13");
break;
}
+ return 0;
}
+arch_initcall(setup_hwcaps);
/*
* Add system information as device randomness
@@ -871,11 +875,6 @@ void __init setup_arch(char **cmdline_p)
cpu_init();
/*
- * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
- */
- setup_hwcaps();
-
- /*
* Create kernel page tables and switch to virtual addressing.
*/
paging_init();
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index 95470a4..208a975 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -132,7 +132,7 @@ void decompress_kernel(void)
puts("Uncompressing Linux... ");
cache_control(CACHE_ENABLE);
- decompress(input_data, input_len, NULL, NULL, output, NULL, error);
+ __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
cache_control(CACHE_DISABLE);
puts("Ok, booting the kernel.\n");
}
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 2e48eb8..c90930d 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
.setkey = aes_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
.setkey = aes_set_key,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 6bf2479..561a84d 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
.setkey = camellia_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index dd6a34f..61af794 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
.setkey = des_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
.blkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = des3_ede_set_key,
.encrypt = cbc3_encrypt,
.decrypt = cbc3_decrypt,
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 1f0aa20..6424249 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -28,16 +28,10 @@
* Must preserve %o5 between VISEntryHalf and VISExitHalf */
#define VISEntryHalf \
- rd %fprs, %o5; \
- andcc %o5, FPRS_FEF, %g0; \
- be,pt %icc, 297f; \
- sethi %hi(298f), %g7; \
- sethi %hi(VISenterhalf), %g1; \
- jmpl %g1 + %lo(VISenterhalf), %g0; \
- or %g7, %lo(298f), %g7; \
- clr %o5; \
-297: wr %o5, FPRS_FEF, %fprs; \
-298:
+ VISEntry
+
+#define VISExitHalf \
+ VISExit
#define VISEntryHalfFast(fail_label) \
rd %fprs, %o5; \
@@ -47,7 +41,7 @@
ba,a,pt %xcc, fail_label; \
297: wr %o5, FPRS_FEF, %fprs;
-#define VISExitHalf \
+#define VISExitHalfFast \
wr %o5, 0, %fprs;
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 140527a..83aeeb1 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+ VISExitHalfFast
+#else
VISExitHalf
-
+#endif
brz,pn %o2, .Lexit
cmp %o2, 19
ble,pn %icc, .Lsmall_unaligned
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index b320ae9..a063d84 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
stx %g3, [%g6 + TI_GSR]
2: add %g6, %g1, %g3
- cmp %o5, FPRS_DU
- be,pn %icc, 6f
- sll %g1, 3, %g1
+ mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+ sll %g1, 3, %g1
stb %o5, [%g3 + TI_FPSAVED]
rd %gsr, %g2
add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
.align 32
80: jmpl %g7 + %g0, %g0
nop
-
-6: ldub [%g3 + TI_FPSAVED], %o5
- or %o5, FPRS_DU, %o5
- add %g6, TI_FPREGS+0x80, %g2
- stb %o5, [%g3 + TI_FPSAVED]
-
- sll %g1, 5, %g1
- add %g6, TI_FPREGS+0xc0, %g3
- wr %g0, FPRS_FEF, %fprs
- membar #Sync
- stda %f32, [%g2 + %g1] ASI_BLK_P
- stda %f48, [%g3 + %g1] ASI_BLK_P
- membar #Sync
- ba,pt %xcc, 80f
- nop
-
- .align 32
-80: jmpl %g7 + %g0, %g0
- nop
-
- .align 32
-VISenterhalf:
- ldub [%g6 + TI_FPDEPTH], %g1
- brnz,a,pn %g1, 1f
- cmp %g1, 1
- stb %g0, [%g6 + TI_FPSAVED]
- stx %fsr, [%g6 + TI_XFSR]
- clr %o5
- jmpl %g7 + %g0, %g0
- wr %g0, FPRS_FEF, %fprs
-
-1: bne,pn %icc, 2f
- srl %g1, 1, %g1
- ba,pt %xcc, vis1
- sub %g7, 8, %g7
-2: addcc %g6, %g1, %g3
- sll %g1, 3, %g1
- andn %o5, FPRS_DU, %g2
- stb %g2, [%g3 + TI_FPSAVED]
-
- rd %gsr, %g2
- add %g6, %g1, %g3
- stx %g2, [%g3 + TI_GSR]
- add %g6, %g1, %g2
- stx %fsr, [%g2 + TI_XFSR]
- sll %g1, 5, %g1
-3: andcc %o5, FPRS_DL, %g0
- be,pn %icc, 4f
- add %g6, TI_FPREGS, %g2
-
- add %g6, TI_FPREGS+0x40, %g3
- membar #Sync
- stda %f0, [%g2 + %g1] ASI_BLK_P
- stda %f16, [%g3 + %g1] ASI_BLK_P
- membar #Sync
- ba,pt %xcc, 4f
- nop
-
- .align 32
-4: and %o5, FPRS_DU, %o5
- jmpl %g7 + %g0, %g0
- wr %o5, FPRS_FEF, %fprs
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1d649a9..8069ce1 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
void VISenter(void);
EXPORT_SYMBOL(VISenter);
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
unsigned long *);
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index e8c2c04..c667e10 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
return -EFAULT;
- memset(to, 0, sizeof(*to));
-
err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);
diff --git a/arch/unicore32/boot/compressed/misc.c b/arch/unicore32/boot/compressed/misc.c
index 176d5bd..5c65dfe 100644
--- a/arch/unicore32/boot/compressed/misc.c
+++ b/arch/unicore32/boot/compressed/misc.c
@@ -119,8 +119,8 @@ unsigned long decompress_kernel(unsigned long output_start,
output_ptr = get_unaligned_le32(tmp);
arch_decomp_puts("Uncompressing Linux...");
- decompress(input_data, input_data_end - input_data, NULL, NULL,
- output_data, NULL, error);
+ __decompress(input_data, input_data_end - input_data, NULL, NULL,
+ output_data, 0, NULL, error);
arch_decomp_puts(" done, booting the kernel.\n");
return output_ptr;
}
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index a107b93..e28437e 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -424,7 +424,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
#endif
debug_putstr("\nDecompressing Linux... ");
- decompress(input_data, input_len, NULL, NULL, output, NULL, error);
+ __decompress(input_data, input_len, NULL, NULL, output, output_len,
+ NULL, error);
parse_elf(output);
/*
* 32-bit always performs relocations. 64-bit relocations are only
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 2079baf..daf8d2b 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -294,6 +294,7 @@ static struct ahash_alg ghash_async_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-clmulni",
.cra_priority = 400,
+ .cra_ctxsize = sizeof(struct ghash_async_ctx),
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_type = &crypto_ahash_type,
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index a0bf89f..4e10d73 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
set_ldt(NULL, 0);
}
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
- set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
- preempt_disable();
- load_LDT_nolock(pc);
- preempt_enable();
-}
-
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 09b9620..364d274 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
* we put the segment information here.
*/
typedef struct {
- void *ldt;
- int size;
+ struct ldt_struct *ldt;
#ifdef CONFIG_X86_64
/* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index e997f70..80d67dd 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+ /*
+ * Xen requires page-aligned LDTs with special permissions. This is
+ * needed to prevent us from installing evil descriptors such as
+ * call gates. On native, we could merge the ldt_struct and LDT
+ * allocations, but it's not worth trying to optimize.
+ */
+ struct desc_struct *entries;
+ int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+ struct ldt_struct *ldt;
+
+ /* lockless_dereference synchronizes with smp_store_release */
+ ldt = lockless_dereference(mm->context.ldt);
+
+ /*
+ * Any change to mm->context.ldt is followed by an IPI to all
+ * CPUs with the mm active. The LDT will not be freed until
+ * after the IPI is handled by all such CPUs. This means that,
+ * if the ldt_struct changes before we return, the values we see
+ * will be safe, and the new values will be loaded before we run
+ * any user code.
+ *
+ * NB: don't try to convert this to use RCU without extreme care.
+ * We would still need IRQs off, because we don't want to change
+ * the local LDT after an IPI loaded a newer value than the one
+ * that we can see.
+ */
+
+ if (unlikely(ldt))
+ set_ldt(ldt->entries, ldt->size);
+ else
+ clear_LDT();
+
+ DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
+/*
* Used for LDT copy/destruction.
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* was called and then modify_ldt changed
* prev->context.ldt but suppressed an IPI to this CPU.
* In this case, prev->context.ldt != NULL, because we
- * never free an LDT while the mm still exists. That
- * means that next->context.ldt != prev->context.ldt,
- * because mms never share an LDT.
+ * never set context.ldt to NULL while the mm still
+ * exists. That means that next->context.ldt !=
+ * prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
}
#ifdef CONFIG_SMP
else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
load_mm_cr4(next);
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
}
}
#endif
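As an illustrative aside (not part of the patch): load_mm_ldt() above pairs a lockless_dereference() of mm->context.ldt with a release-store publish on the writer side. The standalone C11 sketch below shows the same publish/consume shape with invented names (my_config, publish, use_current); acquire ordering stands in conservatively for the kernel's dependency-ordered lockless_dereference().

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct my_config {		/* stands in for ldt_struct: immutable once published */
	int size;
	int entries[16];
};

static _Atomic(struct my_config *) current_cfg;	/* like mm->context.ldt */

/* Writer: fully initialise the object, then publish it with release semantics. */
static void publish(struct my_config *cfg)
{
	atomic_store_explicit(&current_cfg, cfg, memory_order_release);
}

/* Reader: the acquire load guarantees the pointed-to fields are visible. */
static void use_current(void)
{
	struct my_config *cfg =
		atomic_load_explicit(&current_cfg, memory_order_acquire);

	if (cfg)				/* analogous to set_ldt(...) */
		printf("size=%d first=%d\n", cfg->size, cfg->entries[0]);
	else					/* analogous to clear_LDT() */
		printf("no config installed\n");
}

int main(void)
{
	struct my_config *cfg = calloc(1, sizeof(*cfg));

	cfg->size = 1;
	cfg->entries[0] = 42;
	publish(cfg);
	use_current();
	free(cfg);
	return 0;
}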
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 6ef9a16..c08949b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -101,13 +101,13 @@ static __always_inline bool __preempt_count_dec_and_test(void)
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
#ifdef CONFIG_PREEMPT_LAZY
- return unlikely(!raw_cpu_read_4(__preempt_count) || \
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset ||
test_thread_flag(TIF_NEED_RESCHED_LAZY));
#else
- return unlikely(!raw_cpu_read_4(__preempt_count));
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
#endif
}
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 6fe6b18..9dfce4e 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@ struct sigcontext {
unsigned long ip;
unsigned long flags;
unsigned short cs;
- unsigned short __pad2; /* Was called gs, but was always zero. */
- unsigned short __pad1; /* Was called fs, but was always zero. */
- unsigned short ss;
+ unsigned short gs;
+ unsigned short fs;
+ unsigned short __pad0;
unsigned long err;
unsigned long trapno;
unsigned long oldmask;
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 16dc4e8..d8b9f90 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,24 +177,9 @@ struct sigcontext {
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
-
- /*
- * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
- * Linux saved and restored fs and gs in these slots. This
- * was counterproductive, as fsbase and gsbase were never
- * saved, so arch_prctl was presumably unreliable.
- *
- * If these slots are ever needed for any other purpose, there
- * is some risk that very old 64-bit binaries could get
- * confused. I doubt that many such binaries still work,
- * though, since the same patch in 2.5.64 also removed the
- * 64-bit set_thread_area syscall, so it appears that there is
- * no TLS API that works in both pre- and post-2.5.64 kernels.
- */
- __u16 __pad2; /* Was gs. */
- __u16 __pad1; /* Was fs. */
-
- __u16 ss;
+ __u16 gs;
+ __u16 fs;
+ __u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index dbe76a1..07bea80 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -489,6 +489,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ acpi_penalize_sci_irq(bus_irq, trigger, polarity);
/*
* stash over-ride to indicate we've been here
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index aef6531..d1918a8 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -325,10 +325,15 @@ done:
static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
+ unsigned long flags;
+
if (instr[0] != 0x90)
return;
+ local_irq_save(flags);
add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+ sync_core();
+ local_irq_restore(flags);
DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
instr, a->instrlen - a->padlen, a->padlen);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index dcb5285..307a498 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -336,6 +336,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
apic_write(APIC_LVTT, lvtt_value);
if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
+ /*
+ * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
+ * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
+ * According to Intel, MFENCE can do the serialization here.
+ */
+ asm volatile("mfence" : : : "memory");
+
printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
return;
}
@@ -1424,7 +1431,7 @@ static inline void __x2apic_disable(void)
{
u64 msr;
- if (cpu_has_apic)
+ if (!cpu_has_apic)
return;
rdmsrl(MSR_IA32_APICBASE, msr);
@@ -1483,10 +1490,13 @@ void x2apic_setup(void)
static __init void x2apic_disable(void)
{
- u32 x2apic_id;
+ u32 x2apic_id, state = x2apic_state;
+
+ x2apic_mode = 0;
+ x2apic_state = X2APIC_DISABLED;
- if (x2apic_state != X2APIC_ON)
- goto out;
+ if (state != X2APIC_ON)
+ return;
x2apic_id = read_apic_id();
if (x2apic_id >= 255)
@@ -1494,9 +1504,6 @@ static __init void x2apic_disable(void)
__x2apic_disable();
register_lapic_address(mp_lapic_addr);
-out:
- x2apic_state = X2APIC_DISABLED;
- x2apic_mode = 0;
}
static __init void x2apic_enable(void)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a62cf04..205e0f3 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1434,7 +1434,7 @@ void cpu_init(void)
load_sp0(t, &current->thread);
set_tss_desc(cpu, t);
load_TR_desc();
- load_LDT(&init_mm.context);
+ load_mm_ldt(&init_mm);
clear_all_debug_regs();
dbg_restore_debug_regs();
@@ -1483,7 +1483,7 @@ void cpu_init(void)
load_sp0(t, thread);
set_tss_desc(cpu, t);
load_TR_desc();
- load_LDT(&init_mm.context);
+ load_mm_ldt(&init_mm);
t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index b4a41cf..e166d83 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -116,6 +116,27 @@ void mce_intel_hcpu_update(unsigned long cpu)
per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}
+static void cmci_toggle_interrupt_mode(bool on)
+{
+ unsigned long flags, *owned;
+ int bank;
+ u64 val;
+
+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ owned = this_cpu_ptr(mce_banks_owned);
+ for_each_set_bit(bank, owned, MAX_NR_BANKS) {
+ rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+
+ if (on)
+ val |= MCI_CTL2_CMCI_EN;
+ else
+ val &= ~MCI_CTL2_CMCI_EN;
+
+ wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ }
+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
@@ -145,7 +166,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
*/
if (!atomic_read(&cmci_storm_on_cpus)) {
__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
- cmci_reenable();
+ cmci_toggle_interrupt_mode(true);
cmci_recheck();
}
return CMCI_POLL_INTERVAL;
@@ -156,22 +177,6 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
}
}
-static void cmci_storm_disable_banks(void)
-{
- unsigned long flags, *owned;
- int bank;
- u64 val;
-
- raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- owned = this_cpu_ptr(mce_banks_owned);
- for_each_set_bit(bank, owned, MAX_NR_BANKS) {
- rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
- val &= ~MCI_CTL2_CMCI_EN;
- wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
- }
- raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
-}
-
static bool cmci_storm_detect(void)
{
unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
@@ -193,7 +198,7 @@ static bool cmci_storm_detect(void)
if (cnt <= CMCI_STORM_THRESHOLD)
return false;
- cmci_storm_disable_banks();
+ cmci_toggle_interrupt_mode(false);
__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
r = atomic_add_return(1, &cmci_storm_on_cpus);
mce_timer_kick(CMCI_STORM_INTERVAL);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index aa4e3a7..4cc98a4 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2170,21 +2170,25 @@ static unsigned long get_segment_base(unsigned int segment)
int idx = segment >> 3;
if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ struct ldt_struct *ldt;
+
if (idx > LDT_ENTRIES)
return 0;
- if (idx > current->active_mm->context.size)
+ /* IRQs are off, so this synchronizes with smp_store_release */
+ ldt = lockless_dereference(current->active_mm->context.ldt);
+ if (!ldt || idx > ldt->size)
return 0;
- desc = current->active_mm->context.ldt;
+ desc = &ldt->entries[idx];
} else {
if (idx > GDT_ENTRIES)
return 0;
- desc = raw_cpu_ptr(gdt_page.gdt);
+ desc = raw_cpu_ptr(gdt_page.gdt) + idx;
}
- return get_desc_base(desc + idx);
+ return get_desc_base(desc);
}
#ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 2813ea0..2221261 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2098,9 +2098,12 @@ static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
- struct event_constraint *c1 = cpuc->event_constraint[idx];
+ struct event_constraint *c1 = NULL;
struct event_constraint *c2;
+ if (idx >= 0) /* fake does < 0 */
+ c1 = cpuc->event_constraint[idx];
+
/*
* first time only
* - static constraint: no change across incremental scheduling calls
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c76d3e3..403ace5 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -184,10 +184,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
}
#ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
- unsigned long nr_pfn, void *arg)
+static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
- int *nr_ranges = arg;
+ unsigned int *nr_ranges = arg;
(*nr_ranges)++;
return 0;
@@ -213,7 +212,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
ced->image = image;
- walk_system_ram_range(0, -1, &nr_ranges,
+ walk_system_ram_res(0, -1, &nr_ranges,
get_nr_ram_ranges_callback);
ced->max_nr_ranges = nr_ranges;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 9bf9070..db2a15c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -809,8 +809,6 @@ do_preempt_schedule_irq:
restore_c_regs_and_iret:
RESTORE_C_REGS
REMOVE_PT_GPREGS_FROM_STACK 8
-
-irq_return:
INTERRUPT_RETURN
ENTRY(native_iret)
@@ -1413,7 +1411,18 @@ END(error_exit)
/* Runs on exception stack */
ENTRY(nmi)
INTR_FRAME
+ /*
+ * Fix up the exception frame if we're on Xen.
+ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+ * one value to the stack on native, so it may clobber the rdx
+ * scratch slot, but it won't clobber any of the important
+ * slots past it.
+ *
+ * Xen is a different story, because the Xen frame itself overlaps
+ * the "NMI executing" variable.
+ */
PARAVIRT_ADJUST_EXCEPTION_FRAME
+
/*
* We allow breakpoints in NMIs. If a breakpoint occurs, then
* the iretq it performs will take us out of NMI context.
@@ -1431,11 +1440,12 @@ ENTRY(nmi)
* If the variable is not set and the stack is not the NMI
* stack then:
* o Set the special variable on the stack
- * o Copy the interrupt frame into a "saved" location on the stack
- * o Copy the interrupt frame into a "copy" location on the stack
+ * o Copy the interrupt frame into an "outermost" location on the
+ * stack
+ * o Copy the interrupt frame into an "iret" location on the stack
* o Continue processing the NMI
* If the variable is set or the previous stack is the NMI stack:
- * o Modify the "copy" location to jump to the repeate_nmi
+ * o Modify the "iret" location to jump to the repeat_nmi
* o return back to the first NMI
*
* Now on exit of the first NMI, we first clear the stack variable
@@ -1444,32 +1454,154 @@ ENTRY(nmi)
* a nested NMI that updated the copy interrupt stack frame, a
* jump will be made to the repeat_nmi code that will handle the second
* NMI.
+ *
+ * However, espfix prevents us from directly returning to userspace
+ * with a single IRET instruction. Similarly, IRET to user mode
+ * can fault. We therefore handle NMIs from user space like
+ * other IST entries.
*/
/* Use %rdx as our temp variable throughout */
pushq_cfi %rdx
CFI_REL_OFFSET rdx, 0
+ testb $3, CS-RIP+8(%rsp)
+ jz .Lnmi_from_kernel
+
+ /*
+ * NMI from user mode. We need to run on the thread stack, but we
+ * can't go through the normal entry paths: NMIs are masked, and
+ * we don't want to enable interrupts, because then we'll end
+ * up in an awkward situation in which IRQs are on but NMIs
+ * are off.
+ *
+ * We also must not push anything to the stack before switching
+ * stacks lest we corrupt the "NMI executing" variable.
+ */
+
+ SWAPGS_UNSAFE_STACK
+ cld
+ movq %rsp, %rdx
+ movq PER_CPU_VAR(kernel_stack), %rsp
+ pushq 5*8(%rdx) /* pt_regs->ss */
+ pushq 4*8(%rdx) /* pt_regs->rsp */
+ pushq 3*8(%rdx) /* pt_regs->flags */
+ pushq 2*8(%rdx) /* pt_regs->cs */
+ pushq 1*8(%rdx) /* pt_regs->rip */
+ pushq $-1 /* pt_regs->orig_ax */
+ pushq %rdi /* pt_regs->di */
+ pushq %rsi /* pt_regs->si */
+ pushq (%rdx) /* pt_regs->dx */
+ pushq %rcx /* pt_regs->cx */
+ pushq %rax /* pt_regs->ax */
+ pushq %r8 /* pt_regs->r8 */
+ pushq %r9 /* pt_regs->r9 */
+ pushq %r10 /* pt_regs->r10 */
+ pushq %r11 /* pt_regs->r11 */
+ pushq %rbx /* pt_regs->rbx */
+ pushq %rbp /* pt_regs->rbp */
+ pushq %r12 /* pt_regs->r12 */
+ pushq %r13 /* pt_regs->r13 */
+ pushq %r14 /* pt_regs->r14 */
+ pushq %r15 /* pt_regs->r15 */
+
+ /*
+ * At this point we no longer need to worry about stack damage
+ * due to nesting -- we're on the normal thread stack and we're
+ * done with the NMI stack.
+ */
+ movq %rsp, %rdi
+ movq $-1, %rsi
+ call do_nmi
+
+ /*
+ * Return back to user mode. We must *not* do the normal exit
+ * work, because we don't want to enable interrupts. Fortunately,
+ * do_nmi doesn't modify pt_regs.
+ */
+ SWAPGS
+ jmp restore_c_regs_and_iret
+
+.Lnmi_from_kernel:
+ /*
+ * Here's what our stack frame will look like:
+ * +---------------------------------------------------------+
+ * | original SS |
+ * | original Return RSP |
+ * | original RFLAGS |
+ * | original CS |
+ * | original RIP |
+ * +---------------------------------------------------------+
+ * | temp storage for rdx |
+ * +---------------------------------------------------------+
+ * | "NMI executing" variable |
+ * +---------------------------------------------------------+
+ * | iret SS } Copied from "outermost" frame |
+ * | iret Return RSP } on each loop iteration; overwritten |
+ * | iret RFLAGS } by a nested NMI to force another |
+ * | iret CS } iteration if needed. |
+ * | iret RIP } |
+ * +---------------------------------------------------------+
+ * | outermost SS } initialized in first_nmi; |
+ * | outermost Return RSP } will not be changed before |
+ * | outermost RFLAGS } NMI processing is done. |
+ * | outermost CS } Copied to "iret" frame on each |
+ * | outermost RIP } iteration. |
+ * +---------------------------------------------------------+
+ * | pt_regs |
+ * +---------------------------------------------------------+
+ *
+ * The "original" frame is used by hardware. Before re-enabling
+ * NMIs, we need to be done with it, and we need to leave enough
+ * space for the asm code here.
+ *
+ * We return by executing IRET while RSP points to the "iret" frame.
+ * That will either return for real or it will loop back into NMI
+ * processing.
+ *
+ * The "outermost" frame is copied to the "iret" frame on each
+ * iteration of the loop, so each iteration starts with the "iret"
+ * frame pointing to the final return target.
+ */
+
/*
- * If %cs was not the kernel segment, then the NMI triggered in user
- * space, which means it is definitely not nested.
+ * Determine whether we're a nested NMI.
+ *
+ * If we interrupted kernel code between repeat_nmi and
+ * end_repeat_nmi, then we are a nested NMI. We must not
+ * modify the "iret" frame because it's being written by
+ * the outer NMI. That's okay; the outer NMI handler is
+ * about to call do_nmi anyway, so we can just
+ * resume the outer NMI.
*/
- cmpl $__KERNEL_CS, 16(%rsp)
- jne first_nmi
+
+ movq $repeat_nmi, %rdx
+ cmpq 8(%rsp), %rdx
+ ja 1f
+ movq $end_repeat_nmi, %rdx
+ cmpq 8(%rsp), %rdx
+ ja nested_nmi_out
+1:
/*
- * Check the special variable on the stack to see if NMIs are
- * executing.
+ * Now check "NMI executing". If it's set, then we're nested.
+ * This will not detect if we interrupted an outer NMI just
+ * before IRET.
*/
cmpl $1, -8(%rsp)
je nested_nmi
/*
- * Now test if the previous stack was an NMI stack.
- * We need the double check. We check the NMI stack to satisfy the
- * race when the first NMI clears the variable before returning.
- * We check the variable because the first NMI could be in a
- * breakpoint routine using a breakpoint stack.
+ * Now test if the previous stack was an NMI stack. This covers
+ * the case where we interrupt an outer NMI after it clears
+ * "NMI executing" but before IRET. We need to be careful, though:
+ * there is one case in which RSP could point to the NMI stack
+ * despite there being no NMI active: naughty userspace controls
+ * RSP at the very beginning of the SYSCALL targets. We can
+ * pull a fast one on naughty userspace, though: we program
+ * SYSCALL to mask DF, so userspace cannot cause DF to be set
+ * if it controls the kernel's RSP. We set DF before we clear
+ * "NMI executing".
*/
lea 6*8(%rsp), %rdx
/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
@@ -1480,25 +1612,21 @@ ENTRY(nmi)
cmpq %rdx, 4*8(%rsp)
/* If it is below the NMI stack, it is a normal NMI */
jb first_nmi
- /* Ah, it is within the NMI stack, treat it as nested */
+
+ /* Ah, it is within the NMI stack. */
+
+ testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
+ jz first_nmi /* RSP was user controlled. */
+
+ /* This is a nested NMI. */
CFI_REMEMBER_STATE
nested_nmi:
/*
- * Do nothing if we interrupted the fixup in repeat_nmi.
- * It's about to repeat the NMI handler, so we are fine
- * with ignoring this one.
+ * Modify the "iret" frame to point to repeat_nmi, forcing another
+ * iteration of NMI handling.
*/
- movq $repeat_nmi, %rdx
- cmpq 8(%rsp), %rdx
- ja 1f
- movq $end_repeat_nmi, %rdx
- cmpq 8(%rsp), %rdx
- ja nested_nmi_out
-
-1:
- /* Set up the interrupted NMIs stack to jump to repeat_nmi */
leaq -1*8(%rsp), %rdx
movq %rdx, %rsp
CFI_ADJUST_CFA_OFFSET 1*8
@@ -1517,60 +1645,23 @@ nested_nmi_out:
popq_cfi %rdx
CFI_RESTORE rdx
- /* No need to check faults here */
+ /* We are returning to kernel mode, so this cannot result in a fault. */
INTERRUPT_RETURN
CFI_RESTORE_STATE
first_nmi:
- /*
- * Because nested NMIs will use the pushed location that we
- * stored in rdx, we must keep that space available.
- * Here's what our stack frame will look like:
- * +-------------------------+
- * | original SS |
- * | original Return RSP |
- * | original RFLAGS |
- * | original CS |
- * | original RIP |
- * +-------------------------+
- * | temp storage for rdx |
- * +-------------------------+
- * | NMI executing variable |
- * +-------------------------+
- * | copied SS |
- * | copied Return RSP |
- * | copied RFLAGS |
- * | copied CS |
- * | copied RIP |
- * +-------------------------+
- * | Saved SS |
- * | Saved Return RSP |
- * | Saved RFLAGS |
- * | Saved CS |
- * | Saved RIP |
- * +-------------------------+
- * | pt_regs |
- * +-------------------------+
- *
- * The saved stack frame is used to fix up the copied stack frame
- * that a nested NMI may change to make the interrupted NMI iret jump
- * to the repeat_nmi. The original stack frame and the temp storage
- * is also used by nested NMIs and can not be trusted on exit.
- */
- /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
+ /* Restore rdx. */
movq (%rsp), %rdx
CFI_RESTORE rdx
- /* Set the NMI executing variable on the stack. */
+ /* Set "NMI executing" on the stack. */
pushq_cfi $1
- /*
- * Leave room for the "copied" frame
- */
+ /* Leave room for the "iret" frame */
subq $(5*8), %rsp
CFI_ADJUST_CFA_OFFSET 5*8
- /* Copy the stack frame to the Saved frame */
+ /* Copy the "original" frame to the "outermost" frame */
.rept 5
pushq_cfi 11*8(%rsp)
.endr
@@ -1578,6 +1669,7 @@ first_nmi:
/* Everything up to here is safe from nested NMIs */
+repeat_nmi:
/*
* If there was a nested NMI, the first NMI's iret will return
* here. But NMIs are still enabled and we can take another
@@ -1586,16 +1678,21 @@ first_nmi:
* it will just return, as we are about to repeat an NMI anyway.
* This makes it safe to copy to the stack frame that a nested
* NMI will update.
- */
-repeat_nmi:
- /*
- * Update the stack variable to say we are still in NMI (the update
- * is benign for the non-repeat case, where 1 was pushed just above
- * to this very stack slot).
+ *
+ * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
+ * we're repeating an NMI, gsbase has the same value that it had on
+ * the first iteration. paranoid_entry will load the kernel
+ * gsbase if needed before we call do_nmi.
+ *
+ * Set "NMI executing" in case we came back here via IRET.
*/
movq $1, 10*8(%rsp)
- /* Make another copy, this one may be modified by nested NMIs */
+ /*
+ * Copy the "outermost" frame to the "iret" frame. NMIs that nest
+ * here must not modify the "iret" frame while we're writing to
+ * it or it will end up containing garbage.
+ */
addq $(10*8), %rsp
CFI_ADJUST_CFA_OFFSET -10*8
.rept 5
@@ -1606,9 +1703,9 @@ repeat_nmi:
end_repeat_nmi:
/*
- * Everything below this point can be preempted by a nested
- * NMI if the first NMI took an exception and reset our iret stack
- * so that we repeat another NMI.
+ * Everything below this point can be preempted by a nested NMI.
+ * If this happens, then the inner NMI will change the "iret"
+ * frame to point back to repeat_nmi.
*/
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
ALLOC_PT_GPREGS_ON_STACK
@@ -1623,29 +1720,11 @@ end_repeat_nmi:
call paranoid_entry
DEFAULT_FRAME 0
- /*
- * Save off the CR2 register. If we take a page fault in the NMI then
- * it could corrupt the CR2 value. If the NMI preempts a page fault
- * handler before it was able to read the CR2 register, and then the
- * NMI itself takes a page fault, the page fault that was preempted
- * will read the information from the NMI page fault and not the
- * origin fault. Save it off and restore it if it changes.
- * Use the r12 callee-saved register.
- */
- movq %cr2, %r12
-
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
call do_nmi
- /* Did the NMI take a page fault? Restore cr2 if it did */
- movq %cr2, %rcx
- cmpq %rcx, %r12
- je 1f
- movq %r12, %cr2
-1:
-
testl %ebx,%ebx /* swapgs needed? */
jnz nmi_restore
nmi_swapgs:
@@ -1653,12 +1732,27 @@ nmi_swapgs:
nmi_restore:
RESTORE_EXTRA_REGS
RESTORE_C_REGS
- /* Pop the extra iret frame at once */
+
+ /* Point RSP at the "iret" frame. */
REMOVE_PT_GPREGS_FROM_STACK 6*8
- /* Clear the NMI executing stack variable */
- movq $0, 5*8(%rsp)
- jmp irq_return
+ /*
+ * Clear "NMI executing". Set DF first so that we can easily
+ * distinguish the remaining code between here and IRET from
+ * the SYSCALL entry and exit paths. On a native kernel, we
+ * could just inspect RIP, but, on paravirt kernels,
+ * INTERRUPT_RETURN can translate into a jump into a
+ * hypercall page.
+ */
+ std
+ movq $0, 5*8(%rsp) /* clear "NMI executing" */
+
+ /*
+ * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
+ * stack in a single instruction. We are returning to kernel
+ * mode, so this cannot result in a fault.
+ */
+ INTERRUPT_RETURN
CFI_ENDPROC
END(nmi)
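
The checks above decide whether an incoming NMI interrupted the tail of a previous NMI: the interrupted RSP must lie inside the NMI stack, and DF must be set in the interrupted RFLAGS (the outer NMI runs std just before clearing "NMI executing", so a frame with DF clear means the RSP value was merely user controlled). Below is a rough, stand-alone C model of that decision, illustrative only and not part of the patch; struct nmi_probe and classify_nmi are invented names.

#include <stdbool.h>
#include <stdio.h>

/* The two facts the assembly above inspects on the interrupted frame. */
struct nmi_probe {
	bool rsp_on_nmi_stack;	/* interrupted RSP lies inside this CPU's NMI stack */
	bool df_set;		/* DF in the interrupted RFLAGS; only the outer NMI's
				   exit window (std ... iret) runs with DF set */
};

static const char *classify_nmi(const struct nmi_probe *p)
{
	if (!p->rsp_on_nmi_stack)
		return "first NMI";	/* not interrupting the NMI exit path */
	if (!p->df_set)
		return "first NMI";	/* RSP could have been user controlled */
	return "nested NMI";		/* rewrite the iret frame to repeat_nmi */
}

int main(void)
{
	struct nmi_probe p = { .rsp_on_nmi_stack = true, .df_set = true };

	printf("%s\n", classify_nmi(&p));
	return 0;
}
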
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c37886d..2bcc052 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@@ -20,82 +21,82 @@
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
-#ifdef CONFIG_SMP
+/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *current_mm)
{
- if (current->active_mm == current_mm)
- load_LDT(&current->active_mm->context);
+ mm_context_t *pc;
+
+ if (current->active_mm != current_mm)
+ return;
+
+ pc = &current->active_mm->context;
+ set_ldt(pc->ldt->entries, pc->ldt->size);
}
-#endif
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
+static struct ldt_struct *alloc_ldt_struct(int size)
{
- void *oldldt, *newldt;
- int oldsize;
-
- if (mincount <= pc->size)
- return 0;
- oldsize = pc->size;
- mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
- (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
- if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
- newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
+ struct ldt_struct *new_ldt;
+ int alloc_size;
+
+ if (size > LDT_ENTRIES)
+ return NULL;
+
+ new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+ if (!new_ldt)
+ return NULL;
+
+ BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
+ alloc_size = size * LDT_ENTRY_SIZE;
+
+ /*
+ * Xen is very picky: it requires a page-aligned LDT that has no
+ * trailing nonzero bytes in any page that contains LDT descriptors.
+ * Keep it simple: zero the whole allocation and never allocate less
+ * than PAGE_SIZE.
+ */
+ if (alloc_size > PAGE_SIZE)
+ new_ldt->entries = vzalloc(alloc_size);
else
- newldt = (void *)__get_free_page(GFP_KERNEL);
-
- if (!newldt)
- return -ENOMEM;
+ new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (oldsize)
- memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
- oldldt = pc->ldt;
- memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
- (mincount - oldsize) * LDT_ENTRY_SIZE);
+ if (!new_ldt->entries) {
+ kfree(new_ldt);
+ return NULL;
+ }
- paravirt_alloc_ldt(newldt, mincount);
+ new_ldt->size = size;
+ return new_ldt;
+}
-#ifdef CONFIG_X86_64
- /* CHECKME: Do we really need this ? */
- wmb();
-#endif
- pc->ldt = newldt;
- wmb();
- pc->size = mincount;
- wmb();
-
- if (reload) {
-#ifdef CONFIG_SMP
- preempt_disable();
- load_LDT(pc);
- if (!cpumask_equal(mm_cpumask(current->mm),
- cpumask_of(smp_processor_id())))
- smp_call_function(flush_ldt, current->mm, 1);
- preempt_enable();
-#else
- load_LDT(pc);
-#endif
- }
- if (oldsize) {
- paravirt_free_ldt(oldldt, oldsize);
- if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(oldldt);
- else
- put_page(virt_to_page(oldldt));
- }
- return 0;
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
+{
+ paravirt_alloc_ldt(ldt->entries, ldt->size);
}
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+/* context.lock is held */
+static void install_ldt(struct mm_struct *current_mm,
+ struct ldt_struct *ldt)
{
- int err = alloc_ldt(new, old->size, 0);
- int i;
+ /* Synchronizes with lockless_dereference in load_mm_ldt. */
+ smp_store_release(&current_mm->context.ldt, ldt);
+
+ /* Activate the LDT for all CPUs using current_mm. */
+ on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+}
- if (err < 0)
- return err;
+static void free_ldt_struct(struct ldt_struct *ldt)
+{
+ if (likely(!ldt))
+ return;
- for (i = 0; i < old->size; i++)
- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
- return 0;
+ paravirt_free_ldt(ldt->entries, ldt->size);
+ if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(ldt->entries);
+ else
+ kfree(ldt->entries);
+ kfree(ldt);
}
/*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+ struct ldt_struct *new_ldt;
struct mm_struct *old_mm;
int retval = 0;
mutex_init(&mm->context.lock);
- mm->context.size = 0;
old_mm = current->mm;
- if (old_mm && old_mm->context.size > 0) {
- mutex_lock(&old_mm->context.lock);
- retval = copy_ldt(&mm->context, &old_mm->context);
- mutex_unlock(&old_mm->context.lock);
+ if (!old_mm) {
+ mm->context.ldt = NULL;
+ return 0;
}
+
+ mutex_lock(&old_mm->context.lock);
+ if (!old_mm->context.ldt) {
+ mm->context.ldt = NULL;
+ goto out_unlock;
+ }
+
+ new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+ if (!new_ldt) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+
+ memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+ new_ldt->size * LDT_ENTRY_SIZE);
+ finalize_ldt_struct(new_ldt);
+
+ mm->context.ldt = new_ldt;
+
+out_unlock:
+ mutex_unlock(&old_mm->context.lock);
return retval;
}
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
*/
void destroy_context(struct mm_struct *mm)
{
- if (mm->context.size) {
-#ifdef CONFIG_X86_32
- /* CHECKME: Can this ever happen ? */
- if (mm == current->active_mm)
- clear_LDT();
-#endif
- paravirt_free_ldt(mm->context.ldt, mm->context.size);
- if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(mm->context.ldt);
- else
- put_page(virt_to_page(mm->context.ldt));
- mm->context.size = 0;
- }
+ free_ldt_struct(mm->context.ldt);
+ mm->context.ldt = NULL;
}
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
- int err;
+ int retval;
unsigned long size;
struct mm_struct *mm = current->mm;
- if (!mm->context.size)
- return 0;
+ mutex_lock(&mm->context.lock);
+
+ if (!mm->context.ldt) {
+ retval = 0;
+ goto out_unlock;
+ }
+
if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
- mutex_lock(&mm->context.lock);
- size = mm->context.size * LDT_ENTRY_SIZE;
+ size = mm->context.ldt->size * LDT_ENTRY_SIZE;
if (size > bytecount)
size = bytecount;
- err = 0;
- if (copy_to_user(ptr, mm->context.ldt, size))
- err = -EFAULT;
- mutex_unlock(&mm->context.lock);
- if (err < 0)
- goto error_return;
+ if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+ retval = -EFAULT;
+ goto out_unlock;
+ }
+
if (size != bytecount) {
- /* zero-fill the rest */
- if (clear_user(ptr + size, bytecount - size) != 0) {
- err = -EFAULT;
- goto error_return;
+ /* Zero-fill the rest and pretend we read bytecount bytes. */
+ if (clear_user(ptr + size, bytecount - size)) {
+ retval = -EFAULT;
+ goto out_unlock;
}
}
- return bytecount;
-error_return:
- return err;
+ retval = bytecount;
+
+out_unlock:
+ mutex_unlock(&mm->context.lock);
+ return retval;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
struct desc_struct ldt;
int error;
struct user_desc ldt_info;
+ int oldsize, newsize;
+ struct ldt_struct *new_ldt, *old_ldt;
error = -EINVAL;
if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
goto out;
}
- mutex_lock(&mm->context.lock);
- if (ldt_info.entry_number >= mm->context.size) {
- error = alloc_ldt(&current->mm->context,
- ldt_info.entry_number + 1, 1);
- if (error < 0)
- goto out_unlock;
- }
-
- /* Allow LDTs to be cleared by the user. */
- if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
- if (oldmode || LDT_empty(&ldt_info)) {
- memset(&ldt, 0, sizeof(ldt));
- goto install;
+ if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
+ LDT_empty(&ldt_info)) {
+ /* The user wants to clear the entry. */
+ memset(&ldt, 0, sizeof(ldt));
+ } else {
+ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+ error = -EINVAL;
+ goto out;
}
+
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
}
- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
- error = -EINVAL;
+ mutex_lock(&mm->context.lock);
+
+ old_ldt = mm->context.ldt;
+ oldsize = old_ldt ? old_ldt->size : 0;
+ newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+
+ error = -ENOMEM;
+ new_ldt = alloc_ldt_struct(newsize);
+ if (!new_ldt)
goto out_unlock;
- }
- fill_ldt(&ldt, &ldt_info);
- if (oldmode)
- ldt.avl = 0;
+ if (old_ldt)
+ memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+ new_ldt->entries[ldt_info.entry_number] = ldt;
+ finalize_ldt_struct(new_ldt);
- /* Install the new entry ... */
-install:
- write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
+ install_ldt(mm, new_ldt);
+ free_ldt_struct(old_ldt);
error = 0;
out_unlock:
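
A side note on the ldt.c rewrite above: write_ldt() no longer edits the live table in place. It builds a fresh ldt_struct, copies the old entries, updates the one slot, and only then publishes the new table (via smp_store_release() and on_each_cpu_mask() in install_ldt()) and frees the old one. The stand-alone sketch below shows just the copy-and-update step in plain C; the names simple_ldt and ldt_copy_and_set are invented, and calloc stands in for the kernel allocators.

#include <stdlib.h>
#include <string.h>

struct desc { unsigned long raw; };

struct simple_ldt {
	struct desc *entries;
	int size;
};

/* Build a new table holding the old entries plus one updated slot. */
static struct simple_ldt *ldt_copy_and_set(const struct simple_ldt *old,
					   int index, struct desc value)
{
	struct simple_ldt *new_ldt;
	int newsize = index + 1;

	if (old && old->size > newsize)
		newsize = old->size;

	new_ldt = calloc(1, sizeof(*new_ldt));
	if (!new_ldt)
		return NULL;
	new_ldt->entries = calloc(newsize, sizeof(struct desc));
	if (!new_ldt->entries) {
		free(new_ldt);
		return NULL;
	}
	if (old)
		memcpy(new_ldt->entries, old->entries,
		       old->size * sizeof(struct desc));
	new_ldt->entries[index] = value;
	new_ldt->size = newsize;
	return new_ldt;		/* caller publishes it, then frees the old table */
}

int main(void)
{
	struct desc d = { .raw = 0xabc };
	struct simple_ldt *ldt = ldt_copy_and_set(NULL, 3, d);

	if (!ldt)
		return 1;
	free(ldt->entries);
	free(ldt);
	return 0;
}
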
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index c3e985d..d05bd2e 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
NOKPROBE_SYMBOL(default_do_nmi);
/*
- * NMIs can hit breakpoints which will cause it to lose its
- * NMI context with the CPU when the breakpoint does an iret.
- */
-#ifdef CONFIG_X86_32
-/*
- * For i386, NMIs use the same stack as the kernel, and we can
- * add a workaround to the iret problem in C (preventing nested
- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
- * can be in:
+ * NMIs can page fault or hit breakpoints, which will cause them to lose
+ * their NMI context with the CPU when the breakpoint or page fault does an IRET.
+ *
+ * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
+ * NMI processing. On x86_64, the asm glue protects us from nested NMIs
+ * if the outer NMI came from kernel mode, but we can still nest if the
+ * outer NMI came from user mode.
+ *
+ * To handle these nested NMIs, we have three states:
*
* 1) not running
* 2) executing
@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
* (Note, the latch is binary, thus multiple NMIs triggering,
* when one is running, are ignored. Only one NMI is restarted.)
*
- * If an NMI hits a breakpoint that executes an iret, another
- * NMI can preempt it. We do not want to allow this new NMI
- * to run, but we want to execute it when the first one finishes.
- * We set the state to "latched", and the exit of the first NMI will
- * perform a dec_return, if the result is zero (NOT_RUNNING), then
- * it will simply exit the NMI handler. If not, the dec_return
- * would have set the state to NMI_EXECUTING (what we want it to
- * be when we are running). In this case, we simply jump back
- * to rerun the NMI handler again, and restart the 'latched' NMI.
+ * If an NMI executes an iret, another NMI can preempt it. We do not
+ * want to allow this new NMI to run, but we want to execute it when the
+ * first one finishes. We set the state to "latched", and the exit of
+ * the first NMI will perform a dec_return; if the result is zero
+ * (NOT_RUNNING), it will simply exit the NMI handler. If not, the
+ * dec_return would have set the state to NMI_EXECUTING (what we want it
+ * to be when we are running). In this case, we simply jump back to
+ * rerun the NMI handler and restart the 'latched' NMI.
*
* No trap (breakpoint or page fault) should be hit before nmi_restart,
* thus there is no race between the first check of state for NOT_RUNNING
@@ -461,49 +460,36 @@ enum nmi_states {
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
-#define nmi_nesting_preprocess(regs) \
- do { \
- if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
- this_cpu_write(nmi_state, NMI_LATCHED); \
- return; \
- } \
- this_cpu_write(nmi_state, NMI_EXECUTING); \
- this_cpu_write(nmi_cr2, read_cr2()); \
- } while (0); \
- nmi_restart:
-
-#define nmi_nesting_postprocess() \
- do { \
- if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
- write_cr2(this_cpu_read(nmi_cr2)); \
- if (this_cpu_dec_return(nmi_state)) \
- goto nmi_restart; \
- } while (0)
-#else /* x86_64 */
+#ifdef CONFIG_X86_64
/*
- * In x86_64 things are a bit more difficult. This has the same problem
- * where an NMI hitting a breakpoint that calls iret will remove the
- * NMI context, allowing a nested NMI to enter. What makes this more
- * difficult is that both NMIs and breakpoints have their own stack.
- * When a new NMI or breakpoint is executed, the stack is set to a fixed
- * point. If an NMI is nested, it will have its stack set at that same
- * fixed address that the first NMI had, and will start corrupting the
- * stack. This is handled in entry_64.S, but the same problem exists with
- * the breakpoint stack.
+ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
+ * some care, the inner breakpoint will clobber the outer breakpoint's
+ * stack.
*
- * If a breakpoint is being processed, and the debug stack is being used,
- * if an NMI comes in and also hits a breakpoint, the stack pointer
- * will be set to the same fixed address as the breakpoint that was
- * interrupted, causing that stack to be corrupted. To handle this case,
- * check if the stack that was interrupted is the debug stack, and if
- * so, change the IDT so that new breakpoints will use the current stack
- * and not switch to the fixed address. On return of the NMI, switch back
- * to the original IDT.
+ * If a breakpoint is being processed, and the debug stack is being
+ * used, if an NMI comes in and also hits a breakpoint, the stack
+ * pointer will be set to the same fixed address as the breakpoint that
+ * was interrupted, causing that stack to be corrupted. To handle this
+ * case, check if the stack that was interrupted is the debug stack, and
+ * if so, change the IDT so that new breakpoints will use the current
+ * stack and not switch to the fixed address. On return of the NMI,
+ * switch back to the original IDT.
*/
static DEFINE_PER_CPU(int, update_debug_stack);
+#endif
-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
+dotraplinkage notrace void
+do_nmi(struct pt_regs *regs, long error_code)
{
+ if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
+ this_cpu_write(nmi_state, NMI_LATCHED);
+ return;
+ }
+ this_cpu_write(nmi_state, NMI_EXECUTING);
+ this_cpu_write(nmi_cr2, read_cr2());
+nmi_restart:
+
+#ifdef CONFIG_X86_64
/*
* If we interrupted a breakpoint, it is possible that
* the nmi handler will have breakpoints too. We need to
@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
debug_stack_set_zero();
this_cpu_write(update_debug_stack, 1);
}
-}
-
-static inline void nmi_nesting_postprocess(void)
-{
- if (unlikely(this_cpu_read(update_debug_stack))) {
- debug_stack_reset();
- this_cpu_write(update_debug_stack, 0);
- }
-}
#endif
-dotraplinkage notrace void
-do_nmi(struct pt_regs *regs, long error_code)
-{
- nmi_nesting_preprocess(regs);
-
nmi_enter();
inc_irq_stat(__nmi_count);
@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
nmi_exit();
- /* On i386, may loop back to preprocess */
- nmi_nesting_postprocess();
+#ifdef CONFIG_X86_64
+ if (unlikely(this_cpu_read(update_debug_stack))) {
+ debug_stack_reset();
+ this_cpu_write(update_debug_stack, 0);
+ }
+#endif
+
+ if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
+ write_cr2(this_cpu_read(nmi_cr2));
+ if (this_cpu_dec_return(nmi_state))
+ goto nmi_restart;
}
NOKPROBE_SYMBOL(do_nmi);
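
The hunk above folds the old nmi_nesting_preprocess()/nmi_nesting_postprocess() helpers into do_nmi() itself, so the NOT_RUNNING / EXECUTING / LATCHED latch is now visible in one place. Below is a minimal, single-threaded C sketch of that latch, illustrative only: the per-CPU variable becomes a plain static, the names are invented, and the CR2 save/restore and debug-stack handling are omitted.

#include <stdio.h>

enum nmi_state { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED };

static enum nmi_state state;

static void handle_one_nmi(void)
{
	puts("handling NMI");
	/* In the real code, a nested NMI arriving here would set NMI_LATCHED. */
}

static void nmi_entry(void)
{
	if (state != NMI_NOT_RUNNING) {
		state = NMI_LATCHED;	/* remember it; the running handler reruns */
		return;
	}
	state = NMI_EXECUTING;
	do {
		handle_one_nmi();
		/*
		 * Mirrors this_cpu_dec_return(): LATCHED (2) drops to
		 * EXECUTING (1) and we loop once more; EXECUTING drops to
		 * NOT_RUNNING (0) and we are done.
		 */
	} while (--state != NMI_NOT_RUNNING);
}

int main(void)
{
	nmi_entry();
	return 0;
}
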
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c614dd4..1f316f0 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,10 +41,18 @@
#include <asm/timer.h>
#include <asm/special_insns.h>
-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+ ".global _paravirt_nop\n"
+ "_paravirt_nop:\n\t"
+ "ret\n\t"
+ ".size _paravirt_nop, . - _paravirt_nop\n\t"
+ ".type _paravirt_nop, @function\n\t"
+ ".popsection");
/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6e338e3..9717437 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -453,6 +453,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
static void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
+ trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
smp_mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
@@ -464,6 +465,7 @@ static void mwait_idle(void)
__sti_mwait(0, 0);
else
local_irq_enable();
+ trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ddfdbf7..58e02d9 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, int all)
void release_thread(struct task_struct *dead_task)
{
if (dead_task->mm) {
- if (dead_task->mm->context.size) {
+ if (dead_task->mm->context.ldt) {
pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
dead_task->comm,
dead_task->mm->context.ldt,
- dead_task->mm->context.size);
+ dead_task->mm->context.ldt->size);
BUG();
}
}
@@ -499,27 +499,59 @@ void set_personality_ia32(bool x32)
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
unsigned long get_wchan(struct task_struct *p)
{
- unsigned long stack;
- u64 fp, ip;
+ unsigned long start, bottom, top, sp, fp, ip;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack = (unsigned long)task_stack_page(p);
- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
+
+ start = (unsigned long)task_stack_page(p);
+ if (!start)
+ return 0;
+
+ /*
+ * Layout of the stack page:
+ *
+ * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+ * PADDING
+ * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+ * stack
+ * ----------- bottom = start + sizeof(thread_info)
+ * thread_info
+ * ----------- start
+ *
+ * The task's stack pointer points at the location where the
+ * frame pointer is stored. The data on the stack is:
+ * ... IP FP ... IP FP
+ *
+ * We need to read FP and IP, so we need to adjust the upper
+ * bound by another unsigned long.
+ */
+ top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+ top -= 2 * sizeof(unsigned long);
+ bottom = start + sizeof(struct thread_info);
+
+ sp = READ_ONCE(p->thread.sp);
+ if (sp < bottom || sp > top)
return 0;
- fp = *(u64 *)(p->thread.sp);
+
+ fp = READ_ONCE(*(unsigned long *)sp);
do {
- if (fp < (unsigned long)stack ||
- fp >= (unsigned long)stack+THREAD_SIZE)
+ if (fp < bottom || fp > top)
return 0;
- ip = *(u64 *)(fp+8);
+ ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
if (!in_sched_functions(ip))
return ip;
- fp = *(u64 *)fp;
- } while (count++ < 16);
+ fp = READ_ONCE(*(unsigned long *)fp);
+ } while (count++ < 16 && p->state != TASK_RUNNING);
return 0;
}
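
The get_wchan() change above is, at its core, a bounds-checked frame-pointer walk: every saved frame pointer and return address is validated against the usable stack range before it is dereferenced, and the walk gives up after 16 frames. The user-space sketch below shows the same shape on an artificial stack; names like walk_frames and is_internal are invented, and the real code also re-checks p->state and uses READ_ONCE() because the task can start running while we look. The demo's upper bound stops short of the last array word so that reading fp + 8 stays in range, much as the kernel pulls top down so both reads stay on the stack.

#include <stdbool.h>
#include <stdio.h>

struct bounds { unsigned long bottom, top; };

static bool in_bounds(const struct bounds *b, unsigned long p)
{
	return p >= b->bottom && p <= b->top;
}

static bool is_internal(unsigned long ip)	/* stand-in for in_sched_functions() */
{
	return ip < 0x1000;
}

/* Walk saved (FP, IP) pairs starting at sp; give up on anything out of range. */
static unsigned long walk_frames(const struct bounds *b, unsigned long sp)
{
	unsigned long fp, ip;
	int count = 0;

	if (!in_bounds(b, sp))
		return 0;
	fp = *(unsigned long *)sp;
	do {
		if (!in_bounds(b, fp))
			return 0;
		ip = *(unsigned long *)(fp + sizeof(unsigned long));
		if (!is_internal(ip))
			return ip;
		fp = *(unsigned long *)fp;
	} while (++count < 16);
	return 0;
}

int main(void)
{
	unsigned long stack[6];
	/* Leave the last word outside the bounds so fp + 8 stays readable. */
	struct bounds b = { (unsigned long)&stack[0], (unsigned long)&stack[4] };

	stack[0] = (unsigned long)&stack[2];	/* saved FP -> next frame */
	stack[1] = 0;				/* unused */
	stack[2] = (unsigned long)&stack[4];	/* next frame's saved FP */
	stack[3] = 0x10;			/* return IP, "internal", skipped */
	stack[4] = 0;				/* end of chain */
	stack[5] = 0x2000;			/* return IP, reported */

	printf("wchan ip = %#lx\n", walk_frames(&b, (unsigned long)&stack[0]));
	return 0;
}
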
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 74c44c4..12c28f7 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
COPY(r15);
#endif /* CONFIG_X86_64 */
+#ifdef CONFIG_X86_32
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+ /* Kernel saves and restores only the CS segment register on signals,
+ * which is the bare minimum needed to allow mixed 32/64-bit code.
+ * App's signal handler can save/restore other segments if needed. */
+ COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
#else /* !CONFIG_X86_32 */
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->cs, &sc->cs);
- put_user_ex(0, &sc->__pad2);
- put_user_ex(0, &sc->__pad1);
- put_user_ex(regs->ss, &sc->ss);
+ put_user_ex(0, &sc->gs);
+ put_user_ex(0, &sc->fs);
#endif /* CONFIG_X86_32 */
put_user_ex(fpstate, &sc->fpstate);
@@ -450,19 +456,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
regs->sp = (unsigned long)frame;
- /*
- * Set up the CS and SS registers to run signal handlers in
- * 64-bit mode, even if the handler happens to be interrupting
- * 32-bit or 16-bit code.
- *
- * SS is subtle. In 64-bit mode, we don't need any particular
- * SS descriptor, but we do need SS to be valid. It's possible
- * that the old SS is entirely bogus -- this can happen if the
- * signal we're trying to deliver is #GP or #SS caused by a bad
- * SS value.
- */
+ /* Set up the CS register to run signal handlers in 64-bit mode,
+ even if the handler happens to be interrupting 32-bit code. */
regs->cs = __USER_CS;
- regs->ss = __USER_DS;
return 0;
}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 9b4d51d..0ccb53a 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
+#include <asm/mmu_context.h>
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
struct desc_struct *desc;
unsigned long base;
- seg &= ~7UL;
+ seg >>= 3;
mutex_lock(&child->mm->context.lock);
- if (unlikely((seg >> 3) >= child->mm->context.size))
+ if (unlikely(!child->mm->context.ldt ||
+ seg >= child->mm->context.ldt->size))
addr = -1L; /* bogus selector, access would fault */
else {
- desc = child->mm->context.ldt + seg;
+ desc = &child->mm->context.ldt->entries[seg];
base = get_desc_base(desc);
/* 16-bit code segment? */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 5054497..21187eb 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -21,6 +21,7 @@
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
+#include <asm/geode.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -1004,15 +1005,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
static void __init check_system_tsc_reliable(void)
{
-#ifdef CONFIG_MGEODE_LX
- /* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+ if (is_geode_lx()) {
+ /* RTSC counts during suspend */
#define RTSC_SUSP 0x100
- unsigned long res_low, res_high;
+ unsigned long res_low, res_high;
- rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
- /* Geode_LX - the OLPC CPU has a very reliable TSC */
- if (res_low & RTSC_SUSP)
- tsc_clocksource_reliable = 1;
+ rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+ /* Geode_LX - the OLPC CPU has a very reliable TSC */
+ if (res_low & RTSC_SUSP)
+ tsc_clocksource_reliable = 1;
+ }
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 9d28383..c4ea87e 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -150,7 +150,7 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.apic->pending_events;
+ return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
}
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b733376..554e877 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -357,12 +357,6 @@ static u64 __get_spte_lockless(u64 *sptep)
{
return ACCESS_ONCE(*sptep);
}
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
- /* It is valid if the spte is zapped. */
- return spte == 0ull;
-}
#else
union split_spte {
struct {
@@ -478,23 +472,6 @@ retry:
return spte.spte;
}
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
- union split_spte sspte = (union split_spte)spte;
- u32 high_mmio_mask = shadow_mmio_mask >> 32;
-
- /* It is valid if the spte is zapped. */
- if (spte == 0ull)
- return true;
-
- /* It is valid if the spte is being zapped. */
- if (sspte.spte_low == 0ull &&
- (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
- return true;
-
- return false;
-}
#endif
static bool spte_is_locklessly_modifiable(u64 spte)
@@ -3343,21 +3320,6 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
return vcpu_match_mmio_gva(vcpu, addr);
}
-
-/*
- * On direct hosts, the last spte is only allows two states
- * for mmio page fault:
- * - It is the mmio spte
- * - It is zapped or it is being zapped.
- *
- * This function completely checks the spte when the last spte
- * is not the mmio spte.
- */
-static bool check_direct_spte_mmio_pf(u64 spte)
-{
- return __check_direct_spte_mmio_pf(spte);
-}
-
static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
{
struct kvm_shadow_walk_iterator iterator;
@@ -3400,13 +3362,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
}
/*
- * It's ok if the gva is remapped by other cpus on shadow guest,
- * it's a BUG if the gfn is not a mmio page.
- */
- if (direct && !check_direct_spte_mmio_pf(spte))
- return RET_MMIO_PF_BUG;
-
- /*
* If the page table is zapped by other cpus, let CPU fault again on
* the address.
*/
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4911bf1..7858cd9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -512,7 +512,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
if (svm->vmcb->control.next_rip != 0) {
- WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+ WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
svm->next_rip = svm->vmcb->control.next_rip;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2d73807..bc3041e1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6144,6 +6144,8 @@ static __init int hardware_setup(void)
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
+ set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
if (enable_apicv) {
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6cceb2c..37d79a0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2192,7 +2192,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
- kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+ adjust_tsc_offset_guest(vcpu, adj);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 9b86812..274a52b 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -29,7 +29,6 @@
#include <asm/uaccess.h>
#include <asm/traps.h>
-#include <asm/desc.h>
#include <asm/user.h>
#include <asm/i387.h>
@@ -185,7 +184,7 @@ void math_emulate(struct math_emu_info *info)
math_abort(FPU_info, SIGILL);
}
- code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+ code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
if (SEG_D_SIZE(code_descriptor)) {
/* The above test may be wrong, the book is not clear */
/* Segmented 32 bit protected mode */
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 2c61441..d342fce 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -16,9 +16,24 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+ static struct desc_struct zero_desc;
+ struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ seg >>= 3;
+ mutex_lock(&current->mm->context.lock);
+ if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+ ret = current->mm->context.ldt->entries[seg];
+ mutex_unlock(&current->mm->context.lock);
+#endif
+ return ret;
+}
+
#define SEG_D_SIZE(x) ((x).b & (3 << 21))
#define SEG_G_BIT(x) ((x).b & (1 << 23))
#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
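
FPU_get_ldt_descriptor() above replaces the old unchecked LDT_DESCRIPTOR() index with a locked, bounds-checked copy by value, returning an all-zero descriptor when the selector is out of range so the access simply faults later. A small pthreads sketch of the same lookup shape, with invented names (struct table, lookup_entry) standing in for the mm context and desc_struct:

#include <pthread.h>
#include <stddef.h>

struct entry { unsigned long lo, hi; };

struct table {
	pthread_mutex_t lock;
	struct entry *entries;
	size_t size;
};

static struct entry lookup_entry(struct table *t, unsigned int selector)
{
	struct entry ret = { 0, 0 };	/* "bogus selector, access would fault" */
	size_t index = selector >> 3;	/* strip the RPL/TI bits */

	pthread_mutex_lock(&t->lock);
	if (t->entries && index < t->size)
		ret = t->entries[index];
	pthread_mutex_unlock(&t->lock);
	return ret;
}

int main(void)
{
	struct entry e = { 0x1234, 0x5678 };
	struct table t = { PTHREAD_MUTEX_INITIALIZER, &e, 1 };

	return lookup_entry(&t, 0x7).lo == 0x1234 ? 0 : 1;
}
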
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99..8300db7 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
#include <linux/stddef.h>
#include <asm/uaccess.h>
-#include <asm/desc.h>
#include "fpu_system.h"
#include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
addr->selector = PM_REG_(segment);
}
- descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+ descriptor = FPU_get_ldt_descriptor(addr->selector);
base_address = SEG_BASE_ADDR(descriptor);
address = base_address + offset;
limit = base_address
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c8140e1..c23ab1e 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end)
vaddr = start;
pgd_idx = pgd_index(vaddr);
+ pmd_idx = pmd_index(vaddr);
for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3fba623..f9977a7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
* has been zapped already via cleanup_highmem().
*/
all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
- set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+ set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
rodata_test();
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 841ea05..4773849 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -679,6 +679,70 @@ out:
}
/*
+ * Iterate the EFI memory map in reverse order because the regions
+ * will be mapped top-down. The end result is the same as if we had
+ * mapped things forward, but doesn't require us to change the
+ * existing implementation of efi_map_region().
+ */
+static inline void *efi_map_next_entry_reverse(void *entry)
+{
+ /* Initial call */
+ if (!entry)
+ return memmap.map_end - memmap.desc_size;
+
+ entry -= memmap.desc_size;
+ if (entry < memmap.map)
+ return NULL;
+
+ return entry;
+}
+
+/*
+ * efi_map_next_entry - Return the next EFI memory map descriptor
+ * @entry: Previous EFI memory map descriptor
+ *
+ * This is a helper function to iterate over the EFI memory map, which
+ * we do in different orders depending on the current configuration.
+ *
+ * To begin traversing the memory map @entry must be %NULL.
+ *
+ * Returns %NULL when we reach the end of the memory map.
+ */
+static void *efi_map_next_entry(void *entry)
+{
+ if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
+ /*
+ * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
+ * config table feature requires us to map all entries
+ * in the same order as they appear in the EFI memory
+ * map. That is to say, entry N must have a lower
+ * virtual address than entry N+1. This is because the
+ * firmware toolchain leaves relative references in
+ * the code/data sections, which are split and become
+ * separate EFI memory regions. Mapping things
+ * out-of-order leads to the firmware accessing
+ * unmapped addresses.
+ *
+ * Since we need to map things this way whether or not
+ * the kernel actually makes use of
+ * EFI_PROPERTIES_TABLE, let's just switch to this
+ * scheme by default for 64-bit.
+ */
+ return efi_map_next_entry_reverse(entry);
+ }
+
+ /* Initial call */
+ if (!entry)
+ return memmap.map;
+
+ entry += memmap.desc_size;
+ if (entry >= memmap.map_end)
+ return NULL;
+
+ return entry;
+}
+
+/*
* Map the efi memory ranges of the runtime services and update new_mmap with
* virtual addresses.
*/
@@ -688,7 +752,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
unsigned long left = 0;
efi_memory_desc_t *md;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ p = NULL;
+ while ((p = efi_map_next_entry(p))) {
md = p;
if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
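
efi_map_next_entry_reverse() above works because the memory map is a flat buffer of desc_size-byte records; walking it backwards only changes the iterator, while the caller's loop in efi_map_regions() stays the same. A stand-alone sketch of that iterator shape follows; struct memmap here is a simplified stand-in and the function names are invented, not the kernel's.

#include <stddef.h>
#include <stdio.h>

struct memmap {
	char *map, *map_end;
	size_t desc_size;
};

static void *next_entry_reverse(const struct memmap *m, void *entry)
{
	if (!entry)				/* initial call: last record */
		return m->map_end - m->desc_size;

	entry = (char *)entry - m->desc_size;
	return (char *)entry < m->map ? NULL : entry;
}

int main(void)
{
	char buf[4 * 16];			/* four 16-byte descriptors */
	struct memmap m = { buf, buf + sizeof(buf), 16 };
	void *p = NULL;

	while ((p = next_entry_reverse(&m, p)) != NULL)
		printf("descriptor at offset %ld\n", (long)((char *)p - m.map));
	return 0;				/* prints 48, 32, 16, 0 */
}
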
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 757678f..bf93844 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -23,6 +23,7 @@
#include <asm/debugreg.h>
#include <asm/fpu-internal.h> /* pcntxt_mask */
#include <asm/cpu.h>
+#include <asm/mmu_context.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -154,7 +155,7 @@ static void fix_processor_context(void)
syscall_init(); /* This sets MSR_*STAR and related */
#endif
load_TR_desc(); /* This does ltr */
- load_LDT(&current->active_mm->context); /* This does lldt */
+ load_mm_ldt(current->active_mm); /* This does lldt */
}
/**
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index e88fda8..4841453 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -8,7 +8,7 @@ config XEN
select PARAVIRT_CLOCK
select XEN_HAVE_PVMMU
depends on X86_64 || (X86_32 && X86_PAE)
- depends on X86_TSC
+ depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the
kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
config XEN_DOM0
def_bool y
depends on XEN && PCI_XEN && SWIOTLB_XEN
- depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+ depends on X86_IO_APIC && ACPI && PCI
config XEN_PVHVM
def_bool y
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7322755..4b6e29a 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
grant-table.o suspend.o platform-pci-unplug.o \
- p2m.o
+ p2m.o apic.o
obj-$(CONFIG_EVENT_TRACING) += trace.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
-obj-$(CONFIG_XEN_DOM0) += apic.o vga.o
+obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 46957ea..0cc6571 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -33,6 +33,10 @@
#include <linux/memblock.h>
#include <linux/edd.h>
+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
@@ -483,6 +487,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
pte_t pte;
unsigned long pfn;
struct page *page;
+ unsigned char dummy;
ptep = lookup_address((unsigned long)v, &level);
BUG_ON(ptep == NULL);
@@ -492,6 +497,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
pte = pfn_pte(pfn, prot);
+ /*
+ * Careful: update_va_mapping() will fail if the virtual address
+ * we're poking isn't populated in the page tables. We don't
+ * need to worry about the direct map (that's always in the page
+ * tables), but we need to be careful about vmap space. In
+ * particular, the top level page table can lazily propagate
+ * entries between processes, so if we've switched mms since we
+ * vmapped the target in the first place, we might not have the
+ * top-level page table entry populated.
+ *
+ * We disable preemption because we want the same mm active when
+ * we probe the target and when we issue the hypercall. We'll
+ * have the same nominal mm, but if we're a kernel thread, lazy
+ * mm dropping could change our pgd.
+ *
+ * Out of an abundance of caution, this uses __get_user() to fault
+ * in the target address just in case there's some obscure case
+ * in which the target address isn't readable.
+ */
+
+ preempt_disable();
+
+ pagefault_disable(); /* Avoid warnings due to being atomic. */
+ __get_user(dummy, (unsigned char __user __force *)v);
+ pagefault_enable();
+
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();
@@ -503,6 +534,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
BUG();
} else
kmap_flush_unused();
+
+ preempt_enable();
}
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +543,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
int i;
+ /*
+ * We need to mark all the aliases of the LDT pages RO. We
+ * don't need to call vm_flush_aliases(), though, since that's
+ * only responsible for flushing aliases out of the TLBs, not the
+ * page tables, and Xen will flush the TLB for us if needed.
+ *
+ * To avoid confusing future readers: none of this is necessary
+ * to load the LDT. The hypervisor only checks this when the
+ * LDT is faulted in due to subsequent descriptor access.
+ */
+
for(i = 0; i < entries; i += entries_per_page)
set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}
@@ -1758,6 +1802,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
.notifier_call = xen_hvm_cpu_notify,
};
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+ native_machine_shutdown();
+ if (kexec_in_progress)
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+ native_machine_crash_shutdown(regs);
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
static void __init xen_hvm_guest_init(void)
{
if (xen_pv_domain())
@@ -1777,6 +1836,10 @@ static void __init xen_hvm_guest_init(void)
x86_init.irqs.intr_init = xen_init_IRQ;
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+ machine_ops.shutdown = xen_hvm_shutdown;
+ machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
}
#endif
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 9e195c6..bef30cb 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
#ifdef CONFIG_XEN_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
size_t size)
{
}
-static inline void __init xen_init_apic(void)
-{
-}
#endif
+void __init xen_init_apic(void);
+
#ifdef CONFIG_XEN_EFI
extern void xen_efi_init(void);
#else
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 677bfcf..28f33a8 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -25,30 +25,39 @@ static inline void spill_registers(void)
{
#if XCHAL_NUM_AREGS > 16
__asm__ __volatile__ (
- " call12 1f\n"
+ " call8 1f\n"
" _j 2f\n"
" retw\n"
" .align 4\n"
"1:\n"
+#if XCHAL_NUM_AREGS == 32
+ " _entry a1, 32\n"
+ " addi a8, a0, 3\n"
+ " _entry a1, 16\n"
+ " mov a12, a12\n"
+ " retw\n"
+#else
" _entry a1, 48\n"
- " addi a12, a0, 3\n"
-#if XCHAL_NUM_AREGS > 32
- " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
+ " call12 1f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
" _entry a1, 48\n"
" mov a12, a0\n"
" .endr\n"
-#endif
- " _entry a1, 48\n"
+ " _entry a1, 16\n"
#if XCHAL_NUM_AREGS % 12 == 0
- " mov a8, a8\n"
-#elif XCHAL_NUM_AREGS % 12 == 4
" mov a12, a12\n"
-#elif XCHAL_NUM_AREGS % 12 == 8
+#elif XCHAL_NUM_AREGS % 12 == 4
" mov a4, a4\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+ " mov a8, a8\n"
#endif
" retw\n"
+#endif
"2:\n"
- : : : "a12", "a13", "memory");
+ : : : "a8", "a9", "memory");
#else
__asm__ __volatile__ (
" mov a12, a12\n"
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 82bbfa5..a2a9021 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -568,12 +568,13 @@ user_exception_exit:
* (if we have restored WSBITS-1 frames).
*/
+2:
#if XCHAL_HAVE_THREADPTR
l32i a3, a1, PT_THREADPTR
wur a3, threadptr
#endif
-2: j common_exception_exit
+ j common_exception_exit
/* This is the kernel exception exit.
* We avoided to do a MOVSP when we entered the exception, but we
@@ -1820,7 +1821,7 @@ ENDPROC(system_call)
mov a12, a0
.endr
#endif
- _entry a1, 48
+ _entry a1, 16
#if XCHAL_NUM_AREGS % 12 == 0
mov a8, a8
#elif XCHAL_NUM_AREGS % 12 == 4
@@ -1844,7 +1845,7 @@ ENDPROC(system_call)
ENTRY(_switch_to)
- entry a1, 16
+ entry a1, 48
mov a11, a3 # and 'next' (a3)
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b79685e..279c5d6 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -141,15 +141,26 @@ static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
- char *start_page = page;
struct request *rq;
+ int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);
+
+ list_for_each_entry(rq, list, queuelist) {
+ const int rq_len = 2 * sizeof(rq) + 2;
+
+ /* if the output will be truncated */
+ if (PAGE_SIZE - 1 < len + rq_len) {
+ /* backspacing if it can't hold '\t...\n' */
+ if (PAGE_SIZE - 1 < len + 5)
+ len -= rq_len;
+ len += snprintf(page + len, PAGE_SIZE - 1 - len,
+ "\t...\n");
+ break;
+ }
+ len += snprintf(page + len, PAGE_SIZE - 1 - len,
+ "\t%p\n", rq);
+ }
- page += sprintf(page, "%s:\n", msg);
-
- list_for_each_entry(rq, list, queuelist)
- page += sprintf(page, "\t%p\n", rq);
-
- return page - start_page;
+ return len;
}
static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
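
sysfs_list_show() above stops formatting with unbounded sprintf() into the sysfs page and instead accumulates with snprintf() against PAGE_SIZE - 1, ending the listing with a "\t...\n" marker once the next record would no longer fit. The user-space sketch below shows the same pattern in simplified form; the names are invented and it reserves room for the ellipsis up front instead of reproducing the exact backspacing arithmetic in the patch.

#include <stdio.h>
#include <string.h>

static size_t list_to_buf(char *buf, size_t size, const char *const *items,
			  size_t nitems)
{
	size_t len, i;

	len = snprintf(buf, size, "items:\n");	/* size assumed > header length */
	for (i = 0; i < nitems; i++) {
		size_t item_len = strlen(items[i]) + 2;	/* '\t' + item + '\n' */

		if (len + item_len + 1 > size) {
			/* Out of room: end the listing with an ellipsis if it fits. */
			if (len + sizeof("\t...\n") <= size)
				len += snprintf(buf + len, size - len, "\t...\n");
			break;
		}
		len += snprintf(buf + len, size - len, "\t%s\n", items[i]);
	}
	return len;
}

int main(void)
{
	const char *const items[] = { "req-1", "req-2", "req-3" };
	char buf[64];
	size_t n = list_to_buf(buf, sizeof(buf), items, 3);

	fwrite(buf, 1, n, stdout);
	return 0;
}
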
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 12600bf..e0057d0 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Description:
* Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
- * the device driver based upon the combined capabilities of I/O
- * controller and storage device.
+ * the device driver based upon the capabilities of the I/O
+ * controller.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 8acb88603..9c1dc8d 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
struct crypto_alg *base = &alg->halg.base;
if (alg->halg.digestsize > PAGE_SIZE / 8 ||
- alg->halg.statesize > PAGE_SIZE / 8)
+ alg->halg.statesize > PAGE_SIZE / 8 ||
+ alg->halg.statesize == 0)
return -EINVAL;
base->cra_type = &crypto_ahash_type;
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index ff6d8ad..fb76552 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -153,6 +153,7 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"AEI0250"}, /* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
{"AEI1240"}, /* Actiontec ISA PNP 56K X2 Fax Modem */
{"AKY1021"}, /* Rockwell 56K ACF II Fax+Data+Voice Modem */
+ {"ALI5123"}, /* ALi Fast Infrared Controller */
{"AZT4001"}, /* AZT3005 PnP SOUND DEVICE */
{"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */
{"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index cfd7581..b09ad55 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -826,6 +826,22 @@ void acpi_penalize_isa_irq(int irq, int active)
}
/*
+ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
+ * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be used for
+ * PCI IRQs.
+ */
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
+ polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
+}
+
+/*
* Over-ride default table to reserve additional IRQs for use by ISA
* e.g. acpi_irq_isa=5
* Useful for telling ACPI how not to interfere with your ISA sound card.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 65ee944..e6ea912 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -349,6 +349,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* JMicron 362B and 362C have an AHCI function with IDE class code */
{ PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
{ PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
+ /* May need to update quirk_jmicron_async_suspend() for additions */
/* ATI */
{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
@@ -1377,18 +1378,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
- /*
- * The JMicron chip 361/363 contains one SATA controller and one
- * PATA controller,for powering on these both controllers, we must
- * follow the sequence one by one, otherwise one of them can not be
- * powered on successfully, so here we disable the async suspend
- * method for these chips.
- */
- if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
- (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
- pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
- device_disable_async_suspend(&pdev->dev);
-
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 41c99be..e0064d1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
* RETURNS:
* Block address read from @tf.
*/
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
u64 block = 0;
- if (!dev || tf->flags & ATA_TFLAG_LBA) {
+ if (tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA48) {
block |= (u64)tf->hob_lbah << 40;
block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
return 0;
}
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
- unsigned int err_mask;
-
- if (!ata_id_has_sense_reporting(dev->id))
- return;
-
- if (ata_id_sense_reporting_enabled(dev->id))
- return;
-
- err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
- if (err_mask) {
- ata_dev_dbg(dev,
- "failed to enable Sense Data Reporting, Emask 0x%x\n",
- err_mask);
- }
-}
-
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
dev->devslp_timing[i] = sata_setting[j];
}
}
- ata_dev_config_sense_reporting(dev);
+
dev->cdb_len = 16;
}
@@ -4248,6 +4230,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
/* devices that don't properly handle TRIM commands */
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7465031..cb0508a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
- if (ata_id_has_ncq_autosense(dev->id))
- tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
}
@@ -1630,70 +1628,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
}
/**
- * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- * @dfl_sense_key: default sense key to use
- *
- * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- * SENSE. This function is EH helper.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * encoded sense data on success, 0 on failure or if sense data
- * is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
- struct scsi_cmnd *cmd)
-{
- struct ata_device *dev = qc->dev;
- struct ata_taskfile tf;
- unsigned int err_mask;
-
- if (!cmd)
- return 0;
-
- DPRINTK("ATA request sense\n");
- ata_dev_warn(dev, "request sense\n");
- if (!ata_id_sense_reporting_enabled(dev->id)) {
- ata_dev_warn(qc->dev, "sense data reporting disabled\n");
- return 0;
- }
- ata_tf_init(dev, &tf);
-
- tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
- tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
- tf.command = ATA_CMD_REQ_SENSE_DATA;
- tf.protocol = ATA_PROT_NODATA;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- /*
- * ACS-4 states:
- * The device may set the SENSE DATA AVAILABLE bit to one in the
- * STATUS field and clear the ERROR bit to zero in the STATUS field
- * to indicate that the command returned completion without an error
- * and the sense data described in table 306 is available.
- *
- * IOW the 'ATA_SENSE' bit might not be set even though valid
- * sense data is available.
- * So check for both.
- */
- if ((tf.command & ATA_SENSE) ||
- tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
- ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
- tf.lbah, tf.lbam, tf.lbal);
- } else {
- ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
- tf.command, err_mask);
- }
- return err_mask;
-}
-
-/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
- if (qc->result_tf.auxiliary) {
- char sense_key, asc, ascq;
-
- sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
- asc = (qc->result_tf.auxiliary >> 8) & 0xff;
- ascq = qc->result_tf.auxiliary & 0xff;
- ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
- sense_key, asc, ascq);
- ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
- ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- }
-
ehc->i.err_mask &= ~AC_ERR_DEV;
}
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
return ATA_EH_RESET;
}
- /*
- * Sense data reporting does not work if the
- * device fault bit is set.
- */
- if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
- !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
- tmp = ata_eh_request_sense(qc, qc->scsicmd);
- if (tmp)
- qc->err_mask |= tmp;
- else
- ata_scsi_set_sense_information(qc->scsicmd, tf);
- } else {
- ata_dev_warn(qc->dev, "sense data available but port frozen\n");
- }
- }
-
- /* Set by NCQ autosense or request sense above */
- if (qc->flags & ATA_QCFLAG_SENSE_VALID)
- return 0;
-
if (stat & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
- ATA_SENSE | ATA_ERR)) {
+ ATA_ERR)) {
if (res->command & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
- ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+ ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
- res->command & ATA_SENSE ? "SENSE " : "",
res->command & ATA_ERR ? "ERR " : "");
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 641a61a..0d7f0da 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
- if (!cmd)
- return;
-
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
-{
- u64 information;
-
- if (!cmd)
- return;
-
- information = ata_tf_read_block(tf, NULL);
- scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
((cdb[2] & 0x20) || need_sense)) {
ata_gen_passthru_sense(qc);
} else {
- if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
- cmd->result = SAM_STAT_CHECK_CONDITION;
- } else if (!need_sense) {
+ if (!need_sense) {
cmd->result = SAM_STAT_GOOD;
} else {
/* TODO: decide which descriptor format to use
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a998a17..f840ca1 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
- struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 47e418b..4d1a5d2 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -143,18 +143,6 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
};
const struct ata_port_info *ppi[] = { &info, NULL };
- /*
- * The JMicron chip 361/363 contains one SATA controller and one
- * PATA controller,for powering on these both controllers, we must
- * follow the sequence one by one, otherwise one of them can not be
- * powered on successfully, so here we disable the async suspend
- * method for these chips.
- */
- if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
- (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
- pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
- device_disable_async_suspend(&pdev->dev);
-
return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
}
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 5b93852..0d75285 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -139,6 +139,7 @@ static int __init ks0108_init(void)
ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
+ parport_put_port(ks0108_parport);
if (ks0108_pardevice == NULL) {
printk(KERN_ERR KS0108_NAME ": ERROR: "
"parport didn't register new device\n");
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index df0c66c..fdba441 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
if (sibling == cpu) /* skip itself */
continue;
+
sib_cpu_ci = get_cpu_cacheinfo(sibling);
+ if (!sib_cpu_ci->info_list)
+ continue;
+
sib_leaf = sib_cpu_ci->info_list + index;
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
static void free_cache_attributes(unsigned int cpu)
{
+ if (!per_cpu_cacheinfo(cpu))
+ return;
+
cache_shared_cpu_map_remove(cpu);
kfree(per_cpu_cacheinfo(cpu));
@@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DEAD:
cache_remove_dev(cpu);
- if (per_cpu_cacheinfo(cpu))
- free_cache_attributes(cpu);
+ free_cache_attributes(cpu);
break;
}
return notifier_from_errno(rc);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index c8a53d1..8754646 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -297,10 +297,10 @@ void * devres_get(struct device *dev, void *new_res,
if (!dr) {
add_dr(dev, &new_dr->node);
dr = new_dr;
- new_dr = NULL;
+ new_res = NULL;
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
- devres_free(new_dr);
+ devres_free(new_res);
return dr->data;
}
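
The devres_get() change above swaps which pointer gets freed: devres_free() takes the data pointer that was handed out to the driver and recovers the enclosing struct devres itself via container_of(), so passing the internal struct pointer would subtract the offset a second time and free the wrong address. A minimal userspace sketch of that payload-as-handle pattern; all names below are illustrative, not the devres API:

/* payload-as-handle: callers only ever see the embedded data,
 * the bookkeeping struct is recovered with container_of() */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node {
	int id;
	unsigned long data[];     /* payload handed out to callers */
};

static void *node_alloc(size_t size)
{
	struct node *n = malloc(sizeof(*n) + size);

	if (!n)
		return NULL;
	n->id = 42;
	return n->data;           /* public handle is the payload, not the node */
}

static void node_free(void *payload)
{
	if (!payload)
		return;
	free(container_of(payload, struct node, data));
}

int main(void)
{
	void *payload = node_alloc(16);

	node_free(payload);       /* freeing the node pointer here instead would corrupt the heap */
	printf("payload freed cleanly\n");
	return 0;
}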
diff --git a/drivers/base/node.c b/drivers/base/node.c
index a2aa65b..b10479c 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -388,6 +388,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
int page_nid;
+ /*
+ * memory block could have several absent sections from start.
+ * skip pfn range from absent section
+ */
+ if (!pfn_present(pfn)) {
+ pfn = round_down(pfn + PAGES_PER_SECTION,
+ PAGES_PER_SECTION) - 1;
+ continue;
+ }
+
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
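
The pfn-skipping arithmetic added above leans on the loop's pfn++: jumping to the last pfn of the current section means the next iteration starts exactly on the following section boundary. A small worked example; the PAGES_PER_SECTION value is illustrative:

#include <stdio.h>

#define PAGES_PER_SECTION 32768UL
#define round_down(x, y) ((x) & ~((y) - 1))   /* y must be a power of two */

int main(void)
{
	unsigned long pfn = 100000;           /* somewhere inside an absent section */
	unsigned long last = round_down(pfn + PAGES_PER_SECTION, PAGES_PER_SECTION) - 1;

	/* the for-loop's pfn++ then lands on the first pfn of the next section */
	printf("skip from %lu, next iteration starts at %lu\n", pfn, last + 1);
	return 0;
}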
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ebf034b..7403de9 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -375,9 +375,7 @@ int platform_device_add(struct platform_device *pdev)
while (--i >= 0) {
struct resource *r = &pdev->resource[i];
- unsigned long type = resource_type(r);
-
- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ if (r->parent)
release_resource(r);
}
@@ -408,9 +406,7 @@ void platform_device_del(struct platform_device *pdev)
for (i = 0; i < pdev->num_resources; i++) {
struct resource *r = &pdev->resource[i];
- unsigned long type = resource_type(r);
-
- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ if (r->parent)
release_resource(r);
}
}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index c7b0fce..ac3c07d 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -37,7 +37,7 @@ struct pm_clock_entry {
* @dev: The device for the given clock
* @ce: PM clock entry corresponding to the clock.
*/
-static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
+static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
{
int ret;
@@ -49,8 +49,6 @@ static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
dev_err(dev, "%s: failed to enable clk %p, error %d\n",
__func__, ce->clk, ret);
}
-
- return ret;
}
/**
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 1d0b116..0a60ef1 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -26,9 +26,10 @@
*/
void device_add_property_set(struct device *dev, struct property_set *pset)
{
- if (pset)
- pset->fwnode.type = FWNODE_PDATA;
+ if (!pset)
+ return;
+ pset->fwnode.type = FWNODE_PDATA;
set_secondary_fwnode(dev, &pset->fwnode);
}
EXPORT_SYMBOL_GPL(device_add_property_set);
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 81751a4..56486d9 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (!blk)
return -ENOMEM;
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
- if (!present) {
- kfree(blk);
- return -ENOMEM;
+ if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+ present = krealloc(rbnode->cache_present,
+ BITS_TO_LONGS(blklen) * sizeof(*present),
+ GFP_KERNEL);
+ if (!present) {
+ kfree(blk);
+ return -ENOMEM;
+ }
+
+ memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+ (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+ * sizeof(*present));
+ } else {
+ present = rbnode->cache_present;
}
/* insert the register value in the correct place in the rbnode block */
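
The rbtree-cache fix above only reallocates the presence bitmap when the block actually grows, and then zeroes just the newly added longs, because krealloc() leaves the extension uninitialised. A userspace analogue of the same grow-and-clear pattern; names and sizes are illustrative:

#include <limits.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static int grow_present_bitmap(unsigned long **present, size_t old_bits, size_t new_bits)
{
	size_t old_longs = BITS_TO_LONGS(old_bits);
	size_t new_longs = BITS_TO_LONGS(new_bits);
	unsigned long *p;

	if (new_longs <= old_longs)
		return 0;                        /* not growing: keep the existing map */

	p = realloc(*present, new_longs * sizeof(*p));
	if (!p)
		return -1;

	/* realloc() does not clear the added region; zero only the new tail */
	memset(p + old_longs, 0, (new_longs - old_longs) * sizeof(*p));
	*present = p;
	return 0;
}

int main(void)
{
	unsigned long *present = NULL;

	if (grow_present_bitmap(&present, 0, 200))
		return 1;
	free(present);
	return 0;
}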
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 5799a0b..c8941f3 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
{
- snprintf(buf, buf_size, "%x", max_val);
- return strlen(buf);
+ return snprintf(NULL, 0, "%x", max_val);
}
static ssize_t regmap_name_read_file(struct file *file,
@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
- if (buf_pos >= count - 1 - tot_len)
+ if (buf_pos + tot_len + 1 >= count)
break;
/* Format the register */
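
Two idioms are at work in the debugfs hunks above: snprintf(NULL, 0, ...) returns the length the formatted string would need, so no scratch buffer is required, and the rewritten bounds test keeps the arithmetic on the side that cannot underflow. A small sketch of the length-probe idiom:

#include <stdio.h>
#include <stdlib.h>

/* C99 guarantees the return value is the length that would have been written */
static size_t hex_len(unsigned int max_val)
{
	return (size_t)snprintf(NULL, 0, "%x", max_val);
}

int main(void)
{
	unsigned int max_reg = 0x3fff;
	size_t len = hex_len(max_reg);
	char *buf = malloc(len + 1);

	if (!buf)
		return 1;
	snprintf(buf, len + 1, "%x", max_reg);
	printf("register field width: %zu (\"%s\")\n", len, buf);
	free(buf);
	return 0;
}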
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 53f2535..fe8f1e4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -522,6 +522,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1797,6 +1798,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
obj_request_done_set(obj_request);
}
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p\n", __func__, obj_request);
+
+ if (obj_request_img_data_test(obj_request))
+ rbd_osd_copyup_callback(obj_request);
+ else
+ obj_request_done_set(obj_request);
+}
+
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
struct ceph_msg *msg)
{
@@ -1845,6 +1856,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
rbd_osd_discard_callback(obj_request);
break;
case CEPH_OSD_OP_CALL:
+ rbd_osd_call_callback(obj_request);
+ break;
case CEPH_OSD_OP_NOTIFY_ACK:
case CEPH_OSD_OP_WATCH:
rbd_osd_trivial_callback(obj_request);
@@ -2509,13 +2522,15 @@ out_unwind:
}
static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
struct rbd_device *rbd_dev;
struct page **pages;
u32 page_count;
+ dout("%s: obj %p\n", __func__, obj_request);
+
rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
obj_request->type == OBJ_REQUEST_NODATA);
rbd_assert(obj_request_img_data_test(obj_request));
@@ -2542,9 +2557,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
if (!obj_request->result)
obj_request->xferred = obj_request->length;
- /* Finish up with the normal image object callback */
-
- rbd_img_obj_callback(obj_request);
+ obj_request_done_set(obj_request);
}
static void
@@ -2629,7 +2642,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
/* All set, send it off. */
- orig_request->callback = rbd_img_obj_copyup_callback;
osdc = &rbd_dev->rbd_client->client->osdc;
img_result = rbd_obj_request_submit(osdc, orig_request);
if (!img_result)
@@ -5162,7 +5174,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
out_err:
if (parent) {
rbd_dev_unparent(rbd_dev);
- kfree(rbd_dev->header_name);
rbd_dev_destroy(parent);
} else {
rbd_put_client(rbdc);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 713fc9f..3e9ec95 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -362,8 +362,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
return;
}
- if (work_pending(&blkif->persistent_purge_work)) {
- pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+ if (work_busy(&blkif->persistent_purge_work)) {
+ pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
return;
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2c61cf8..89c7371 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1118,8 +1118,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
* Add the used indirect page back to the list of
* available pages for indirect grefs.
*/
- indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
- list_add(&indirect_page->lru, &info->indirect_pages);
+ if (!info->feature_persistent) {
+ indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+ list_add(&indirect_page->lru, &info->indirect_pages);
+ }
s->indirect_grants[i]->gref = GRANT_INVALID_REF;
list_add_tail(&s->indirect_grants[i]->node, &info->grants);
}
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index f1ff39a..54d946a 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -325,12 +325,14 @@ void zcomp_destroy(struct zcomp *comp)
* allocate new zcomp and initialize it. return compressing
* backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
* if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
- * case of allocation error.
+ * case of allocation error, or any other error potentially
+ * returned by functions zcomp_strm_{multi,single}_create.
*/
struct zcomp *zcomp_create(const char *compress, int max_strm)
{
struct zcomp *comp;
struct zcomp_backend *backend;
+ int error;
backend = find_backend(compress);
if (!backend)
@@ -342,12 +344,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm)
comp->backend = backend;
if (max_strm > 1)
- zcomp_strm_multi_create(comp, max_strm);
+ error = zcomp_strm_multi_create(comp, max_strm);
else
- zcomp_strm_single_create(comp);
- if (!comp->stream) {
+ error = zcomp_strm_single_create(comp);
+ if (error) {
kfree(comp);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(error);
}
return comp;
}
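
The zcomp change above stops collapsing every stream-creation failure into -ENOMEM and instead returns whatever error the backend constructor produced, wrapped in ERR_PTR(). A self-contained userspace sketch of that error-pointer propagation pattern; the helpers mimic the kernel macros and every other name is illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static inline void *err_ptr(long err) { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p) { return (unsigned long)p >= (unsigned long)-4095; }

struct comp { int nstreams; };

static int streams_create(struct comp *c, int n)
{
	if (n <= 0)
		return -EINVAL;          /* a specific failure reason, not just out-of-memory */
	c->nstreams = n;
	return 0;
}

static struct comp *comp_create(int max_strm)
{
	struct comp *c = calloc(1, sizeof(*c));
	int error;

	if (!c)
		return err_ptr(-ENOMEM);

	error = streams_create(c, max_strm);
	if (error) {                     /* propagate the real error, not a hardcoded -ENOMEM */
		free(c);
		return err_ptr(error);
	}
	return c;
}

int main(void)
{
	struct comp *c = comp_create(0);

	if (is_err(c))
		printf("create failed: %ld\n", ptr_err(c));
	else
		free(c);
	return 0;
}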
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index da8faf7..5643b65 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
static void start_khwrngd(void)
{
hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+ if (IS_ERR(hwrng_fill)) {
pr_err("hwrng_fill thread creation failed");
hwrng_fill = NULL;
}
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index a43048b..3c1a123 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -900,6 +900,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+ {
+ /*
+ * CPU fan speed going up and down on Dell Studio XPS 8100
+ * for unknown reasons.
+ */
+ .ident = "Dell Studio XPS 8100",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
+ },
+ },
+ { }
+};
+
/*
* Probe for the presence of a supported laptop.
*/
@@ -911,7 +926,8 @@ static int __init i8k_probe(void)
/*
* Get DMI information
*/
- if (!dmi_check_system(i8k_dmi_table)) {
+ if (!dmi_check_system(i8k_dmi_table) ||
+ dmi_check_system(i8k_blacklist_dmi_table)) {
if (!ignore_dmi && !force)
return -ENODEV;
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index 0dd8a4b..4a375ea 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -37,7 +37,8 @@
* Main PLL or any other PLLs in the device such as ARM PLL, DDR PLL
* or PA PLL available on keystone2. These PLLs are controlled by
* this register. Main PLL is controlled by a PLL controller.
- * @pllm: PLL register map address
+ * @pllm: PLL register map address for multiplier bits
+ * @pllod: PLL register map address for post divider bits
* @pll_ctl0: PLL controller map address
* @pllm_lower_mask: multiplier lower mask
* @pllm_upper_mask: multiplier upper mask
@@ -53,6 +54,7 @@ struct clk_pll_data {
u32 phy_pllm;
u32 phy_pll_ctl0;
void __iomem *pllm;
+ void __iomem *pllod;
void __iomem *pll_ctl0;
u32 pllm_lower_mask;
u32 pllm_upper_mask;
@@ -102,7 +104,11 @@ static unsigned long clk_pllclk_recalc(struct clk_hw *hw,
/* read post divider from od bits*/
postdiv = ((val & pll_data->clkod_mask) >>
pll_data->clkod_shift) + 1;
- else
+ else if (pll_data->pllod) {
+ postdiv = readl(pll_data->pllod);
+ postdiv = ((postdiv & pll_data->clkod_mask) >>
+ pll_data->clkod_shift) + 1;
+ } else
postdiv = pll_data->postdiv;
rate /= (prediv + 1);
@@ -172,12 +178,21 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
/* assume the PLL has output divider register bits */
pll_data->clkod_mask = CLKOD_MASK;
pll_data->clkod_shift = CLKOD_SHIFT;
+
+ /*
+		 * Check if there is a post-divider register. If not,
+		 * assume the od bits are part of the control register.
+ */
+ i = of_property_match_string(node, "reg-names",
+ "post-divider");
+ pll_data->pllod = of_iomap(node, i);
}
i = of_property_match_string(node, "reg-names", "control");
pll_data->pll_ctl0 = of_iomap(node, i);
if (!pll_data->pll_ctl0) {
pr_err("%s: ioremap failed\n", __func__);
+ iounmap(pll_data->pllod);
goto out;
}
@@ -193,6 +208,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
pll_data->pllm = of_iomap(node, i);
if (!pll_data->pllm) {
iounmap(pll_data->pll_ctl0);
+ iounmap(pll_data->pllod);
goto out;
}
}
diff --git a/drivers/clk/pistachio/clk-pistachio.c b/drivers/clk/pistachio/clk-pistachio.c
index 8c0fe88..c4ceb5e 100644
--- a/drivers/clk/pistachio/clk-pistachio.c
+++ b/drivers/clk/pistachio/clk-pistachio.c
@@ -159,9 +159,15 @@ PNAME(mux_debug) = { "mips_pll_mux", "rpu_v_pll_mux",
"wifi_pll_mux", "bt_pll_mux" };
static u32 mux_debug_idx[] = { 0x0, 0x1, 0x2, 0x4, 0x8, 0x10 };
-static unsigned int pistachio_critical_clks[] __initdata = {
- CLK_MIPS,
- CLK_PERIPH_SYS,
+static unsigned int pistachio_critical_clks_core[] __initdata = {
+ CLK_MIPS
+};
+
+static unsigned int pistachio_critical_clks_sys[] __initdata = {
+ PERIPH_CLK_SYS,
+ PERIPH_CLK_SYS_BUS,
+ PERIPH_CLK_DDR,
+ PERIPH_CLK_ROM,
};
static void __init pistachio_clk_init(struct device_node *np)
@@ -193,8 +199,8 @@ static void __init pistachio_clk_init(struct device_node *np)
pistachio_clk_register_provider(p);
- pistachio_clk_force_enable(p, pistachio_critical_clks,
- ARRAY_SIZE(pistachio_critical_clks));
+ pistachio_clk_force_enable(p, pistachio_critical_clks_core,
+ ARRAY_SIZE(pistachio_critical_clks_core));
}
CLK_OF_DECLARE(pistachio_clk, "img,pistachio-clk", pistachio_clk_init);
@@ -261,6 +267,9 @@ static void __init pistachio_clk_periph_init(struct device_node *np)
ARRAY_SIZE(pistachio_periph_gates));
pistachio_clk_register_provider(p);
+
+ pistachio_clk_force_enable(p, pistachio_critical_clks_sys,
+ ARRAY_SIZE(pistachio_critical_clks_sys));
}
CLK_OF_DECLARE(pistachio_clk_periph, "img,pistachio-clk-periph",
pistachio_clk_periph_init);
diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
index de53756..ebd0d2a 100644
--- a/drivers/clk/pistachio/clk-pll.c
+++ b/drivers/clk/pistachio/clk-pll.c
@@ -115,8 +115,7 @@ static int pll_gf40lp_frac_enable(struct clk_hw *hw)
u32 val;
val = pll_readl(pll, PLL_CTRL3);
- val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_DACPD |
- PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
+ val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
PLL_FRAC_CTRL3_FOUT4PHASEPD | PLL_FRAC_CTRL3_FOUTVCOPD);
pll_writel(pll, val, PLL_CTRL3);
@@ -233,7 +232,7 @@ static int pll_gf40lp_laint_enable(struct clk_hw *hw)
u32 val;
val = pll_readl(pll, PLL_CTRL1);
- val &= ~(PLL_INT_CTRL1_PD | PLL_INT_CTRL1_DSMPD |
+ val &= ~(PLL_INT_CTRL1_PD |
PLL_INT_CTRL1_FOUTPOSTDIVPD | PLL_INT_CTRL1_FOUTVCOPD);
pll_writel(pll, val, PLL_CTRL1);
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index 6cd88d9..542e45e 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -79,7 +79,7 @@ unsigned int pxa25x_get_clk_frequency_khz(int info)
clks[3] / 1000000, (clks[3] % 1000000) / 10000);
}
- return (unsigned int)clks[0];
+ return (unsigned int)clks[0] / KHz;
}
static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 5f9b54b..267511d 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -80,7 +80,7 @@ unsigned int pxa27x_get_clk_frequency_khz(int info)
pr_info("System bus clock: %ld.%02ldMHz\n",
clks[4] / 1000000, (clks[4] % 1000000) / 10000);
}
- return (unsigned int)clks[0];
+ return (unsigned int)clks[0] / KHz;
}
bool pxa27x_is_ppll_disabled(void)
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4b93a1e..4af4eed 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -78,7 +78,7 @@ unsigned int pxa3xx_get_clk_frequency_khz(int info)
pr_info("System bus clock: %ld.%02ldMHz\n",
clks[4] / 1000000, (clks[4] % 1000000) / 10000);
}
- return (unsigned int)clks[0];
+ return (unsigned int)clks[0] / KHz;
}
static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
div_hp, bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 54a756b9..457c540 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -2105,6 +2105,7 @@ static struct clk_branch gcc_ce1_clk = {
"ce1_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index c66f7bc..5d75bff 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -2278,7 +2278,7 @@ static struct clk_branch gcc_prng_ahb_clk = {
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x45004,
- .enable_mask = BIT(0),
+ .enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
.parent_names = (const char *[]){
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index c39d098..f06a082 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -1783,6 +1783,7 @@ static struct clk_branch gcc_ce1_clk = {
"ce1_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index d17eb45..37f9611 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -578,7 +578,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(2), 5, GFLAGS),
- MUX(SCLK_MAC, "mac_clk", mux_mac_p, 0,
+ MUX(SCLK_MAC, "mac_clk", mux_mac_p, CLK_SET_RATE_PARENT,
RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 3, GFLAGS),
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 714d6ba..f7890bf 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -85,6 +85,7 @@
#define DIV_PERIL4 0xc560
#define DIV_PERIL5 0xc564
#define E4X12_DIV_CAM1 0xc568
+#define E4X12_GATE_BUS_FSYS1 0xc744
#define GATE_SCLK_CAM 0xc820
#define GATE_IP_CAM 0xc920
#define GATE_IP_TV 0xc924
@@ -1095,6 +1096,7 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
0),
GATE(CLK_PPMUIMAGE, "ppmuimage", "aclk200", E4X12_GATE_IP_IMAGE, 9, 0,
0),
+ GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index e668e47..bdd2842 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -828,6 +828,8 @@ static void __init __s5pv210_clk_init(struct device_node *np,
s5pv210_clk_sleep_init();
+ samsung_clk_of_add_provider(np, ctx);
+
pr_info("%s clocks: mout_apll = %ld, mout_mpll = %ld\n"
"\tmout_epll = %ld, mout_vpll = %ld\n",
is_s5p6442 ? "S5P6442" : "S5PV210",
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 757636d..4ab28cf 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -163,7 +163,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
DT_CLK(NULL, "uart3_ick", "uart3_ick"),
- DT_CLK(NULL, "uart4_ick", "uart4_ick"),
DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
@@ -308,6 +307,7 @@ static struct ti_dt_clk am35xx_clks[] = {
static struct ti_dt_clk omap36xx_clks[] = {
DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
DT_CLK(NULL, "uart4_fck", "uart4_fck"),
+ DT_CLK(NULL, "uart4_ick", "uart4_ick"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index c6e86a9..5122ef2 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -128,8 +128,8 @@ static struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
{
struct clk_sp810 *sp810 = data;
- if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >
- ARRAY_SIZE(sp810->timerclken)))
+ if (WARN_ON(clkspec->args_count != 1 ||
+ clkspec->args[0] >= ARRAY_SIZE(sp810->timerclken)))
return NULL;
return sp810->timerclken[clkspec->args[0]].clk;
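
The SP810 fix above is a classic off-by-one: an index equal to the array size is already one past the end, so the guard has to use >= rather than >. A minimal illustration:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int timerclken[4] = { 0 };
	unsigned int idx = 4;

	if (idx >= ARRAY_SIZE(timerclken)) {     /* the old '>' test would have let idx == 4 through */
		printf("rejecting out-of-range index %u\n", idx);
		return 1;
	}
	return timerclken[idx];
}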
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index bab67db..663045c 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -255,7 +255,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
rcu_read_unlock();
tol_uV = opp_uV * priv->voltage_tolerance / 100;
- if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+ if (regulator_is_supported_voltage(cpu_reg,
+ opp_uV - tol_uV,
opp_uV + tol_uV)) {
if (opp_uV < min_uV)
min_uV = opp_uV;
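
The cpufreq-dt change above makes the regulator check symmetric: the supply must be able to sit anywhere in [opp_uV - tol, opp_uV + tol], not only at or above the nominal OPP voltage. A tiny worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned long opp_uV = 1100000;          /* 1.1 V operating point        */
	unsigned long tolerance_pct = 2;         /* 2 % board voltage tolerance  */
	unsigned long tol_uV = opp_uV * tolerance_pct / 100;

	printf("acceptable range: %lu..%lu uV\n", opp_uV - tol_uV, opp_uV + tol_uV);
	return 0;
}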
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6f9d27f..e8d1699 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,9 +48,9 @@ static inline int32_t mul_fp(int32_t x, int32_t y)
return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
-static inline int32_t div_fp(int32_t x, int32_t y)
+static inline int32_t div_fp(s64 x, s64 y)
{
- return div_s64((int64_t)x << FRAC_BITS, y);
+ return div64_s64((int64_t)x << FRAC_BITS, y);
}
static inline int ceiling_fp(int32_t x)
@@ -795,7 +795,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t core_busy, max_pstate, current_pstate, sample_ratio;
- u32 duration_us;
+ s64 duration_us;
u32 sample_time;
/*
@@ -822,8 +822,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
* to adjust our busyness.
*/
sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
- duration_us = (u32) ktime_us_delta(cpu->sample.time,
- cpu->last_sample_time);
+ duration_us = ktime_us_delta(cpu->sample.time,
+ cpu->last_sample_time);
if (duration_us > sample_time * 3) {
sample_ratio = div_fp(int_tofp(sample_time),
int_tofp(duration_us));
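
The intel_pstate hunks widen the fixed-point division to 64 bits: ktime_us_delta() returns a signed 64-bit count, and shifting a multi-second delta left by FRAC_BITS no longer fits in 32 bits, which is also why div_s64() is replaced by div64_s64(). A short demonstration of the wrap-around the old 32-bit prototype allowed; the constants are illustrative:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8

int main(void)
{
	int64_t duration_us = 40000000;                      /* 40 s since the last sample      */
	int64_t fp64 = duration_us << FRAC_BITS;             /* what the 64-bit division sees   */
	int32_t fp32 = (int32_t)(duration_us << FRAC_BITS);  /* what a 32-bit parameter keeps   */

	printf("64-bit fixed point: %lld\n", (long long)fp64);
	printf("32-bit fixed point: %d (wrapped)\n", fp32);
	return 0;
}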
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 332c8ef..0436997 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
- int sec4_sg_bytes;
+ int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 48f4535..ede9e9e3 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -904,7 +904,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
- BUG_ON(req->dst->length < nbytes);
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
flags, DMA_FROM_DEVICE))
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 67f8081..e4311ce 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -494,8 +494,9 @@ out:
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct blkcipher_desc desc;
- u8 *iv = nx_ctx->priv.ccm.iv;
+ u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct blkcipher_desc desc;
- u8 *iv = nx_ctx->priv.ccm.iv;
+ u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 2617cd4..dd7e9f3 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
- memcpy(nx_ctx->priv.ctr.iv,
+ memcpy(nx_ctx->priv.ctr.nonce,
in_key + key_len - CTR_RFC3686_NONCE_SIZE,
CTR_RFC3686_NONCE_SIZE);
@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
unsigned int nbytes)
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
- u8 *iv = nx_ctx->priv.ctr.iv;
+ u8 iv[16];
+ memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE,
desc->info, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
- desc->info = nx_ctx->priv.ctr.iv;
+ desc->info = iv;
return ctr_aes_nx_crypt(desc, dst, src, nbytes);
}
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 88c5624..c6ebeb6 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -330,6 +330,7 @@ out:
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen;
@@ -339,7 +340,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- desc.info = nx_ctx->priv.gcm.iv;
+ desc.info = rctx->iv;
/* initialize the counter */
*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
@@ -434,8 +435,8 @@ out:
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
memcpy(iv, req->iv, 12);
@@ -444,8 +445,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
static int gcm_aes_nx_decrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
memcpy(iv, req->iv, 12);
@@ -455,7 +456,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
@@ -467,7 +469,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 8c2faff..c2f7d4b 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
switch (key_len) {
case AES_KEYSIZE_128:
@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
return -EINVAL;
}
- memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
+ memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
return 0;
}
@@ -148,32 +149,29 @@ out:
return rc;
}
-static int nx_xcbc_init(struct shash_desc *desc)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
{
- struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- struct nx_sg *out_sg;
- int len;
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_AES);
+ err = nx_crypto_ctx_aes_xcbc_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
- memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
- memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
-
- len = AES_BLOCK_SIZE;
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, nx_ctx->ap->sglen);
+ return 0;
+}
- if (len != AES_BLOCK_SIZE)
- return -EINVAL;
+static int nx_xcbc_init(struct shash_desc *desc)
+{
+ struct xcbc_state *sctx = shash_desc_ctx(desc);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ memset(sctx, 0, sizeof *sctx);
return 0;
}
@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
u32 to_process = 0, leftover, total;
unsigned int max_sg_len;
unsigned long irq_flags;
@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ data_len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &len, nx_ctx->ap->sglen);
+
+ if (data_len != AES_BLOCK_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
do {
to_process = total - to_process;
to_process = to_process & ~(AES_BLOCK_SIZE - 1);
@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
(u8 *) sctx->buffer,
&data_len,
max_sg_len);
- if (data_len != sctx->count)
- return -EINVAL;
+ if (data_len != sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
}
data_len = to_process - sctx->count;
@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
&data_len,
max_sg_len);
- if (data_len != to_process - sctx->count)
- return -EINVAL;
+ if (data_len != to_process - sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
&len, nx_ctx->ap->sglen);
- if (len != sctx->count)
- return -EINVAL;
+ if (len != sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
nx_ctx->ap->sglen);
- if (len != AES_BLOCK_SIZE)
- return -EINVAL;
+ if (len != AES_BLOCK_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_aes_xcbc_init,
+ .cra_init = nx_crypto_ctx_aes_xcbc_init2,
.cra_exit = nx_crypto_ctx_exit,
}
};
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 23621da..becb738 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -29,30 +29,28 @@
#include "nx.h"
-static int nx_sha256_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- int len;
- int rc;
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+ err = nx_crypto_ctx_sha_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
- len = SHA256_DIGEST_SIZE;
- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
- &nx_ctx->op.outlen,
- &len,
- (u8 *) sctx->state,
- NX_DS_SHA256);
+ return 0;
+}
- if (rc)
- goto out;
+static int nx_sha256_init(struct shash_desc *desc) {
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memset(sctx, 0, sizeof *sctx);
sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -64,7 +62,6 @@ static int nx_sha256_init(struct shash_desc *desc)
sctx->state[7] = __cpu_to_be32(SHA256_H7);
sctx->count = 0;
-out:
return 0;
}
@@ -74,10 +71,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *out_sg;
u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
+ u32 max_sg_len;
u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -97,38 +96,57 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ data_len = SHA256_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &data_len, max_sg_len);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (data_len != SHA256_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
do {
- /*
- * to_process: the SHA256_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - to_process;
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
- &nx_ctx->op.inlen,
- &data_len,
- (u8 *) sctx->buf,
- NX_DS_SHA256);
+ in_sg = nx_build_sg_list(in_sg,
+ (u8 *) sctx->buf,
+ &data_len,
+ max_sg_len);
- if (rc || data_len != buf_len)
+ if (data_len != buf_len) {
+ rc = -EINVAL;
goto out;
+ }
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+ /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+ * processed in this iteration. This value is restricted
+ * by sg list limits and number of sgs we already used
+ * for leftover data. (see above)
+ * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+ * but because data may not be aligned, we need to account
+ * for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
- &nx_ctx->op.inlen,
- &data_len,
- (u8 *) data,
- NX_DS_SHA256);
+ in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+ &data_len, max_sg_len);
- if (rc)
- goto out;
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
@@ -173,12 +191,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
- int rc;
+ u32 max_sg_len;
+ int rc = 0;
int len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
/* final is represented by continuing the operation and indicating that
* this is not an intermediate operation */
if (sctx->count >= SHA256_BLOCK_SIZE) {
@@ -195,25 +220,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
len = sctx->count & (SHA256_BLOCK_SIZE - 1);
- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
- &nx_ctx->op.inlen,
- &len,
- (u8 *) sctx->buf,
- NX_DS_SHA256);
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
+ &len, max_sg_len);
- if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
+ if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+ rc = -EINVAL;
goto out;
+ }
len = SHA256_DIGEST_SIZE;
- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
- &nx_ctx->op.outlen,
- &len,
- out,
- NX_DS_SHA256);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
- if (rc || len != SHA256_DIGEST_SIZE)
+ if (len != SHA256_DIGEST_SIZE) {
+ rc = -EINVAL;
goto out;
+ }
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (!nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
@@ -268,7 +292,7 @@ struct shash_alg nx_shash_sha256_alg = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha_init,
+ .cra_init = nx_crypto_ctx_sha256_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
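
The rewritten nx-sha256 update path sizes each chunk by how many scatter-gather entries remain free after the leftover buffer, then rounds down to the hash block size, exactly as the in-line comment describes. A worked example of that computation; the limits used here are illustrative, not the NX hardware values:

#include <stdio.h>

#define NX_PAGE_SIZE      4096UL
#define SHA256_BLOCK_SIZE 64UL

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

int main(void)
{
	unsigned long total = 1000000;   /* bytes still to hash               */
	unsigned long max_sg_len = 32;   /* sg entries the engine allows      */
	unsigned long used_sgs = 1;      /* entries already used for leftover */
	unsigned long to_process;

	to_process = min_ul(total, (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
	to_process &= ~(SHA256_BLOCK_SIZE - 1);   /* keep the chunk block aligned */

	printf("process %lu of %lu bytes this iteration\n", to_process, total);
	return 0;
}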
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index b3adf10..b6e183d 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -28,30 +28,29 @@
#include "nx.h"
-static int nx_sha512_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- int len;
- int rc;
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+ err = nx_crypto_ctx_sha_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
- len = SHA512_DIGEST_SIZE;
- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
- &nx_ctx->op.outlen,
- &len,
- (u8 *)sctx->state,
- NX_DS_SHA512);
+ return 0;
+}
- if (rc || len != SHA512_DIGEST_SIZE)
- goto out;
+static int nx_sha512_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ memset(sctx, 0, sizeof *sctx);
sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1);
@@ -63,7 +62,6 @@ static int nx_sha512_init(struct shash_desc *desc)
sctx->state[7] = __cpu_to_be64(SHA512_H7);
sctx->count[0] = 0;
-out:
return 0;
}
@@ -73,10 +71,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *out_sg;
u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
+ u32 max_sg_len;
u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -96,39 +96,61 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ data_len = SHA512_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &data_len, max_sg_len);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (data_len != SHA512_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
do {
- /*
- * to_process: the SHA512_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - leftover;
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
- leftover = total - to_process;
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
- &nx_ctx->op.inlen,
- &data_len,
- (u8 *) sctx->buf,
- NX_DS_SHA512);
+ in_sg = nx_build_sg_list(in_sg,
+ (u8 *) sctx->buf,
+ &data_len, max_sg_len);
- if (rc || data_len != buf_len)
+ if (data_len != buf_len) {
+ rc = -EINVAL;
goto out;
+ }
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+ /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+ * processed in this iteration. This value is restricted
+ * by sg list limits and number of sgs we already used
+ * for leftover data. (see above)
+ * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+ * but because data may not be aligned, we need to account
+ * for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
- &nx_ctx->op.inlen,
- &data_len,
- (u8 *) data,
- NX_DS_SHA512);
+ in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+ &data_len, max_sg_len);
- if (rc || data_len != (to_process - buf_len))
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+
+ if (data_len != (to_process - buf_len)) {
+ rc = -EINVAL;
goto out;
+ }
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
@@ -172,13 +194,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *in_sg, *out_sg;
+ u32 max_sg_len;
u64 count0;
unsigned long irq_flags;
- int rc;
+ int rc = 0;
int len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
/* final is represented by continuing the operation and indicating that
* this is not an intermediate operation */
if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
@@ -200,24 +229,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
csbcpb->cpb.sha512.message_bit_length_lo = count0;
len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
- &nx_ctx->op.inlen,
- &len,
- (u8 *)sctx->buf,
- NX_DS_SHA512);
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
+ max_sg_len);
- if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
+ if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+ rc = -EINVAL;
goto out;
+ }
len = SHA512_DIGEST_SIZE;
- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
- &nx_ctx->op.outlen,
- &len,
- out,
- NX_DS_SHA512);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+ max_sg_len);
- if (rc)
- goto out;
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (!nx_ctx->op.outlen) {
rc = -EINVAL;
@@ -273,7 +298,7 @@ struct shash_alg nx_shash_sha512_alg = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha_init,
+ .cra_init = nx_crypto_ctx_sha512_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 1da6dc5..737d33d 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
* @delta: is the amount we need to crop in order to bound the list.
*
*/
-static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
+static long int trim_sg_list(struct nx_sg *sg,
+ struct nx_sg *end,
+ unsigned int delta,
+ unsigned int *nbytes)
{
+ long int oplen;
+ long int data_back;
+ unsigned int is_delta = delta;
+
while (delta && end > sg) {
struct nx_sg *last = end - 1;
@@ -228,54 +235,20 @@ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int d
delta -= last->len;
}
}
- return (sg - end) * sizeof(struct nx_sg);
-}
-
-/**
- * nx_sha_build_sg_list - walk and build sg list to sha modes
- * using right bounds and limits.
- * @nx_ctx: NX crypto context for the lists we're building
- * @nx_sg: current sg list in or out list
- * @op_len: current op_len to be used in order to build a sg list
- * @nbytes: number or bytes to be processed
- * @offset: buf offset
- * @mode: SHA256 or SHA512
- */
-int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
- struct nx_sg *nx_in_outsg,
- s64 *op_len,
- unsigned int *nbytes,
- u8 *offset,
- u32 mode)
-{
- unsigned int delta = 0;
- unsigned int total = *nbytes;
- struct nx_sg *nx_insg = nx_in_outsg;
- unsigned int max_sg_len;
- max_sg_len = min_t(u64, nx_ctx->ap->sglen,
- nx_driver.of.max_sg_len/sizeof(struct nx_sg));
- max_sg_len = min_t(u64, max_sg_len,
- nx_ctx->ap->databytelen/NX_PAGE_SIZE);
-
- *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
- nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
-
- switch (mode) {
- case NX_DS_SHA256:
- if (*nbytes < total)
- delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
- break;
- case NX_DS_SHA512:
- if (*nbytes < total)
- delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
- break;
- default:
- return -EINVAL;
+ /* There are cases where we need to crop list in order to make it
+	 * a block size multiple, but we also need to align the data. In order
+	 * to do that, we need to calculate how much data we need to put back
+	 * to be processed.
+ */
+ oplen = (sg - end) * sizeof(struct nx_sg);
+ if (is_delta) {
+ data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
+ data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
+ *nbytes -= data_back;
}
- *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
- return 0;
+ return oplen;
}
/**
@@ -330,8 +303,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
/* these lengths should be negative, which will indicate to phyp that
* the input and output parameters are scatterlists, not linear
* buffers */
- nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
- nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
+ nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
+ nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);
return 0;
}
@@ -662,12 +635,14 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
{
+ tfm->crt_aead.reqsize = sizeof(struct nx_ccm_rctx);
return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CCM);
}
int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
{
+ tfm->crt_aead.reqsize = sizeof(struct nx_gcm_rctx);
return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
NX_MODE_AES_GCM);
}
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 6c9ecaa..c3ed837 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -2,6 +2,8 @@
#ifndef __NX_H__
#define __NX_H__
+#include <crypto/ctr.h>
+
#define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
#define NX_VERSION "1.0"
@@ -91,8 +93,11 @@ struct nx_crypto_driver {
#define NX_GCM4106_NONCE_LEN (4)
#define NX_GCM_CTR_OFFSET (12)
-struct nx_gcm_priv {
+struct nx_gcm_rctx {
u8 iv[16];
+};
+
+struct nx_gcm_priv {
u8 iauth_tag[16];
u8 nonce[NX_GCM4106_NONCE_LEN];
};
@@ -100,8 +105,11 @@ struct nx_gcm_priv {
#define NX_CCM_AES_KEY_LEN (16)
#define NX_CCM4309_AES_KEY_LEN (19)
#define NX_CCM4309_NONCE_LEN (3)
-struct nx_ccm_priv {
+struct nx_ccm_rctx {
u8 iv[16];
+};
+
+struct nx_ccm_priv {
u8 b0[16];
u8 iauth_tag[16];
u8 oauth_tag[16];
@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
};
struct nx_ctr_priv {
- u8 iv[16];
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
};
struct nx_crypto_ctx {
@@ -153,8 +161,6 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
-int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
- s64 *, unsigned int *, u8 *, u32);
struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
struct scatterlist *, struct scatterlist *, unsigned int *,
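
The nx.h changes summarise the theme of the nx patches above: state written per request (the IV / counter block) moves out of the shared transform context into a per-request context, so concurrent requests on the same key no longer overwrite each other's counter block, while long-lived material (key, nonce) stays shared. A compact sketch of that split, with illustrative structure names and an RFC 3686-style counter-block setup:

#include <stdio.h>
#include <string.h>

struct tfm_ctx {                  /* one per key, shared by concurrent requests */
	unsigned char key[16];
	unsigned char nonce[4];
};

struct req_ctx {                  /* one per in-flight request */
	unsigned char iv[16];
};

static void start_request(const struct tfm_ctx *tfm, struct req_ctx *req,
			  const unsigned char *req_iv)
{
	/* writing into req->iv cannot race with another request on the same key */
	memcpy(req->iv, tfm->nonce, sizeof(tfm->nonce));
	memcpy(req->iv + 4, req_iv, 8);
	memset(req->iv + 12, 0, 3);
	req->iv[15] = 1;
}

int main(void)
{
	struct tfm_ctx tfm = { .nonce = { 1, 2, 3, 4 } };
	struct req_ctx a, b;
	unsigned char iv_a[8] = { 0 }, iv_b[8] = { 0xff };

	start_request(&tfm, &a, iv_a);
	start_request(&tfm, &b, iv_b);    /* b does not clobber a's counter block */
	printf("request a iv[4]=%d, request b iv[4]=%d\n", a.iv[4], b.iv[4]);
	return 0;
}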
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1dc5b0a..34139a8 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -73,7 +73,8 @@
ICP_QAT_HW_CIPHER_KEY_CONVERT, \
ICP_QAT_HW_CIPHER_DECRYPT)
-static atomic_t active_dev;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
struct qat_alg_buf {
uint32_t len;
@@ -1271,7 +1272,10 @@ static struct crypto_alg qat_algs[] = { {
int qat_algs_register(void)
{
- if (atomic_add_return(1, &active_dev) == 1) {
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs == 1) {
int i;
for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1280,21 +1284,25 @@ int qat_algs_register(void)
CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
- return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
}
- return 0;
+ mutex_unlock(&algs_lock);
+ return ret;
}
int qat_algs_unregister(void)
{
- if (atomic_sub_return(1, &active_dev) == 0)
- return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
- return 0;
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (--active_devs == 0)
+ ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ mutex_unlock(&algs_lock);
+ return ret;
}
int qat_algs_init(void)
{
- atomic_set(&active_dev, 0);
crypto_get_default_rng();
return 0;
}
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index ab300ea..41f9333 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -80,6 +80,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
pagefault_enable();
@@ -97,6 +98,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
} else {
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_encrypt(src, dst, &ctx->enc_key);
pagefault_enable();
}
@@ -111,6 +113,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
} else {
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_decrypt(src, dst, &ctx->dec_key);
pagefault_enable();
}
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 1a559b7..c8e7f65 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -81,6 +81,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
pagefault_enable();
@@ -108,6 +109,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
} else {
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
@@ -143,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
} else {
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 96dbee4..266e708 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -79,6 +79,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
pagefault_enable();
@@ -97,6 +98,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
pagefault_enable();
@@ -127,6 +129,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
(nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv);
pagefault_enable();
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index d0ffe27..917b3f0 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -116,6 +116,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_init_p8(ctx->htable, (const u64 *) key);
pagefault_enable();
@@ -142,6 +143,7 @@ static int p8_ghash_update(struct shash_desc *desc,
GHASH_DIGEST_SIZE - dctx->bytes);
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
GHASH_DIGEST_SIZE);
@@ -154,6 +156,7 @@ static int p8_ghash_update(struct shash_desc *desc,
if (len) {
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
pagefault_enable();
@@ -182,6 +185,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
dctx->buffer[i] = 0;
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
GHASH_DIGEST_SIZE);
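
The vmx hunks above all add the same line; a sketch of the resulting guard pattern follows (assumptions: header locations are indicative, and do_p8_crypto() is a hypothetical stand-in for the aes_p8_*/gcm_*_p8 assembly helpers that touch VSX registers):

#include <linux/uaccess.h>	/* pagefault_disable()/pagefault_enable() */
#include <asm/switch_to.h>	/* enable_kernel_altivec/vsx/fp on powerpc */

void do_p8_crypto(void *ctx);	/* hypothetical VSX-using helper */

static void p8_guarded_op(void *ctx)
{
	pagefault_disable();		/* no faulting while vector state is live */
	enable_kernel_altivec();
	enable_kernel_vsx();		/* the line each hunk above adds */
	enable_kernel_fp();
	do_p8_crypto(ctx);
	pagefault_enable();
}
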
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
index 0a6f899..d8429cb 100644
--- a/drivers/crypto/vmx/ghashp8-ppc.pl
+++ b/drivers/crypto/vmx/ghashp8-ppc.pl
@@ -61,6 +61,12 @@ $code=<<___;
mtspr 256,r0
li r10,0x30
lvx_u $H,0,r4 # load H
+ le?xor r7,r7,r7
+ le?addi r7,r7,0x8 # need a vperm start with 08
+ le?lvsr 5,0,r7
+ le?vspltisb 6,0x0f
+ le?vxor 5,5,6 # set a b-endian mask
+ le?vperm $H,$H,$H,5
vspltisb $xC2,-16 # 0xf0
vspltisb $t0,1 # one
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7992164e..c89a7ab 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -648,16 +648,17 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc->lld.mbr_sa = mem;
desc->lld.mbr_da = atchan->sconfig.dst_addr;
}
- desc->lld.mbr_cfg = atchan->cfg;
- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+ dwidth = at_xdmac_get_dwidth(atchan->cfg);
fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
- ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+ ? dwidth
: AT_XDMAC_CC_DWIDTH_BYTE;
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
| AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
| AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */
| (len >> fixed_dwidth); /* microblock length */
+ desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
+ AT_XDMAC_CC_DWIDTH(fixed_dwidth);
dev_dbg(chan2dev(chan),
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
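
The at_xdmac change above stops copying the channel configuration verbatim and instead rewrites its data-width field to match the width actually used for the microblock length. A sketch of that mask-and-insert idiom, with the field position purely hypothetical (the real AT_XDMAC_CC_DWIDTH_* macros live in the driver):

#include <linux/types.h>

#define CFG_DWIDTH_MASK		(0x3 << 11)		/* hypothetical field position */
#define CFG_DWIDTH(x)		(((u32)(x) & 0x3) << 11)

static u32 cfg_set_dwidth(u32 cfg, unsigned int dwidth)
{
	/* clear the old field, then insert the per-descriptor value */
	return (cfg & ~CFG_DWIDTH_MASK) | CFG_DWIDTH(dwidth);
}
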
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 1022c2e..9e504d3 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
- int r = nr_channels - i - 1;
dwc->chan.device = &dw->dma;
dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* 7 is highest priority & 0 is lowest. */
if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
- dwc->priority = r;
+ dwc->priority = nr_channels - i - 1;
else
dwc->priority = i;
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Hardware configuration */
if (autocfg) {
unsigned int dwc_params;
+ unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
void __iomem *addr = chip->regs + r * sizeof(u32);
dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 340f9e6..3dabc52 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
desc->txd.callback = last->txd.callback;
desc->txd.callback_param = last->txd.callback_param;
}
- last->last = false;
+ desc->last = false;
dma_cookie_assign(&desc->txd);
@@ -2621,6 +2621,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->rqcfg.brst_len = 1;
desc->rqcfg.brst_len = get_burst_len(desc, len);
+ desc->bytes_requested = len;
desc->txd.flags = flags;
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3515b38..711d8ad 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
*/
for (row = 0; row < mci->nr_csrows; row++) {
- struct csrow_info *csi = &mci->csrows[row];
+ struct csrow_info *csi = mci->csrows[row];
/*
* Get the configuration settings for this
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index e29560e..950c87f 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -13,6 +13,7 @@
*/
#include <linux/efi.h>
+#include <linux/sort.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -305,6 +306,44 @@ fail:
*/
#define EFI_RT_VIRTUAL_BASE 0x40000000
+static int cmp_mem_desc(const void *l, const void *r)
+{
+ const efi_memory_desc_t *left = l, *right = r;
+
+ return (left->phys_addr > right->phys_addr) ? 1 : -1;
+}
+
+/*
+ * Returns whether region @left ends exactly where region @right starts,
+ * or false if either argument is NULL.
+ */
+static bool regions_are_adjacent(efi_memory_desc_t *left,
+ efi_memory_desc_t *right)
+{
+ u64 left_end;
+
+ if (left == NULL || right == NULL)
+ return false;
+
+ left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
+
+ return left_end == right->phys_addr;
+}
+
+/*
+ * Returns whether region @left and region @right have compatible memory type
+ * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
+ */
+static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
+ efi_memory_desc_t *right)
+{
+ static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
+ EFI_MEMORY_WC | EFI_MEMORY_UC |
+ EFI_MEMORY_RUNTIME;
+
+ return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
+}
+
/*
* efi_get_virtmap() - create a virtual mapping for the EFI memory map
*
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
int *count)
{
u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
- efi_memory_desc_t *out = runtime_map;
+ efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
int l;
- for (l = 0; l < map_size; l += desc_size) {
- efi_memory_desc_t *in = (void *)memory_map + l;
+ /*
+ * To work around potential issues with the Properties Table feature
+ * introduced in UEFI 2.5, which may split PE/COFF executable images
+ * in memory into several RuntimeServicesCode and RuntimeServicesData
+ * regions, we need to preserve the relative offsets between adjacent
+ * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
+ * The easiest way to find adjacent regions is to sort the memory map
+ * before traversing it.
+ */
+ sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+
+ for (l = 0; l < map_size; l += desc_size, prev = in) {
u64 paddr, size;
+ in = (void *)memory_map + l;
if (!(in->attribute & EFI_MEMORY_RUNTIME))
continue;
+ paddr = in->phys_addr;
+ size = in->num_pages * EFI_PAGE_SIZE;
+
/*
* Make the mapping compatible with 64k pages: this allows
* a 4k page size kernel to kexec a 64k page size kernel and
* vice versa.
*/
- paddr = round_down(in->phys_addr, SZ_64K);
- size = round_up(in->num_pages * EFI_PAGE_SIZE +
- in->phys_addr - paddr, SZ_64K);
-
- /*
- * Avoid wasting memory on PTEs by choosing a virtual base that
- * is compatible with section mappings if this region has the
- * appropriate size and physical alignment. (Sections are 2 MB
- * on 4k granule kernels)
- */
- if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
- efi_virt_base = round_up(efi_virt_base, SZ_2M);
+ if (!regions_are_adjacent(prev, in) ||
+ !regions_have_compatible_memory_type_attrs(prev, in)) {
+
+ paddr = round_down(in->phys_addr, SZ_64K);
+ size += in->phys_addr - paddr;
+
+ /*
+ * Avoid wasting memory on PTEs by choosing a virtual
+ * base that is compatible with section mappings if this
+ * region has the appropriate size and physical
+ * alignment. (Sections are 2 MB on 4k granule kernels)
+ */
+ if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+ efi_virt_base = round_up(efi_virt_base, SZ_2M);
+ else
+ efi_virt_base = round_up(efi_virt_base, SZ_64K);
+ }
in->virt_addr = efi_virt_base + in->phys_addr - paddr;
efi_virt_base += size;
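
For reference, the sort() call the stub change above relies on comes from lib/sort.c; a minimal sketch of using it with a comparator on a hypothetical region array (a NULL swap function makes sort() fall back to its generic byte-wise swap, as in the patch):

#include <linux/sort.h>
#include <linux/types.h>

struct region {
	u64 phys_addr;
	u64 num_pages;
};

static int cmp_region(const void *l, const void *r)
{
	const struct region *a = l, *b = r;

	return (a->phys_addr > b->phys_addr) ? 1 : -1;
}

static void sort_regions(struct region *regs, size_t count)
{
	sort(regs, count, sizeof(*regs), cmp_region, NULL);
}
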
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 60b0c13..aebc459 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6..2a2eb96 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
- cancel_work_sync(&mstb->mgr->work);
-
/*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
@@ -1294,7 +1292,6 @@ retry:
goto retry;
}
DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
- WARN(1, "fail\n");
return -EIO;
}
@@ -1978,6 +1975,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
+ flush_work(&mgr->work);
+ flush_work(&mgr->destroy_connector_work);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
@@ -2731,6 +2730,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
*/
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
+ flush_work(&mgr->work);
flush_work(&mgr->destroy_connector_work);
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
@@ -2766,12 +2766,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
if (msgs[num - 1].flags & I2C_M_RD)
reading = true;
- if (!reading) {
+ if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
ret = -EIO;
goto out;
}
+ memset(&msg, 0, sizeof(msg));
msg.req_type = DP_REMOTE_I2C_READ;
msg.u.i2c_read.num_transactions = num - 1;
msg.u.i2c_read.port_number = port->port_num;
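
The MST changes above replace cancelling the manager's work from the branch destructor with flushing it at suspend and destroy time, so in-flight topology probing finishes before connector state goes away. A sketch of that teardown ordering on a hypothetical owner of two work items:

#include <linux/workqueue.h>

struct my_mgr {
	struct work_struct work;
	struct work_struct destroy_connector_work;
};

static void my_mgr_teardown(struct my_mgr *mgr)
{
	/* wait for queued instances to run to completion, do not just cancel */
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
	/* ...only now is it safe to free payload/connector state... */
}
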
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f861361..4924d381 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -61,6 +61,9 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
struct drm_master *master = file_priv->master;
int ret = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
++file_priv->lock_count;
if (lock->context == DRM_KERNEL_CONTEXT) {
@@ -153,6 +156,9 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
task_pid_nr(current), lock->context);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index eb7e610..92586b0 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device,
char *buf)
{
struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- uint64_t dpms_status;
- int ret;
+ int dpms;
- ret = drm_object_property_get_value(&connector->base,
- dev->mode_config.dpms_property,
- &dpms_status);
- if (ret)
- return 0;
+ dpms = READ_ONCE(connector->dpms);
return snprintf(buf, PAGE_SIZE, "%s\n",
- drm_get_dpms_name((int)dpms_status));
+ drm_get_dpms_name(dpms));
}
static ssize_t enabled_show(struct device *device,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a19d2c7..fb91df1 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -647,15 +647,18 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
pci_disable_device(drm_dev->pdev);
/*
- * During hibernation on some GEN4 platforms the BIOS may try to access
+ * During hibernation on some platforms the BIOS may try to access
* the device even though it's already in D3 and hang the machine. So
* leave the device in D0 on those platforms and hope the BIOS will
- * power down the device properly. Platforms where this was seen:
- * Lenovo Thinkpad X301, X61s
+ * power down the device properly. The issue was seen on multiple old
+ * GENs with different BIOS vendors, so having an explicit blacklist
+ * is impractical; apply the workaround on everything pre-GEN6. The
+ * platforms where the issue was seen:
+ * Lenovo Thinkpad X301, X61s, X60, T60, X41
+ * Fujitsu FSC S7110
+ * Acer Aspire 1830T
*/
- if (!(hibernation &&
- drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
- INTEL_INFO(dev_priv)->gen == 4))
+ if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ae6f7f..7d53d7e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3190,15 +3190,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
- u32 upper = I915_READ(upper_reg); \
- u32 lower = I915_READ(lower_reg); \
- u32 tmp = I915_READ(upper_reg); \
- if (upper != tmp) { \
- upper = tmp; \
- lower = I915_READ(lower_reg); \
- WARN_ON(I915_READ(upper_reg) != upper); \
- } \
- (u64)upper << 32 | lower; })
+ u32 upper, lower, old_upper, loop = 0; \
+ upper = I915_READ(upper_reg); \
+ do { \
+ old_upper = upper; \
+ lower = I915_READ(lower_reg); \
+ upper = I915_READ(upper_reg); \
+ } while (upper != old_upper && loop++ < 2); \
+ (u64)upper << 32 | lower; })
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
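
The I915_READ64_2x32 rewrite above bounds the retry instead of warning. The underlying technique, reading a 64-bit counter exposed as two 32-bit registers and retrying while the upper half changes, sketched with a hypothetical read_reg32() accessor:

#include <linux/types.h>

u32 read_reg32(u32 reg);	/* hypothetical MMIO read */

static u64 read_split_counter(u32 lower_reg, u32 upper_reg)
{
	u32 upper, lower, old_upper;
	int loop = 0;

	upper = read_reg32(upper_reg);
	do {
		old_upper = upper;
		lower = read_reg32(lower_reg);
		upper = read_reg32(upper_reg);	/* re-read to detect a carry */
	} while (upper != old_upper && loop++ < 2);

	return (u64)upper << 32 | lower;
}
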
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3eadb9..a67a351 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1026,6 +1026,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
+ obj->dirty = 1; /* be paranoid */
obj->base.write_domain = obj->base.pending_write_domain;
if (obj->base.write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
@@ -1033,7 +1034,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
- obj->dirty = 1;
i915_gem_request_assign(&obj->last_write_req, req);
intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 6377b22..7ee23d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- args->phys_swizzle_mode = args->swizzle_mode;
+ if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
+ else
+ args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c684085..fadf986 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -41,7 +41,7 @@ find_section(struct bdb_header *bdb, int section_id)
{
u8 *base = (u8 *)bdb;
int index = 0;
- u16 total, current_size;
+ u32 total, current_size;
u8 current_id;
/* skip to first section */
@@ -56,6 +56,10 @@ find_section(struct bdb_header *bdb, int section_id)
current_size = *((u16 *)(base + index));
index += 2;
+ /* The MIPI Sequence Block v3+ has a separate size field. */
+ if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
+ current_size = *((const u32 *)(base + index + 1));
+
if (index + current_size > total)
return NULL;
@@ -845,6 +849,12 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
return;
}
+ /* Fail gracefully for forward incompatible sequence block. */
+ if (sequence->version >= 3) {
+ DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
+ return;
+ }
+
DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
block_size = get_blocksize(sequence);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4021633..338d1de 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13781,6 +13781,24 @@ void intel_modeset_init(struct drm_device *dev)
if (INTEL_INFO(dev)->num_pipes == 0)
return;
+ /*
+ * There may be no VBT; and if the BIOS enabled SSC we can
+ * just keep using it to avoid unnecessary flicker. Whereas if the
+ * BIOS isn't using it, don't assume it will work even if the VBT
+ * indicates as much.
+ */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE);
+
+ if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
+ bios_lvds_use_ssc ? "en" : "dis",
+ dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
+ dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ }
+ }
+
intel_init_display(dev);
intel_init_audio(dev);
@@ -14266,7 +14284,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
int ret;
@@ -14275,16 +14292,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
- /*
- * There may be no VBT; and if the BIOS enabled SSC we can
- * just keep using it to avoid unnecessary flicker. Whereas if the
- * BIOS isn't using it, don't assume it will work even if the VBT
- * indicates as much.
- */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
- DREF_SSC1_ENABLE);
-
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d714a4b..fb2983f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1150,6 +1150,19 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+ /* WaDisableHBR2:skl */
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ return false;
+
+ if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+ (INTEL_INFO(dev)->gen >= 9))
+ return true;
+ else
+ return false;
+}
+
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
@@ -1163,11 +1176,8 @@ intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
*source_rates = default_rates;
- if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
- /* WaDisableHBR2:skl */
- return (DP_LINK_BW_2_7 >> 3) + 1;
- else if (INTEL_INFO(dev)->gen >= 8 ||
- (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+ /* This depends on the fact that 5.4 is the last value in the array */
+ if (intel_dp_source_supports_hbr2(dev))
return (DP_LINK_BW_5_4 >> 3) + 1;
else
return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3783,10 +3793,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
}
}
- /* Training Pattern 3 support, both source and sink */
+ /* Training Pattern 3 support: only the Intel platforms that support HBR2
+ * also support TP3, hence that check is used along with the DPCD check to
+ * ensure TP3 can be enabled.
+ * SKL < B0 is the only exception: because of WaDisableHBR2, TP3 is
+ * supported there but still not enabled.
+ */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
- (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+ intel_dp_source_supports_hbr2(dev)) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("Displayport TPS3 supported\n");
} else
@@ -4676,9 +4691,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_dp_probe_oui(intel_dp);
- if (!intel_dp_probe_mst(intel_dp))
+ if (!intel_dp_probe_mst(intel_dp)) {
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
goto mst_fail;
-
+ }
} else {
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
@@ -4686,10 +4704,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (!intel_dp->is_mst) {
- /*
- * we'll check the link status via the normal hot plug path later -
- * but for short hpds we should check it now
- */
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 5196642..c7a0b8d 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -1036,11 +1036,7 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->unregister = intel_connector_unregister;
/* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
- if (dev_priv->vbt.dsi.config->dual_link) {
- /* XXX: does dual link work on either pipe? */
- intel_encoder->crtc_mask = (1 << PIPE_A);
- intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
- } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
+ if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
intel_encoder->crtc_mask = (1 << PIPE_A);
intel_dsi->ports = (1 << PORT_A);
} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
@@ -1048,6 +1044,9 @@ void intel_dsi_init(struct drm_device *dev)
intel_dsi->ports = (1 << PORT_C);
}
+ if (dev_priv->vbt.dsi.config->dual_link)
+ intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 424e621..9ab7c1c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -848,6 +848,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
+
+ ctx_obj->dirty = true;
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 6751553..567791b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info)
return 0;
}
+static int
+nouveau_fbcon_open(struct fb_info *info, int user)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ int ret = pm_runtime_get_sync(drm->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+ return 0;
+}
+
+static int
+nouveau_fbcon_release(struct fb_info *info, int user)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ pm_runtime_put(drm->dev->dev);
+ return 0;
+}
+
static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
+ .fb_open = nouveau_fbcon_open,
+ .fb_release = nouveau_fbcon_release,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = nouveau_fbcon_fillrect,
@@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = {
static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
+ .fb_open = nouveau_fbcon_open,
+ .fb_release = nouveau_fbcon_release,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = cfb_fillrect,
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 4a0a8b2..52921a8 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -160,9 +160,35 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
*pwidth = head->width;
*pheight = head->height;
drm_mode_probed_add(connector, mode);
+ /* remember the last custom size for mode validation */
+ qdev->monitors_config_width = mode->hdisplay;
+ qdev->monitors_config_height = mode->vdisplay;
return 1;
}
+static struct mode_size {
+ int w;
+ int h;
+} common_modes[] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200}
+};
+
static int qxl_add_common_modes(struct drm_connector *connector,
unsigned pwidth,
unsigned pheight)
@@ -170,29 +196,6 @@ static int qxl_add_common_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
int i;
- struct mode_size {
- int w;
- int h;
- } common_modes[] = {
- { 640, 480},
- { 720, 480},
- { 800, 600},
- { 848, 480},
- {1024, 768},
- {1152, 768},
- {1280, 720},
- {1280, 800},
- {1280, 854},
- {1280, 960},
- {1280, 1024},
- {1440, 900},
- {1400, 1050},
- {1680, 1050},
- {1600, 1200},
- {1920, 1080},
- {1920, 1200}
- };
-
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
60, false, false, false);
@@ -615,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->hdisplay,
adjusted_mode->vdisplay);
- if (qcrtc->index == 0)
+ if (bo->is_primary == false)
recreate_primary = true;
if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
@@ -823,11 +826,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
static int qxl_conn_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *ddev = connector->dev;
+ struct qxl_device *qdev = ddev->dev_private;
+ int i;
+
/* TODO: is this called for user defined modes? (xrandr --add-mode)
* TODO: check that the mode fits in the framebuffer */
- DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
- mode->vdisplay, mode->status);
- return MODE_OK;
+
+ if (qdev->monitors_config_width == mode->hdisplay &&
+ qdev->monitors_config_height == mode->vdisplay)
+ return MODE_OK;
+
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
+ return MODE_OK;
+ }
+ return MODE_BAD;
}
static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
@@ -872,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
drm_connector_to_qxl_output(connector);
struct drm_device *ddev = connector->dev;
struct qxl_device *qdev = ddev->dev_private;
- int connected;
+ bool connected = false;
/* The first monitor is always connected */
- connected = (output->index == 0) ||
- (qdev->client_monitors_config &&
- qdev->client_monitors_config->count > output->index &&
- qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
+ if (!qdev->client_monitors_config) {
+ if (output->index == 0)
+ connected = true;
+ } else
+ connected = qdev->client_monitors_config->count > output->index &&
+ qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
DRM_DEBUG("#%d connected: %d\n", output->index, connected);
if (!connected)
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 7c6cafe..e66143c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -325,6 +325,8 @@ struct qxl_device {
struct work_struct fb_work;
struct drm_property *hotplug_mode_update_property;
+ int monitors_config_width;
+ int monitors_config_height;
};
/* forward declaration for QXL_INFO_IO */
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index b435c85..447dbfa 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -171,8 +171,9 @@ radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return -E2BIG;
tx_buf[0] = msg->address & 0xff;
- tx_buf[1] = msg->address >> 8;
- tx_buf[2] = msg->request << 4;
+ tx_buf[1] = (msg->address >> 8) & 0xff;
+ tx_buf[2] = (msg->request << 4) |
+ ((msg->address >> 16) & 0xf);
tx_buf[3] = msg->size ? (msg->size - 1) : 0;
switch (msg->request & ~DP_AUX_I2C_MOT) {
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dd39f43..b4ff4c1 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
} else
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
}
break;
case DRM_MODE_DPMS_STANDBY:
@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder,
- ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
if (ext_encoder)
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 68fd9fc..44480c1 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->offset;
-
- WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
- AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+ WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
+ AFMT_AUDIO_SRC_SELECT(dig->pin->id));
}
void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
- struct drm_connector *connector, struct drm_display_mode *mode)
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 tmp = 0, offset;
+ u32 tmp = 0;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (connector->latency_present[1])
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
else
tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
}
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
- u8 *sadb, int sad_count)
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset, tmp;
+ u32 tmp;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
/* program the speaker allocation */
- tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp = RREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set HDMI mode */
tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
- u8 *sadb, int sad_count)
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset, tmp;
+ u32 tmp;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
/* program the speaker allocation */
- tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp = RREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set DP mode */
tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
- struct cea_sad *sads, int sad_count)
+ struct cea_sad *sads, int sad_count)
{
- u32 offset;
int i;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
- WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+ WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
}
}
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
}
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto0 for HDMI */
u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
}
void dce6_dp_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto1 for DP */
u32 value = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fa719c5..d77dd14 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
static void radeon_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin, u8 enable_mask)
{
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ int pin_count = 0;
+
+ if (!pin)
+ return;
+
+ if (rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ if (radeon_encoder_is_digital(encoder)) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ dig = radeon_encoder->enc_priv;
+ if (dig->pin == pin)
+ pin_count++;
+ }
+ }
+
+ if ((pin_count > 1) && (enable_mask == 0))
+ return;
+ }
+
if (rdev->audio.funcs->enable)
rdev->audio.funcs->enable(rdev, pin, enable_mask);
}
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct radeon_encoder *radeon_encoder;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct cea_sad *sads;
int sad_count;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
}
BUG_ON(!sads);
- radeon_encoder = to_radeon_encoder(encoder);
-
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
u8 *sadb = NULL;
int sad_count;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
- sad_count = drm_edid_to_speaker_allocation(
- radeon_connector_edid(connector), &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
+ &sadb);
if (sad_count < 0) {
DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
}
static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
- struct radeon_encoder *radeon_encoder;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = 0;
-
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
-
- radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
}
void radeon_audio_detect(struct drm_connector *connector,
+ struct drm_encoder *encoder,
enum drm_connector_status status)
{
- struct radeon_device *rdev;
- struct radeon_encoder *radeon_encoder;
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
- if (!connector || !connector->encoder)
+ if (!radeon_audio_chipset_supported(rdev))
return;
- rdev = connector->encoder->dev->dev_private;
-
- if (!radeon_audio_chipset_supported(rdev))
+ if (!radeon_encoder_is_digital(encoder))
return;
- radeon_encoder = to_radeon_encoder(connector->encoder);
dig = radeon_encoder->enc_priv;
if (status == connector_status_connected) {
- if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- radeon_encoder->audio = NULL;
- return;
- }
-
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
radeon_encoder->audio = rdev->audio.hdmi_funcs;
}
- dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ if (!dig->pin)
+ dig->pin = radeon_audio_get_pin(encoder);
+ radeon_audio_enable(rdev, dig->pin, 0xf);
+ } else {
+ radeon_audio_enable(rdev, dig->pin, 0);
+ dig->pin = NULL;
+ }
} else {
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
- dig->afmt->pin = NULL;
+ radeon_audio_enable(rdev, dig->pin, 0);
+ dig->pin = NULL;
}
}
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
}
static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
int err;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return -ENOENT;
- }
+ if (!connector)
+ return -EINVAL;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
@@ -548,13 +522,15 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
return err;
}
- if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
- if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
- frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
- else
- frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
- } else {
- frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
+ if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
+ if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+ else
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
+ } else {
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ }
}
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
@@ -563,8 +539,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
return err;
}
- if (dig && dig->afmt &&
- radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+ if (dig && dig->afmt && radeon_encoder->audio &&
+ radeon_encoder->audio->set_avi_packet)
radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
buffer, sizeof(buffer));
@@ -745,7 +721,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
}
static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -756,6 +732,9 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
+ if (!connector)
+ return;
+
if (!dig || !dig->afmt)
return;
@@ -774,7 +753,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
}
void radeon_audio_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 8438304..059cc30 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
int radeon_audio_init(struct radeon_device *rdev);
void radeon_audio_detect(struct drm_connector *connector,
- enum drm_connector_status status);
+ struct drm_encoder *encoder,
+ enum drm_connector_status status);
u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
u32 offset, u32 reg);
void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b7..a9b01bc 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
(RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+ u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+ if (hss > lvds->native_mode.hdisplay)
+ hss = (10 - 1) * 8;
+
lvds->native_mode.htotal = lvds->native_mode.hdisplay +
(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
- (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+ hss;
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
(RBIOS8(tmp + 23) * 8);
@@ -3382,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_device == 0x30ae)
return;
+ /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS480 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x280a)
+ return;
+
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e..5a2cafb 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -95,6 +95,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
} else if (radeon_dp_needs_link_train(radeon_connector)) {
+ /* Don't try to start link training before we
+ * have the dpcd */
+ if (!radeon_dp_getdpcd(radeon_connector))
+ return;
+
/* set it to OFF so that drm_helper_connector_dpms()
* won't return immediately since the current state
* is ON at this point.
@@ -1379,8 +1384,16 @@ out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
- radeon_audio_detect(connector, ret);
+ if ((radeon_audio != 0) && radeon_connector->use_digital) {
+ const struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+
+ encoder = connector_funcs->best_encoder(connector);
+ if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+ radeon_connector_get_edid(connector);
+ radeon_audio_detect(connector, encoder, ret);
+ }
+ }
exit:
pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1730,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
- radeon_audio_detect(connector, ret);
+ if ((radeon_audio != 0) && encoder) {
+ radeon_connector_get_edid(connector);
+ radeon_audio_detect(connector, encoder, ret);
+ }
out:
pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d2e9e9e..6743174 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
radeon_fbdev_init(rdev);
drm_kms_helper_poll_init(rdev->ddev);
- if (rdev->pm.dpm_enabled) {
- /* do dpm late init */
- ret = radeon_pm_late_init(rdev);
- if (ret) {
- rdev->pm.dpm_enabled = false;
- DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
- }
- /* set the dpm state for PX since there won't be
- * a modeset to call this.
- */
- radeon_pm_compute_clocks(rdev);
- }
+ /* do pm late init */
+ ret = radeon_pm_late_init(rdev);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index fcbd60b..3b0c229 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -116,8 +116,8 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
AUX_SW_WR_BYTES(bytes));
/* write the data header into the registers */
- /* request, addres, msg size */
- byte = (msg->request << 4);
+ /* request, address, msg size */
+ byte = (msg->request << 4) | ((msg->address >> 16) & 0xf);
WREG32(AUX_SW_DATA + aux_offset[instance],
AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 257b10be..4298613 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -283,6 +283,7 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
drm_reinit_primary_mode_group(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f01c797..9af2d83 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -237,7 +237,6 @@ struct radeon_afmt {
int offset;
bool last_buffer_filled_status;
int id;
- struct r600_audio_pin *pin;
};
struct radeon_mode_info {
@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
uint8_t backlight_level;
int panel_mode;
struct radeon_afmt *afmt;
+ struct r600_audio_pin *pin;
int active_mst_links;
};
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index c1ba83a..948c331 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1331,14 +1331,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
if (rdev->pm.num_power_states > 1) {
- /* where's the best place to put these? */
- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
- if (ret)
- DRM_ERROR("failed to create device file for power profile\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_method);
- if (ret)
- DRM_ERROR("failed to create device file for power method\n");
-
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM!\n");
}
@@ -1396,20 +1388,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
goto dpm_failed;
rdev->pm.dpm_enabled = true;
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- /* XXX: these are noops for dpm but are here for backwards compat */
- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
- if (ret)
- DRM_ERROR("failed to create device file for power profile\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_method);
- if (ret)
- DRM_ERROR("failed to create device file for power method\n");
-
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
}
@@ -1550,9 +1528,44 @@ int radeon_pm_late_init(struct radeon_device *rdev)
int ret = 0;
if (rdev->pm.pm_method == PM_METHOD_DPM) {
- mutex_lock(&rdev->pm.mutex);
- ret = radeon_dpm_late_enable(rdev);
- mutex_unlock(&rdev->pm.mutex);
+ if (rdev->pm.dpm_enabled) {
+ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+ if (ret)
+ DRM_ERROR("failed to create device file for dpm state\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+ if (ret)
+ DRM_ERROR("failed to create device file for dpm state\n");
+ /* XXX: these are noops for dpm but are here for backwards compat */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+
+ mutex_lock(&rdev->pm.mutex);
+ ret = radeon_dpm_late_enable(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ if (ret) {
+ rdev->pm.dpm_enabled = false;
+ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+ } else {
+ /* set the dpm state for PX since there won't be
+ * a modeset to call this.
+ */
+ radeon_pm_compute_clocks(rdev);
+ }
+ }
+ } else {
+ if (rdev->pm.num_power_states > 1) {
+ /* where's the best place to put these? */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+ }
}
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8da..97ad3bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
true, NULL);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
+ mutex_unlock(&dev_priv->binding_mutex);
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
- mutex_unlock(&dev_priv->binding_mutex);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index a2dbbbe..39bf747 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -537,7 +537,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data;
struct hid_device *hdev = dev->hdev;
u8 buf[64];
- __be16 word;
+ __le16 word;
ssize_t count;
size_t read_length = 0;
unsigned int retries;
@@ -554,7 +554,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
if (I2C_SMBUS_READ == read_write)
count = cp2112_read_req(buf, addr, read_length);
else
- count = cp2112_write_req(buf, addr, data->byte, NULL,
+ count = cp2112_write_req(buf, addr, command, NULL,
0);
break;
case I2C_SMBUS_BYTE_DATA:
@@ -569,7 +569,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
break;
case I2C_SMBUS_WORD_DATA:
read_length = 2;
- word = cpu_to_be16(data->word);
+ word = cpu_to_le16(data->word);
if (I2C_SMBUS_READ == read_write)
count = cp2112_write_read_req(buf, addr, read_length,
@@ -582,7 +582,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
size = I2C_SMBUS_WORD_DATA;
read_write = I2C_SMBUS_READ;
read_length = 2;
- word = cpu_to_be16(data->word);
+ word = cpu_to_le16(data->word);
count = cp2112_write_read_req(buf, addr, read_length, command,
(u8 *)&word, 2);
@@ -675,7 +675,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
data->byte = buf[0];
break;
case I2C_SMBUS_WORD_DATA:
- data->word = be16_to_cpup((__be16 *)buf);
+ data->word = le16_to_cpup((__le16 *)buf);
break;
case I2C_SMBUS_BLOCK_DATA:
if (read_length > I2C_SMBUS_BLOCK_MAX) {
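The cp2112 hunk above fixes a byte-order bug: SMBus transfers word data low byte first, so the driver has to use cpu_to_le16()/le16_to_cpup() rather than the big-endian helpers, and the byte-write path has to pass the command code instead of data->byte. As a standalone illustration (not driver code) of the byte layout the little-endian helpers produce on any host:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 16-bit SMBus word low byte first, as cpu_to_le16() arranges
     * it for the wire. */
    static void put_le16(uint8_t *buf, uint16_t word)
    {
            buf[0] = word & 0xff;          /* low byte first */
            buf[1] = (word >> 8) & 0xff;   /* high byte second */
    }

    /* Read the word back, as le16_to_cpup() does. */
    static uint16_t get_le16(const uint8_t *buf)
    {
            return (uint16_t)buf[0] | ((uint16_t)buf[1] << 8);
    }

    int main(void)
    {
            uint8_t buf[2];

            put_le16(buf, 0x1234);
            printf("wire bytes %02x %02x -> word 0x%04x\n",
                   buf[0], buf[1], get_le16(buf));
            return 0;
    }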
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 008e89b..32d52d2 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -462,12 +462,15 @@ out:
static void hidinput_cleanup_battery(struct hid_device *dev)
{
+ const struct power_supply_desc *psy_desc;
+
if (!dev->battery)
return;
+ psy_desc = dev->battery->desc;
power_supply_unregister(dev->battery);
- kfree(dev->battery->desc->name);
- kfree(dev->battery->desc);
+ kfree(psy_desc->name);
+ kfree(psy_desc);
dev->battery = NULL;
}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 9416731..b905d50 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
for (p = drvdata->rdesc;
p <= drvdata->rdesc + drvdata->rsize - 4;) {
if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
- p[3] < sizeof(params)) {
+ p[3] < ARRAY_SIZE(params)) {
v = params[p[3]];
put_unaligned(cpu_to_le32(v), (s32 *)p);
p += 4;
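The uclogic fix above replaces a byte count with an element count: p[3] indexes the params array (32-bit values here), so the bound has to be ARRAY_SIZE(params), not sizeof(params), or indices up to four times too large slip through and read past the array. A minimal standalone illustration of the difference:

    #include <stdio.h>
    #include <stdint.h>

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    int main(void)
    {
            int32_t params[6] = { 0 };

            /* sizeof() counts bytes, ARRAY_SIZE() counts elements; for a
             * 32-bit array the byte count is four times the valid range. */
            printf("sizeof(params)     = %zu\n", sizeof(params));     /* 24 */
            printf("ARRAY_SIZE(params) = %zu\n", ARRAY_SIZE(params)); /* 6  */
            return 0;
    }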
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index bfbe1be..eab5bd6 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -164,7 +164,7 @@ static void hid_io_error(struct hid_device *hid)
if (time_after(jiffies, usbhid->stop_retry)) {
/* Retries failed, so do a port reset unless we lack bandwidth*/
- if (test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
+ if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
&& !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) {
schedule_work(&usbhid->reset_work);
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index bd1c99d..2aaedbe 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -354,6 +354,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
/* NCT6776 specific data */
+/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
+#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
+#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
+
static const s8 NCT6776_ALARM_BITS[] = {
0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
@@ -3528,8 +3532,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3600,8 +3604,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3677,8 +3681,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 6153df73..08ff89d 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
{"nct7904", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, nct7904_id);
static struct i2c_driver nct7904_driver = {
.class = I2C_CLASS_HWMON,
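The one-line nct7904 change adds the missing MODULE_DEVICE_TABLE() export; without it the I2C core's uevents carry a modalias that no module advertises, so the driver is never auto-loaded. The general shape, as a kernel-style sketch with a hypothetical chip name (not compilable outside the kernel tree):

    static const struct i2c_device_id foochip_id[] = {
            { "foochip", 0 },
            { }
    };
    MODULE_DEVICE_TABLE(i2c, foochip_id);   /* roughly MODULE_ALIAS("i2c:foochip") */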
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0a80e4a..3f7d487 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
@@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
}
#ifdef CONFIG_ACPI
+/*
+ * The HCNT/LCNT information coming from ACPI should be the most accurate
+ * for a given platform. However, some systems get it wrong. On such systems

+ * we get better results by calculating those based on the input clock.
+ */
+static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
+ {
+ .ident = "Dell Inspiron 7348",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
+ },
+ },
+ { }
+};
+
static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
u16 *hcnt, u16 *lcnt, u32 *sda_hold)
{
@@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
acpi_handle handle = ACPI_HANDLE(&pdev->dev);
union acpi_object *obj;
+ if (dmi_check_system(dw_i2c_no_acpi_params))
+ return;
+
if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
return;
@@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
- r = i2c_add_numbered_adapter(adap);
- if (r) {
- dev_err(&pdev->dev, "failure adding adapter\n");
- return r;
- }
-
if (dev->pm_runtime_disabled) {
pm_runtime_forbid(&pdev->dev);
} else {
@@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
}
+ r = i2c_add_numbered_adapter(adap);
+ if (r) {
+ dev_err(&pdev->dev, "failure adding adapter\n");
+ pm_runtime_disable(&pdev->dev);
+ return r;
+ }
+
return 0;
}
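Two things happen in the designware hunk: a DMI quirk table lets specific machines (here the Dell Inspiron 7348) fall back to HCNT/LCNT values computed from the input clock when the ACPI-provided ones are wrong, and adapter registration is moved after the runtime-PM setup, since i2c_add_numbered_adapter() can bind clients and start transfers immediately; on failure the probe now also unwinds with pm_runtime_disable(). The same reordering appears in the i2c-rcar and i2c-s3c2410 hunks below. A condensed kernel-style sketch of the ordering (hypothetical names, not compilable on its own):

    static int foo_i2c_probe(struct platform_device *pdev)
    {
            struct i2c_adapter *adap = foo_init_adapter(pdev);  /* hypothetical */
            int ret;

            pm_runtime_enable(&pdev->dev);          /* device usable first */

            ret = i2c_add_numbered_adapter(adap);   /* transfers may start here */
            if (ret) {
                    dev_err(&pdev->dev, "failure adding adapter\n");
                    pm_runtime_disable(&pdev->dev); /* unwind on failure */
                    return ret;
            }
            return 0;
    }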
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 5a84bea..d9d022c 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -688,15 +688,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_enable(dev);
+ platform_set_drvdata(pdev, priv);
+
ret = i2c_add_numbered_adapter(adap);
if (ret < 0) {
dev_err(dev, "reg adap failed: %d\n", ret);
+ pm_runtime_disable(dev);
return ret;
}
- pm_runtime_enable(dev);
- platform_set_drvdata(pdev, priv);
-
dev_info(dev, "probed\n");
return 0;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 297e9c9..4247942 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->adap.nr = i2c->pdata->bus_num;
i2c->adap.dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, i2c);
+
+ pm_runtime_enable(&pdev->dev);
+
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+ pm_runtime_disable(&pdev->dev);
s3c24xx_i2c_deregister_cpufreq(i2c);
clk_unprepare(i2c->clk);
return ret;
}
- platform_set_drvdata(pdev, i2c);
-
- pm_runtime_enable(&pdev->dev);
pm_runtime_enable(&i2c->adap.dev);
dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index b3d0e94..8d24393 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -53,7 +53,8 @@ config ADXRS450
config BMG160
tristate "BOSCH BMG160 Gyro Sensor"
depends on I2C
- select IIO_TRIGGERED_BUFFER if IIO_BUFFER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Bosch BMG160 Tri-axis Gyro Sensor
driver. This driver also supports BMI055 gyroscope.
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 2fd68f2..d42e4fe 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -780,7 +780,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.flags = ADIS16400_HAS_PROD_ID |
ADIS16400_HAS_SERIAL_NUMBER |
ADIS16400_BURST_DIAG_STAT,
- .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(40000), /* 0.04 deg/s */
.accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
.temp_scale_nano = 73860000, /* 0.07386 C */
.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 989605d..b94bfd3 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -110,6 +110,10 @@
struct adis16480_chip_info {
unsigned int num_channels;
const struct iio_chan_spec *channels;
+ unsigned int gyro_max_val;
+ unsigned int gyro_max_scale;
+ unsigned int accel_max_val;
+ unsigned int accel_max_scale;
};
struct adis16480 {
@@ -497,19 +501,21 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
static int adis16480_read_raw(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val, int *val2, long info)
{
+ struct adis16480 *st = iio_priv(indio_dev);
+
switch (info) {
case IIO_CHAN_INFO_RAW:
return adis_single_conversion(indio_dev, chan, 0, val);
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
- *val = 0;
- *val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->chip_info->gyro_max_scale;
+ *val2 = st->chip_info->gyro_max_val;
+ return IIO_VAL_FRACTIONAL;
case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->chip_info->accel_max_scale;
+ *val2 = st->chip_info->accel_max_val;
+ return IIO_VAL_FRACTIONAL;
case IIO_MAGN:
*val = 0;
*val2 = 100; /* 0.0001 gauss */
@@ -674,18 +680,39 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16375] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
+ /*
+ * storing the value in rad/degree and the scale in degree
+ * gives us the result in rad and better precision than
+ * storing the scale directly in rad.
+ */
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22887),
+ .gyro_max_scale = 300,
+ .accel_max_val = IIO_M_S_2_TO_G(21973),
+ .accel_max_scale = 18,
},
[ADIS16480] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(12500),
+ .accel_max_scale = 5,
},
[ADIS16485] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(20000),
+ .accel_max_scale = 5,
},
[ADIS16488] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(22500),
+ .accel_max_scale = 18,
},
};
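The adis16480 table above keeps the full-scale range and the raw count that represents it as two integers and reports the scale with IIO_VAL_FRACTIONAL, so userspace performs a single late division instead of consuming a value pre-rounded to micro units. A rough standalone illustration of the precision difference using the ADIS16480 gyro numbers from the table (the driver itself still rounds slightly through the IIO_RAD_TO_DEGREE() integer macro; build with -lm):

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
            const double pi = 3.141592653589793;

            /* +-450 deg/s full scale over 22500 raw counts. */
            double exact = (450.0 / 22500.0) * pi / 180.0;  /* rad/s per LSB */

            /* Old style: pre-rounded to whole micro-rad/s (INT_PLUS_MICRO). */
            double micro = 349e-6;

            /* New style: integer numerator/denominator, one late division. */
            double fractional = 450.0 / (22500.0 * 180.0 / pi);

            printf("exact      %.12f rad/s\n", exact);
            printf("micro      %.12f (error %.1e)\n", micro, fabs(micro - exact));
            printf("fractional %.12f (error %.1e)\n", fractional,
                   fabs(fractional - exact));
            return 0;
    }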
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index df919f4..7fa280b 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -151,7 +151,7 @@ unsigned int iio_buffer_poll(struct file *filp,
struct iio_buffer *rb = indio_dev->buffer;
if (!indio_dev->info)
- return -ENODEV;
+ return 0;
poll_wait(filp, &rb->pollq, wait);
if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index a99692b..69b8c33 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -84,7 +84,7 @@ static unsigned int iio_event_poll(struct file *filep,
unsigned int events = 0;
if (!indio_dev->info)
- return -ENODEV;
+ return events;
poll_wait(filep, &ev_int->wait, wait);
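Both IIO poll fixes above follow from the same rule: a ->poll() handler returns a mask of ready events, not an errno, so returning -ENODEV once the device has gone away is interpreted as "almost every event is pending", including error bits. Returning 0 simply reports nothing ready. A small userspace illustration of how the negative value decodes as a mask:

    #include <poll.h>
    #include <stdio.h>
    #include <errno.h>

    int main(void)
    {
            /* -ENODEV reinterpreted as an unsigned event mask. */
            unsigned int bogus = (unsigned int)-ENODEV;

            printf("-ENODEV as a poll mask: 0x%x\n", bogus);
            printf("POLLIN set?  %s\n", (bogus & POLLIN)  ? "yes" : "no");
            printf("POLLERR set? %s\n", (bogus & POLLERR) ? "yes" : "no");
            return 0;
    }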
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b716b08..bebf11a 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -85,7 +85,7 @@
*/
struct ib_uverbs_device {
- struct kref ref;
+ atomic_t refcount;
int num_comp_vectors;
struct completion comp;
struct device *dev;
@@ -94,6 +94,7 @@ struct ib_uverbs_device {
struct cdev cdev;
struct rb_root xrcd_tree;
struct mutex xrcd_tree_mutex;
+ struct kobject kobj;
};
struct ib_uverbs_event_file {
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a9f0489..ccc2494 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2244,6 +2244,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
next->send_flags = user_wr->send_flags;
if (is_ud) {
+ if (next->opcode != IB_WR_SEND &&
+ next->opcode != IB_WR_SEND_WITH_IMM) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
file->ucontext);
if (!next->wr.ud.ah) {
@@ -2283,9 +2289,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
user_wr->wr.atomic.compare_add;
next->wr.atomic.swap = user_wr->wr.atomic.swap;
next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
+ case IB_WR_SEND:
break;
default:
- break;
+ ret = -EINVAL;
+ goto out_put;
}
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 88cce9b..09686d4 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -129,14 +129,18 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device);
-static void ib_uverbs_release_dev(struct kref *ref)
+static void ib_uverbs_release_dev(struct kobject *kobj)
{
struct ib_uverbs_device *dev =
- container_of(ref, struct ib_uverbs_device, ref);
+ container_of(kobj, struct ib_uverbs_device, kobj);
- complete(&dev->comp);
+ kfree(dev);
}
+static struct kobj_type ib_uverbs_dev_ktype = {
+ .release = ib_uverbs_release_dev,
+};
+
static void ib_uverbs_release_event_file(struct kref *ref)
{
struct ib_uverbs_event_file *file =
@@ -302,13 +306,19 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
return context->device->dealloc_ucontext(context);
}
+static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
+{
+ complete(&dev->comp);
+}
+
static void ib_uverbs_release_file(struct kref *ref)
{
struct ib_uverbs_file *file =
container_of(ref, struct ib_uverbs_file, ref);
module_put(file->device->ib_dev->owner);
- kref_put(&file->device->ref, ib_uverbs_release_dev);
+ if (atomic_dec_and_test(&file->device->refcount))
+ ib_uverbs_comp_dev(file->device);
kfree(file);
}
@@ -742,9 +752,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
int ret;
dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
- if (dev)
- kref_get(&dev->ref);
- else
+ if (!atomic_inc_not_zero(&dev->refcount))
return -ENXIO;
if (!try_module_get(dev->ib_dev->owner)) {
@@ -765,6 +773,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
mutex_init(&file->mutex);
filp->private_data = file;
+ kobject_get(&dev->kobj);
return nonseekable_open(inode, filp);
@@ -772,13 +781,16 @@ err_module:
module_put(dev->ib_dev->owner);
err:
- kref_put(&dev->ref, ib_uverbs_release_dev);
+ if (atomic_dec_and_test(&dev->refcount))
+ ib_uverbs_comp_dev(dev);
+
return ret;
}
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
+ struct ib_uverbs_device *dev = file->device;
ib_uverbs_cleanup_ucontext(file, file->ucontext);
@@ -786,6 +798,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
kref_put(&file->ref, ib_uverbs_release_file);
+ kobject_put(&dev->kobj);
return 0;
}
@@ -881,10 +894,11 @@ static void ib_uverbs_add_one(struct ib_device *device)
if (!uverbs_dev)
return;
- kref_init(&uverbs_dev->ref);
+ atomic_set(&uverbs_dev->refcount, 1);
init_completion(&uverbs_dev->comp);
uverbs_dev->xrcd_tree = RB_ROOT;
mutex_init(&uverbs_dev->xrcd_tree_mutex);
+ kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
spin_lock(&map_lock);
devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -911,6 +925,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
cdev_init(&uverbs_dev->cdev, NULL);
uverbs_dev->cdev.owner = THIS_MODULE;
uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
+ uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
if (cdev_add(&uverbs_dev->cdev, base, 1))
goto err_cdev;
@@ -941,9 +956,10 @@ err_cdev:
clear_bit(devnum, overflow_map);
err:
- kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+ if (atomic_dec_and_test(&uverbs_dev->refcount))
+ ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
- kfree(uverbs_dev);
+ kobject_put(&uverbs_dev->kobj);
return;
}
@@ -963,9 +979,10 @@ static void ib_uverbs_remove_one(struct ib_device *device)
else
clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
- kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+ if (atomic_dec_and_test(&uverbs_dev->refcount))
+ ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
- kfree(uverbs_dev);
+ kobject_put(&uverbs_dev->kobj);
}
static char *uverbs_devnode(struct device *dev, umode_t *mode)
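The uverbs rework splits two lifetimes that a single kref used to conflate: an atomic open count that, once it reaches zero, signals the completion remove_one() waits on, and a kobject whose release actually frees the structure only after the embedded cdev (now parented to it) is gone. The open path uses the increment-unless-zero pattern so a racing open cannot revive a device that is being torn down. A standalone sketch of that pattern with C11 atomics (illustrative, not the kernel implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Take a reference only while the count is still non-zero. */
    static bool get_ref(atomic_int *refcount)
    {
            int old = atomic_load(refcount);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(refcount, &old, old + 1))
                            return true;    /* reference taken */
            }
            return false;                   /* object already dying */
    }

    /* Drop a reference; true means this was the last one. */
    static bool put_ref(atomic_int *refcount)
    {
            return atomic_fetch_sub(refcount, 1) == 1;
    }

    int main(void)
    {
            atomic_int refcount = 1;        /* creation reference */

            printf("open:       %s\n", get_ref(&refcount) ? "ok" : "ENXIO");
            printf("close:      last ref? %s\n", put_ref(&refcount) ? "yes" : "no");
            printf("remove_one: last ref? %s\n", put_ref(&refcount) ? "yes" : "no");
            printf("late open:  %s\n", get_ref(&refcount) ? "ok" : "ENXIO");
            return 0;
    }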
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index f50a546..33fdd50 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -148,9 +148,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
enum rdma_link_layer ll;
memset(ah_attr, 0, sizeof *ah_attr);
- ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
+ if (ll == IB_LINK_LAYER_ETHERNET)
+ ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
+ else
+ ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+
ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
if (ah->av.ib.stat_rate)
ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 0176caa..2857ed8 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -629,7 +629,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
* simulated FLUSH_ERR completions
*/
list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
- mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
+ mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
if (*npolled >= num_entries)
goto out;
}
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index ed327e6..a0559a8 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -206,15 +206,16 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
struct mlx4_ib_dev *dev = ctx->dev;
struct ib_ah_attr ah_attr;
+ unsigned long flags;
- spin_lock(&dev->sm_lock);
+ spin_lock_irqsave(&dev->sm_lock, flags);
if (!dev->sm_ah[ctx->port - 1]) {
/* port is not yet Active, sm_ah not ready */
- spin_unlock(&dev->sm_lock);
+ spin_unlock_irqrestore(&dev->sm_lock, flags);
return -EAGAIN;
}
mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
- spin_unlock(&dev->sm_lock);
+ spin_unlock_irqrestore(&dev->sm_lock, flags);
return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
&ah_attr, NULL, mad);
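The mcg.c change converts the sm_lock users to the IRQ-safe variants: when a spinlock can also be taken from interrupt context, every acquisition must disable local interrupts, otherwise an interrupt that fires on the same CPU while the lock is held deadlocks spinning on it. A generic kernel-style sketch of the pattern (hypothetical names, not the driver code):

    static void foo_account(struct foo_dev *dev, u64 delta)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);   /* lock also taken in the IRQ handler */
            dev->counter += delta;
            spin_unlock_irqrestore(&dev->lock, flags);
    }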
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 6797108..69fb5ba 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -640,6 +640,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
struct mlx4_port *p;
int i;
int ret;
+ int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
+ IB_LINK_LAYER_ETHERNET;
p = kzalloc(sizeof *p, GFP_KERNEL);
if (!p)
@@ -657,7 +659,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
p->pkey_group.name = "pkey_idx";
p->pkey_group.attrs =
- alloc_group_attrs(show_port_pkey, store_port_pkey,
+ alloc_group_attrs(show_port_pkey,
+ is_eth ? NULL : store_port_pkey,
dev->dev->caps.pkey_table_len[port_num]);
if (!p->pkey_group.attrs) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 71c5935..0c52f078 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1119,19 +1119,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr;
error:
- /*
- * Destroy the umem *before* destroying the MR, to ensure we
- * will not have any in-flight notifiers when destroying the
- * MR.
- *
- * As the MR is completely invalid to begin with, and this
- * error path is only taken if we can't push the mr entry into
- * the pagefault tree, this is safe.
- */
-
ib_umem_release(umem);
- /* Kill the MR, and return an error code. */
- clean_mr(mr);
return ERR_PTR(err);
}
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index ad843c7..5afaa21 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
* unrestricted LKEY.
*/
rkt->gen++;
+ /*
+ * bits are capped in qib_verbs.c to ensure enough bits
+ * for the generation number
+ */
mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
<< 8);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 4a35998..9dd5d9a 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -40,6 +40,7 @@
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
+#include <linux/vmalloc.h>
#include "qib.h"
#include "qib_common.h"
@@ -2089,10 +2090,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
* the LKEY). The remaining bits act as a generation number or tag.
*/
spin_lock_init(&dev->lk_table.lock);
+ /* ensure generation is at least 4 bits; see keys.c */
+ if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
+ qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
+ ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
+ ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
+ }
dev->lk_table.max = 1 << ib_qib_lkey_table_size;
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
dev->lk_table.table = (struct qib_mregion __rcu **)
- __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+ vmalloc(lk_tab_size);
if (dev->lk_table.table == NULL) {
ret = -ENOMEM;
goto err_lk;
@@ -2265,7 +2272,7 @@ err_tx:
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
- free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+ vfree(dev->lk_table.table);
err_lk:
kfree(dev->qp_table);
err_qpt:
@@ -2319,8 +2326,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- free_pages((unsigned long) dev->lk_table.table,
- get_order(lk_tab_size));
+ vfree(dev->lk_table.table);
kfree(dev->qp_table);
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index bfc8948..44ca28c 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -647,6 +647,8 @@ struct qib_qpn_table {
struct qpn_map map[QPNMAP_ENTRIES];
};
+#define MAX_LKEY_TABLE_BITS 23
+
struct qib_lkey_table {
spinlock_t lock; /* protect changes in this struct */
u32 next; /* next unused index (speeds search) */
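The qib changes cap ib_qib_lkey_table_size at MAX_LKEY_TABLE_BITS (23) so enough high bits remain for the generation tag, and switch the table to vmalloc()/vfree(): at the larger sizes now permitted the table needs tens of megabytes, far beyond what the page allocator can hand out as physically contiguous pages (typically around 4 MiB), while vmalloc only needs virtually contiguous space. A small standalone calculation of the table size, assuming 8-byte entries as on 64-bit kernels:

    #include <stdio.h>

    int main(void)
    {
            for (unsigned int bits = 16; bits <= 23; bits++) {
                    unsigned long entries = 1UL << bits;
                    unsigned long kib = entries * 8 >> 10;

                    printf("%2u bits -> %8lu entries, %6lu KiB\n",
                           bits, entries, kib);
            }
            return 0;
    }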
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 6a594aa..c933d88 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -201,6 +201,7 @@ iser_initialize_task_headers(struct iscsi_task *task,
goto out;
}
+ tx_desc->mapped = true;
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
@@ -360,16 +361,19 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_tx_desc *tx_desc = &iser_task->desc;
- struct iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+ struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
/* DEVICE_REMOVAL event might have already released the device */
if (!device)
return;
- ib_dma_unmap_single(device->ib_device,
- tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ if (likely(tx_desc->mapped)) {
+ ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ tx_desc->mapped = false;
+ }
/* mgmt tasks do not need special cleanup */
if (!task->sc)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 262ba1f..d2b6caf 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -270,6 +270,7 @@ enum iser_desc_type {
* sg[1] optionally points to either of immediate data
* unsolicited data-out or control
* @num_sge: number sges used on this TX task
+ * @mapped: Is the task header mapped
*/
struct iser_tx_desc {
struct iser_hdr iser_header;
@@ -278,6 +279,7 @@ struct iser_tx_desc {
u64 dma_addr;
struct ib_sge tx_sg[2];
int num_sge;
+ bool mapped;
};
#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3e2118e..0a47f42 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -454,7 +454,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
unsigned long buf_offset;
unsigned long data_seg_len;
uint32_t itt;
- int err = 0;
+ int err;
struct ib_sge *tx_dsg;
itt = (__force uint32_t)hdr->itt;
@@ -475,7 +475,9 @@ int iser_send_data_out(struct iscsi_conn *conn,
memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
/* build the tx desc */
- iser_initialize_task_headers(task, tx_desc);
+ err = iser_initialize_task_headers(task, tx_desc);
+ if (err)
+ goto send_data_out_error;
mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
tx_dsg = &tx_desc->tx_sg[1];
@@ -502,7 +504,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
send_data_out_error:
kmem_cache_free(ig.desc_cache, tx_desc);
- iser_err("conn %p failed err %d\n",conn, err);
+ iser_err("conn %p failed err %d\n", conn, err);
return err;
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 575a072..c32a934 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2996,9 +2996,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
- int ret;
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ int ret = 0;
switch (state) {
+ case ISTATE_REMOVE:
+ spin_lock_bh(&conn->cmd_lock);
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+ isert_put_cmd(isert_cmd, true);
+ break;
case ISTATE_SEND_NOPIN_WANT_RESPONSE:
ret = isert_put_nopin(cmd, conn, false);
break;
@@ -3363,6 +3370,41 @@ isert_wait4flush(struct isert_conn *isert_conn)
wait_for_completion(&isert_conn->wait_comp_err);
}
+/**
+ * isert_put_unsol_pending_cmds() - Drop commands waiting for
+ * unsolicited dataout
+ * @conn: iscsi connection
+ *
+ * We might still have commands that are waiting for unsolicited
+ * dataout messages. We must put the extra reference on those
+ * before blocking on target_wait_for_session_cmds()
+ */
+static void
+isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd, *tmp;
+ static LIST_HEAD(drop_cmd_list);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
+ if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
+ (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
+ (cmd->write_data_done < cmd->se_cmd.data_length))
+ list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
+ list_del_init(&cmd->i_conn_node);
+ if (cmd->i_state != ISTATE_REMOVE) {
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+
+ isert_info("conn %p dropping cmd %p\n", conn, cmd);
+ isert_put_cmd(isert_cmd, true);
+ }
+ }
+}
+
static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
@@ -3381,8 +3423,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->mutex);
- isert_wait4cmds(conn);
isert_wait4flush(isert_conn);
+ isert_put_unsol_pending_cmds(conn);
+ isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 75c01b2..025f931 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2761,6 +2761,13 @@ static int srp_sdev_count(struct Scsi_Host *host)
return c;
}
+/*
+ * Return values:
+ * < 0 upon failure. Caller is responsible for SRP target port cleanup.
+ * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
+ * removal has been scheduled.
+ * 0 and target->state != SRP_TARGET_REMOVED upon success.
+ */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
struct srp_rport_identifiers ids;
@@ -3266,7 +3273,7 @@ static ssize_t srp_create_target(struct device *dev,
srp_free_ch_ib(target, ch);
srp_free_req_data(target, ch);
target->ch_count = ch - target->ch;
- break;
+ goto connected;
}
}
@@ -3276,6 +3283,7 @@ static ssize_t srp_create_target(struct device *dev,
node_idx++;
}
+connected:
target->scsi_host->nr_hw_queues = target->ch_count;
ret = srp_add_target(host, target);
@@ -3298,6 +3306,8 @@ out:
mutex_unlock(&host->add_target_mutex);
scsi_host_put(target->scsi_host);
+ if (ret < 0)
+ scsi_host_put(target->scsi_host);
return ret;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index a18f41b..2ae522f 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -290,19 +290,14 @@ static int evdev_flush(struct file *file, fl_owner_t id)
{
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
- int retval;
- retval = mutex_lock_interruptible(&evdev->mutex);
- if (retval)
- return retval;
+ mutex_lock(&evdev->mutex);
- if (!evdev->exist || client->revoked)
- retval = -ENODEV;
- else
- retval = input_flush_device(&evdev->handle, file);
+ if (evdev->exist && !client->revoked)
+ input_flush_device(&evdev->handle, file);
mutex_unlock(&evdev->mutex);
- return retval;
+ return 0;
}
static void evdev_free(struct device *dev)
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 097d721..c6dc644 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
* convert it to descriptor.
*/
if (!button->gpiod && gpio_is_valid(button->gpio)) {
- unsigned flags = 0;
+ unsigned flags = GPIOF_IN;
if (button->active_low)
flags |= GPIOF_ACTIVE_LOW;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index a353b7d..bc7eed6 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -20,6 +20,7 @@
#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/libps2.h>
+#include <linux/dmi.h>
#include "psmouse.h"
#include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
6-byte ALPS packet */
+#define ALPS_DELL 0x100 /* device is a Dell laptop */
#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
return;
}
- /* Non interleaved V2 dualpoint has separate stick button bits */
+ /* Dell non interleaved V2 dualpoint has separate stick button bits */
if (priv->proto_version == ALPS_PROTO_V2 &&
- priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
+ priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
left |= packet[0] & 1;
right |= packet[0] & 2;
middle |= packet[0] & 4;
@@ -2542,6 +2544,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->byte0 = protocol->byte0;
priv->mask0 = protocol->mask0;
priv->flags = protocol->flags;
+ if (dmi_name_in_vendors("Dell"))
+ priv->flags |= ALPS_DELL;
priv->x_max = 2000;
priv->y_max = 1400;
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index abeedc9..2570f2a2 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -41,7 +41,6 @@ struct pamu_isr_data {
static struct paace *ppaact;
static struct paace *spaact;
-static struct ome *omt __initdata;
/*
* Table for matching compatible strings, for device tree
@@ -50,7 +49,7 @@ static struct ome *omt __initdata;
* SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
* string would be used.
*/
-static const struct of_device_id guts_device_ids[] __initconst = {
+static const struct of_device_id guts_device_ids[] = {
{ .compatible = "fsl,qoriq-device-config-1.0", },
{ .compatible = "fsl,qoriq-device-config-2.0", },
{}
@@ -599,7 +598,7 @@ found_cpu_node:
* Memory accesses to QMAN and BMAN private memory need not be coherent, so
* clear the PAACE entry coherency attribute for them.
*/
-static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
+static void setup_qbman_paace(struct paace *ppaace, int paace_type)
{
switch (paace_type) {
case QMAN_PAACE:
@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
* this table to translate device transaction to appropriate corenet
* transaction.
*/
-static void __init setup_omt(struct ome *omt)
+static void setup_omt(struct ome *omt)
{
struct ome *ome;
@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt)
* Get the maximum number of PAACT table entries
* and subwindows supported by PAMU
*/
-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
+static void get_pamu_cap_values(unsigned long pamu_reg_base)
{
u32 pc_val;
@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
}
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
- phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
- phys_addr_t omt_phys)
+static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+ phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+ phys_addr_t omt_phys)
{
u32 *pc;
struct pamu_mmap_regs *pamu_regs;
@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu
}
/* Enable all device LIODNS */
-static void __init setup_liodns(void)
+static void setup_liodns(void)
{
int i, len;
struct paace *ppaace;
@@ -846,7 +845,7 @@ struct ccsr_law {
/*
* Create a coherence subdomain for a given memory block.
*/
-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
{
struct device_node *np;
const __be32 *iprop;
@@ -988,7 +987,7 @@ error:
static const struct {
u32 svr;
u32 port_id;
-} port_id_map[] __initconst = {
+} port_id_map[] = {
{(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
{(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
{(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
@@ -1006,7 +1005,7 @@ static const struct {
#define SVR_SECURITY 0x80000 /* The Security (E) bit */
-static int __init fsl_pamu_probe(struct platform_device *pdev)
+static int fsl_pamu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
void __iomem *pamu_regs = NULL;
@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
int irq;
phys_addr_t ppaact_phys;
phys_addr_t spaact_phys;
+ struct ome *omt;
phys_addr_t omt_phys;
size_t mem_size = 0;
unsigned int order = 0;
@@ -1200,7 +1200,7 @@ error:
return ret;
}
-static struct platform_driver fsl_of_pamu_driver __initdata = {
+static struct platform_driver fsl_of_pamu_driver = {
.driver = {
.name = "fsl-of-pamu",
},
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c87c4b1..c234279 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -681,6 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
struct context_entry *context;
u64 *entry;
+ entry = &root->lo;
if (ecs_enabled(iommu)) {
if (devfn >= 0x80) {
devfn -= 0x80;
@@ -688,7 +689,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
}
devfn *= 2;
}
- entry = &root->lo;
if (*entry & 1)
context = phys_to_virt(*entry & VTD_PAGE_MASK);
else {
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4e46021..e29d5d7 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -200,6 +200,10 @@ typedef u64 arm_lpae_iopte;
static bool selftest_running = false;
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_lpae_iopte *ptep);
+
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
unsigned long iova, phys_addr_t paddr,
arm_lpae_iopte prot, int lvl,
@@ -207,10 +211,21 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
{
arm_lpae_iopte pte = prot;
- /* We require an unmap first */
if (iopte_leaf(*ptep, lvl)) {
+ /* We require an unmap first */
WARN_ON(!selftest_running);
return -EEXIST;
+ } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
+ /*
+ * We need to unmap and free the old table before
+ * overwriting it with a block entry.
+ */
+ arm_lpae_iopte *tblp;
+ size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
+ if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+ return -EINVAL;
}
if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index c845d99..e0ff5f4 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -26,6 +26,7 @@ struct tegra_smmu {
const struct tegra_smmu_soc *soc;
unsigned long pfn_mask;
+ unsigned long tlb_mask;
unsigned long *asids;
struct mutex lock;
@@ -65,7 +66,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
-#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
+#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
+ ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
@@ -716,6 +718,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
mc->soc->num_address_bits, smmu->pfn_mask);
+ smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+ dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
+ smmu->tlb_mask);
value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
@@ -725,7 +730,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu_writel(smmu, value, SMMU_PTC_CONFIG);
value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
- SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
+ SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
if (soc->supports_round_robin_arbitration)
value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index a2e8c3f..c2c578f 100644
--- a/drivers/irqchip/irq-atmel-aic5.c